gimple.h: Remove all includes.
[gcc.git] / gcc / tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003-2013 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "stor-layout.h"
29 #include "basic-block.h"
30 #include "gimple-pretty-print.h"
31 #include "tree-ssa-alias.h"
32 #include "internal-fn.h"
33 #include "gimple-expr.h"
34 #include "is-a.h"
35 #include "gimple.h"
36 #include "gimplify.h"
37 #include "gimple-iterator.h"
38 #include "gimplify-me.h"
39 #include "gimple-ssa.h"
40 #include "tree-phinodes.h"
41 #include "ssa-iterators.h"
42 #include "stringpool.h"
43 #include "tree-ssanames.h"
44 #include "tree-ssa-loop-ivopts.h"
45 #include "tree-ssa-loop-manip.h"
46 #include "tree-ssa-loop-niter.h"
47 #include "tree-pass.h"
48 #include "cfgloop.h"
49 #include "expr.h"
50 #include "recog.h"
51 #include "optabs.h"
52 #include "params.h"
53 #include "diagnostic-core.h"
54 #include "tree-chrec.h"
55 #include "tree-scalar-evolution.h"
56 #include "tree-vectorizer.h"
57 #include "target.h"
58
59 /* Loop Vectorization Pass.
60
61 This pass tries to vectorize loops.
62
63 For example, the vectorizer transforms the following simple loop:
64
65 short a[N]; short b[N]; short c[N]; int i;
66
67 for (i=0; i<N; i++){
68 a[i] = b[i] + c[i];
69 }
70
71 as if it was manually vectorized by rewriting the source code into:
72
73 typedef int __attribute__((mode(V8HI))) v8hi;
74 short a[N]; short b[N]; short c[N]; int i;
75 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
76 v8hi va, vb, vc;
77
78 for (i=0; i<N/8; i++){
79 vb = pb[i];
80 vc = pc[i];
81 va = vb + vc;
82 pa[i] = va;
83 }
84
85 The main entry to this pass is vectorize_loops(), in which
86 the vectorizer applies a set of analyses on a given set of loops,
87 followed by the actual vectorization transformation for the loops that
88 had successfully passed the analysis phase.
89 Throughout this pass we make a distinction between two types of
90 data: scalars (which are represented by SSA_NAMES), and memory references
91 ("data-refs"). These two types of data require different handling both
92 during analysis and transformation. The types of data-refs that the
93 vectorizer currently supports are ARRAY_REFS which base is an array DECL
94 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
95 accesses are required to have a simple (consecutive) access pattern.
96
97 Analysis phase:
98 ===============
99 The driver for the analysis phase is vect_analyze_loop().
100 It applies a set of analyses, some of which rely on the scalar evolution
101 analyzer (scev) developed by Sebastian Pop.
102
103 During the analysis phase the vectorizer records some information
104 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
105 loop, as well as general information about the loop as a whole, which is
106 recorded in a "loop_vec_info" struct attached to each loop.
107
108 Transformation phase:
109 =====================
110 The loop transformation phase scans all the stmts in the loop, and
111 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
112 the loop that needs to be vectorized. It inserts the vector code sequence
113 just before the scalar stmt S, and records a pointer to the vector code
114 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
115 attached to S). This pointer will be used for the vectorization of following
116 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
117 otherwise, we rely on dead code elimination for removing it.
118
119 For example, say stmt S1 was vectorized into stmt VS1:
120
121 VS1: vb = px[i];
122 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
123 S2: a = b;
124
125 To vectorize stmt S2, the vectorizer first finds the stmt that defines
126 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
127 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
128 resulting sequence would be:
129
130 VS1: vb = px[i];
131 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
132 VS2: va = vb;
133 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
134
135 Operands that are not SSA_NAMEs, are data-refs that appear in
136 load/store operations (like 'x[i]' in S1), and are handled differently.
137
138 Target modeling:
139 =================
140 Currently the only target specific information that is used is the
141 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
142 Targets that can support different sizes of vectors, for now will need
143 to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
144 flexibility will be added in the future.
145
146 Since we only vectorize operations which vector form can be
147 expressed using existing tree codes, to verify that an operation is
148 supported, the vectorizer checks the relevant optab at the relevant
149 machine_mode (e.g, optab_handler (add_optab, V8HImode)). If
150 the value found is CODE_FOR_nothing, then there's no target support, and
151 we can't vectorize the stmt.
152
153 For additional information on this project see:
154 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
155 */
156
157 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
158
159 /* Function vect_determine_vectorization_factor
160
161 Determine the vectorization factor (VF). VF is the number of data elements
162 that are operated upon in parallel in a single iteration of the vectorized
163 loop. For example, when vectorizing a loop that operates on 4byte elements,
164 on a target with vector size (VS) 16byte, the VF is set to 4, since 4
165 elements can fit in a single vector register.
166
167 We currently support vectorization of loops in which all types operated upon
168 are of the same size. Therefore this function currently sets VF according to
169 the size of the types operated upon, and fails if there are multiple sizes
170 in the loop.
171
172 VF is also the factor by which the loop iterations are strip-mined, e.g.:
173 original loop:
174 for (i=0; i<N; i++){
175 a[i] = b[i] + c[i];
176 }
177
178 vectorized loop:
179 for (i=0; i<N; i+=VF){
180 a[i:VF] = b[i:VF] + c[i:VF];
181 }
182 */
183
184 static bool
185 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
186 {
187 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
188 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
189 int nbbs = loop->num_nodes;
190 gimple_stmt_iterator si;
191 unsigned int vectorization_factor = 0;
192 tree scalar_type;
193 gimple phi;
194 tree vectype;
195 unsigned int nunits;
196 stmt_vec_info stmt_info;
197 int i;
198 HOST_WIDE_INT dummy;
199 gimple stmt, pattern_stmt = NULL;
200 gimple_seq pattern_def_seq = NULL;
201 gimple_stmt_iterator pattern_def_si = gsi_none ();
202 bool analyze_pattern_stmt = false;
203
204 if (dump_enabled_p ())
205 dump_printf_loc (MSG_NOTE, vect_location,
206 "=== vect_determine_vectorization_factor ===\n");
207
208 for (i = 0; i < nbbs; i++)
209 {
210 basic_block bb = bbs[i];
211
212 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
213 {
214 phi = gsi_stmt (si);
215 stmt_info = vinfo_for_stmt (phi);
216 if (dump_enabled_p ())
217 {
218 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
219 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
220 dump_printf (MSG_NOTE, "\n");
221 }
222
223 gcc_assert (stmt_info);
224
225 if (STMT_VINFO_RELEVANT_P (stmt_info))
226 {
227 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
228 scalar_type = TREE_TYPE (PHI_RESULT (phi));
229
230 if (dump_enabled_p ())
231 {
232 dump_printf_loc (MSG_NOTE, vect_location,
233 "get vectype for scalar type: ");
234 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
235 dump_printf (MSG_NOTE, "\n");
236 }
237
238 vectype = get_vectype_for_scalar_type (scalar_type);
239 if (!vectype)
240 {
241 if (dump_enabled_p ())
242 {
243 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
244 "not vectorized: unsupported "
245 "data-type ");
246 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
247 scalar_type);
248 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
249 }
250 return false;
251 }
252 STMT_VINFO_VECTYPE (stmt_info) = vectype;
253
254 if (dump_enabled_p ())
255 {
256 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
257 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
258 dump_printf (MSG_NOTE, "\n");
259 }
260
261 nunits = TYPE_VECTOR_SUBPARTS (vectype);
262 if (dump_enabled_p ())
263 dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
264 nunits);
265
266 if (!vectorization_factor
267 || (nunits > vectorization_factor))
268 vectorization_factor = nunits;
269 }
270 }
271
272 for (si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;)
273 {
274 tree vf_vectype;
275
276 if (analyze_pattern_stmt)
277 stmt = pattern_stmt;
278 else
279 stmt = gsi_stmt (si);
280
281 stmt_info = vinfo_for_stmt (stmt);
282
283 if (dump_enabled_p ())
284 {
285 dump_printf_loc (MSG_NOTE, vect_location,
286 "==> examining statement: ");
287 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
288 dump_printf (MSG_NOTE, "\n");
289 }
290
291 gcc_assert (stmt_info);
292
293 /* Skip stmts which do not need to be vectorized. */
294 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
295 && !STMT_VINFO_LIVE_P (stmt_info))
296 || gimple_clobber_p (stmt))
297 {
298 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
299 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
300 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
301 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
302 {
303 stmt = pattern_stmt;
304 stmt_info = vinfo_for_stmt (pattern_stmt);
305 if (dump_enabled_p ())
306 {
307 dump_printf_loc (MSG_NOTE, vect_location,
308 "==> examining pattern statement: ");
309 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
310 dump_printf (MSG_NOTE, "\n");
311 }
312 }
313 else
314 {
315 if (dump_enabled_p ())
316 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
317 gsi_next (&si);
318 continue;
319 }
320 }
321 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
322 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
323 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
324 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
325 analyze_pattern_stmt = true;
326
327 /* If a pattern statement has def stmts, analyze them too. */
328 if (is_pattern_stmt_p (stmt_info))
329 {
330 if (pattern_def_seq == NULL)
331 {
332 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
333 pattern_def_si = gsi_start (pattern_def_seq);
334 }
335 else if (!gsi_end_p (pattern_def_si))
336 gsi_next (&pattern_def_si);
337 if (pattern_def_seq != NULL)
338 {
339 gimple pattern_def_stmt = NULL;
340 stmt_vec_info pattern_def_stmt_info = NULL;
341
342 while (!gsi_end_p (pattern_def_si))
343 {
344 pattern_def_stmt = gsi_stmt (pattern_def_si);
345 pattern_def_stmt_info
346 = vinfo_for_stmt (pattern_def_stmt);
347 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
348 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
349 break;
350 gsi_next (&pattern_def_si);
351 }
352
353 if (!gsi_end_p (pattern_def_si))
354 {
355 if (dump_enabled_p ())
356 {
357 dump_printf_loc (MSG_NOTE, vect_location,
358 "==> examining pattern def stmt: ");
359 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
360 pattern_def_stmt, 0);
361 dump_printf (MSG_NOTE, "\n");
362 }
363
364 stmt = pattern_def_stmt;
365 stmt_info = pattern_def_stmt_info;
366 }
367 else
368 {
369 pattern_def_si = gsi_none ();
370 analyze_pattern_stmt = false;
371 }
372 }
373 else
374 analyze_pattern_stmt = false;
375 }
376
377 if (gimple_get_lhs (stmt) == NULL_TREE)
378 {
379 if (dump_enabled_p ())
380 {
381 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
382 "not vectorized: irregular stmt.");
383 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
384 0);
385 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
386 }
387 return false;
388 }
389
390 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
391 {
392 if (dump_enabled_p ())
393 {
394 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
395 "not vectorized: vector stmt in loop:");
396 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
397 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
398 }
399 return false;
400 }
401
402 if (STMT_VINFO_VECTYPE (stmt_info))
403 {
404 /* The only case when a vectype had been already set is for stmts
405 that contain a dataref, or for "pattern-stmts" (stmts
406 generated by the vectorizer to represent/replace a certain
407 idiom). */
408 gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
409 || is_pattern_stmt_p (stmt_info)
410 || !gsi_end_p (pattern_def_si));
411 vectype = STMT_VINFO_VECTYPE (stmt_info);
412 }
413 else
414 {
415 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
416 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
417 if (dump_enabled_p ())
418 {
419 dump_printf_loc (MSG_NOTE, vect_location,
420 "get vectype for scalar type: ");
421 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
422 dump_printf (MSG_NOTE, "\n");
423 }
424 vectype = get_vectype_for_scalar_type (scalar_type);
425 if (!vectype)
426 {
427 if (dump_enabled_p ())
428 {
429 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
430 "not vectorized: unsupported "
431 "data-type ");
432 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
433 scalar_type);
434 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
435 }
436 return false;
437 }
438
439 STMT_VINFO_VECTYPE (stmt_info) = vectype;
440
441 if (dump_enabled_p ())
442 {
443 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
444 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
445 dump_printf (MSG_NOTE, "\n");
446 }
447 }
448
449 /* The vectorization factor is according to the smallest
450 scalar type (or the largest vector size, but we only
451 support one vector size per loop). */
452 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
453 &dummy);
454 if (dump_enabled_p ())
455 {
456 dump_printf_loc (MSG_NOTE, vect_location,
457 "get vectype for scalar type: ");
458 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
459 dump_printf (MSG_NOTE, "\n");
460 }
461 vf_vectype = get_vectype_for_scalar_type (scalar_type);
462 if (!vf_vectype)
463 {
464 if (dump_enabled_p ())
465 {
466 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
467 "not vectorized: unsupported data-type ");
468 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
469 scalar_type);
470 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
471 }
472 return false;
473 }
474
475 if ((GET_MODE_SIZE (TYPE_MODE (vectype))
476 != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
477 {
478 if (dump_enabled_p ())
479 {
480 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
481 "not vectorized: different sized vector "
482 "types in statement, ");
483 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
484 vectype);
485 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
486 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
487 vf_vectype);
488 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
489 }
490 return false;
491 }
492
493 if (dump_enabled_p ())
494 {
495 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
496 dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
497 dump_printf (MSG_NOTE, "\n");
498 }
499
500 nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
501 if (dump_enabled_p ())
502 dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
503 if (!vectorization_factor
504 || (nunits > vectorization_factor))
505 vectorization_factor = nunits;
506
507 if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
508 {
509 pattern_def_seq = NULL;
510 gsi_next (&si);
511 }
512 }
513 }
514
515 /* TODO: Analyze cost. Decide if worth while to vectorize. */
516 if (dump_enabled_p ())
517 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
518 vectorization_factor);
519 if (vectorization_factor <= 1)
520 {
521 if (dump_enabled_p ())
522 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
523 "not vectorized: unsupported data-type\n");
524 return false;
525 }
526 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
527
528 return true;
529 }
530
531
532 /* Function vect_is_simple_iv_evolution.
533
534 FORNOW: A simple evolution of an induction variables in the loop is
535 considered a polynomial evolution. */
536
537 static bool
538 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
539 tree * step)
540 {
541 tree init_expr;
542 tree step_expr;
543 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
544 basic_block bb;
545
546 /* When there is no evolution in this loop, the evolution function
547 is not "simple". */
548 if (evolution_part == NULL_TREE)
549 return false;
550
551 /* When the evolution is a polynomial of degree >= 2
552 the evolution function is not "simple". */
553 if (tree_is_chrec (evolution_part))
554 return false;
555
556 step_expr = evolution_part;
557 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
558
559 if (dump_enabled_p ())
560 {
561 dump_printf_loc (MSG_NOTE, vect_location, "step: ");
562 dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
563 dump_printf (MSG_NOTE, ", init: ");
564 dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
565 dump_printf (MSG_NOTE, "\n");
566 }
567
568 *init = init_expr;
569 *step = step_expr;
570
571 if (TREE_CODE (step_expr) != INTEGER_CST
572 && (TREE_CODE (step_expr) != SSA_NAME
573 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
574 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
575 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
576 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
577 || !flag_associative_math)))
578 && (TREE_CODE (step_expr) != REAL_CST
579 || !flag_associative_math))
580 {
581 if (dump_enabled_p ())
582 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
583 "step unknown.\n");
584 return false;
585 }
586
587 return true;
588 }
589
590 /* Function vect_analyze_scalar_cycles_1.
591
592 Examine the cross iteration def-use cycles of scalar variables
593 in LOOP. LOOP_VINFO represents the loop that is now being
594 considered for vectorization (can be LOOP, or an outer-loop
595 enclosing LOOP). */
596
597 static void
598 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
599 {
600 basic_block bb = loop->header;
601 tree init, step;
602 stack_vec<gimple, 64> worklist;
603 gimple_stmt_iterator gsi;
604 bool double_reduc;
605
606 if (dump_enabled_p ())
607 dump_printf_loc (MSG_NOTE, vect_location,
608 "=== vect_analyze_scalar_cycles ===\n");
609
610 /* First - identify all inductions. Reduction detection assumes that all the
611 inductions have been identified, therefore, this order must not be
612 changed. */
613 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
614 {
615 gimple phi = gsi_stmt (gsi);
616 tree access_fn = NULL;
617 tree def = PHI_RESULT (phi);
618 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
619
620 if (dump_enabled_p ())
621 {
622 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
623 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
624 dump_printf (MSG_NOTE, "\n");
625 }
626
627 /* Skip virtual phi's. The data dependences that are associated with
628 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
629 if (virtual_operand_p (def))
630 continue;
631
632 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
633
634 /* Analyze the evolution function. */
635 access_fn = analyze_scalar_evolution (loop, def);
636 if (access_fn)
637 {
638 STRIP_NOPS (access_fn);
639 if (dump_enabled_p ())
640 {
641 dump_printf_loc (MSG_NOTE, vect_location,
642 "Access function of PHI: ");
643 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
644 dump_printf (MSG_NOTE, "\n");
645 }
646 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
647 = evolution_part_in_loop_num (access_fn, loop->num);
648 }
649
650 if (!access_fn
651 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
652 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
653 && TREE_CODE (step) != INTEGER_CST))
654 {
655 worklist.safe_push (phi);
656 continue;
657 }
658
659 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
660
661 if (dump_enabled_p ())
662 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
663 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
664 }
665
666
667 /* Second - identify all reductions and nested cycles. */
668 while (worklist.length () > 0)
669 {
670 gimple phi = worklist.pop ();
671 tree def = PHI_RESULT (phi);
672 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
673 gimple reduc_stmt;
674 bool nested_cycle;
675
676 if (dump_enabled_p ())
677 {
678 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
679 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
680 dump_printf (MSG_NOTE, "\n");
681 }
682
683 gcc_assert (!virtual_operand_p (def)
684 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
685
686 nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
687 reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
688 &double_reduc);
689 if (reduc_stmt)
690 {
691 if (double_reduc)
692 {
693 if (dump_enabled_p ())
694 dump_printf_loc (MSG_NOTE, vect_location,
695 "Detected double reduction.\n");
696
697 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
698 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
699 vect_double_reduction_def;
700 }
701 else
702 {
703 if (nested_cycle)
704 {
705 if (dump_enabled_p ())
706 dump_printf_loc (MSG_NOTE, vect_location,
707 "Detected vectorizable nested cycle.\n");
708
709 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
710 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
711 vect_nested_cycle;
712 }
713 else
714 {
715 if (dump_enabled_p ())
716 dump_printf_loc (MSG_NOTE, vect_location,
717 "Detected reduction.\n");
718
719 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
720 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
721 vect_reduction_def;
722 /* Store the reduction cycles for possible vectorization in
723 loop-aware SLP. */
724 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
725 }
726 }
727 }
728 else
729 if (dump_enabled_p ())
730 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
731 "Unknown def-use cycle pattern.\n");
732 }
733 }
734
735
736 /* Function vect_analyze_scalar_cycles.
737
738 Examine the cross iteration def-use cycles of scalar variables, by
739 analyzing the loop-header PHIs of scalar variables. Classify each
740 cycle as one of the following: invariant, induction, reduction, unknown.
741 We do that for the loop represented by LOOP_VINFO, and also to its
742 inner-loop, if exists.
743 Examples for scalar cycles:
744
745 Example1: reduction:
746
747 loop1:
748 for (i=0; i<N; i++)
749 sum += a[i];
750
751 Example2: induction:
752
753 loop2:
754 for (i=0; i<N; i++)
755 a[i] = i; */
756
757 static void
758 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
759 {
760 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
761
762 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
763
764 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
765 Reductions in such inner-loop therefore have different properties than
766 the reductions in the nest that gets vectorized:
767 1. When vectorized, they are executed in the same order as in the original
768 scalar loop, so we can't change the order of computation when
769 vectorizing them.
770 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
771 current checks are too strict. */
772
773 if (loop->inner)
774 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
775 }
776
777
778 /* Function vect_get_loop_niters.
779
780 Determine how many iterations the loop is executed and place it
781 in NUMBER_OF_ITERATIONS.
782
783 Return the loop exit condition. */
784
785 static gimple
786 vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
787 {
788 tree niters;
789
790 if (dump_enabled_p ())
791 dump_printf_loc (MSG_NOTE, vect_location,
792 "=== get_loop_niters ===\n");
793
794 niters = number_of_latch_executions (loop);
795 /* We want the number of loop header executions which is the number
796 of latch executions plus one.
797 ??? For UINT_MAX latch executions this number overflows to zero
798 for loops like do { n++; } while (n != 0); */
799 if (niters && !chrec_contains_undetermined (niters))
800 niters = fold_build2 (PLUS_EXPR, TREE_TYPE (niters), niters,
801 build_int_cst (TREE_TYPE (niters), 1));
802 *number_of_iterations = niters;
803
804 return get_loop_exit_condition (loop);
805 }
806
807
808 /* Function bb_in_loop_p
809
810 Used as predicate for dfs order traversal of the loop bbs. */
811
812 static bool
813 bb_in_loop_p (const_basic_block bb, const void *data)
814 {
815 const struct loop *const loop = (const struct loop *)data;
816 if (flow_bb_inside_loop_p (loop, bb))
817 return true;
818 return false;
819 }
820
821
822 /* Function new_loop_vec_info.
823
824 Create and initialize a new loop_vec_info struct for LOOP, as well as
825 stmt_vec_info structs for all the stmts in LOOP. */
826
827 static loop_vec_info
828 new_loop_vec_info (struct loop *loop)
829 {
830 loop_vec_info res;
831 basic_block *bbs;
832 gimple_stmt_iterator si;
833 unsigned int i, nbbs;
834
835 res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
836 LOOP_VINFO_LOOP (res) = loop;
837
838 bbs = get_loop_body (loop);
839
840 /* Create/Update stmt_info for all stmts in the loop. */
841 for (i = 0; i < loop->num_nodes; i++)
842 {
843 basic_block bb = bbs[i];
844
845 /* BBs in a nested inner-loop will have been already processed (because
846 we will have called vect_analyze_loop_form for any nested inner-loop).
847 Therefore, for stmts in an inner-loop we just want to update the
848 STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
849 loop_info of the outer-loop we are currently considering to vectorize
850 (instead of the loop_info of the inner-loop).
851 For stmts in other BBs we need to create a stmt_info from scratch. */
852 if (bb->loop_father != loop)
853 {
854 /* Inner-loop bb. */
855 gcc_assert (loop->inner && bb->loop_father == loop->inner);
856 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
857 {
858 gimple phi = gsi_stmt (si);
859 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
860 loop_vec_info inner_loop_vinfo =
861 STMT_VINFO_LOOP_VINFO (stmt_info);
862 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
863 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
864 }
865 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
866 {
867 gimple stmt = gsi_stmt (si);
868 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
869 loop_vec_info inner_loop_vinfo =
870 STMT_VINFO_LOOP_VINFO (stmt_info);
871 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
872 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
873 }
874 }
875 else
876 {
877 /* bb in current nest. */
878 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
879 {
880 gimple phi = gsi_stmt (si);
881 gimple_set_uid (phi, 0);
882 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
883 }
884
885 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
886 {
887 gimple stmt = gsi_stmt (si);
888 gimple_set_uid (stmt, 0);
889 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
890 }
891 }
892 }
893
894 /* CHECKME: We want to visit all BBs before their successors (except for
895 latch blocks, for which this assertion wouldn't hold). In the simple
896 case of the loop forms we allow, a dfs order of the BBs would the same
897 as reversed postorder traversal, so we are safe. */
898
899 free (bbs);
900 bbs = XCNEWVEC (basic_block, loop->num_nodes);
901 nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
902 bbs, loop->num_nodes, loop);
903 gcc_assert (nbbs == loop->num_nodes);
904
905 LOOP_VINFO_BBS (res) = bbs;
906 LOOP_VINFO_NITERS (res) = NULL;
907 LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
908 LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
909 LOOP_VINFO_VECTORIZABLE_P (res) = 0;
910 LOOP_VINFO_PEELING_FOR_ALIGNMENT (res) = 0;
911 LOOP_VINFO_VECT_FACTOR (res) = 0;
912 LOOP_VINFO_LOOP_NEST (res).create (3);
913 LOOP_VINFO_DATAREFS (res).create (10);
914 LOOP_VINFO_DDRS (res).create (10 * 10);
915 LOOP_VINFO_UNALIGNED_DR (res) = NULL;
916 LOOP_VINFO_MAY_MISALIGN_STMTS (res).create (
917 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
918 LOOP_VINFO_MAY_ALIAS_DDRS (res).create (
919 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
920 LOOP_VINFO_GROUPED_STORES (res).create (10);
921 LOOP_VINFO_REDUCTIONS (res).create (10);
922 LOOP_VINFO_REDUCTION_CHAINS (res).create (10);
923 LOOP_VINFO_SLP_INSTANCES (res).create (10);
924 LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
925 LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
926 LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
927 LOOP_VINFO_PEELING_FOR_NITER (res) = false;
928 LOOP_VINFO_OPERANDS_SWAPPED (res) = false;
929
930 return res;
931 }
932
933
934 /* Function destroy_loop_vec_info.
935
936 Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
937 stmts in the loop. */
938
939 void
940 destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
941 {
942 struct loop *loop;
943 basic_block *bbs;
944 int nbbs;
945 gimple_stmt_iterator si;
946 int j;
947 vec<slp_instance> slp_instances;
948 slp_instance instance;
949 bool swapped;
950
951 if (!loop_vinfo)
952 return;
953
954 loop = LOOP_VINFO_LOOP (loop_vinfo);
955
956 bbs = LOOP_VINFO_BBS (loop_vinfo);
957 nbbs = clean_stmts ? loop->num_nodes : 0;
958 swapped = LOOP_VINFO_OPERANDS_SWAPPED (loop_vinfo);
959
960 for (j = 0; j < nbbs; j++)
961 {
962 basic_block bb = bbs[j];
963 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
964 free_stmt_vec_info (gsi_stmt (si));
965
966 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
967 {
968 gimple stmt = gsi_stmt (si);
969
970 /* We may have broken canonical form by moving a constant
971 into RHS1 of a commutative op. Fix such occurrences. */
972 if (swapped && is_gimple_assign (stmt))
973 {
974 enum tree_code code = gimple_assign_rhs_code (stmt);
975
976 if ((code == PLUS_EXPR
977 || code == POINTER_PLUS_EXPR
978 || code == MULT_EXPR)
979 && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
980 swap_ssa_operands (stmt,
981 gimple_assign_rhs1_ptr (stmt),
982 gimple_assign_rhs2_ptr (stmt));
983 }
984
985 /* Free stmt_vec_info. */
986 free_stmt_vec_info (stmt);
987 gsi_next (&si);
988 }
989 }
990
991 free (LOOP_VINFO_BBS (loop_vinfo));
992 vect_destroy_datarefs (loop_vinfo, NULL);
993 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
994 LOOP_VINFO_LOOP_NEST (loop_vinfo).release ();
995 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release ();
996 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release ();
997 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
998 FOR_EACH_VEC_ELT (slp_instances, j, instance)
999 vect_free_slp_instance (instance);
1000
1001 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
1002 LOOP_VINFO_GROUPED_STORES (loop_vinfo).release ();
1003 LOOP_VINFO_REDUCTIONS (loop_vinfo).release ();
1004 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release ();
1005
1006 if (LOOP_VINFO_PEELING_HTAB (loop_vinfo).is_created ())
1007 LOOP_VINFO_PEELING_HTAB (loop_vinfo).dispose ();
1008
1009 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
1010
1011 free (loop_vinfo);
1012 loop->aux = NULL;
1013 }
1014
1015
1016 /* Function vect_analyze_loop_1.
1017
1018 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1019 for it. The different analyses will record information in the
1020 loop_vec_info struct. This is a subset of the analyses applied in
1021 vect_analyze_loop, to be applied on an inner-loop nested in the loop
1022 that is now considered for (outer-loop) vectorization. */
1023
1024 static loop_vec_info
1025 vect_analyze_loop_1 (struct loop *loop)
1026 {
1027 loop_vec_info loop_vinfo;
1028
1029 if (dump_enabled_p ())
1030 dump_printf_loc (MSG_NOTE, vect_location,
1031 "===== analyze_loop_nest_1 =====\n");
1032
1033 /* Check the CFG characteristics of the loop (nesting, entry/exit, etc. */
1034
1035 loop_vinfo = vect_analyze_loop_form (loop);
1036 if (!loop_vinfo)
1037 {
1038 if (dump_enabled_p ())
1039 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1040 "bad inner-loop form.\n");
1041 return NULL;
1042 }
1043
1044 return loop_vinfo;
1045 }
1046
1047
1048 /* Function vect_analyze_loop_form.
1049
1050 Verify that certain CFG restrictions hold, including:
1051 - the loop has a pre-header
1052 - the loop has a single entry and exit
1053 - the loop exit condition is simple enough, and the number of iterations
1054 can be analyzed (a countable loop). */
1055
1056 loop_vec_info
1057 vect_analyze_loop_form (struct loop *loop)
1058 {
1059 loop_vec_info loop_vinfo;
1060 gimple loop_cond;
1061 tree number_of_iterations = NULL;
1062 loop_vec_info inner_loop_vinfo = NULL;
1063
1064 if (dump_enabled_p ())
1065 dump_printf_loc (MSG_NOTE, vect_location,
1066 "=== vect_analyze_loop_form ===\n");
1067
1068 /* Different restrictions apply when we are considering an inner-most loop,
1069 vs. an outer (nested) loop.
1070 (FORNOW. May want to relax some of these restrictions in the future). */
1071
1072 if (!loop->inner)
1073 {
1074 /* Inner-most loop. We currently require that the number of BBs is
1075 exactly 2 (the header and latch). Vectorizable inner-most loops
1076 look like this:
1077
1078 (pre-header)
1079 |
1080 header <--------+
1081 | | |
1082 | +--> latch --+
1083 |
1084 (exit-bb) */
1085
1086 if (loop->num_nodes != 2)
1087 {
1088 if (dump_enabled_p ())
1089 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1090 "not vectorized: control flow in loop.\n");
1091 return NULL;
1092 }
1093
1094 if (empty_block_p (loop->header))
1095 {
1096 if (dump_enabled_p ())
1097 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1098 "not vectorized: empty loop.\n");
1099 return NULL;
1100 }
1101 }
1102 else
1103 {
1104 struct loop *innerloop = loop->inner;
1105 edge entryedge;
1106
1107 /* Nested loop. We currently require that the loop is doubly-nested,
1108 contains a single inner loop, and the number of BBs is exactly 5.
1109 Vectorizable outer-loops look like this:
1110
1111 (pre-header)
1112 |
1113 header <---+
1114 | |
1115 inner-loop |
1116 | |
1117 tail ------+
1118 |
1119 (exit-bb)
1120
1121 The inner-loop has the properties expected of inner-most loops
1122 as described above. */
1123
1124 if ((loop->inner)->inner || (loop->inner)->next)
1125 {
1126 if (dump_enabled_p ())
1127 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1128 "not vectorized: multiple nested loops.\n");
1129 return NULL;
1130 }
1131
1132 /* Analyze the inner-loop. */
1133 inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
1134 if (!inner_loop_vinfo)
1135 {
1136 if (dump_enabled_p ())
1137 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1138 "not vectorized: Bad inner loop.\n");
1139 return NULL;
1140 }
1141
1142 if (!expr_invariant_in_loop_p (loop,
1143 LOOP_VINFO_NITERS (inner_loop_vinfo)))
1144 {
1145 if (dump_enabled_p ())
1146 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1147 "not vectorized: inner-loop count not"
1148 " invariant.\n");
1149 destroy_loop_vec_info (inner_loop_vinfo, true);
1150 return NULL;
1151 }
1152
1153 if (loop->num_nodes != 5)
1154 {
1155 if (dump_enabled_p ())
1156 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1157 "not vectorized: control flow in loop.\n");
1158 destroy_loop_vec_info (inner_loop_vinfo, true);
1159 return NULL;
1160 }
1161
1162 gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
1163 entryedge = EDGE_PRED (innerloop->header, 0);
1164 if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
1165 entryedge = EDGE_PRED (innerloop->header, 1);
1166
1167 if (entryedge->src != loop->header
1168 || !single_exit (innerloop)
1169 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1170 {
1171 if (dump_enabled_p ())
1172 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1173 "not vectorized: unsupported outerloop form.\n");
1174 destroy_loop_vec_info (inner_loop_vinfo, true);
1175 return NULL;
1176 }
1177
1178 if (dump_enabled_p ())
1179 dump_printf_loc (MSG_NOTE, vect_location,
1180 "Considering outer-loop vectorization.\n");
1181 }
1182
1183 if (!single_exit (loop)
1184 || EDGE_COUNT (loop->header->preds) != 2)
1185 {
1186 if (dump_enabled_p ())
1187 {
1188 if (!single_exit (loop))
1189 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1190 "not vectorized: multiple exits.\n");
1191 else if (EDGE_COUNT (loop->header->preds) != 2)
1192 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1193 "not vectorized: too many incoming edges.\n");
1194 }
1195 if (inner_loop_vinfo)
1196 destroy_loop_vec_info (inner_loop_vinfo, true);
1197 return NULL;
1198 }
1199
1200 /* We assume that the loop exit condition is at the end of the loop. i.e,
1201 that the loop is represented as a do-while (with a proper if-guard
1202 before the loop if needed), where the loop header contains all the
1203 executable statements, and the latch is empty. */
1204 if (!empty_block_p (loop->latch)
1205 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1206 {
1207 if (dump_enabled_p ())
1208 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1209 "not vectorized: latch block not empty.\n");
1210 if (inner_loop_vinfo)
1211 destroy_loop_vec_info (inner_loop_vinfo, true);
1212 return NULL;
1213 }
1214
1215 /* Make sure there exists a single-predecessor exit bb: */
1216 if (!single_pred_p (single_exit (loop)->dest))
1217 {
1218 edge e = single_exit (loop);
1219 if (!(e->flags & EDGE_ABNORMAL))
1220 {
1221 split_loop_exit_edge (e);
1222 if (dump_enabled_p ())
1223 dump_printf (MSG_NOTE, "split exit edge.\n");
1224 }
1225 else
1226 {
1227 if (dump_enabled_p ())
1228 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1229 "not vectorized: abnormal loop exit edge.\n");
1230 if (inner_loop_vinfo)
1231 destroy_loop_vec_info (inner_loop_vinfo, true);
1232 return NULL;
1233 }
1234 }
1235
1236 loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
1237 if (!loop_cond)
1238 {
1239 if (dump_enabled_p ())
1240 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1241 "not vectorized: complicated exit condition.\n");
1242 if (inner_loop_vinfo)
1243 destroy_loop_vec_info (inner_loop_vinfo, true);
1244 return NULL;
1245 }
1246
1247 if (!number_of_iterations
1248 || chrec_contains_undetermined (number_of_iterations))
1249 {
1250 if (dump_enabled_p ())
1251 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1252 "not vectorized: number of iterations cannot be "
1253 "computed.\n");
1254 if (inner_loop_vinfo)
1255 destroy_loop_vec_info (inner_loop_vinfo, true);
1256 return NULL;
1257 }
1258
1259 if (integer_zerop (number_of_iterations))
1260 {
1261 if (dump_enabled_p ())
1262 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1263 "not vectorized: number of iterations = 0.\n");
1264 if (inner_loop_vinfo)
1265 destroy_loop_vec_info (inner_loop_vinfo, true);
1266 return NULL;
1267 }
1268
1269 loop_vinfo = new_loop_vec_info (loop);
1270 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1271 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1272
1273 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1274 {
1275 if (dump_enabled_p ())
1276 {
1277 dump_printf_loc (MSG_NOTE, vect_location,
1278 "Symbolic number of iterations is ");
1279 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1280 dump_printf (MSG_NOTE, "\n");
1281 }
1282 }
1283
1284 STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
1285
1286 /* CHECKME: May want to keep it around it in the future. */
1287 if (inner_loop_vinfo)
1288 destroy_loop_vec_info (inner_loop_vinfo, false);
1289
1290 gcc_assert (!loop->aux);
1291 loop->aux = loop_vinfo;
1292 return loop_vinfo;
1293 }
1294
1295
1296 /* Function vect_analyze_loop_operations.
1297
1298 Scan the loop stmts and make sure they are all vectorizable. */
1299
1300 static bool
1301 vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
1302 {
1303 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1304 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1305 int nbbs = loop->num_nodes;
1306 gimple_stmt_iterator si;
1307 unsigned int vectorization_factor = 0;
1308 int i;
1309 gimple phi;
1310 stmt_vec_info stmt_info;
1311 bool need_to_vectorize = false;
1312 int min_profitable_iters;
1313 int min_scalar_loop_bound;
1314 unsigned int th;
1315 bool only_slp_in_loop = true, ok;
1316 HOST_WIDE_INT max_niter;
1317 HOST_WIDE_INT estimated_niter;
1318 int min_profitable_estimate;
1319
1320 if (dump_enabled_p ())
1321 dump_printf_loc (MSG_NOTE, vect_location,
1322 "=== vect_analyze_loop_operations ===\n");
1323
1324 gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
1325 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1326 if (slp)
1327 {
1328 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1329 vectorization factor of the loop is the unrolling factor required by
1330 the SLP instances. If that unrolling factor is 1, we say, that we
1331 perform pure SLP on loop - cross iteration parallelism is not
1332 exploited. */
1333 for (i = 0; i < nbbs; i++)
1334 {
1335 basic_block bb = bbs[i];
1336 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1337 {
1338 gimple stmt = gsi_stmt (si);
1339 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1340 gcc_assert (stmt_info);
1341 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1342 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1343 && !PURE_SLP_STMT (stmt_info))
1344 /* STMT needs both SLP and loop-based vectorization. */
1345 only_slp_in_loop = false;
1346 }
1347 }
1348
1349 if (only_slp_in_loop)
1350 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1351 else
1352 vectorization_factor = least_common_multiple (vectorization_factor,
1353 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1354
1355 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1356 if (dump_enabled_p ())
1357 dump_printf_loc (MSG_NOTE, vect_location,
1358 "Updating vectorization factor to %d\n",
1359 vectorization_factor);
1360 }
1361
1362 for (i = 0; i < nbbs; i++)
1363 {
1364 basic_block bb = bbs[i];
1365
1366 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1367 {
1368 phi = gsi_stmt (si);
1369 ok = true;
1370
1371 stmt_info = vinfo_for_stmt (phi);
1372 if (dump_enabled_p ())
1373 {
1374 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
1375 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1376 dump_printf (MSG_NOTE, "\n");
1377 }
1378
1379 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1380 (i.e., a phi in the tail of the outer-loop). */
1381 if (! is_loop_header_bb_p (bb))
1382 {
1383 /* FORNOW: we currently don't support the case that these phis
1384 are not used in the outerloop (unless it is double reduction,
1385 i.e., this phi is vect_reduction_def), cause this case
1386 requires to actually do something here. */
1387 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
1388 || STMT_VINFO_LIVE_P (stmt_info))
1389 && STMT_VINFO_DEF_TYPE (stmt_info)
1390 != vect_double_reduction_def)
1391 {
1392 if (dump_enabled_p ())
1393 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1394 "Unsupported loop-closed phi in "
1395 "outer-loop.\n");
1396 return false;
1397 }
1398
1399 /* If PHI is used in the outer loop, we check that its operand
1400 is defined in the inner loop. */
1401 if (STMT_VINFO_RELEVANT_P (stmt_info))
1402 {
1403 tree phi_op;
1404 gimple op_def_stmt;
1405
1406 if (gimple_phi_num_args (phi) != 1)
1407 return false;
1408
1409 phi_op = PHI_ARG_DEF (phi, 0);
1410 if (TREE_CODE (phi_op) != SSA_NAME)
1411 return false;
1412
1413 op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
1414 if (gimple_nop_p (op_def_stmt)
1415 || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
1416 || !vinfo_for_stmt (op_def_stmt))
1417 return false;
1418
1419 if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1420 != vect_used_in_outer
1421 && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1422 != vect_used_in_outer_by_reduction)
1423 return false;
1424 }
1425
1426 continue;
1427 }
1428
1429 gcc_assert (stmt_info);
1430
1431 if (STMT_VINFO_LIVE_P (stmt_info))
1432 {
1433 /* FORNOW: not yet supported. */
1434 if (dump_enabled_p ())
1435 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1436 "not vectorized: value used after loop.\n");
1437 return false;
1438 }
1439
1440 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1441 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1442 {
1443 /* A scalar-dependence cycle that we don't support. */
1444 if (dump_enabled_p ())
1445 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1446 "not vectorized: scalar dependence cycle.\n");
1447 return false;
1448 }
1449
1450 if (STMT_VINFO_RELEVANT_P (stmt_info))
1451 {
1452 need_to_vectorize = true;
1453 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
1454 ok = vectorizable_induction (phi, NULL, NULL);
1455 }
1456
1457 if (!ok)
1458 {
1459 if (dump_enabled_p ())
1460 {
1461 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1462 "not vectorized: relevant phi not "
1463 "supported: ");
1464 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
1465 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
1466 }
1467 return false;
1468 }
1469 }
1470
1471 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1472 {
1473 gimple stmt = gsi_stmt (si);
1474 if (!gimple_clobber_p (stmt)
1475 && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
1476 return false;
1477 }
1478 } /* bbs */
1479
1480 /* All operations in the loop are either irrelevant (deal with loop
1481 control, or dead), or only used outside the loop and can be moved
1482 out of the loop (e.g. invariants, inductions). The loop can be
1483 optimized away by scalar optimizations. We're better off not
1484 touching this loop. */
1485 if (!need_to_vectorize)
1486 {
1487 if (dump_enabled_p ())
1488 dump_printf_loc (MSG_NOTE, vect_location,
1489 "All the computation can be taken out of the loop.\n");
1490 if (dump_enabled_p ())
1491 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1492 "not vectorized: redundant loop. no profit to "
1493 "vectorize.\n");
1494 return false;
1495 }
1496
1497 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1498 dump_printf_loc (MSG_NOTE, vect_location,
1499 "vectorization_factor = %d, niters = "
1500 HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor,
1501 LOOP_VINFO_INT_NITERS (loop_vinfo));
1502
1503 if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1504 && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
1505 || ((max_niter = max_stmt_executions_int (loop)) != -1
1506 && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
1507 {
1508 if (dump_enabled_p ())
1509 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1510 "not vectorized: iteration count too small.\n");
1511 if (dump_enabled_p ())
1512 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1513 "not vectorized: iteration count smaller than "
1514 "vectorization factor.\n");
1515 return false;
1516 }
1517
1518 /* Analyze cost. Decide if worth while to vectorize. */
1519
1520 /* Once VF is set, SLP costs should be updated since the number of created
1521 vector stmts depends on VF. */
1522 vect_update_slp_costs_according_to_vf (loop_vinfo);
1523
1524 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1525 &min_profitable_estimate);
1526 LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;
1527
1528 if (min_profitable_iters < 0)
1529 {
1530 if (dump_enabled_p ())
1531 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1532 "not vectorized: vectorization not profitable.\n");
1533 if (dump_enabled_p ())
1534 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1535 "not vectorized: vector version will never be "
1536 "profitable.\n");
1537 return false;
1538 }
1539
1540 min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1541 * vectorization_factor) - 1);
1542
1543
1544 /* Use the cost model only if it is more conservative than user specified
1545 threshold. */
1546
1547 th = (unsigned) min_scalar_loop_bound;
1548 if (min_profitable_iters
1549 && (!min_scalar_loop_bound
1550 || min_profitable_iters > min_scalar_loop_bound))
1551 th = (unsigned) min_profitable_iters;
1552
1553 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1554 && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
1555 {
1556 if (dump_enabled_p ())
1557 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1558 "not vectorized: vectorization not profitable.\n");
1559 if (dump_enabled_p ())
1560 dump_printf_loc (MSG_NOTE, vect_location,
1561 "not vectorized: iteration count smaller than user "
1562 "specified loop bound parameter or minimum profitable "
1563 "iterations (whichever is more conservative).\n");
1564 return false;
1565 }
1566
1567 if ((estimated_niter = estimated_stmt_executions_int (loop)) != -1
1568 && ((unsigned HOST_WIDE_INT) estimated_niter
1569 <= MAX (th, (unsigned)min_profitable_estimate)))
1570 {
1571 if (dump_enabled_p ())
1572 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1573 "not vectorized: estimated iteration count too "
1574 "small.\n");
1575 if (dump_enabled_p ())
1576 dump_printf_loc (MSG_NOTE, vect_location,
1577 "not vectorized: estimated iteration count smaller "
1578 "than specified loop bound parameter or minimum "
1579 "profitable iterations (whichever is more "
1580 "conservative).\n");
1581 return false;
1582 }
1583
1584 return true;
1585 }
1586
1587
1588 /* Function vect_analyze_loop_2.
1589
1590 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1591 for it. The different analyses will record information in the
1592 loop_vec_info struct. */
1593 static bool
1594 vect_analyze_loop_2 (loop_vec_info loop_vinfo)
1595 {
1596 bool ok, slp = false;
1597 int max_vf = MAX_VECTORIZATION_FACTOR;
1598 int min_vf = 2;
1599
1600 /* Find all data references in the loop (which correspond to vdefs/vuses)
1601 and analyze their evolution in the loop. Also adjust the minimal
1602 vectorization factor according to the loads and stores.
1603
1604 FORNOW: Handle only simple, array references, which
1605 alignment can be forced, and aligned pointer-references. */
1606
1607 ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
1608 if (!ok)
1609 {
1610 if (dump_enabled_p ())
1611 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1612 "bad data references.\n");
1613 return false;
1614 }
1615
1616 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1617 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1618
1619 ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
1620 if (!ok)
1621 {
1622 if (dump_enabled_p ())
1623 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1624 "bad data access.\n");
1625 return false;
1626 }
1627
1628 /* Classify all cross-iteration scalar data-flow cycles.
1629 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1630
1631 vect_analyze_scalar_cycles (loop_vinfo);
1632
1633 vect_pattern_recog (loop_vinfo, NULL);
1634
1635 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1636
1637 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1638 if (!ok)
1639 {
1640 if (dump_enabled_p ())
1641 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1642 "unexpected pattern.\n");
1643 return false;
1644 }
1645
1646 /* Analyze data dependences between the data-refs in the loop
1647 and adjust the maximum vectorization factor according to
1648 the dependences.
1649 FORNOW: fail at the first data dependence that we encounter. */
1650
1651 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1652 if (!ok
1653 || max_vf < min_vf)
1654 {
1655 if (dump_enabled_p ())
1656 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1657 "bad data dependence.\n");
1658 return false;
1659 }
1660
1661 ok = vect_determine_vectorization_factor (loop_vinfo);
1662 if (!ok)
1663 {
1664 if (dump_enabled_p ())
1665 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1666 "can't determine vectorization factor.\n");
1667 return false;
1668 }
1669 if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
1670 {
1671 if (dump_enabled_p ())
1672 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1673 "bad data dependence.\n");
1674 return false;
1675 }
1676
1677 /* Analyze the alignment of the data-refs in the loop.
1678 Fail if a data reference is found that cannot be vectorized. */
1679
1680 ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
1681 if (!ok)
1682 {
1683 if (dump_enabled_p ())
1684 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1685 "bad data alignment.\n");
1686 return false;
1687 }
1688
1689 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1690 It is important to call pruning after vect_analyze_data_ref_accesses,
1691 since we use grouping information gathered by interleaving analysis. */
1692 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1693 if (!ok)
1694 {
1695 if (dump_enabled_p ())
1696 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1697 "too long list of versioning for alias "
1698 "run-time tests.\n");
1699 return false;
1700 }
1701
1702 /* This pass will decide on using loop versioning and/or loop peeling in
1703 order to enhance the alignment of data references in the loop. */
1704
1705 ok = vect_enhance_data_refs_alignment (loop_vinfo);
1706 if (!ok)
1707 {
1708 if (dump_enabled_p ())
1709 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1710 "bad data alignment.\n");
1711 return false;
1712 }
1713
1714 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1715 ok = vect_analyze_slp (loop_vinfo, NULL);
1716 if (ok)
1717 {
1718 /* Decide which possible SLP instances to SLP. */
1719 slp = vect_make_slp_decision (loop_vinfo);
1720
1721 /* Find stmts that need to be both vectorized and SLPed. */
1722 vect_detect_hybrid_slp (loop_vinfo);
1723 }
1724 else
1725 return false;
1726
1727 /* Scan all the operations in the loop and make sure they are
1728 vectorizable. */
1729
1730 ok = vect_analyze_loop_operations (loop_vinfo, slp);
1731 if (!ok)
1732 {
1733 if (dump_enabled_p ())
1734 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1735 "bad operation or unsupported loop bound.\n");
1736 return false;
1737 }
1738
1739 /* Decide whether we need to create an epilogue loop to handle
1740 remaining scalar iterations. */
1741 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1742 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
1743 {
1744 if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo)
1745 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
1746 < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
1747 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
1748 }
1749 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1750 || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
1751 < (unsigned)exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))))
1752 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
1753
1754 /* If an epilogue loop is required make sure we can create one. */
1755 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
1756 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
1757 {
1758 if (dump_enabled_p ())
1759 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
1760 if (!vect_can_advance_ivs_p (loop_vinfo)
1761 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
1762 single_exit (LOOP_VINFO_LOOP
1763 (loop_vinfo))))
1764 {
1765 if (dump_enabled_p ())
1766 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1767 "not vectorized: can't create required "
1768 "epilog loop\n");
1769 return false;
1770 }
1771 }
1772
1773 return true;
1774 }
1775
1776 /* Function vect_analyze_loop.
1777
1778 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1779 for it. The different analyses will record information in the
1780 loop_vec_info struct. */
1781 loop_vec_info
1782 vect_analyze_loop (struct loop *loop)
1783 {
1784 loop_vec_info loop_vinfo;
1785 unsigned int vector_sizes;
1786
1787 /* Autodetect first vector size we try. */
1788 current_vector_size = 0;
1789 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
1790
1791 if (dump_enabled_p ())
1792 dump_printf_loc (MSG_NOTE, vect_location,
1793 "===== analyze_loop_nest =====\n");
1794
1795 if (loop_outer (loop)
1796 && loop_vec_info_for_loop (loop_outer (loop))
1797 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
1798 {
1799 if (dump_enabled_p ())
1800 dump_printf_loc (MSG_NOTE, vect_location,
1801 "outer-loop already vectorized.\n");
1802 return NULL;
1803 }
1804
1805 while (1)
1806 {
1807 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
1808 loop_vinfo = vect_analyze_loop_form (loop);
1809 if (!loop_vinfo)
1810 {
1811 if (dump_enabled_p ())
1812 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1813 "bad loop form.\n");
1814 return NULL;
1815 }
1816
1817 if (vect_analyze_loop_2 (loop_vinfo))
1818 {
1819 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
1820
1821 return loop_vinfo;
1822 }
1823
1824 destroy_loop_vec_info (loop_vinfo, true);
1825
1826 vector_sizes &= ~current_vector_size;
1827 if (vector_sizes == 0
1828 || current_vector_size == 0)
1829 return NULL;
1830
1831 /* Try the next biggest vector size. */
1832 current_vector_size = 1 << floor_log2 (vector_sizes);
1833 if (dump_enabled_p ())
1834 dump_printf_loc (MSG_NOTE, vect_location,
1835 "***** Re-trying analysis with "
1836 "vector size %d\n", current_vector_size);
1837 }
1838 }
1839
1840
1841 /* Function reduction_code_for_scalar_code
1842
1843 Input:
1844 CODE - tree_code of a reduction operations.
1845
1846 Output:
1847 REDUC_CODE - the corresponding tree-code to be used to reduce the
1848 vector of partial results into a single scalar result (which
1849 will also reside in a vector) or ERROR_MARK if the operation is
1850 a supported reduction operation, but does not have such tree-code.
1851
1852 Return FALSE if CODE currently cannot be vectorized as reduction. */
1853
1854 static bool
1855 reduction_code_for_scalar_code (enum tree_code code,
1856 enum tree_code *reduc_code)
1857 {
1858 switch (code)
1859 {
1860 case MAX_EXPR:
1861 *reduc_code = REDUC_MAX_EXPR;
1862 return true;
1863
1864 case MIN_EXPR:
1865 *reduc_code = REDUC_MIN_EXPR;
1866 return true;
1867
1868 case PLUS_EXPR:
1869 *reduc_code = REDUC_PLUS_EXPR;
1870 return true;
1871
1872 case MULT_EXPR:
1873 case MINUS_EXPR:
1874 case BIT_IOR_EXPR:
1875 case BIT_XOR_EXPR:
1876 case BIT_AND_EXPR:
1877 *reduc_code = ERROR_MARK;
1878 return true;
1879
1880 default:
1881 return false;
1882 }
1883 }
1884
1885
1886 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
1887 STMT is printed with a message MSG. */
1888
1889 static void
1890 report_vect_op (int msg_type, gimple stmt, const char *msg)
1891 {
1892 dump_printf_loc (msg_type, vect_location, "%s", msg);
1893 dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
1894 dump_printf (msg_type, "\n");
1895 }
1896
1897
1898 /* Detect SLP reduction of the form:
1899
1900 #a1 = phi <a5, a0>
1901 a2 = operation (a1)
1902 a3 = operation (a2)
1903 a4 = operation (a3)
1904 a5 = operation (a4)
1905
1906 #a = phi <a5>
1907
1908 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
1909 FIRST_STMT is the first reduction stmt in the chain
1910 (a2 = operation (a1)).
1911
1912 Return TRUE if a reduction chain was detected. */
1913
1914 static bool
1915 vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt)
1916 {
1917 struct loop *loop = (gimple_bb (phi))->loop_father;
1918 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1919 enum tree_code code;
1920 gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt;
1921 stmt_vec_info use_stmt_info, current_stmt_info;
1922 tree lhs;
1923 imm_use_iterator imm_iter;
1924 use_operand_p use_p;
1925 int nloop_uses, size = 0, n_out_of_loop_uses;
1926 bool found = false;
1927
1928 if (loop != vect_loop)
1929 return false;
1930
1931 lhs = PHI_RESULT (phi);
1932 code = gimple_assign_rhs_code (first_stmt);
1933 while (1)
1934 {
1935 nloop_uses = 0;
1936 n_out_of_loop_uses = 0;
1937 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
1938 {
1939 gimple use_stmt = USE_STMT (use_p);
1940 if (is_gimple_debug (use_stmt))
1941 continue;
1942
1943 use_stmt = USE_STMT (use_p);
1944
1945 /* Check if we got back to the reduction phi. */
1946 if (use_stmt == phi)
1947 {
1948 loop_use_stmt = use_stmt;
1949 found = true;
1950 break;
1951 }
1952
1953 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
1954 {
1955 if (vinfo_for_stmt (use_stmt)
1956 && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
1957 {
1958 loop_use_stmt = use_stmt;
1959 nloop_uses++;
1960 }
1961 }
1962 else
1963 n_out_of_loop_uses++;
1964
1965 /* There are can be either a single use in the loop or two uses in
1966 phi nodes. */
1967 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
1968 return false;
1969 }
1970
1971 if (found)
1972 break;
1973
1974 /* We reached a statement with no loop uses. */
1975 if (nloop_uses == 0)
1976 return false;
1977
1978 /* This is a loop exit phi, and we haven't reached the reduction phi. */
1979 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
1980 return false;
1981
1982 if (!is_gimple_assign (loop_use_stmt)
1983 || code != gimple_assign_rhs_code (loop_use_stmt)
1984 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
1985 return false;
1986
1987 /* Insert USE_STMT into reduction chain. */
1988 use_stmt_info = vinfo_for_stmt (loop_use_stmt);
1989 if (current_stmt)
1990 {
1991 current_stmt_info = vinfo_for_stmt (current_stmt);
1992 GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
1993 GROUP_FIRST_ELEMENT (use_stmt_info)
1994 = GROUP_FIRST_ELEMENT (current_stmt_info);
1995 }
1996 else
1997 GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
1998
1999 lhs = gimple_assign_lhs (loop_use_stmt);
2000 current_stmt = loop_use_stmt;
2001 size++;
2002 }
2003
2004 if (!found || loop_use_stmt != phi || size < 2)
2005 return false;
2006
2007 /* Swap the operands, if needed, to make the reduction operand be the second
2008 operand. */
2009 lhs = PHI_RESULT (phi);
2010 next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
2011 while (next_stmt)
2012 {
2013 if (gimple_assign_rhs2 (next_stmt) == lhs)
2014 {
2015 tree op = gimple_assign_rhs1 (next_stmt);
2016 gimple def_stmt = NULL;
2017
2018 if (TREE_CODE (op) == SSA_NAME)
2019 def_stmt = SSA_NAME_DEF_STMT (op);
2020
2021 /* Check that the other def is either defined in the loop
2022 ("vect_internal_def"), or it's an induction (defined by a
2023 loop-header phi-node). */
2024 if (def_stmt
2025 && gimple_bb (def_stmt)
2026 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2027 && (is_gimple_assign (def_stmt)
2028 || is_gimple_call (def_stmt)
2029 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2030 == vect_induction_def
2031 || (gimple_code (def_stmt) == GIMPLE_PHI
2032 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2033 == vect_internal_def
2034 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
2035 {
2036 lhs = gimple_assign_lhs (next_stmt);
2037 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2038 continue;
2039 }
2040
2041 return false;
2042 }
2043 else
2044 {
2045 tree op = gimple_assign_rhs2 (next_stmt);
2046 gimple def_stmt = NULL;
2047
2048 if (TREE_CODE (op) == SSA_NAME)
2049 def_stmt = SSA_NAME_DEF_STMT (op);
2050
2051 /* Check that the other def is either defined in the loop
2052 ("vect_internal_def"), or it's an induction (defined by a
2053 loop-header phi-node). */
2054 if (def_stmt
2055 && gimple_bb (def_stmt)
2056 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2057 && (is_gimple_assign (def_stmt)
2058 || is_gimple_call (def_stmt)
2059 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2060 == vect_induction_def
2061 || (gimple_code (def_stmt) == GIMPLE_PHI
2062 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2063 == vect_internal_def
2064 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
2065 {
2066 if (dump_enabled_p ())
2067 {
2068 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2069 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2070 dump_printf (MSG_NOTE, "\n");
2071 }
2072
2073 swap_ssa_operands (next_stmt,
2074 gimple_assign_rhs1_ptr (next_stmt),
2075 gimple_assign_rhs2_ptr (next_stmt));
2076 update_stmt (next_stmt);
2077
2078 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2079 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2080 }
2081 else
2082 return false;
2083 }
2084
2085 lhs = gimple_assign_lhs (next_stmt);
2086 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2087 }
2088
2089 /* Save the chain for further analysis in SLP detection. */
2090 first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
2091 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
2092 GROUP_SIZE (vinfo_for_stmt (first)) = size;
2093
2094 return true;
2095 }
2096
2097
2098 /* Function vect_is_simple_reduction_1
2099
2100 (1) Detect a cross-iteration def-use cycle that represents a simple
2101 reduction computation. We look for the following pattern:
2102
2103 loop_header:
2104 a1 = phi < a0, a2 >
2105 a3 = ...
2106 a2 = operation (a3, a1)
2107
2108 or
2109
2110 a3 = ...
2111 loop_header:
2112 a1 = phi < a0, a2 >
2113 a2 = operation (a3, a1)
2114
2115 such that:
2116 1. operation is commutative and associative and it is safe to
2117 change the order of the computation (if CHECK_REDUCTION is true)
2118 2. no uses for a2 in the loop (a2 is used out of the loop)
2119 3. no uses of a1 in the loop besides the reduction operation
2120 4. no uses of a1 outside the loop.
2121
2122 Conditions 1,4 are tested here.
2123 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2124
2125 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2126 nested cycles, if CHECK_REDUCTION is false.
2127
2128 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2129 reductions:
2130
2131 a1 = phi < a0, a2 >
2132 inner loop (def of a3)
2133 a2 = phi < a3 >
2134
2135 If MODIFY is true it tries also to rework the code in-place to enable
2136 detection of more reduction patterns. For the time being we rewrite
2137 "res -= RHS" into "rhs += -RHS" when it seems worthwhile.
2138 */
2139
2140 static gimple
2141 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
2142 bool check_reduction, bool *double_reduc,
2143 bool modify)
2144 {
2145 struct loop *loop = (gimple_bb (phi))->loop_father;
2146 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2147 edge latch_e = loop_latch_edge (loop);
2148 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2149 gimple def_stmt, def1 = NULL, def2 = NULL;
2150 enum tree_code orig_code, code;
2151 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2152 tree type;
2153 int nloop_uses;
2154 tree name;
2155 imm_use_iterator imm_iter;
2156 use_operand_p use_p;
2157 bool phi_def;
2158
2159 *double_reduc = false;
2160
2161 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
2162 otherwise, we assume outer loop vectorization. */
2163 gcc_assert ((check_reduction && loop == vect_loop)
2164 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
2165
2166 name = PHI_RESULT (phi);
2167 nloop_uses = 0;
2168 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2169 {
2170 gimple use_stmt = USE_STMT (use_p);
2171 if (is_gimple_debug (use_stmt))
2172 continue;
2173
2174 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2175 {
2176 if (dump_enabled_p ())
2177 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2178 "intermediate value used outside loop.\n");
2179
2180 return NULL;
2181 }
2182
2183 if (vinfo_for_stmt (use_stmt)
2184 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2185 nloop_uses++;
2186 if (nloop_uses > 1)
2187 {
2188 if (dump_enabled_p ())
2189 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2190 "reduction used in loop.\n");
2191 return NULL;
2192 }
2193 }
2194
2195 if (TREE_CODE (loop_arg) != SSA_NAME)
2196 {
2197 if (dump_enabled_p ())
2198 {
2199 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2200 "reduction: not ssa_name: ");
2201 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2202 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2203 }
2204 return NULL;
2205 }
2206
2207 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
2208 if (!def_stmt)
2209 {
2210 if (dump_enabled_p ())
2211 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2212 "reduction: no def_stmt.\n");
2213 return NULL;
2214 }
2215
2216 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
2217 {
2218 if (dump_enabled_p ())
2219 {
2220 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
2221 dump_printf (MSG_NOTE, "\n");
2222 }
2223 return NULL;
2224 }
2225
2226 if (is_gimple_assign (def_stmt))
2227 {
2228 name = gimple_assign_lhs (def_stmt);
2229 phi_def = false;
2230 }
2231 else
2232 {
2233 name = PHI_RESULT (def_stmt);
2234 phi_def = true;
2235 }
2236
2237 nloop_uses = 0;
2238 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2239 {
2240 gimple use_stmt = USE_STMT (use_p);
2241 if (is_gimple_debug (use_stmt))
2242 continue;
2243 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2244 && vinfo_for_stmt (use_stmt)
2245 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2246 nloop_uses++;
2247 if (nloop_uses > 1)
2248 {
2249 if (dump_enabled_p ())
2250 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2251 "reduction used in loop.\n");
2252 return NULL;
2253 }
2254 }
2255
2256 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2257 defined in the inner loop. */
2258 if (phi_def)
2259 {
2260 op1 = PHI_ARG_DEF (def_stmt, 0);
2261
2262 if (gimple_phi_num_args (def_stmt) != 1
2263 || TREE_CODE (op1) != SSA_NAME)
2264 {
2265 if (dump_enabled_p ())
2266 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2267 "unsupported phi node definition.\n");
2268
2269 return NULL;
2270 }
2271
2272 def1 = SSA_NAME_DEF_STMT (op1);
2273 if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2274 && loop->inner
2275 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2276 && is_gimple_assign (def1))
2277 {
2278 if (dump_enabled_p ())
2279 report_vect_op (MSG_NOTE, def_stmt,
2280 "detected double reduction: ");
2281
2282 *double_reduc = true;
2283 return def_stmt;
2284 }
2285
2286 return NULL;
2287 }
2288
2289 code = orig_code = gimple_assign_rhs_code (def_stmt);
2290
2291 /* We can handle "res -= x[i]", which is non-associative by
2292 simply rewriting this into "res += -x[i]". Avoid changing
2293 gimple instruction for the first simple tests and only do this
2294 if we're allowed to change code at all. */
2295 if (code == MINUS_EXPR
2296 && modify
2297 && (op1 = gimple_assign_rhs1 (def_stmt))
2298 && TREE_CODE (op1) == SSA_NAME
2299 && SSA_NAME_DEF_STMT (op1) == phi)
2300 code = PLUS_EXPR;
2301
2302 if (check_reduction
2303 && (!commutative_tree_code (code) || !associative_tree_code (code)))
2304 {
2305 if (dump_enabled_p ())
2306 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2307 "reduction: not commutative/associative: ");
2308 return NULL;
2309 }
2310
2311 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
2312 {
2313 if (code != COND_EXPR)
2314 {
2315 if (dump_enabled_p ())
2316 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2317 "reduction: not binary operation: ");
2318
2319 return NULL;
2320 }
2321
2322 op3 = gimple_assign_rhs1 (def_stmt);
2323 if (COMPARISON_CLASS_P (op3))
2324 {
2325 op4 = TREE_OPERAND (op3, 1);
2326 op3 = TREE_OPERAND (op3, 0);
2327 }
2328
2329 op1 = gimple_assign_rhs2 (def_stmt);
2330 op2 = gimple_assign_rhs3 (def_stmt);
2331
2332 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2333 {
2334 if (dump_enabled_p ())
2335 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2336 "reduction: uses not ssa_names: ");
2337
2338 return NULL;
2339 }
2340 }
2341 else
2342 {
2343 op1 = gimple_assign_rhs1 (def_stmt);
2344 op2 = gimple_assign_rhs2 (def_stmt);
2345
2346 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2347 {
2348 if (dump_enabled_p ())
2349 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2350 "reduction: uses not ssa_names: ");
2351
2352 return NULL;
2353 }
2354 }
2355
2356 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
2357 if ((TREE_CODE (op1) == SSA_NAME
2358 && !types_compatible_p (type,TREE_TYPE (op1)))
2359 || (TREE_CODE (op2) == SSA_NAME
2360 && !types_compatible_p (type, TREE_TYPE (op2)))
2361 || (op3 && TREE_CODE (op3) == SSA_NAME
2362 && !types_compatible_p (type, TREE_TYPE (op3)))
2363 || (op4 && TREE_CODE (op4) == SSA_NAME
2364 && !types_compatible_p (type, TREE_TYPE (op4))))
2365 {
2366 if (dump_enabled_p ())
2367 {
2368 dump_printf_loc (MSG_NOTE, vect_location,
2369 "reduction: multiple types: operation type: ");
2370 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
2371 dump_printf (MSG_NOTE, ", operands types: ");
2372 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2373 TREE_TYPE (op1));
2374 dump_printf (MSG_NOTE, ",");
2375 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2376 TREE_TYPE (op2));
2377 if (op3)
2378 {
2379 dump_printf (MSG_NOTE, ",");
2380 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2381 TREE_TYPE (op3));
2382 }
2383
2384 if (op4)
2385 {
2386 dump_printf (MSG_NOTE, ",");
2387 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2388 TREE_TYPE (op4));
2389 }
2390 dump_printf (MSG_NOTE, "\n");
2391 }
2392
2393 return NULL;
2394 }
2395
2396 /* Check that it's ok to change the order of the computation.
2397 Generally, when vectorizing a reduction we change the order of the
2398 computation. This may change the behavior of the program in some
2399 cases, so we need to check that this is ok. One exception is when
2400 vectorizing an outer-loop: the inner-loop is executed sequentially,
2401 and therefore vectorizing reductions in the inner-loop during
2402 outer-loop vectorization is safe. */
2403
2404 /* CHECKME: check for !flag_finite_math_only too? */
2405 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
2406 && check_reduction)
2407 {
2408 /* Changing the order of operations changes the semantics. */
2409 if (dump_enabled_p ())
2410 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2411 "reduction: unsafe fp math optimization: ");
2412 return NULL;
2413 }
2414 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
2415 && check_reduction)
2416 {
2417 /* Changing the order of operations changes the semantics. */
2418 if (dump_enabled_p ())
2419 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2420 "reduction: unsafe int math optimization: ");
2421 return NULL;
2422 }
2423 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
2424 {
2425 /* Changing the order of operations changes the semantics. */
2426 if (dump_enabled_p ())
2427 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2428 "reduction: unsafe fixed-point math optimization: ");
2429 return NULL;
2430 }
2431
2432 /* If we detected "res -= x[i]" earlier, rewrite it into
2433 "res += -x[i]" now. If this turns out to be useless reassoc
2434 will clean it up again. */
2435 if (orig_code == MINUS_EXPR)
2436 {
2437 tree rhs = gimple_assign_rhs2 (def_stmt);
2438 tree negrhs = make_ssa_name (TREE_TYPE (rhs), NULL);
2439 gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
2440 rhs, NULL);
2441 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
2442 set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
2443 loop_info, NULL));
2444 gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
2445 gimple_assign_set_rhs2 (def_stmt, negrhs);
2446 gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
2447 update_stmt (def_stmt);
2448 }
2449
2450 /* Reduction is safe. We're dealing with one of the following:
2451 1) integer arithmetic and no trapv
2452 2) floating point arithmetic, and special flags permit this optimization
2453 3) nested cycle (i.e., outer loop vectorization). */
2454 if (TREE_CODE (op1) == SSA_NAME)
2455 def1 = SSA_NAME_DEF_STMT (op1);
2456
2457 if (TREE_CODE (op2) == SSA_NAME)
2458 def2 = SSA_NAME_DEF_STMT (op2);
2459
2460 if (code != COND_EXPR
2461 && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
2462 {
2463 if (dump_enabled_p ())
2464 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
2465 return NULL;
2466 }
2467
2468 /* Check that one def is the reduction def, defined by PHI,
2469 the other def is either defined in the loop ("vect_internal_def"),
2470 or it's an induction (defined by a loop-header phi-node). */
2471
2472 if (def2 && def2 == phi
2473 && (code == COND_EXPR
2474 || !def1 || gimple_nop_p (def1)
2475 || !flow_bb_inside_loop_p (loop, gimple_bb (def1))
2476 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
2477 && (is_gimple_assign (def1)
2478 || is_gimple_call (def1)
2479 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2480 == vect_induction_def
2481 || (gimple_code (def1) == GIMPLE_PHI
2482 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2483 == vect_internal_def
2484 && !is_loop_header_bb_p (gimple_bb (def1)))))))
2485 {
2486 if (dump_enabled_p ())
2487 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
2488 return def_stmt;
2489 }
2490
2491 if (def1 && def1 == phi
2492 && (code == COND_EXPR
2493 || !def2 || gimple_nop_p (def2)
2494 || !flow_bb_inside_loop_p (loop, gimple_bb (def2))
2495 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
2496 && (is_gimple_assign (def2)
2497 || is_gimple_call (def2)
2498 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2499 == vect_induction_def
2500 || (gimple_code (def2) == GIMPLE_PHI
2501 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2502 == vect_internal_def
2503 && !is_loop_header_bb_p (gimple_bb (def2)))))))
2504 {
2505 if (check_reduction)
2506 {
2507 /* Swap operands (just for simplicity - so that the rest of the code
2508 can assume that the reduction variable is always the last (second)
2509 argument). */
2510 if (dump_enabled_p ())
2511 report_vect_op (MSG_NOTE, def_stmt,
2512 "detected reduction: need to swap operands: ");
2513
2514 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
2515 gimple_assign_rhs2_ptr (def_stmt));
2516
2517 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
2518 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2519 }
2520 else
2521 {
2522 if (dump_enabled_p ())
2523 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
2524 }
2525
2526 return def_stmt;
2527 }
2528
2529 /* Try to find SLP reduction chain. */
2530 if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt))
2531 {
2532 if (dump_enabled_p ())
2533 report_vect_op (MSG_NOTE, def_stmt,
2534 "reduction: detected reduction chain: ");
2535
2536 return def_stmt;
2537 }
2538
2539 if (dump_enabled_p ())
2540 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2541 "reduction: unknown pattern: ");
2542
2543 return NULL;
2544 }
2545
2546 /* Wrapper around vect_is_simple_reduction_1, that won't modify code
2547 in-place. Arguments as there. */
2548
2549 static gimple
2550 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
2551 bool check_reduction, bool *double_reduc)
2552 {
2553 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2554 double_reduc, false);
2555 }
2556
2557 /* Wrapper around vect_is_simple_reduction_1, which will modify code
2558 in-place if it enables detection of more reductions. Arguments
2559 as there. */
2560
2561 gimple
2562 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
2563 bool check_reduction, bool *double_reduc)
2564 {
2565 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2566 double_reduc, true);
2567 }
2568
2569 /* Calculate the cost of one scalar iteration of the loop. */
2570 int
2571 vect_get_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
2572 {
2573 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2574 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2575 int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
2576 int innerloop_iters, i, stmt_cost;
2577
2578 /* Count statements in scalar loop. Using this as scalar cost for a single
2579 iteration for now.
2580
2581 TODO: Add outer loop support.
2582
2583 TODO: Consider assigning different costs to different scalar
2584 statements. */
2585
2586 /* FORNOW. */
2587 innerloop_iters = 1;
2588 if (loop->inner)
2589 innerloop_iters = 50; /* FIXME */
2590
2591 for (i = 0; i < nbbs; i++)
2592 {
2593 gimple_stmt_iterator si;
2594 basic_block bb = bbs[i];
2595
2596 if (bb->loop_father == loop->inner)
2597 factor = innerloop_iters;
2598 else
2599 factor = 1;
2600
2601 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2602 {
2603 gimple stmt = gsi_stmt (si);
2604 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2605
2606 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
2607 continue;
2608
2609 /* Skip stmts that are not vectorized inside the loop. */
2610 if (stmt_info
2611 && !STMT_VINFO_RELEVANT_P (stmt_info)
2612 && (!STMT_VINFO_LIVE_P (stmt_info)
2613 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
2614 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
2615 continue;
2616
2617 if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
2618 {
2619 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
2620 stmt_cost = vect_get_stmt_cost (scalar_load);
2621 else
2622 stmt_cost = vect_get_stmt_cost (scalar_store);
2623 }
2624 else
2625 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2626
2627 scalar_single_iter_cost += stmt_cost * factor;
2628 }
2629 }
2630 return scalar_single_iter_cost;
2631 }
2632
2633 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
2634 int
2635 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
2636 int *peel_iters_epilogue,
2637 int scalar_single_iter_cost,
2638 stmt_vector_for_cost *prologue_cost_vec,
2639 stmt_vector_for_cost *epilogue_cost_vec)
2640 {
2641 int retval = 0;
2642 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2643
2644 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2645 {
2646 *peel_iters_epilogue = vf/2;
2647 if (dump_enabled_p ())
2648 dump_printf_loc (MSG_NOTE, vect_location,
2649 "cost model: epilogue peel iters set to vf/2 "
2650 "because loop iterations are unknown .\n");
2651
2652 /* If peeled iterations are known but number of scalar loop
2653 iterations are unknown, count a taken branch per peeled loop. */
2654 retval = record_stmt_cost (prologue_cost_vec, 2, cond_branch_taken,
2655 NULL, 0, vect_prologue);
2656 }
2657 else
2658 {
2659 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2660 peel_iters_prologue = niters < peel_iters_prologue ?
2661 niters : peel_iters_prologue;
2662 *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2663 /* If we need to peel for gaps, but no peeling is required, we have to
2664 peel VF iterations. */
2665 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
2666 *peel_iters_epilogue = vf;
2667 }
2668
2669 if (peel_iters_prologue)
2670 retval += record_stmt_cost (prologue_cost_vec,
2671 peel_iters_prologue * scalar_single_iter_cost,
2672 scalar_stmt, NULL, 0, vect_prologue);
2673 if (*peel_iters_epilogue)
2674 retval += record_stmt_cost (epilogue_cost_vec,
2675 *peel_iters_epilogue * scalar_single_iter_cost,
2676 scalar_stmt, NULL, 0, vect_epilogue);
2677 return retval;
2678 }
2679
2680 /* Function vect_estimate_min_profitable_iters
2681
2682 Return the number of iterations required for the vector version of the
2683 loop to be profitable relative to the cost of the scalar version of the
2684 loop. */
2685
2686 static void
2687 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
2688 int *ret_min_profitable_niters,
2689 int *ret_min_profitable_estimate)
2690 {
2691 int min_profitable_iters;
2692 int min_profitable_estimate;
2693 int peel_iters_prologue;
2694 int peel_iters_epilogue;
2695 unsigned vec_inside_cost = 0;
2696 int vec_outside_cost = 0;
2697 unsigned vec_prologue_cost = 0;
2698 unsigned vec_epilogue_cost = 0;
2699 int scalar_single_iter_cost = 0;
2700 int scalar_outside_cost = 0;
2701 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2702 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2703 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2704
2705 /* Cost model disabled. */
2706 if (unlimited_cost_model ())
2707 {
2708 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
2709 *ret_min_profitable_niters = 0;
2710 *ret_min_profitable_estimate = 0;
2711 return;
2712 }
2713
2714 /* Requires loop versioning tests to handle misalignment. */
2715 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2716 {
2717 /* FIXME: Make cost depend on complexity of individual check. */
2718 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
2719 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
2720 vect_prologue);
2721 dump_printf (MSG_NOTE,
2722 "cost model: Adding cost of checks for loop "
2723 "versioning to treat misalignment.\n");
2724 }
2725
2726 /* Requires loop versioning with alias checks. */
2727 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2728 {
2729 /* FIXME: Make cost depend on complexity of individual check. */
2730 unsigned len = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).length ();
2731 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
2732 vect_prologue);
2733 dump_printf (MSG_NOTE,
2734 "cost model: Adding cost of checks for loop "
2735 "versioning aliasing.\n");
2736 }
2737
2738 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2739 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2740 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
2741 vect_prologue);
2742
2743 /* Count statements in scalar loop. Using this as scalar cost for a single
2744 iteration for now.
2745
2746 TODO: Add outer loop support.
2747
2748 TODO: Consider assigning different costs to different scalar
2749 statements. */
2750
2751 scalar_single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
2752
2753 /* Add additional cost for the peeled instructions in prologue and epilogue
2754 loop.
2755
2756 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2757 at compile-time - we assume it's vf/2 (the worst would be vf-1).
2758
2759 TODO: Build an expression that represents peel_iters for prologue and
2760 epilogue to be used in a run-time test. */
2761
2762 if (npeel < 0)
2763 {
2764 peel_iters_prologue = vf/2;
2765 dump_printf (MSG_NOTE, "cost model: "
2766 "prologue peel iters set to vf/2.\n");
2767
2768 /* If peeling for alignment is unknown, loop bound of main loop becomes
2769 unknown. */
2770 peel_iters_epilogue = vf/2;
2771 dump_printf (MSG_NOTE, "cost model: "
2772 "epilogue peel iters set to vf/2 because "
2773 "peeling for alignment is unknown.\n");
2774
2775 /* If peeled iterations are unknown, count a taken branch and a not taken
2776 branch per peeled loop. Even if scalar loop iterations are known,
2777 vector iterations are not known since peeled prologue iterations are
2778 not known. Hence guards remain the same. */
2779 (void) add_stmt_cost (target_cost_data, 2, cond_branch_taken,
2780 NULL, 0, vect_prologue);
2781 (void) add_stmt_cost (target_cost_data, 2, cond_branch_not_taken,
2782 NULL, 0, vect_prologue);
2783 /* FORNOW: Don't attempt to pass individual scalar instructions to
2784 the model; just assume linear cost for scalar iterations. */
2785 (void) add_stmt_cost (target_cost_data,
2786 peel_iters_prologue * scalar_single_iter_cost,
2787 scalar_stmt, NULL, 0, vect_prologue);
2788 (void) add_stmt_cost (target_cost_data,
2789 peel_iters_epilogue * scalar_single_iter_cost,
2790 scalar_stmt, NULL, 0, vect_epilogue);
2791 }
2792 else
2793 {
2794 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
2795 stmt_info_for_cost *si;
2796 int j;
2797 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2798
2799 prologue_cost_vec.create (2);
2800 epilogue_cost_vec.create (2);
2801 peel_iters_prologue = npeel;
2802
2803 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
2804 &peel_iters_epilogue,
2805 scalar_single_iter_cost,
2806 &prologue_cost_vec,
2807 &epilogue_cost_vec);
2808
2809 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
2810 {
2811 struct _stmt_vec_info *stmt_info
2812 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
2813 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
2814 si->misalign, vect_prologue);
2815 }
2816
2817 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
2818 {
2819 struct _stmt_vec_info *stmt_info
2820 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
2821 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
2822 si->misalign, vect_epilogue);
2823 }
2824
2825 prologue_cost_vec.release ();
2826 epilogue_cost_vec.release ();
2827 }
2828
2829 /* FORNOW: The scalar outside cost is incremented in one of the
2830 following ways:
2831
2832 1. The vectorizer checks for alignment and aliasing and generates
2833 a condition that allows dynamic vectorization. A cost model
2834 check is ANDED with the versioning condition. Hence scalar code
2835 path now has the added cost of the versioning check.
2836
2837 if (cost > th & versioning_check)
2838 jmp to vector code
2839
2840 Hence run-time scalar is incremented by not-taken branch cost.
2841
2842 2. The vectorizer then checks if a prologue is required. If the
2843 cost model check was not done before during versioning, it has to
2844 be done before the prologue check.
2845
2846 if (cost <= th)
2847 prologue = scalar_iters
2848 if (prologue == 0)
2849 jmp to vector code
2850 else
2851 execute prologue
2852 if (prologue == num_iters)
2853 go to exit
2854
2855 Hence the run-time scalar cost is incremented by a taken branch,
2856 plus a not-taken branch, plus a taken branch cost.
2857
2858 3. The vectorizer then checks if an epilogue is required. If the
2859 cost model check was not done before during prologue check, it
2860 has to be done with the epilogue check.
2861
2862 if (prologue == 0)
2863 jmp to vector code
2864 else
2865 execute prologue
2866 if (prologue == num_iters)
2867 go to exit
2868 vector code:
2869 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2870 jmp to epilogue
2871
2872 Hence the run-time scalar cost should be incremented by 2 taken
2873 branches.
2874
2875 TODO: The back end may reorder the BBS's differently and reverse
2876 conditions/branch directions. Change the estimates below to
2877 something more reasonable. */
2878
2879 /* If the number of iterations is known and we do not do versioning, we can
2880 decide whether to vectorize at compile time. Hence the scalar version
2881 do not carry cost model guard costs. */
2882 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2883 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2884 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2885 {
2886 /* Cost model check occurs at versioning. */
2887 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2888 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2889 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
2890 else
2891 {
2892 /* Cost model check occurs at prologue generation. */
2893 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2894 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
2895 + vect_get_stmt_cost (cond_branch_not_taken);
2896 /* Cost model check occurs at epilogue generation. */
2897 else
2898 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
2899 }
2900 }
2901
2902 /* Complete the target-specific cost calculations. */
2903 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
2904 &vec_inside_cost, &vec_epilogue_cost);
2905
2906 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
2907
2908 /* Calculate number of iterations required to make the vector version
2909 profitable, relative to the loop bodies only. The following condition
2910 must hold true:
2911 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2912 where
2913 SIC = scalar iteration cost, VIC = vector iteration cost,
2914 VOC = vector outside cost, VF = vectorization factor,
2915 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
2916 SOC = scalar outside cost for run time cost model check. */
2917
2918 if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost)
2919 {
2920 if (vec_outside_cost <= 0)
2921 min_profitable_iters = 1;
2922 else
2923 {
2924 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2925 - vec_inside_cost * peel_iters_prologue
2926 - vec_inside_cost * peel_iters_epilogue)
2927 / ((scalar_single_iter_cost * vf)
2928 - vec_inside_cost);
2929
2930 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2931 <= (((int) vec_inside_cost * min_profitable_iters)
2932 + (((int) vec_outside_cost - scalar_outside_cost) * vf)))
2933 min_profitable_iters++;
2934 }
2935 }
2936 /* vector version will never be profitable. */
2937 else
2938 {
2939 if (dump_enabled_p ())
2940 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2941 "cost model: the vector iteration cost = %d "
2942 "divided by the scalar iteration cost = %d "
2943 "is greater or equal to the vectorization factor = %d"
2944 ".\n",
2945 vec_inside_cost, scalar_single_iter_cost, vf);
2946 *ret_min_profitable_niters = -1;
2947 *ret_min_profitable_estimate = -1;
2948 return;
2949 }
2950
2951 if (dump_enabled_p ())
2952 {
2953 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2954 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
2955 vec_inside_cost);
2956 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
2957 vec_prologue_cost);
2958 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
2959 vec_epilogue_cost);
2960 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
2961 scalar_single_iter_cost);
2962 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
2963 scalar_outside_cost);
2964 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
2965 vec_outside_cost);
2966 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
2967 peel_iters_prologue);
2968 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
2969 peel_iters_epilogue);
2970 dump_printf (MSG_NOTE,
2971 " Calculated minimum iters for profitability: %d\n",
2972 min_profitable_iters);
2973 dump_printf (MSG_NOTE, "\n");
2974 }
2975
2976 min_profitable_iters =
2977 min_profitable_iters < vf ? vf : min_profitable_iters;
2978
2979 /* Because the condition we create is:
2980 if (niters <= min_profitable_iters)
2981 then skip the vectorized loop. */
2982 min_profitable_iters--;
2983
2984 if (dump_enabled_p ())
2985 dump_printf_loc (MSG_NOTE, vect_location,
2986 " Runtime profitability threshold = %d\n",
2987 min_profitable_iters);
2988
2989 *ret_min_profitable_niters = min_profitable_iters;
2990
2991 /* Calculate number of iterations required to make the vector version
2992 profitable, relative to the loop bodies only.
2993
2994 Non-vectorized variant is SIC * niters and it must win over vector
2995 variant on the expected loop trip count. The following condition must hold true:
2996 SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
2997
2998 if (vec_outside_cost <= 0)
2999 min_profitable_estimate = 1;
3000 else
3001 {
3002 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf
3003 - vec_inside_cost * peel_iters_prologue
3004 - vec_inside_cost * peel_iters_epilogue)
3005 / ((scalar_single_iter_cost * vf)
3006 - vec_inside_cost);
3007 }
3008 min_profitable_estimate --;
3009 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3010 if (dump_enabled_p ())
3011 dump_printf_loc (MSG_NOTE, vect_location,
3012 " Static estimate profitability threshold = %d\n",
3013 min_profitable_iters);
3014
3015 *ret_min_profitable_estimate = min_profitable_estimate;
3016 }
3017
3018
3019 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
3020 functions. Design better to avoid maintenance issues. */
3021
3022 /* Function vect_model_reduction_cost.
3023
3024 Models cost for a reduction operation, including the vector ops
3025 generated within the strip-mine loop, the initial definition before
3026 the loop, and the epilogue code that must be generated. */
3027
3028 static bool
3029 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
3030 int ncopies)
3031 {
3032 int prologue_cost = 0, epilogue_cost = 0;
3033 enum tree_code code;
3034 optab optab;
3035 tree vectype;
3036 gimple stmt, orig_stmt;
3037 tree reduction_op;
3038 enum machine_mode mode;
3039 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3040 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3041 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3042
3043 /* Cost of reduction op inside loop. */
3044 unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
3045 stmt_info, 0, vect_body);
3046 stmt = STMT_VINFO_STMT (stmt_info);
3047
3048 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3049 {
3050 case GIMPLE_SINGLE_RHS:
3051 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
3052 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
3053 break;
3054 case GIMPLE_UNARY_RHS:
3055 reduction_op = gimple_assign_rhs1 (stmt);
3056 break;
3057 case GIMPLE_BINARY_RHS:
3058 reduction_op = gimple_assign_rhs2 (stmt);
3059 break;
3060 case GIMPLE_TERNARY_RHS:
3061 reduction_op = gimple_assign_rhs3 (stmt);
3062 break;
3063 default:
3064 gcc_unreachable ();
3065 }
3066
3067 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3068 if (!vectype)
3069 {
3070 if (dump_enabled_p ())
3071 {
3072 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3073 "unsupported data-type ");
3074 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
3075 TREE_TYPE (reduction_op));
3076 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3077 }
3078 return false;
3079 }
3080
3081 mode = TYPE_MODE (vectype);
3082 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3083
3084 if (!orig_stmt)
3085 orig_stmt = STMT_VINFO_STMT (stmt_info);
3086
3087 code = gimple_assign_rhs_code (orig_stmt);
3088
3089 /* Add in cost for initial definition. */
3090 prologue_cost += add_stmt_cost (target_cost_data, 1, scalar_to_vec,
3091 stmt_info, 0, vect_prologue);
3092
3093 /* Determine cost of epilogue code.
3094
3095 We have a reduction operator that will reduce the vector in one statement.
3096 Also requires scalar extract. */
3097
3098 if (!nested_in_vect_loop_p (loop, orig_stmt))
3099 {
3100 if (reduc_code != ERROR_MARK)
3101 {
3102 epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
3103 stmt_info, 0, vect_epilogue);
3104 epilogue_cost += add_stmt_cost (target_cost_data, 1, vec_to_scalar,
3105 stmt_info, 0, vect_epilogue);
3106 }
3107 else
3108 {
3109 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3110 tree bitsize =
3111 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
3112 int element_bitsize = tree_to_uhwi (bitsize);
3113 int nelements = vec_size_in_bits / element_bitsize;
3114
3115 optab = optab_for_tree_code (code, vectype, optab_default);
3116
3117 /* We have a whole vector shift available. */
3118 if (VECTOR_MODE_P (mode)
3119 && optab_handler (optab, mode) != CODE_FOR_nothing
3120 && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3121 {
3122 /* Final reduction via vector shifts and the reduction operator.
3123 Also requires scalar extract. */
3124 epilogue_cost += add_stmt_cost (target_cost_data,
3125 exact_log2 (nelements) * 2,
3126 vector_stmt, stmt_info, 0,
3127 vect_epilogue);
3128 epilogue_cost += add_stmt_cost (target_cost_data, 1,
3129 vec_to_scalar, stmt_info, 0,
3130 vect_epilogue);
3131 }
3132 else
3133 /* Use extracts and reduction op for final reduction. For N
3134 elements, we have N extracts and N-1 reduction ops. */
3135 epilogue_cost += add_stmt_cost (target_cost_data,
3136 nelements + nelements - 1,
3137 vector_stmt, stmt_info, 0,
3138 vect_epilogue);
3139 }
3140 }
3141
3142 if (dump_enabled_p ())
3143 dump_printf (MSG_NOTE,
3144 "vect_model_reduction_cost: inside_cost = %d, "
3145 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3146 prologue_cost, epilogue_cost);
3147
3148 return true;
3149 }
3150
3151
3152 /* Function vect_model_induction_cost.
3153
3154 Models cost for induction operations. */
3155
3156 static void
3157 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
3158 {
3159 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3160 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3161 unsigned inside_cost, prologue_cost;
3162
3163 /* loop cost for vec_loop. */
3164 inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
3165 stmt_info, 0, vect_body);
3166
3167 /* prologue cost for vec_init and vec_step. */
3168 prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
3169 stmt_info, 0, vect_prologue);
3170
3171 if (dump_enabled_p ())
3172 dump_printf_loc (MSG_NOTE, vect_location,
3173 "vect_model_induction_cost: inside_cost = %d, "
3174 "prologue_cost = %d .\n", inside_cost, prologue_cost);
3175 }
3176
3177
3178 /* Function get_initial_def_for_induction
3179
3180 Input:
3181 STMT - a stmt that performs an induction operation in the loop.
3182 IV_PHI - the initial value of the induction variable
3183
3184 Output:
3185 Return a vector variable, initialized with the first VF values of
3186 the induction variable. E.g., for an iv with IV_PHI='X' and
3187 evolution S, for a vector of 4 units, we want to return:
3188 [X, X + S, X + 2*S, X + 3*S]. */
3189
3190 static tree
3191 get_initial_def_for_induction (gimple iv_phi)
3192 {
3193 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
3194 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3195 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3196 tree vectype;
3197 int nunits;
3198 edge pe = loop_preheader_edge (loop);
3199 struct loop *iv_loop;
3200 basic_block new_bb;
3201 tree new_vec, vec_init, vec_step, t;
3202 tree access_fn;
3203 tree new_var;
3204 tree new_name;
3205 gimple init_stmt, induction_phi, new_stmt;
3206 tree induc_def, vec_def, vec_dest;
3207 tree init_expr, step_expr;
3208 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3209 int i;
3210 bool ok;
3211 int ncopies;
3212 tree expr;
3213 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
3214 bool nested_in_vect_loop = false;
3215 gimple_seq stmts = NULL;
3216 imm_use_iterator imm_iter;
3217 use_operand_p use_p;
3218 gimple exit_phi;
3219 edge latch_e;
3220 tree loop_arg;
3221 gimple_stmt_iterator si;
3222 basic_block bb = gimple_bb (iv_phi);
3223 tree stepvectype;
3224 tree resvectype;
3225
3226 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
3227 if (nested_in_vect_loop_p (loop, iv_phi))
3228 {
3229 nested_in_vect_loop = true;
3230 iv_loop = loop->inner;
3231 }
3232 else
3233 iv_loop = loop;
3234 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
3235
3236 latch_e = loop_latch_edge (iv_loop);
3237 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
3238
3239 access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
3240 gcc_assert (access_fn);
3241 STRIP_NOPS (access_fn);
3242 ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
3243 &init_expr, &step_expr);
3244 gcc_assert (ok);
3245 pe = loop_preheader_edge (iv_loop);
3246
3247 vectype = get_vectype_for_scalar_type (TREE_TYPE (init_expr));
3248 resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
3249 gcc_assert (vectype);
3250 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3251 ncopies = vf / nunits;
3252
3253 gcc_assert (phi_info);
3254 gcc_assert (ncopies >= 1);
3255
3256 /* Find the first insertion point in the BB. */
3257 si = gsi_after_labels (bb);
3258
3259 /* Create the vector that holds the initial_value of the induction. */
3260 if (nested_in_vect_loop)
3261 {
3262 /* iv_loop is nested in the loop to be vectorized. init_expr had already
3263 been created during vectorization of previous stmts. We obtain it
3264 from the STMT_VINFO_VEC_STMT of the defining stmt. */
3265 tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
3266 loop_preheader_edge (iv_loop));
3267 vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
3268 /* If the initial value is not of proper type, convert it. */
3269 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
3270 {
3271 new_stmt = gimple_build_assign_with_ops
3272 (VIEW_CONVERT_EXPR,
3273 vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"),
3274 build1 (VIEW_CONVERT_EXPR, vectype, vec_init), NULL_TREE);
3275 vec_init = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3276 gimple_assign_set_lhs (new_stmt, vec_init);
3277 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
3278 new_stmt);
3279 gcc_assert (!new_bb);
3280 set_vinfo_for_stmt (new_stmt,
3281 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3282 }
3283 }
3284 else
3285 {
3286 vec<constructor_elt, va_gc> *v;
3287
3288 /* iv_loop is the loop to be vectorized. Create:
3289 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
3290 new_var = vect_get_new_vect_var (TREE_TYPE (vectype),
3291 vect_scalar_var, "var_");
3292 new_name = force_gimple_operand (fold_convert (TREE_TYPE (vectype),
3293 init_expr),
3294 &stmts, false, new_var);
3295 if (stmts)
3296 {
3297 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3298 gcc_assert (!new_bb);
3299 }
3300
3301 vec_alloc (v, nunits);
3302 bool constant_p = is_gimple_min_invariant (new_name);
3303 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3304 for (i = 1; i < nunits; i++)
3305 {
3306 /* Create: new_name_i = new_name + step_expr */
3307 new_name = fold_build2 (PLUS_EXPR, TREE_TYPE (new_name),
3308 new_name, step_expr);
3309 if (!is_gimple_min_invariant (new_name))
3310 {
3311 init_stmt = gimple_build_assign (new_var, new_name);
3312 new_name = make_ssa_name (new_var, init_stmt);
3313 gimple_assign_set_lhs (init_stmt, new_name);
3314 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
3315 gcc_assert (!new_bb);
3316 if (dump_enabled_p ())
3317 {
3318 dump_printf_loc (MSG_NOTE, vect_location,
3319 "created new init_stmt: ");
3320 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, init_stmt, 0);
3321 dump_printf (MSG_NOTE, "\n");
3322 }
3323 constant_p = false;
3324 }
3325 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3326 }
3327 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
3328 if (constant_p)
3329 new_vec = build_vector_from_ctor (vectype, v);
3330 else
3331 new_vec = build_constructor (vectype, v);
3332 vec_init = vect_init_vector (iv_phi, new_vec, vectype, NULL);
3333 }
3334
3335
3336 /* Create the vector that holds the step of the induction. */
3337 if (nested_in_vect_loop)
3338 /* iv_loop is nested in the loop to be vectorized. Generate:
3339 vec_step = [S, S, S, S] */
3340 new_name = step_expr;
3341 else
3342 {
3343 /* iv_loop is the loop to be vectorized. Generate:
3344 vec_step = [VF*S, VF*S, VF*S, VF*S] */
3345 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
3346 {
3347 expr = build_int_cst (integer_type_node, vf);
3348 expr = fold_convert (TREE_TYPE (step_expr), expr);
3349 }
3350 else
3351 expr = build_int_cst (TREE_TYPE (step_expr), vf);
3352 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3353 expr, step_expr);
3354 if (TREE_CODE (step_expr) == SSA_NAME)
3355 new_name = vect_init_vector (iv_phi, new_name,
3356 TREE_TYPE (step_expr), NULL);
3357 }
3358
3359 t = unshare_expr (new_name);
3360 gcc_assert (CONSTANT_CLASS_P (new_name)
3361 || TREE_CODE (new_name) == SSA_NAME);
3362 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
3363 gcc_assert (stepvectype);
3364 new_vec = build_vector_from_val (stepvectype, t);
3365 vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);
3366
3367
3368 /* Create the following def-use cycle:
3369 loop prolog:
3370 vec_init = ...
3371 vec_step = ...
3372 loop:
3373 vec_iv = PHI <vec_init, vec_loop>
3374 ...
3375 STMT
3376 ...
3377 vec_loop = vec_iv + vec_step; */
3378
3379 /* Create the induction-phi that defines the induction-operand. */
3380 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
3381 induction_phi = create_phi_node (vec_dest, iv_loop->header);
3382 set_vinfo_for_stmt (induction_phi,
3383 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
3384 induc_def = PHI_RESULT (induction_phi);
3385
3386 /* Create the iv update inside the loop */
3387 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3388 induc_def, vec_step);
3389 vec_def = make_ssa_name (vec_dest, new_stmt);
3390 gimple_assign_set_lhs (new_stmt, vec_def);
3391 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3392 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
3393 NULL));
3394
3395 /* Set the arguments of the phi node: */
3396 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
3397 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
3398 UNKNOWN_LOCATION);
3399
3400
3401 /* In case that vectorization factor (VF) is bigger than the number
3402 of elements that we can fit in a vectype (nunits), we have to generate
3403 more than one vector stmt - i.e - we need to "unroll" the
3404 vector stmt by a factor VF/nunits. For more details see documentation
3405 in vectorizable_operation. */
3406
3407 if (ncopies > 1)
3408 {
3409 stmt_vec_info prev_stmt_vinfo;
3410 /* FORNOW. This restriction should be relaxed. */
3411 gcc_assert (!nested_in_vect_loop);
3412
3413 /* Create the vector that holds the step of the induction. */
3414 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
3415 {
3416 expr = build_int_cst (integer_type_node, nunits);
3417 expr = fold_convert (TREE_TYPE (step_expr), expr);
3418 }
3419 else
3420 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
3421 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3422 expr, step_expr);
3423 if (TREE_CODE (step_expr) == SSA_NAME)
3424 new_name = vect_init_vector (iv_phi, new_name,
3425 TREE_TYPE (step_expr), NULL);
3426 t = unshare_expr (new_name);
3427 gcc_assert (CONSTANT_CLASS_P (new_name)
3428 || TREE_CODE (new_name) == SSA_NAME);
3429 new_vec = build_vector_from_val (stepvectype, t);
3430 vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);
3431
3432 vec_def = induc_def;
3433 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
3434 for (i = 1; i < ncopies; i++)
3435 {
3436 /* vec_i = vec_prev + vec_step */
3437 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3438 vec_def, vec_step);
3439 vec_def = make_ssa_name (vec_dest, new_stmt);
3440 gimple_assign_set_lhs (new_stmt, vec_def);
3441
3442 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3443 if (!useless_type_conversion_p (resvectype, vectype))
3444 {
3445 new_stmt = gimple_build_assign_with_ops
3446 (VIEW_CONVERT_EXPR,
3447 vect_get_new_vect_var (resvectype, vect_simple_var,
3448 "vec_iv_"),
3449 build1 (VIEW_CONVERT_EXPR, resvectype,
3450 gimple_assign_lhs (new_stmt)), NULL_TREE);
3451 gimple_assign_set_lhs (new_stmt,
3452 make_ssa_name
3453 (gimple_assign_lhs (new_stmt), new_stmt));
3454 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3455 }
3456 set_vinfo_for_stmt (new_stmt,
3457 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3458 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
3459 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
3460 }
3461 }
3462
3463 if (nested_in_vect_loop)
3464 {
3465 /* Find the loop-closed exit-phi of the induction, and record
3466 the final vector of induction results: */
3467 exit_phi = NULL;
3468 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
3469 {
3470 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
3471 {
3472 exit_phi = USE_STMT (use_p);
3473 break;
3474 }
3475 }
3476 if (exit_phi)
3477 {
3478 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3479 /* FORNOW. Currently not supporting the case that an inner-loop induction
3480 is not used in the outer-loop (i.e. only outside the outer-loop). */
3481 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
3482 && !STMT_VINFO_LIVE_P (stmt_vinfo));
3483
3484 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
3485 if (dump_enabled_p ())
3486 {
3487 dump_printf_loc (MSG_NOTE, vect_location,
3488 "vector of inductions after inner-loop:");
3489 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
3490 dump_printf (MSG_NOTE, "\n");
3491 }
3492 }
3493 }
3494
3495
3496 if (dump_enabled_p ())
3497 {
3498 dump_printf_loc (MSG_NOTE, vect_location,
3499 "transform induction: created def-use cycle: ");
3500 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
3501 dump_printf (MSG_NOTE, "\n");
3502 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
3503 SSA_NAME_DEF_STMT (vec_def), 0);
3504 dump_printf (MSG_NOTE, "\n");
3505 }
3506
3507 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
3508 if (!useless_type_conversion_p (resvectype, vectype))
3509 {
3510 new_stmt = gimple_build_assign_with_ops
3511 (VIEW_CONVERT_EXPR,
3512 vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
3513 build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
3514 induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3515 gimple_assign_set_lhs (new_stmt, induc_def);
3516 si = gsi_after_labels (bb);
3517 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3518 set_vinfo_for_stmt (new_stmt,
3519 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3520 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
3521 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
3522 }
3523
3524 return induc_def;
3525 }
3526
3527
3528 /* Function get_initial_def_for_reduction
3529
3530 Input:
3531 STMT - a stmt that performs a reduction operation in the loop.
3532 INIT_VAL - the initial value of the reduction variable
3533
3534 Output:
3535 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3536 of the reduction (used for adjusting the epilog - see below).
3537 Return a vector variable, initialized according to the operation that STMT
3538 performs. This vector will be used as the initial value of the
3539 vector of partial results.
3540
3541 Option1 (adjust in epilog): Initialize the vector as follows:
3542 add/bit or/xor: [0,0,...,0,0]
3543 mult/bit and: [1,1,...,1,1]
3544 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
3545 and when necessary (e.g. add/mult case) let the caller know
3546 that it needs to adjust the result by init_val.
3547
3548 Option2: Initialize the vector as follows:
3549 add/bit or/xor: [init_val,0,0,...,0]
3550 mult/bit and: [init_val,1,1,...,1]
3551 min/max/cond_expr: [init_val,init_val,...,init_val]
3552 and no adjustments are needed.
3553
3554 For example, for the following code:
3555
3556 s = init_val;
3557 for (i=0;i<n;i++)
3558 s = s + a[i];
3559
3560 STMT is 's = s + a[i]', and the reduction variable is 's'.
3561 For a vector of 4 units, we want to return either [0,0,0,init_val],
3562 or [0,0,0,0] and let the caller know that it needs to adjust
3563 the result at the end by 'init_val'.
3564
3565 FORNOW, we are using the 'adjust in epilog' scheme, because this way the
3566 initialization vector is simpler (same element in all entries), if
3567 ADJUSTMENT_DEF is not NULL, and Option2 otherwise.
3568
3569 A cost model should help decide between these two schemes. */
3570
3571 tree
3572 get_initial_def_for_reduction (gimple stmt, tree init_val,
3573 tree *adjustment_def)
3574 {
3575 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
3576 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3577 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3578 tree scalar_type = TREE_TYPE (init_val);
3579 tree vectype = get_vectype_for_scalar_type (scalar_type);
3580 int nunits;
3581 enum tree_code code = gimple_assign_rhs_code (stmt);
3582 tree def_for_init;
3583 tree init_def;
3584 tree *elts;
3585 int i;
3586 bool nested_in_vect_loop = false;
3587 tree init_value;
3588 REAL_VALUE_TYPE real_init_val = dconst0;
3589 int int_init_val = 0;
3590 gimple def_stmt = NULL;
3591
3592 gcc_assert (vectype);
3593 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3594
3595 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
3596 || SCALAR_FLOAT_TYPE_P (scalar_type));
3597
3598 if (nested_in_vect_loop_p (loop, stmt))
3599 nested_in_vect_loop = true;
3600 else
3601 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
3602
3603 /* In case of double reduction we only create a vector variable to be put
3604 in the reduction phi node. The actual statement creation is done in
3605 vect_create_epilog_for_reduction. */
3606 if (adjustment_def && nested_in_vect_loop
3607 && TREE_CODE (init_val) == SSA_NAME
3608 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
3609 && gimple_code (def_stmt) == GIMPLE_PHI
3610 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3611 && vinfo_for_stmt (def_stmt)
3612 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
3613 == vect_double_reduction_def)
3614 {
3615 *adjustment_def = NULL;
3616 return vect_create_destination_var (init_val, vectype);
3617 }
3618
3619 if (TREE_CONSTANT (init_val))
3620 {
3621 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3622 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
3623 else
3624 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
3625 }
3626 else
3627 init_value = init_val;
3628
3629 switch (code)
3630 {
3631 case WIDEN_SUM_EXPR:
3632 case DOT_PROD_EXPR:
3633 case PLUS_EXPR:
3634 case MINUS_EXPR:
3635 case BIT_IOR_EXPR:
3636 case BIT_XOR_EXPR:
3637 case MULT_EXPR:
3638 case BIT_AND_EXPR:
3639 /* ADJUSMENT_DEF is NULL when called from
3640 vect_create_epilog_for_reduction to vectorize double reduction. */
3641 if (adjustment_def)
3642 {
3643 if (nested_in_vect_loop)
3644 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
3645 NULL);
3646 else
3647 *adjustment_def = init_val;
3648 }
3649
3650 if (code == MULT_EXPR)
3651 {
3652 real_init_val = dconst1;
3653 int_init_val = 1;
3654 }
3655
3656 if (code == BIT_AND_EXPR)
3657 int_init_val = -1;
3658
3659 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3660 def_for_init = build_real (scalar_type, real_init_val);
3661 else
3662 def_for_init = build_int_cst (scalar_type, int_init_val);
3663
3664 /* Create a vector of '0' or '1' except the first element. */
3665 elts = XALLOCAVEC (tree, nunits);
3666 for (i = nunits - 2; i >= 0; --i)
3667 elts[i + 1] = def_for_init;
3668
3669 /* Option1: the first element is '0' or '1' as well. */
3670 if (adjustment_def)
3671 {
3672 elts[0] = def_for_init;
3673 init_def = build_vector (vectype, elts);
3674 break;
3675 }
3676
3677 /* Option2: the first element is INIT_VAL. */
3678 elts[0] = init_val;
3679 if (TREE_CONSTANT (init_val))
3680 init_def = build_vector (vectype, elts);
3681 else
3682 {
3683 vec<constructor_elt, va_gc> *v;
3684 vec_alloc (v, nunits);
3685 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
3686 for (i = 1; i < nunits; ++i)
3687 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
3688 init_def = build_constructor (vectype, v);
3689 }
3690
3691 break;
3692
3693 case MIN_EXPR:
3694 case MAX_EXPR:
3695 case COND_EXPR:
3696 if (adjustment_def)
3697 {
3698 *adjustment_def = NULL_TREE;
3699 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
3700 break;
3701 }
3702
3703 init_def = build_vector_from_val (vectype, init_value);
3704 break;
3705
3706 default:
3707 gcc_unreachable ();
3708 }
3709
3710 return init_def;
3711 }
3712
3713
3714 /* Function vect_create_epilog_for_reduction
3715
3716 Create code at the loop-epilog to finalize the result of a reduction
3717 computation.
3718
3719 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
3720 reduction statements.
3721 STMT is the scalar reduction stmt that is being vectorized.
3722 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
3723 number of elements that we can fit in a vectype (nunits). In this case
3724 we have to generate more than one vector stmt - i.e - we need to "unroll"
3725 the vector stmt by a factor VF/nunits. For more details see documentation
3726 in vectorizable_operation.
3727 REDUC_CODE is the tree-code for the epilog reduction.
3728 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
3729 computation.
3730 REDUC_INDEX is the index of the operand in the right hand side of the
3731 statement that is defined by REDUCTION_PHI.
3732 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
3733 SLP_NODE is an SLP node containing a group of reduction statements. The
3734 first one in this group is STMT.
3735
3736 This function:
3737 1. Creates the reduction def-use cycles: sets the arguments for
3738 REDUCTION_PHIS:
3739 The loop-entry argument is the vectorized initial-value of the reduction.
3740 The loop-latch argument is taken from VECT_DEFS - the vector of partial
3741 sums.
3742 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
3743 by applying the operation specified by REDUC_CODE if available, or by
3744 other means (whole-vector shifts or a scalar loop).
3745 The function also creates a new phi node at the loop exit to preserve
3746 loop-closed form, as illustrated below.
3747
3748 The flow at the entry to this function:
3749
3750 loop:
3751 vec_def = phi <null, null> # REDUCTION_PHI
3752 VECT_DEF = vector_stmt # vectorized form of STMT
3753 s_loop = scalar_stmt # (scalar) STMT
3754 loop_exit:
3755 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3756 use <s_out0>
3757 use <s_out0>
3758
3759 The above is transformed by this function into:
3760
3761 loop:
3762 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3763 VECT_DEF = vector_stmt # vectorized form of STMT
3764 s_loop = scalar_stmt # (scalar) STMT
3765 loop_exit:
3766 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3767 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3768 v_out2 = reduce <v_out1>
3769 s_out3 = extract_field <v_out2, 0>
3770 s_out4 = adjust_result <s_out3>
3771 use <s_out4>
3772 use <s_out4>
3773 */
3774
3775 static void
3776 vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt,
3777 int ncopies, enum tree_code reduc_code,
3778 vec<gimple> reduction_phis,
3779 int reduc_index, bool double_reduc,
3780 slp_tree slp_node)
3781 {
3782 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3783 stmt_vec_info prev_phi_info;
3784 tree vectype;
3785 enum machine_mode mode;
3786 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3787 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
3788 basic_block exit_bb;
3789 tree scalar_dest;
3790 tree scalar_type;
3791 gimple new_phi = NULL, phi;
3792 gimple_stmt_iterator exit_gsi;
3793 tree vec_dest;
3794 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3795 gimple epilog_stmt = NULL;
3796 enum tree_code code = gimple_assign_rhs_code (stmt);
3797 gimple exit_phi;
3798 tree bitsize, bitpos;
3799 tree adjustment_def = NULL;
3800 tree vec_initial_def = NULL;
3801 tree reduction_op, expr, def;
3802 tree orig_name, scalar_result;
3803 imm_use_iterator imm_iter, phi_imm_iter;
3804 use_operand_p use_p, phi_use_p;
3805 bool extract_scalar_result = false;
3806 gimple use_stmt, orig_stmt, reduction_phi = NULL;
3807 bool nested_in_vect_loop = false;
3808 auto_vec<gimple> new_phis;
3809 auto_vec<gimple> inner_phis;
3810 enum vect_def_type dt = vect_unknown_def_type;
3811 int j, i;
3812 auto_vec<tree> scalar_results;
3813 unsigned int group_size = 1, k, ratio;
3814 auto_vec<tree> vec_initial_defs;
3815 auto_vec<gimple> phis;
3816 bool slp_reduc = false;
3817 tree new_phi_result;
3818 gimple inner_phi = NULL;
3819
3820 if (slp_node)
3821 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
3822
3823 if (nested_in_vect_loop_p (loop, stmt))
3824 {
3825 outer_loop = loop;
3826 loop = loop->inner;
3827 nested_in_vect_loop = true;
3828 gcc_assert (!slp_node);
3829 }
3830
3831 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3832 {
3833 case GIMPLE_SINGLE_RHS:
3834 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3835 == ternary_op);
3836 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3837 break;
3838 case GIMPLE_UNARY_RHS:
3839 reduction_op = gimple_assign_rhs1 (stmt);
3840 break;
3841 case GIMPLE_BINARY_RHS:
3842 reduction_op = reduc_index ?
3843 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3844 break;
3845 case GIMPLE_TERNARY_RHS:
3846 reduction_op = gimple_op (stmt, reduc_index + 1);
3847 break;
3848 default:
3849 gcc_unreachable ();
3850 }
3851
3852 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3853 gcc_assert (vectype);
3854 mode = TYPE_MODE (vectype);
3855
3856 /* 1. Create the reduction def-use cycle:
3857 Set the arguments of REDUCTION_PHIS, i.e., transform
3858
3859 loop:
3860 vec_def = phi <null, null> # REDUCTION_PHI
3861 VECT_DEF = vector_stmt # vectorized form of STMT
3862 ...
3863
3864 into:
3865
3866 loop:
3867 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3868 VECT_DEF = vector_stmt # vectorized form of STMT
3869 ...
3870
3871 (in case of SLP, do it for all the phis). */
3872
3873 /* Get the loop-entry arguments. */
3874 if (slp_node)
3875 vect_get_vec_defs (reduction_op, NULL_TREE, stmt, &vec_initial_defs,
3876 NULL, slp_node, reduc_index);
3877 else
3878 {
3879 vec_initial_defs.create (1);
3880 /* For the case of reduction, vect_get_vec_def_for_operand returns
3881 the scalar def before the loop, that defines the initial value
3882 of the reduction variable. */
3883 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3884 &adjustment_def);
3885 vec_initial_defs.quick_push (vec_initial_def);
3886 }
3887
3888 /* Set phi nodes arguments. */
3889 FOR_EACH_VEC_ELT (reduction_phis, i, phi)
3890 {
3891 tree vec_init_def = vec_initial_defs[i];
3892 tree def = vect_defs[i];
3893 for (j = 0; j < ncopies; j++)
3894 {
3895 /* Set the loop-entry arg of the reduction-phi. */
3896 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3897 UNKNOWN_LOCATION);
3898
3899 /* Set the loop-latch arg for the reduction-phi. */
3900 if (j > 0)
3901 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3902
3903 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3904
3905 if (dump_enabled_p ())
3906 {
3907 dump_printf_loc (MSG_NOTE, vect_location,
3908 "transform reduction: created def-use cycle: ");
3909 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
3910 dump_printf (MSG_NOTE, "\n");
3911 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
3912 dump_printf (MSG_NOTE, "\n");
3913 }
3914
3915 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3916 }
3917 }
3918
3919 /* 2. Create epilog code.
3920 The reduction epilog code operates across the elements of the vector
3921 of partial results computed by the vectorized loop.
3922 The reduction epilog code consists of:
3923
3924 step 1: compute the scalar result in a vector (v_out2)
3925 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3926 step 3: adjust the scalar result (s_out3) if needed.
3927
3928 Step 1 can be accomplished using one the following three schemes:
3929 (scheme 1) using reduc_code, if available.
3930 (scheme 2) using whole-vector shifts, if available.
3931 (scheme 3) using a scalar loop. In this case steps 1+2 above are
3932 combined.
3933
3934 The overall epilog code looks like this:
3935
3936 s_out0 = phi <s_loop> # original EXIT_PHI
3937 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3938 v_out2 = reduce <v_out1> # step 1
3939 s_out3 = extract_field <v_out2, 0> # step 2
3940 s_out4 = adjust_result <s_out3> # step 3
3941
3942 (step 3 is optional, and steps 1 and 2 may be combined).
3943 Lastly, the uses of s_out0 are replaced by s_out4. */
3944
3945
3946 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
3947 v_out1 = phi <VECT_DEF>
3948 Store them in NEW_PHIS. */
3949
3950 exit_bb = single_exit (loop)->dest;
3951 prev_phi_info = NULL;
3952 new_phis.create (vect_defs.length ());
3953 FOR_EACH_VEC_ELT (vect_defs, i, def)
3954 {
3955 for (j = 0; j < ncopies; j++)
3956 {
3957 tree new_def = copy_ssa_name (def, NULL);
3958 phi = create_phi_node (new_def, exit_bb);
3959 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3960 if (j == 0)
3961 new_phis.quick_push (phi);
3962 else
3963 {
3964 def = vect_get_vec_def_for_stmt_copy (dt, def);
3965 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3966 }
3967
3968 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3969 prev_phi_info = vinfo_for_stmt (phi);
3970 }
3971 }
3972
3973 /* The epilogue is created for the outer-loop, i.e., for the loop being
3974 vectorized. Create exit phis for the outer loop. */
3975 if (double_reduc)
3976 {
3977 loop = outer_loop;
3978 exit_bb = single_exit (loop)->dest;
3979 inner_phis.create (vect_defs.length ());
3980 FOR_EACH_VEC_ELT (new_phis, i, phi)
3981 {
3982 tree new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
3983 gimple outer_phi = create_phi_node (new_result, exit_bb);
3984 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
3985 PHI_RESULT (phi));
3986 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
3987 loop_vinfo, NULL));
3988 inner_phis.quick_push (phi);
3989 new_phis[i] = outer_phi;
3990 prev_phi_info = vinfo_for_stmt (outer_phi);
3991 while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
3992 {
3993 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3994 new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
3995 outer_phi = create_phi_node (new_result, exit_bb);
3996 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
3997 PHI_RESULT (phi));
3998 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
3999 loop_vinfo, NULL));
4000 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
4001 prev_phi_info = vinfo_for_stmt (outer_phi);
4002 }
4003 }
4004 }
4005
4006 exit_gsi = gsi_after_labels (exit_bb);
4007
4008 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4009 (i.e. when reduc_code is not available) and in the final adjustment
4010 code (if needed). Also get the original scalar reduction variable as
4011 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4012 represents a reduction pattern), the tree-code and scalar-def are
4013 taken from the original stmt that the pattern-stmt (STMT) replaces.
4014 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4015 are taken from STMT. */
4016
4017 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4018 if (!orig_stmt)
4019 {
4020 /* Regular reduction */
4021 orig_stmt = stmt;
4022 }
4023 else
4024 {
4025 /* Reduction pattern */
4026 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
4027 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
4028 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4029 }
4030
4031 code = gimple_assign_rhs_code (orig_stmt);
4032 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4033 partial results are added and not subtracted. */
4034 if (code == MINUS_EXPR)
4035 code = PLUS_EXPR;
4036
4037 scalar_dest = gimple_assign_lhs (orig_stmt);
4038 scalar_type = TREE_TYPE (scalar_dest);
4039 scalar_results.create (group_size);
4040 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4041 bitsize = TYPE_SIZE (scalar_type);
4042
4043 /* In case this is a reduction in an inner-loop while vectorizing an outer
4044 loop - we don't need to extract a single scalar result at the end of the
4045 inner-loop (unless it is double reduction, i.e., the use of reduction is
4046 outside the outer-loop). The final vector of partial results will be used
4047 in the vectorized outer-loop, or reduced to a scalar result at the end of
4048 the outer-loop. */
4049 if (nested_in_vect_loop && !double_reduc)
4050 goto vect_finalize_reduction;
4051
4052 /* SLP reduction without reduction chain, e.g.,
4053 # a1 = phi <a2, a0>
4054 # b1 = phi <b2, b0>
4055 a2 = operation (a1)
4056 b2 = operation (b1) */
4057 slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
4058
4059 /* In case of reduction chain, e.g.,
4060 # a1 = phi <a3, a0>
4061 a2 = operation (a1)
4062 a3 = operation (a2),
4063
4064 we may end up with more than one vector result. Here we reduce them to
4065 one vector. */
4066 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4067 {
4068 tree first_vect = PHI_RESULT (new_phis[0]);
4069 tree tmp;
4070 gimple new_vec_stmt = NULL;
4071
4072 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4073 for (k = 1; k < new_phis.length (); k++)
4074 {
4075 gimple next_phi = new_phis[k];
4076 tree second_vect = PHI_RESULT (next_phi);
4077
4078 tmp = build2 (code, vectype, first_vect, second_vect);
4079 new_vec_stmt = gimple_build_assign (vec_dest, tmp);
4080 first_vect = make_ssa_name (vec_dest, new_vec_stmt);
4081 gimple_assign_set_lhs (new_vec_stmt, first_vect);
4082 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4083 }
4084
4085 new_phi_result = first_vect;
4086 if (new_vec_stmt)
4087 {
4088 new_phis.truncate (0);
4089 new_phis.safe_push (new_vec_stmt);
4090 }
4091 }
4092 else
4093 new_phi_result = PHI_RESULT (new_phis[0]);
4094
4095 /* 2.3 Create the reduction code, using one of the three schemes described
4096 above. In SLP we simply need to extract all the elements from the
4097 vector (without reducing them), so we use scalar shifts. */
4098 if (reduc_code != ERROR_MARK && !slp_reduc)
4099 {
4100 tree tmp;
4101
4102 /*** Case 1: Create:
4103 v_out2 = reduc_expr <v_out1> */
4104
4105 if (dump_enabled_p ())
4106 dump_printf_loc (MSG_NOTE, vect_location,
4107 "Reduce using direct vector reduction.\n");
4108
4109 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4110 tmp = build1 (reduc_code, vectype, new_phi_result);
4111 epilog_stmt = gimple_build_assign (vec_dest, tmp);
4112 new_temp = make_ssa_name (vec_dest, epilog_stmt);
4113 gimple_assign_set_lhs (epilog_stmt, new_temp);
4114 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4115
4116 extract_scalar_result = true;
4117 }
4118 else
4119 {
4120 enum tree_code shift_code = ERROR_MARK;
4121 bool have_whole_vector_shift = true;
4122 int bit_offset;
4123 int element_bitsize = tree_to_uhwi (bitsize);
4124 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
4125 tree vec_temp;
4126
4127 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
4128 shift_code = VEC_RSHIFT_EXPR;
4129 else
4130 have_whole_vector_shift = false;
4131
4132 /* Regardless of whether we have a whole vector shift, if we're
4133 emulating the operation via tree-vect-generic, we don't want
4134 to use it. Only the first round of the reduction is likely
4135 to still be profitable via emulation. */
4136 /* ??? It might be better to emit a reduction tree code here, so that
4137 tree-vect-generic can expand the first round via bit tricks. */
4138 if (!VECTOR_MODE_P (mode))
4139 have_whole_vector_shift = false;
4140 else
4141 {
4142 optab optab = optab_for_tree_code (code, vectype, optab_default);
4143 if (optab_handler (optab, mode) == CODE_FOR_nothing)
4144 have_whole_vector_shift = false;
4145 }
4146
4147 if (have_whole_vector_shift && !slp_reduc)
4148 {
4149 /*** Case 2: Create:
4150 for (offset = VS/2; offset >= element_size; offset/=2)
4151 {
4152 Create: va' = vec_shift <va, offset>
4153 Create: va = vop <va, va'>
4154 } */
4155
4156 if (dump_enabled_p ())
4157 dump_printf_loc (MSG_NOTE, vect_location,
4158 "Reduce using vector shifts\n");
4159
4160 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4161 new_temp = new_phi_result;
4162 for (bit_offset = vec_size_in_bits/2;
4163 bit_offset >= element_bitsize;
4164 bit_offset /= 2)
4165 {
4166 tree bitpos = size_int (bit_offset);
4167
4168 epilog_stmt = gimple_build_assign_with_ops (shift_code,
4169 vec_dest, new_temp, bitpos);
4170 new_name = make_ssa_name (vec_dest, epilog_stmt);
4171 gimple_assign_set_lhs (epilog_stmt, new_name);
4172 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4173
4174 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
4175 new_name, new_temp);
4176 new_temp = make_ssa_name (vec_dest, epilog_stmt);
4177 gimple_assign_set_lhs (epilog_stmt, new_temp);
4178 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4179 }
4180
4181 extract_scalar_result = true;
4182 }
4183 else
4184 {
4185 tree rhs;
4186
4187 /*** Case 3: Create:
4188 s = extract_field <v_out2, 0>
4189 for (offset = element_size;
4190 offset < vector_size;
4191 offset += element_size;)
4192 {
4193 Create: s' = extract_field <v_out2, offset>
4194 Create: s = op <s, s'> // For non SLP cases
4195 } */
4196
4197 if (dump_enabled_p ())
4198 dump_printf_loc (MSG_NOTE, vect_location,
4199 "Reduce using scalar code.\n");
4200
4201 vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
4202 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
4203 {
4204 if (gimple_code (new_phi) == GIMPLE_PHI)
4205 vec_temp = PHI_RESULT (new_phi);
4206 else
4207 vec_temp = gimple_assign_lhs (new_phi);
4208 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
4209 bitsize_zero_node);
4210 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4211 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4212 gimple_assign_set_lhs (epilog_stmt, new_temp);
4213 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4214
4215 /* In SLP we don't need to apply reduction operation, so we just
4216 collect s' values in SCALAR_RESULTS. */
4217 if (slp_reduc)
4218 scalar_results.safe_push (new_temp);
4219
4220 for (bit_offset = element_bitsize;
4221 bit_offset < vec_size_in_bits;
4222 bit_offset += element_bitsize)
4223 {
4224 tree bitpos = bitsize_int (bit_offset);
4225 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
4226 bitsize, bitpos);
4227
4228 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4229 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
4230 gimple_assign_set_lhs (epilog_stmt, new_name);
4231 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4232
4233 if (slp_reduc)
4234 {
4235 /* In SLP we don't need to apply reduction operation, so
4236 we just collect s' values in SCALAR_RESULTS. */
4237 new_temp = new_name;
4238 scalar_results.safe_push (new_name);
4239 }
4240 else
4241 {
4242 epilog_stmt = gimple_build_assign_with_ops (code,
4243 new_scalar_dest, new_name, new_temp);
4244 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4245 gimple_assign_set_lhs (epilog_stmt, new_temp);
4246 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4247 }
4248 }
4249 }
4250
4251 /* The only case where we need to reduce scalar results in SLP, is
4252 unrolling. If the size of SCALAR_RESULTS is greater than
4253 GROUP_SIZE, we reduce them combining elements modulo
4254 GROUP_SIZE. */
4255 if (slp_reduc)
4256 {
4257 tree res, first_res, new_res;
4258 gimple new_stmt;
4259
4260 /* Reduce multiple scalar results in case of SLP unrolling. */
4261 for (j = group_size; scalar_results.iterate (j, &res);
4262 j++)
4263 {
4264 first_res = scalar_results[j % group_size];
4265 new_stmt = gimple_build_assign_with_ops (code,
4266 new_scalar_dest, first_res, res);
4267 new_res = make_ssa_name (new_scalar_dest, new_stmt);
4268 gimple_assign_set_lhs (new_stmt, new_res);
4269 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
4270 scalar_results[j % group_size] = new_res;
4271 }
4272 }
4273 else
4274 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
4275 scalar_results.safe_push (new_temp);
4276
4277 extract_scalar_result = false;
4278 }
4279 }
4280
4281 /* 2.4 Extract the final scalar result. Create:
4282 s_out3 = extract_field <v_out2, bitpos> */
4283
4284 if (extract_scalar_result)
4285 {
4286 tree rhs;
4287
4288 if (dump_enabled_p ())
4289 dump_printf_loc (MSG_NOTE, vect_location,
4290 "extract scalar result\n");
4291
4292 if (BYTES_BIG_ENDIAN)
4293 bitpos = size_binop (MULT_EXPR,
4294 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
4295 TYPE_SIZE (scalar_type));
4296 else
4297 bitpos = bitsize_zero_node;
4298
4299 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
4300 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4301 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4302 gimple_assign_set_lhs (epilog_stmt, new_temp);
4303 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4304 scalar_results.safe_push (new_temp);
4305 }
4306
4307 vect_finalize_reduction:
4308
4309 if (double_reduc)
4310 loop = loop->inner;
4311
4312 /* 2.5 Adjust the final result by the initial value of the reduction
4313 variable. (When such adjustment is not needed, then
4314 'adjustment_def' is zero). For example, if code is PLUS we create:
4315 new_temp = loop_exit_def + adjustment_def */
4316
4317 if (adjustment_def)
4318 {
4319 gcc_assert (!slp_reduc);
4320 if (nested_in_vect_loop)
4321 {
4322 new_phi = new_phis[0];
4323 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
4324 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
4325 new_dest = vect_create_destination_var (scalar_dest, vectype);
4326 }
4327 else
4328 {
4329 new_temp = scalar_results[0];
4330 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
4331 expr = build2 (code, scalar_type, new_temp, adjustment_def);
4332 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
4333 }
4334
4335 epilog_stmt = gimple_build_assign (new_dest, expr);
4336 new_temp = make_ssa_name (new_dest, epilog_stmt);
4337 gimple_assign_set_lhs (epilog_stmt, new_temp);
4338 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4339 if (nested_in_vect_loop)
4340 {
4341 set_vinfo_for_stmt (epilog_stmt,
4342 new_stmt_vec_info (epilog_stmt, loop_vinfo,
4343 NULL));
4344 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
4345 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
4346
4347 if (!double_reduc)
4348 scalar_results.quick_push (new_temp);
4349 else
4350 scalar_results[0] = new_temp;
4351 }
4352 else
4353 scalar_results[0] = new_temp;
4354
4355 new_phis[0] = epilog_stmt;
4356 }
4357
4358 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
4359 phis with new adjusted scalar results, i.e., replace use <s_out0>
4360 with use <s_out4>.
4361
4362 Transform:
4363 loop_exit:
4364 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4365 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4366 v_out2 = reduce <v_out1>
4367 s_out3 = extract_field <v_out2, 0>
4368 s_out4 = adjust_result <s_out3>
4369 use <s_out0>
4370 use <s_out0>
4371
4372 into:
4373
4374 loop_exit:
4375 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4376 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4377 v_out2 = reduce <v_out1>
4378 s_out3 = extract_field <v_out2, 0>
4379 s_out4 = adjust_result <s_out3>
4380 use <s_out4>
4381 use <s_out4> */
4382
4383
4384 /* In SLP reduction chain we reduce vector results into one vector if
4385 necessary, hence we set here GROUP_SIZE to 1. SCALAR_DEST is the LHS of
4386 the last stmt in the reduction chain, since we are looking for the loop
4387 exit phi node. */
4388 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4389 {
4390 scalar_dest = gimple_assign_lhs (
4391 SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
4392 group_size = 1;
4393 }
4394
4395 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
4396 case that GROUP_SIZE is greater than vectorization factor). Therefore, we
4397 need to match SCALAR_RESULTS with corresponding statements. The first
4398 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
4399 the first vector stmt, etc.
4400 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
4401 if (group_size > new_phis.length ())
4402 {
4403 ratio = group_size / new_phis.length ();
4404 gcc_assert (!(group_size % new_phis.length ()));
4405 }
4406 else
4407 ratio = 1;
4408
4409 for (k = 0; k < group_size; k++)
4410 {
4411 if (k % ratio == 0)
4412 {
4413 epilog_stmt = new_phis[k / ratio];
4414 reduction_phi = reduction_phis[k / ratio];
4415 if (double_reduc)
4416 inner_phi = inner_phis[k / ratio];
4417 }
4418
4419 if (slp_reduc)
4420 {
4421 gimple current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];
4422
4423 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
4424 /* SLP statements can't participate in patterns. */
4425 gcc_assert (!orig_stmt);
4426 scalar_dest = gimple_assign_lhs (current_stmt);
4427 }
4428
4429 phis.create (3);
4430 /* Find the loop-closed-use at the loop exit of the original scalar
4431 result. (The reduction result is expected to have two immediate uses -
4432 one at the latch block, and one at the loop exit). */
4433 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4434 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
4435 && !is_gimple_debug (USE_STMT (use_p)))
4436 phis.safe_push (USE_STMT (use_p));
4437
4438 /* While we expect to have found an exit_phi because of loop-closed-ssa
4439 form we can end up without one if the scalar cycle is dead. */
4440
4441 FOR_EACH_VEC_ELT (phis, i, exit_phi)
4442 {
4443 if (outer_loop)
4444 {
4445 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
4446 gimple vect_phi;
4447
4448 /* FORNOW. Currently not supporting the case that an inner-loop
4449 reduction is not used in the outer-loop (but only outside the
4450 outer-loop), unless it is double reduction. */
4451 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
4452 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
4453 || double_reduc);
4454
4455 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
4456 if (!double_reduc
4457 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
4458 != vect_double_reduction_def)
4459 continue;
4460
4461 /* Handle double reduction:
4462
4463 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
4464 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
4465 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
4466 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
4467
4468 At that point the regular reduction (stmt2 and stmt3) is
4469 already vectorized, as well as the exit phi node, stmt4.
4470 Here we vectorize the phi node of double reduction, stmt1, and
4471 update all relevant statements. */
4472
4473 /* Go through all the uses of s2 to find double reduction phi
4474 node, i.e., stmt1 above. */
4475 orig_name = PHI_RESULT (exit_phi);
4476 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4477 {
4478 stmt_vec_info use_stmt_vinfo;
4479 stmt_vec_info new_phi_vinfo;
4480 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
4481 basic_block bb = gimple_bb (use_stmt);
4482 gimple use;
4483
4484 /* Check that USE_STMT is really double reduction phi
4485 node. */
4486 if (gimple_code (use_stmt) != GIMPLE_PHI
4487 || gimple_phi_num_args (use_stmt) != 2
4488 || bb->loop_father != outer_loop)
4489 continue;
4490 use_stmt_vinfo = vinfo_for_stmt (use_stmt);
4491 if (!use_stmt_vinfo
4492 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
4493 != vect_double_reduction_def)
4494 continue;
4495
4496 /* Create vector phi node for double reduction:
4497 vs1 = phi <vs0, vs2>
4498 vs1 was created previously in this function by a call to
4499 vect_get_vec_def_for_operand and is stored in
4500 vec_initial_def;
4501 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
4502 vs0 is created here. */
4503
4504 /* Create vector phi node. */
4505 vect_phi = create_phi_node (vec_initial_def, bb);
4506 new_phi_vinfo = new_stmt_vec_info (vect_phi,
4507 loop_vec_info_for_loop (outer_loop), NULL);
4508 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
4509
4510 /* Create vs0 - initial def of the double reduction phi. */
4511 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
4512 loop_preheader_edge (outer_loop));
4513 init_def = get_initial_def_for_reduction (stmt,
4514 preheader_arg, NULL);
4515 vect_phi_init = vect_init_vector (use_stmt, init_def,
4516 vectype, NULL);
4517
4518 /* Update phi node arguments with vs0 and vs2. */
4519 add_phi_arg (vect_phi, vect_phi_init,
4520 loop_preheader_edge (outer_loop),
4521 UNKNOWN_LOCATION);
4522 add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
4523 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
4524 if (dump_enabled_p ())
4525 {
4526 dump_printf_loc (MSG_NOTE, vect_location,
4527 "created double reduction phi node: ");
4528 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
4529 dump_printf (MSG_NOTE, "\n");
4530 }
4531
4532 vect_phi_res = PHI_RESULT (vect_phi);
4533
4534 /* Replace the use, i.e., set the correct vs1 in the regular
4535 reduction phi node. FORNOW, NCOPIES is always 1, so the
4536 loop is redundant. */
4537 use = reduction_phi;
4538 for (j = 0; j < ncopies; j++)
4539 {
4540 edge pr_edge = loop_preheader_edge (loop);
4541 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
4542 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
4543 }
4544 }
4545 }
4546 }
4547
4548 phis.release ();
4549 if (nested_in_vect_loop)
4550 {
4551 if (double_reduc)
4552 loop = outer_loop;
4553 else
4554 continue;
4555 }
4556
4557 phis.create (3);
4558 /* Find the loop-closed-use at the loop exit of the original scalar
4559 result. (The reduction result is expected to have two immediate uses,
4560 one at the latch block, and one at the loop exit). For double
4561 reductions we are looking for exit phis of the outer loop. */
4562 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4563 {
4564 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4565 {
4566 if (!is_gimple_debug (USE_STMT (use_p)))
4567 phis.safe_push (USE_STMT (use_p));
4568 }
4569 else
4570 {
4571 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
4572 {
4573 tree phi_res = PHI_RESULT (USE_STMT (use_p));
4574
4575 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
4576 {
4577 if (!flow_bb_inside_loop_p (loop,
4578 gimple_bb (USE_STMT (phi_use_p)))
4579 && !is_gimple_debug (USE_STMT (phi_use_p)))
4580 phis.safe_push (USE_STMT (phi_use_p));
4581 }
4582 }
4583 }
4584 }
4585
4586 FOR_EACH_VEC_ELT (phis, i, exit_phi)
4587 {
4588 /* Replace the uses: */
4589 orig_name = PHI_RESULT (exit_phi);
4590 scalar_result = scalar_results[k];
4591 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4592 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
4593 SET_USE (use_p, scalar_result);
4594 }
4595
4596 phis.release ();
4597 }
4598 }
4599
4600
4601 /* Function vectorizable_reduction.
4602
4603 Check if STMT performs a reduction operation that can be vectorized.
4604 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4605 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4606 Return FALSE if not a vectorizable STMT, TRUE otherwise.
4607
4608 This function also handles reduction idioms (patterns) that have been
4609 recognized in advance during vect_pattern_recog. In this case, STMT may be
4610 of this form:
4611 X = pattern_expr (arg0, arg1, ..., X)
4612 and it's STMT_VINFO_RELATED_STMT points to the last stmt in the original
4613 sequence that had been detected and replaced by the pattern-stmt (STMT).
4614
4615 In some cases of reduction patterns, the type of the reduction variable X is
4616 different than the type of the other arguments of STMT.
4617 In such cases, the vectype that is used when transforming STMT into a vector
4618 stmt is different than the vectype that is used to determine the
4619 vectorization factor, because it consists of a different number of elements
4620 than the actual number of elements that are being operated upon in parallel.
4621
4622 For example, consider an accumulation of shorts into an int accumulator.
4623 On some targets it's possible to vectorize this pattern operating on 8
4624 shorts at a time (hence, the vectype for purposes of determining the
4625 vectorization factor should be V8HI); on the other hand, the vectype that
4626 is used to create the vector form is actually V4SI (the type of the result).
4627
4628 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
4629 indicates what is the actual level of parallelism (V8HI in the example), so
4630 that the right vectorization factor would be derived. This vectype
4631 corresponds to the type of arguments to the reduction stmt, and should *NOT*
4632 be used to create the vectorized stmt. The right vectype for the vectorized
4633 stmt is obtained from the type of the result X:
4634 get_vectype_for_scalar_type (TREE_TYPE (X))
4635
4636 This means that, contrary to "regular" reductions (or "regular" stmts in
4637 general), the following equation:
4638 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
4639 does *NOT* necessarily hold for reduction patterns. */
4640
4641 bool
4642 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
4643 gimple *vec_stmt, slp_tree slp_node)
4644 {
4645 tree vec_dest;
4646 tree scalar_dest;
4647 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
4648 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4649 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4650 tree vectype_in = NULL_TREE;
4651 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4652 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4653 enum tree_code code, orig_code, epilog_reduc_code;
4654 enum machine_mode vec_mode;
4655 int op_type;
4656 optab optab, reduc_optab;
4657 tree new_temp = NULL_TREE;
4658 tree def;
4659 gimple def_stmt;
4660 enum vect_def_type dt;
4661 gimple new_phi = NULL;
4662 tree scalar_type;
4663 bool is_simple_use;
4664 gimple orig_stmt;
4665 stmt_vec_info orig_stmt_info;
4666 tree expr = NULL_TREE;
4667 int i;
4668 int ncopies;
4669 int epilog_copies;
4670 stmt_vec_info prev_stmt_info, prev_phi_info;
4671 bool single_defuse_cycle = false;
4672 tree reduc_def = NULL_TREE;
4673 gimple new_stmt = NULL;
4674 int j;
4675 tree ops[3];
4676 bool nested_cycle = false, found_nested_cycle_def = false;
4677 gimple reduc_def_stmt = NULL;
4678 /* The default is that the reduction variable is the last in statement. */
4679 int reduc_index = 2;
4680 bool double_reduc = false, dummy;
4681 basic_block def_bb;
4682 struct loop * def_stmt_loop, *outer_loop = NULL;
4683 tree def_arg;
4684 gimple def_arg_stmt;
4685 auto_vec<tree> vec_oprnds0;
4686 auto_vec<tree> vec_oprnds1;
4687 auto_vec<tree> vect_defs;
4688 auto_vec<gimple> phis;
4689 int vec_num;
4690 tree def0, def1, tem, op0, op1 = NULL_TREE;
4691
4692 /* In case of reduction chain we switch to the first stmt in the chain, but
4693 we don't update STMT_INFO, since only the last stmt is marked as reduction
4694 and has reduction properties. */
4695 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4696 stmt = GROUP_FIRST_ELEMENT (stmt_info);
4697
4698 if (nested_in_vect_loop_p (loop, stmt))
4699 {
4700 outer_loop = loop;
4701 loop = loop->inner;
4702 nested_cycle = true;
4703 }
4704
4705 /* 1. Is vectorizable reduction? */
4706 /* Not supportable if the reduction variable is used in the loop, unless
4707 it's a reduction chain. */
4708 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
4709 && !GROUP_FIRST_ELEMENT (stmt_info))
4710 return false;
4711
4712 /* Reductions that are not used even in an enclosing outer-loop,
4713 are expected to be "live" (used out of the loop). */
4714 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
4715 && !STMT_VINFO_LIVE_P (stmt_info))
4716 return false;
4717
4718 /* Make sure it was already recognized as a reduction computation. */
4719 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
4720 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
4721 return false;
4722
4723 /* 2. Has this been recognized as a reduction pattern?
4724
4725 Check if STMT represents a pattern that has been recognized
4726 in earlier analysis stages. For stmts that represent a pattern,
4727 the STMT_VINFO_RELATED_STMT field records the last stmt in
4728 the original sequence that constitutes the pattern. */
4729
4730 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4731 if (orig_stmt)
4732 {
4733 orig_stmt_info = vinfo_for_stmt (orig_stmt);
4734 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4735 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
4736 }
4737
4738 /* 3. Check the operands of the operation. The first operands are defined
4739 inside the loop body. The last operand is the reduction variable,
4740 which is defined by the loop-header-phi. */
4741
4742 gcc_assert (is_gimple_assign (stmt));
4743
4744 /* Flatten RHS. */
4745 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
4746 {
4747 case GIMPLE_SINGLE_RHS:
4748 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
4749 if (op_type == ternary_op)
4750 {
4751 tree rhs = gimple_assign_rhs1 (stmt);
4752 ops[0] = TREE_OPERAND (rhs, 0);
4753 ops[1] = TREE_OPERAND (rhs, 1);
4754 ops[2] = TREE_OPERAND (rhs, 2);
4755 code = TREE_CODE (rhs);
4756 }
4757 else
4758 return false;
4759 break;
4760
4761 case GIMPLE_BINARY_RHS:
4762 code = gimple_assign_rhs_code (stmt);
4763 op_type = TREE_CODE_LENGTH (code);
4764 gcc_assert (op_type == binary_op);
4765 ops[0] = gimple_assign_rhs1 (stmt);
4766 ops[1] = gimple_assign_rhs2 (stmt);
4767 break;
4768
4769 case GIMPLE_TERNARY_RHS:
4770 code = gimple_assign_rhs_code (stmt);
4771 op_type = TREE_CODE_LENGTH (code);
4772 gcc_assert (op_type == ternary_op);
4773 ops[0] = gimple_assign_rhs1 (stmt);
4774 ops[1] = gimple_assign_rhs2 (stmt);
4775 ops[2] = gimple_assign_rhs3 (stmt);
4776 break;
4777
4778 case GIMPLE_UNARY_RHS:
4779 return false;
4780
4781 default:
4782 gcc_unreachable ();
4783 }
4784
4785 if (code == COND_EXPR && slp_node)
4786 return false;
4787
4788 scalar_dest = gimple_assign_lhs (stmt);
4789 scalar_type = TREE_TYPE (scalar_dest);
4790 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
4791 && !SCALAR_FLOAT_TYPE_P (scalar_type))
4792 return false;
4793
4794 /* Do not try to vectorize bit-precision reductions. */
4795 if ((TYPE_PRECISION (scalar_type)
4796 != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
4797 return false;
4798
4799 /* All uses but the last are expected to be defined in the loop.
4800 The last use is the reduction variable. In case of nested cycle this
4801 assumption is not true: we use reduc_index to record the index of the
4802 reduction variable. */
4803 for (i = 0; i < op_type - 1; i++)
4804 {
4805 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
4806 if (i == 0 && code == COND_EXPR)
4807 continue;
4808
4809 is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4810 &def_stmt, &def, &dt, &tem);
4811 if (!vectype_in)
4812 vectype_in = tem;
4813 gcc_assert (is_simple_use);
4814
4815 if (dt != vect_internal_def
4816 && dt != vect_external_def
4817 && dt != vect_constant_def
4818 && dt != vect_induction_def
4819 && !(dt == vect_nested_cycle && nested_cycle))
4820 return false;
4821
4822 if (dt == vect_nested_cycle)
4823 {
4824 found_nested_cycle_def = true;
4825 reduc_def_stmt = def_stmt;
4826 reduc_index = i;
4827 }
4828 }
4829
4830 is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4831 &def_stmt, &def, &dt, &tem);
4832 if (!vectype_in)
4833 vectype_in = tem;
4834 gcc_assert (is_simple_use);
4835 if (!(dt == vect_reduction_def
4836 || dt == vect_nested_cycle
4837 || ((dt == vect_internal_def || dt == vect_external_def
4838 || dt == vect_constant_def || dt == vect_induction_def)
4839 && nested_cycle && found_nested_cycle_def)))
4840 {
4841 /* For pattern recognized stmts, orig_stmt might be a reduction,
4842 but some helper statements for the pattern might not, or
4843 might be COND_EXPRs with reduction uses in the condition. */
4844 gcc_assert (orig_stmt);
4845 return false;
4846 }
4847 if (!found_nested_cycle_def)
4848 reduc_def_stmt = def_stmt;
4849
4850 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
4851 if (orig_stmt)
4852 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
4853 reduc_def_stmt,
4854 !nested_cycle,
4855 &dummy));
4856 else
4857 {
4858 gimple tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
4859 !nested_cycle, &dummy);
4860 /* We changed STMT to be the first stmt in reduction chain, hence we
4861 check that in this case the first element in the chain is STMT. */
4862 gcc_assert (stmt == tmp
4863 || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
4864 }
4865
4866 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
4867 return false;
4868
4869 if (slp_node || PURE_SLP_STMT (stmt_info))
4870 ncopies = 1;
4871 else
4872 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4873 / TYPE_VECTOR_SUBPARTS (vectype_in));
4874
4875 gcc_assert (ncopies >= 1);
4876
4877 vec_mode = TYPE_MODE (vectype_in);
4878
4879 if (code == COND_EXPR)
4880 {
4881 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL))
4882 {
4883 if (dump_enabled_p ())
4884 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4885 "unsupported condition in reduction\n");
4886
4887 return false;
4888 }
4889 }
4890 else
4891 {
4892 /* 4. Supportable by target? */
4893
4894 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
4895 || code == LROTATE_EXPR || code == RROTATE_EXPR)
4896 {
4897 /* Shifts and rotates are only supported by vectorizable_shifts,
4898 not vectorizable_reduction. */
4899 if (dump_enabled_p ())
4900 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4901 "unsupported shift or rotation.\n");
4902 return false;
4903 }
4904
4905 /* 4.1. check support for the operation in the loop */
4906 optab = optab_for_tree_code (code, vectype_in, optab_default);
4907 if (!optab)
4908 {
4909 if (dump_enabled_p ())
4910 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4911 "no optab.\n");
4912
4913 return false;
4914 }
4915
4916 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
4917 {
4918 if (dump_enabled_p ())
4919 dump_printf (MSG_NOTE, "op not supported by target.\n");
4920
4921 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4922 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4923 < vect_min_worthwhile_factor (code))
4924 return false;
4925
4926 if (dump_enabled_p ())
4927 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
4928 }
4929
4930 /* Worthwhile without SIMD support? */
4931 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
4932 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4933 < vect_min_worthwhile_factor (code))
4934 {
4935 if (dump_enabled_p ())
4936 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4937 "not worthwhile without SIMD support.\n");
4938
4939 return false;
4940 }
4941 }
4942
4943 /* 4.2. Check support for the epilog operation.
4944
4945 If STMT represents a reduction pattern, then the type of the
4946 reduction variable may be different than the type of the rest
4947 of the arguments. For example, consider the case of accumulation
4948 of shorts into an int accumulator; The original code:
4949 S1: int_a = (int) short_a;
4950 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
4951
4952 was replaced with:
4953 STMT: int_acc = widen_sum <short_a, int_acc>
4954
4955 This means that:
4956 1. The tree-code that is used to create the vector operation in the
4957 epilog code (that reduces the partial results) is not the
4958 tree-code of STMT, but is rather the tree-code of the original
4959 stmt from the pattern that STMT is replacing. I.e, in the example
4960 above we want to use 'widen_sum' in the loop, but 'plus' in the
4961 epilog.
4962 2. The type (mode) we use to check available target support
4963 for the vector operation to be created in the *epilog*, is
4964 determined by the type of the reduction variable (in the example
4965 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
4966 However the type (mode) we use to check available target support
4967 for the vector operation to be created *inside the loop*, is
4968 determined by the type of the other arguments to STMT (in the
4969 example we'd check this: optab_handler (widen_sum_optab,
4970 vect_short_mode)).
4971
4972 This is contrary to "regular" reductions, in which the types of all
4973 the arguments are the same as the type of the reduction variable.
4974 For "regular" reductions we can therefore use the same vector type
4975 (and also the same tree-code) when generating the epilog code and
4976 when generating the code inside the loop. */
4977
4978 if (orig_stmt)
4979 {
4980 /* This is a reduction pattern: get the vectype from the type of the
4981 reduction variable, and get the tree-code from orig_stmt. */
4982 orig_code = gimple_assign_rhs_code (orig_stmt);
4983 gcc_assert (vectype_out);
4984 vec_mode = TYPE_MODE (vectype_out);
4985 }
4986 else
4987 {
4988 /* Regular reduction: use the same vectype and tree-code as used for
4989 the vector code inside the loop can be used for the epilog code. */
4990 orig_code = code;
4991 }
4992
4993 if (nested_cycle)
4994 {
4995 def_bb = gimple_bb (reduc_def_stmt);
4996 def_stmt_loop = def_bb->loop_father;
4997 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4998 loop_preheader_edge (def_stmt_loop));
4999 if (TREE_CODE (def_arg) == SSA_NAME
5000 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
5001 && gimple_code (def_arg_stmt) == GIMPLE_PHI
5002 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
5003 && vinfo_for_stmt (def_arg_stmt)
5004 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
5005 == vect_double_reduction_def)
5006 double_reduc = true;
5007 }
5008
5009 epilog_reduc_code = ERROR_MARK;
5010 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
5011 {
5012 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
5013 optab_default);
5014 if (!reduc_optab)
5015 {
5016 if (dump_enabled_p ())
5017 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5018 "no optab for reduction.\n");
5019
5020 epilog_reduc_code = ERROR_MARK;
5021 }
5022
5023 if (reduc_optab
5024 && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
5025 {
5026 if (dump_enabled_p ())
5027 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5028 "reduc op not supported by target.\n");
5029
5030 epilog_reduc_code = ERROR_MARK;
5031 }
5032 }
5033 else
5034 {
5035 if (!nested_cycle || double_reduc)
5036 {
5037 if (dump_enabled_p ())
5038 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5039 "no reduc code for scalar code.\n");
5040
5041 return false;
5042 }
5043 }
5044
5045 if (double_reduc && ncopies > 1)
5046 {
5047 if (dump_enabled_p ())
5048 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5049 "multiple types in double reduction\n");
5050
5051 return false;
5052 }
5053
5054 /* In case of widenning multiplication by a constant, we update the type
5055 of the constant to be the type of the other operand. We check that the
5056 constant fits the type in the pattern recognition pass. */
5057 if (code == DOT_PROD_EXPR
5058 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
5059 {
5060 if (TREE_CODE (ops[0]) == INTEGER_CST)
5061 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
5062 else if (TREE_CODE (ops[1]) == INTEGER_CST)
5063 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
5064 else
5065 {
5066 if (dump_enabled_p ())
5067 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5068 "invalid types in dot-prod\n");
5069
5070 return false;
5071 }
5072 }
5073
5074 if (!vec_stmt) /* transformation not required. */
5075 {
5076 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
5077 return false;
5078 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
5079 return true;
5080 }
5081
5082 /** Transform. **/
5083
5084 if (dump_enabled_p ())
5085 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
5086
5087 /* FORNOW: Multiple types are not supported for condition. */
5088 if (code == COND_EXPR)
5089 gcc_assert (ncopies == 1);
5090
5091 /* Create the destination vector */
5092 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
5093
5094 /* In case the vectorization factor (VF) is bigger than the number
5095 of elements that we can fit in a vectype (nunits), we have to generate
5096 more than one vector stmt - i.e - we need to "unroll" the
5097 vector stmt by a factor VF/nunits. For more details see documentation
5098 in vectorizable_operation. */
5099
5100 /* If the reduction is used in an outer loop we need to generate
5101 VF intermediate results, like so (e.g. for ncopies=2):
5102 r0 = phi (init, r0)
5103 r1 = phi (init, r1)
5104 r0 = x0 + r0;
5105 r1 = x1 + r1;
5106 (i.e. we generate VF results in 2 registers).
5107 In this case we have a separate def-use cycle for each copy, and therefore
5108 for each copy we get the vector def for the reduction variable from the
5109 respective phi node created for this copy.
5110
5111 Otherwise (the reduction is unused in the loop nest), we can combine
5112 together intermediate results, like so (e.g. for ncopies=2):
5113 r = phi (init, r)
5114 r = x0 + r;
5115 r = x1 + r;
5116 (i.e. we generate VF/2 results in a single register).
5117 In this case for each copy we get the vector def for the reduction variable
5118 from the vectorized reduction operation generated in the previous iteration.
5119 */
5120
5121 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
5122 {
5123 single_defuse_cycle = true;
5124 epilog_copies = 1;
5125 }
5126 else
5127 epilog_copies = ncopies;
5128
5129 prev_stmt_info = NULL;
5130 prev_phi_info = NULL;
5131 if (slp_node)
5132 {
5133 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5134 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
5135 == TYPE_VECTOR_SUBPARTS (vectype_in));
5136 }
5137 else
5138 {
5139 vec_num = 1;
5140 vec_oprnds0.create (1);
5141 if (op_type == ternary_op)
5142 vec_oprnds1.create (1);
5143 }
5144
5145 phis.create (vec_num);
5146 vect_defs.create (vec_num);
5147 if (!slp_node)
5148 vect_defs.quick_push (NULL_TREE);
5149
5150 for (j = 0; j < ncopies; j++)
5151 {
5152 if (j == 0 || !single_defuse_cycle)
5153 {
5154 for (i = 0; i < vec_num; i++)
5155 {
5156 /* Create the reduction-phi that defines the reduction
5157 operand. */
5158 new_phi = create_phi_node (vec_dest, loop->header);
5159 set_vinfo_for_stmt (new_phi,
5160 new_stmt_vec_info (new_phi, loop_vinfo,
5161 NULL));
5162 if (j == 0 || slp_node)
5163 phis.quick_push (new_phi);
5164 }
5165 }
5166
5167 if (code == COND_EXPR)
5168 {
5169 gcc_assert (!slp_node);
5170 vectorizable_condition (stmt, gsi, vec_stmt,
5171 PHI_RESULT (phis[0]),
5172 reduc_index, NULL);
5173 /* Multiple types are not supported for condition. */
5174 break;
5175 }
5176
5177 /* Handle uses. */
5178 if (j == 0)
5179 {
5180 op0 = ops[!reduc_index];
5181 if (op_type == ternary_op)
5182 {
5183 if (reduc_index == 0)
5184 op1 = ops[2];
5185 else
5186 op1 = ops[1];
5187 }
5188
5189 if (slp_node)
5190 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5191 slp_node, -1);
5192 else
5193 {
5194 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
5195 stmt, NULL);
5196 vec_oprnds0.quick_push (loop_vec_def0);
5197 if (op_type == ternary_op)
5198 {
5199 loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
5200 NULL);
5201 vec_oprnds1.quick_push (loop_vec_def1);
5202 }
5203 }
5204 }
5205 else
5206 {
5207 if (!slp_node)
5208 {
5209 enum vect_def_type dt;
5210 gimple dummy_stmt;
5211 tree dummy;
5212
5213 vect_is_simple_use (ops[!reduc_index], stmt, loop_vinfo, NULL,
5214 &dummy_stmt, &dummy, &dt);
5215 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt,
5216 loop_vec_def0);
5217 vec_oprnds0[0] = loop_vec_def0;
5218 if (op_type == ternary_op)
5219 {
5220 vect_is_simple_use (op1, stmt, loop_vinfo, NULL, &dummy_stmt,
5221 &dummy, &dt);
5222 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
5223 loop_vec_def1);
5224 vec_oprnds1[0] = loop_vec_def1;
5225 }
5226 }
5227
5228 if (single_defuse_cycle)
5229 reduc_def = gimple_assign_lhs (new_stmt);
5230
5231 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
5232 }
5233
5234 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5235 {
5236 if (slp_node)
5237 reduc_def = PHI_RESULT (phis[i]);
5238 else
5239 {
5240 if (!single_defuse_cycle || j == 0)
5241 reduc_def = PHI_RESULT (new_phi);
5242 }
5243
5244 def1 = ((op_type == ternary_op)
5245 ? vec_oprnds1[i] : NULL);
5246 if (op_type == binary_op)
5247 {
5248 if (reduc_index == 0)
5249 expr = build2 (code, vectype_out, reduc_def, def0);
5250 else
5251 expr = build2 (code, vectype_out, def0, reduc_def);
5252 }
5253 else
5254 {
5255 if (reduc_index == 0)
5256 expr = build3 (code, vectype_out, reduc_def, def0, def1);
5257 else
5258 {
5259 if (reduc_index == 1)
5260 expr = build3 (code, vectype_out, def0, reduc_def, def1);
5261 else
5262 expr = build3 (code, vectype_out, def0, def1, reduc_def);
5263 }
5264 }
5265
5266 new_stmt = gimple_build_assign (vec_dest, expr);
5267 new_temp = make_ssa_name (vec_dest, new_stmt);
5268 gimple_assign_set_lhs (new_stmt, new_temp);
5269 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5270
5271 if (slp_node)
5272 {
5273 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5274 vect_defs.quick_push (new_temp);
5275 }
5276 else
5277 vect_defs[0] = new_temp;
5278 }
5279
5280 if (slp_node)
5281 continue;
5282
5283 if (j == 0)
5284 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5285 else
5286 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5287
5288 prev_stmt_info = vinfo_for_stmt (new_stmt);
5289 prev_phi_info = vinfo_for_stmt (new_phi);
5290 }
5291
5292 /* Finalize the reduction-phi (set its arguments) and create the
5293 epilog reduction code. */
5294 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
5295 {
5296 new_temp = gimple_assign_lhs (*vec_stmt);
5297 vect_defs[0] = new_temp;
5298 }
5299
5300 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
5301 epilog_reduc_code, phis, reduc_index,
5302 double_reduc, slp_node);
5303
5304 return true;
5305 }
5306
5307 /* Function vect_min_worthwhile_factor.
5308
5309 For a loop where we could vectorize the operation indicated by CODE,
5310 return the minimum vectorization factor that makes it worthwhile
5311 to use generic vectors. */
5312 int
5313 vect_min_worthwhile_factor (enum tree_code code)
5314 {
5315 switch (code)
5316 {
5317 case PLUS_EXPR:
5318 case MINUS_EXPR:
5319 case NEGATE_EXPR:
5320 return 4;
5321
5322 case BIT_AND_EXPR:
5323 case BIT_IOR_EXPR:
5324 case BIT_XOR_EXPR:
5325 case BIT_NOT_EXPR:
5326 return 2;
5327
5328 default:
5329 return INT_MAX;
5330 }
5331 }
5332
5333
5334 /* Function vectorizable_induction
5335
5336 Check if PHI performs an induction computation that can be vectorized.
5337 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
5338 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
5339 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5340
5341 bool
5342 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5343 gimple *vec_stmt)
5344 {
5345 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
5346 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5347 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5348 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5349 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5350 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5351 tree vec_def;
5352
5353 gcc_assert (ncopies >= 1);
5354 /* FORNOW. These restrictions should be relaxed. */
5355 if (nested_in_vect_loop_p (loop, phi))
5356 {
5357 imm_use_iterator imm_iter;
5358 use_operand_p use_p;
5359 gimple exit_phi;
5360 edge latch_e;
5361 tree loop_arg;
5362
5363 if (ncopies > 1)
5364 {
5365 if (dump_enabled_p ())
5366 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5367 "multiple types in nested loop.\n");
5368 return false;
5369 }
5370
5371 exit_phi = NULL;
5372 latch_e = loop_latch_edge (loop->inner);
5373 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
5374 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
5375 {
5376 if (!flow_bb_inside_loop_p (loop->inner,
5377 gimple_bb (USE_STMT (use_p))))
5378 {
5379 exit_phi = USE_STMT (use_p);
5380 break;
5381 }
5382 }
5383 if (exit_phi)
5384 {
5385 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
5386 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5387 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
5388 {
5389 if (dump_enabled_p ())
5390 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5391 "inner-loop induction only used outside "
5392 "of the outer vectorized loop.\n");
5393 return false;
5394 }
5395 }
5396 }
5397
5398 if (!STMT_VINFO_RELEVANT_P (stmt_info))
5399 return false;
5400
5401 /* FORNOW: SLP not supported. */
5402 if (STMT_SLP_TYPE (stmt_info))
5403 return false;
5404
5405 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
5406
5407 if (gimple_code (phi) != GIMPLE_PHI)
5408 return false;
5409
5410 if (!vec_stmt) /* transformation not required. */
5411 {
5412 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
5413 if (dump_enabled_p ())
5414 dump_printf_loc (MSG_NOTE, vect_location,
5415 "=== vectorizable_induction ===\n");
5416 vect_model_induction_cost (stmt_info, ncopies);
5417 return true;
5418 }
5419
5420 /** Transform. **/
5421
5422 if (dump_enabled_p ())
5423 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
5424
5425 vec_def = get_initial_def_for_induction (phi);
5426 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
5427 return true;
5428 }
5429
5430 /* Function vectorizable_live_operation.
5431
5432 STMT computes a value that is used outside the loop. Check if
5433 it can be supported. */
5434
5435 bool
5436 vectorizable_live_operation (gimple stmt,
5437 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5438 gimple *vec_stmt)
5439 {
5440 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5441 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5442 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5443 int i;
5444 int op_type;
5445 tree op;
5446 tree def;
5447 gimple def_stmt;
5448 enum vect_def_type dt;
5449 enum tree_code code;
5450 enum gimple_rhs_class rhs_class;
5451
5452 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
5453
5454 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
5455 return false;
5456
5457 if (!is_gimple_assign (stmt))
5458 {
5459 if (gimple_call_internal_p (stmt)
5460 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
5461 && gimple_call_lhs (stmt)
5462 && loop->simduid
5463 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
5464 && loop->simduid
5465 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
5466 {
5467 edge e = single_exit (loop);
5468 basic_block merge_bb = e->dest;
5469 imm_use_iterator imm_iter;
5470 use_operand_p use_p;
5471 tree lhs = gimple_call_lhs (stmt);
5472
5473 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
5474 {
5475 gimple use_stmt = USE_STMT (use_p);
5476 if (gimple_code (use_stmt) == GIMPLE_PHI
5477 || gimple_bb (use_stmt) == merge_bb)
5478 {
5479 if (vec_stmt)
5480 {
5481 tree vfm1
5482 = build_int_cst (unsigned_type_node,
5483 loop_vinfo->vectorization_factor - 1);
5484 SET_PHI_ARG_DEF (use_stmt, e->dest_idx, vfm1);
5485 }
5486 return true;
5487 }
5488 }
5489 }
5490
5491 return false;
5492 }
5493
5494 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5495 return false;
5496
5497 /* FORNOW. CHECKME. */
5498 if (nested_in_vect_loop_p (loop, stmt))
5499 return false;
5500
5501 code = gimple_assign_rhs_code (stmt);
5502 op_type = TREE_CODE_LENGTH (code);
5503 rhs_class = get_gimple_rhs_class (code);
5504 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
5505 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
5506
5507 /* FORNOW: support only if all uses are invariant. This means
5508 that the scalar operations can remain in place, unvectorized.
5509 The original last scalar value that they compute will be used. */
5510
5511 for (i = 0; i < op_type; i++)
5512 {
5513 if (rhs_class == GIMPLE_SINGLE_RHS)
5514 op = TREE_OPERAND (gimple_op (stmt, 1), i);
5515 else
5516 op = gimple_op (stmt, i + 1);
5517 if (op
5518 && !vect_is_simple_use (op, stmt, loop_vinfo, NULL, &def_stmt, &def,
5519 &dt))
5520 {
5521 if (dump_enabled_p ())
5522 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5523 "use not simple.\n");
5524 return false;
5525 }
5526
5527 if (dt != vect_external_def && dt != vect_constant_def)
5528 return false;
5529 }
5530
5531 /* No transformation is required for the cases we currently support. */
5532 return true;
5533 }
5534
5535 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
5536
5537 static void
5538 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
5539 {
5540 ssa_op_iter op_iter;
5541 imm_use_iterator imm_iter;
5542 def_operand_p def_p;
5543 gimple ustmt;
5544
5545 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
5546 {
5547 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
5548 {
5549 basic_block bb;
5550
5551 if (!is_gimple_debug (ustmt))
5552 continue;
5553
5554 bb = gimple_bb (ustmt);
5555
5556 if (!flow_bb_inside_loop_p (loop, bb))
5557 {
5558 if (gimple_debug_bind_p (ustmt))
5559 {
5560 if (dump_enabled_p ())
5561 dump_printf_loc (MSG_NOTE, vect_location,
5562 "killing debug use\n");
5563
5564 gimple_debug_bind_reset_value (ustmt);
5565 update_stmt (ustmt);
5566 }
5567 else
5568 gcc_unreachable ();
5569 }
5570 }
5571 }
5572 }
5573
5574
5575 /* This function builds ni_name = number of iterations. Statements
5576 are emitted on the loop preheader edge. */
5577
5578 static tree
5579 vect_build_loop_niters (loop_vec_info loop_vinfo)
5580 {
5581 tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));
5582 if (TREE_CODE (ni) == INTEGER_CST)
5583 return ni;
5584 else
5585 {
5586 tree ni_name, var;
5587 gimple_seq stmts = NULL;
5588 edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
5589
5590 var = create_tmp_var (TREE_TYPE (ni), "niters");
5591 ni_name = force_gimple_operand (ni, &stmts, false, var);
5592 if (stmts)
5593 gsi_insert_seq_on_edge_immediate (pe, stmts);
5594
5595 return ni_name;
5596 }
5597 }
5598
5599
5600 /* This function generates the following statements:
5601
5602 ni_name = number of iterations loop executes
5603 ratio = ni_name / vf
5604 ratio_mult_vf_name = ratio * vf
5605
5606 and places them on the loop preheader edge. */
5607
5608 static void
5609 vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
5610 tree ni_name,
5611 tree *ratio_mult_vf_name_ptr,
5612 tree *ratio_name_ptr)
5613 {
5614 tree ni_minus_gap_name;
5615 tree var;
5616 tree ratio_name;
5617 tree ratio_mult_vf_name;
5618 tree ni = LOOP_VINFO_NITERS (loop_vinfo);
5619 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5620 edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
5621 tree log_vf;
5622
5623 log_vf = build_int_cst (TREE_TYPE (ni), exact_log2 (vf));
5624
5625 /* If epilogue loop is required because of data accesses with gaps, we
5626 subtract one iteration from the total number of iterations here for
5627 correct calculation of RATIO. */
5628 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
5629 {
5630 ni_minus_gap_name = fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
5631 ni_name,
5632 build_one_cst (TREE_TYPE (ni_name)));
5633 if (!is_gimple_val (ni_minus_gap_name))
5634 {
5635 var = create_tmp_var (TREE_TYPE (ni), "ni_gap");
5636 gimple stmts = NULL;
5637 ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts,
5638 true, var);
5639 gsi_insert_seq_on_edge_immediate (pe, stmts);
5640 }
5641 }
5642 else
5643 ni_minus_gap_name = ni_name;
5644
5645 /* Create: ratio = ni >> log2(vf) */
5646
5647 ratio_name = fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_minus_gap_name),
5648 ni_minus_gap_name, log_vf);
5649 if (!is_gimple_val (ratio_name))
5650 {
5651 var = create_tmp_var (TREE_TYPE (ni), "bnd");
5652 gimple stmts = NULL;
5653 ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
5654 gsi_insert_seq_on_edge_immediate (pe, stmts);
5655 }
5656 *ratio_name_ptr = ratio_name;
5657
5658 /* Create: ratio_mult_vf = ratio << log2 (vf). */
5659
5660 if (ratio_mult_vf_name_ptr)
5661 {
5662 ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
5663 ratio_name, log_vf);
5664 if (!is_gimple_val (ratio_mult_vf_name))
5665 {
5666 var = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf");
5667 gimple stmts = NULL;
5668 ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmts,
5669 true, var);
5670 gsi_insert_seq_on_edge_immediate (pe, stmts);
5671 }
5672 *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
5673 }
5674
5675 return;
5676 }
5677
5678
5679 /* Function vect_transform_loop.
5680
5681 The analysis phase has determined that the loop is vectorizable.
5682 Vectorize the loop - created vectorized stmts to replace the scalar
5683 stmts in the loop, and update the loop exit condition. */
5684
5685 void
5686 vect_transform_loop (loop_vec_info loop_vinfo)
5687 {
5688 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5689 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
5690 int nbbs = loop->num_nodes;
5691 gimple_stmt_iterator si;
5692 int i;
5693 tree ratio = NULL;
5694 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5695 bool grouped_store;
5696 bool slp_scheduled = false;
5697 unsigned int nunits;
5698 gimple stmt, pattern_stmt;
5699 gimple_seq pattern_def_seq = NULL;
5700 gimple_stmt_iterator pattern_def_si = gsi_none ();
5701 bool transform_pattern_stmt = false;
5702 bool check_profitability = false;
5703 int th;
5704 /* Record number of iterations before we started tampering with the profile. */
5705 gcov_type expected_iterations = expected_loop_iterations_unbounded (loop);
5706
5707 if (dump_enabled_p ())
5708 dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");
5709
5710 /* If profile is inprecise, we have chance to fix it up. */
5711 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
5712 expected_iterations = LOOP_VINFO_INT_NITERS (loop_vinfo);
5713
5714 /* Use the more conservative vectorization threshold. If the number
5715 of iterations is constant assume the cost check has been performed
5716 by our caller. If the threshold makes all loops profitable that
5717 run at least the vectorization factor number of times checking
5718 is pointless, too. */
5719 th = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
5720 * LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 1);
5721 th = MAX (th, LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo));
5722 if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1
5723 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
5724 {
5725 if (dump_enabled_p ())
5726 dump_printf_loc (MSG_NOTE, vect_location,
5727 "Profitability threshold is %d loop iterations.\n",
5728 th);
5729 check_profitability = true;
5730 }
5731
5732 /* Version the loop first, if required, so the profitability check
5733 comes first. */
5734
5735 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
5736 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
5737 {
5738 vect_loop_versioning (loop_vinfo, th, check_profitability);
5739 check_profitability = false;
5740 }
5741
5742 tree ni_name = vect_build_loop_niters (loop_vinfo);
5743 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = ni_name;
5744
5745 /* Peel the loop if there are data refs with unknown alignment.
5746 Only one data ref with unknown store is allowed. */
5747
5748 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
5749 {
5750 vect_do_peeling_for_alignment (loop_vinfo, ni_name,
5751 th, check_profitability);
5752 check_profitability = false;
5753 /* The above adjusts LOOP_VINFO_NITERS, so cause ni_name to
5754 be re-computed. */
5755 ni_name = NULL_TREE;
5756 }
5757
5758 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
5759 compile time constant), or it is a constant that doesn't divide by the
5760 vectorization factor, then an epilog loop needs to be created.
5761 We therefore duplicate the loop: the original loop will be vectorized,
5762 and will compute the first (n/VF) iterations. The second copy of the loop
5763 will remain scalar and will compute the remaining (n%VF) iterations.
5764 (VF is the vectorization factor). */
5765
5766 if (LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)
5767 || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
5768 {
5769 tree ratio_mult_vf;
5770 if (!ni_name)
5771 ni_name = vect_build_loop_niters (loop_vinfo);
5772 vect_generate_tmps_on_preheader (loop_vinfo, ni_name, &ratio_mult_vf,
5773 &ratio);
5774 vect_do_peeling_for_loop_bound (loop_vinfo, ni_name, ratio_mult_vf,
5775 th, check_profitability);
5776 }
5777 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
5778 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
5779 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
5780 else
5781 {
5782 if (!ni_name)
5783 ni_name = vect_build_loop_niters (loop_vinfo);
5784 vect_generate_tmps_on_preheader (loop_vinfo, ni_name, NULL, &ratio);
5785 }
5786
5787 /* 1) Make sure the loop header has exactly two entries
5788 2) Make sure we have a preheader basic block. */
5789
5790 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
5791
5792 split_edge (loop_preheader_edge (loop));
5793
5794 /* FORNOW: the vectorizer supports only loops which body consist
5795 of one basic block (header + empty latch). When the vectorizer will
5796 support more involved loop forms, the order by which the BBs are
5797 traversed need to be reconsidered. */
5798
5799 for (i = 0; i < nbbs; i++)
5800 {
5801 basic_block bb = bbs[i];
5802 stmt_vec_info stmt_info;
5803 gimple phi;
5804
5805 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5806 {
5807 phi = gsi_stmt (si);
5808 if (dump_enabled_p ())
5809 {
5810 dump_printf_loc (MSG_NOTE, vect_location,
5811 "------>vectorizing phi: ");
5812 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
5813 dump_printf (MSG_NOTE, "\n");
5814 }
5815 stmt_info = vinfo_for_stmt (phi);
5816 if (!stmt_info)
5817 continue;
5818
5819 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5820 vect_loop_kill_debug_uses (loop, phi);
5821
5822 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5823 && !STMT_VINFO_LIVE_P (stmt_info))
5824 continue;
5825
5826 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
5827 != (unsigned HOST_WIDE_INT) vectorization_factor)
5828 && dump_enabled_p ())
5829 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
5830
5831 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
5832 {
5833 if (dump_enabled_p ())
5834 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
5835 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
5836 }
5837 }
5838
5839 pattern_stmt = NULL;
5840 for (si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;)
5841 {
5842 bool is_store;
5843
5844 if (transform_pattern_stmt)
5845 stmt = pattern_stmt;
5846 else
5847 {
5848 stmt = gsi_stmt (si);
5849 /* During vectorization remove existing clobber stmts. */
5850 if (gimple_clobber_p (stmt))
5851 {
5852 unlink_stmt_vdef (stmt);
5853 gsi_remove (&si, true);
5854 release_defs (stmt);
5855 continue;
5856 }
5857 }
5858
5859 if (dump_enabled_p ())
5860 {
5861 dump_printf_loc (MSG_NOTE, vect_location,
5862 "------>vectorizing statement: ");
5863 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
5864 dump_printf (MSG_NOTE, "\n");
5865 }
5866
5867 stmt_info = vinfo_for_stmt (stmt);
5868
5869 /* vector stmts created in the outer-loop during vectorization of
5870 stmts in an inner-loop may not have a stmt_info, and do not
5871 need to be vectorized. */
5872 if (!stmt_info)
5873 {
5874 gsi_next (&si);
5875 continue;
5876 }
5877
5878 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5879 vect_loop_kill_debug_uses (loop, stmt);
5880
5881 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5882 && !STMT_VINFO_LIVE_P (stmt_info))
5883 {
5884 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5885 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5886 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5887 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5888 {
5889 stmt = pattern_stmt;
5890 stmt_info = vinfo_for_stmt (stmt);
5891 }
5892 else
5893 {
5894 gsi_next (&si);
5895 continue;
5896 }
5897 }
5898 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5899 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5900 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5901 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5902 transform_pattern_stmt = true;
5903
5904 /* If pattern statement has def stmts, vectorize them too. */
5905 if (is_pattern_stmt_p (stmt_info))
5906 {
5907 if (pattern_def_seq == NULL)
5908 {
5909 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
5910 pattern_def_si = gsi_start (pattern_def_seq);
5911 }
5912 else if (!gsi_end_p (pattern_def_si))
5913 gsi_next (&pattern_def_si);
5914 if (pattern_def_seq != NULL)
5915 {
5916 gimple pattern_def_stmt = NULL;
5917 stmt_vec_info pattern_def_stmt_info = NULL;
5918
5919 while (!gsi_end_p (pattern_def_si))
5920 {
5921 pattern_def_stmt = gsi_stmt (pattern_def_si);
5922 pattern_def_stmt_info
5923 = vinfo_for_stmt (pattern_def_stmt);
5924 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
5925 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
5926 break;
5927 gsi_next (&pattern_def_si);
5928 }
5929
5930 if (!gsi_end_p (pattern_def_si))
5931 {
5932 if (dump_enabled_p ())
5933 {
5934 dump_printf_loc (MSG_NOTE, vect_location,
5935 "==> vectorizing pattern def "
5936 "stmt: ");
5937 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
5938 pattern_def_stmt, 0);
5939 dump_printf (MSG_NOTE, "\n");
5940 }
5941
5942 stmt = pattern_def_stmt;
5943 stmt_info = pattern_def_stmt_info;
5944 }
5945 else
5946 {
5947 pattern_def_si = gsi_none ();
5948 transform_pattern_stmt = false;
5949 }
5950 }
5951 else
5952 transform_pattern_stmt = false;
5953 }
5954
5955 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
5956 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (
5957 STMT_VINFO_VECTYPE (stmt_info));
5958 if (!STMT_SLP_TYPE (stmt_info)
5959 && nunits != (unsigned int) vectorization_factor
5960 && dump_enabled_p ())
5961 /* For SLP VF is set according to unrolling factor, and not to
5962 vector size, hence for SLP this print is not valid. */
5963 dump_printf_loc (MSG_NOTE, vect_location,
5964 "multiple-types.\n");
5965
5966 /* SLP. Schedule all the SLP instances when the first SLP stmt is
5967 reached. */
5968 if (STMT_SLP_TYPE (stmt_info))
5969 {
5970 if (!slp_scheduled)
5971 {
5972 slp_scheduled = true;
5973
5974 if (dump_enabled_p ())
5975 dump_printf_loc (MSG_NOTE, vect_location,
5976 "=== scheduling SLP instances ===\n");
5977
5978 vect_schedule_slp (loop_vinfo, NULL);
5979 }
5980
5981 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
5982 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
5983 {
5984 if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
5985 {
5986 pattern_def_seq = NULL;
5987 gsi_next (&si);
5988 }
5989 continue;
5990 }
5991 }
5992
5993 /* -------- vectorize statement ------------ */
5994 if (dump_enabled_p ())
5995 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
5996
5997 grouped_store = false;
5998 is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
5999 if (is_store)
6000 {
6001 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6002 {
6003 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
6004 interleaving chain was completed - free all the stores in
6005 the chain. */
6006 gsi_next (&si);
6007 vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
6008 continue;
6009 }
6010 else
6011 {
6012 /* Free the attached stmt_vec_info and remove the stmt. */
6013 gimple store = gsi_stmt (si);
6014 free_stmt_vec_info (store);
6015 unlink_stmt_vdef (store);
6016 gsi_remove (&si, true);
6017 release_defs (store);
6018 continue;
6019 }
6020 }
6021
6022 if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
6023 {
6024 pattern_def_seq = NULL;
6025 gsi_next (&si);
6026 }
6027 } /* stmts in BB */
6028 } /* BBs in loop */
6029
6030 slpeel_make_loop_iterate_ntimes (loop, ratio);
6031
6032 /* Reduce loop iterations by the vectorization factor. */
6033 scale_loop_profile (loop, GCOV_COMPUTE_SCALE (1, vectorization_factor),
6034 expected_iterations / vectorization_factor);
6035 loop->nb_iterations_upper_bound
6036 = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (vectorization_factor),
6037 FLOOR_DIV_EXPR);
6038 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
6039 && loop->nb_iterations_upper_bound != double_int_zero)
6040 loop->nb_iterations_upper_bound = loop->nb_iterations_upper_bound - double_int_one;
6041 if (loop->any_estimate)
6042 {
6043 loop->nb_iterations_estimate
6044 = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (vectorization_factor),
6045 FLOOR_DIV_EXPR);
6046 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
6047 && loop->nb_iterations_estimate != double_int_zero)
6048 loop->nb_iterations_estimate = loop->nb_iterations_estimate - double_int_one;
6049 }
6050
6051 if (dump_enabled_p ())
6052 {
6053 dump_printf_loc (MSG_NOTE, vect_location,
6054 "LOOP VECTORIZED\n");
6055 if (loop->inner)
6056 dump_printf_loc (MSG_NOTE, vect_location,
6057 "OUTER LOOP VECTORIZED\n");
6058 dump_printf (MSG_NOTE, "\n");
6059 }
6060 }