re PR tree-optimization/59288 (ICE in get_initial_def_for_induction)
[gcc.git] / gcc / tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003-2013 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "stor-layout.h"
29 #include "basic-block.h"
30 #include "gimple-pretty-print.h"
31 #include "tree-ssa-alias.h"
32 #include "internal-fn.h"
33 #include "gimple-expr.h"
34 #include "is-a.h"
35 #include "gimple.h"
36 #include "gimplify.h"
37 #include "gimple-iterator.h"
38 #include "gimplify-me.h"
39 #include "gimple-ssa.h"
40 #include "tree-phinodes.h"
41 #include "ssa-iterators.h"
42 #include "stringpool.h"
43 #include "tree-ssanames.h"
44 #include "tree-ssa-loop-ivopts.h"
45 #include "tree-ssa-loop-manip.h"
46 #include "tree-ssa-loop-niter.h"
47 #include "tree-pass.h"
48 #include "cfgloop.h"
49 #include "expr.h"
50 #include "recog.h"
51 #include "optabs.h"
52 #include "params.h"
53 #include "diagnostic-core.h"
54 #include "tree-chrec.h"
55 #include "tree-scalar-evolution.h"
56 #include "tree-vectorizer.h"
57 #include "target.h"
58
59 /* Loop Vectorization Pass.
60
61 This pass tries to vectorize loops.
62
63 For example, the vectorizer transforms the following simple loop:
64
65 short a[N]; short b[N]; short c[N]; int i;
66
67 for (i=0; i<N; i++){
68 a[i] = b[i] + c[i];
69 }
70
71 as if it were manually vectorized by rewriting the source code into:
72
73 typedef int __attribute__((mode(V8HI))) v8hi;
74 short a[N]; short b[N]; short c[N]; int i;
75 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
76 v8hi va, vb, vc;
77
78 for (i=0; i<N/8; i++){
79 vb = pb[i];
80 vc = pc[i];
81 va = vb + vc;
82 pa[i] = va;
83 }
84
85 The main entry to this pass is vectorize_loops(), in which
86 the vectorizer applies a set of analyses on a given set of loops,
87 followed by the actual vectorization transformation for the loops that
88 had successfully passed the analysis phase.
89 Throughout this pass we make a distinction between two types of
90 data: scalars (which are represented by SSA_NAMES), and memory references
91 ("data-refs"). These two types of data require different handling both
92 during analysis and transformation. The types of data-refs that the
93 vectorizer currently supports are ARRAY_REFs whose base is an array DECL
94 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
95 accesses are required to have a simple (consecutive) access pattern.
96
97 Analysis phase:
98 ===============
99 The driver for the analysis phase is vect_analyze_loop().
100 It applies a set of analyses, some of which rely on the scalar evolution
101 analyzer (scev) developed by Sebastian Pop.
102
103 During the analysis phase the vectorizer records some information
104 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
105 loop, as well as general information about the loop as a whole, which is
106 recorded in a "loop_vec_info" struct attached to each loop.
107
108 Transformation phase:
109 =====================
110 The loop transformation phase scans all the stmts in the loop, and
111 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
112 the loop that needs to be vectorized. It inserts the vector code sequence
113 just before the scalar stmt S, and records a pointer to the vector code
114 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
115 attached to S). This pointer will be used for the vectorization of following
116 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
117 otherwise, we rely on dead code elimination for removing it.
118
119 For example, say stmt S1 was vectorized into stmt VS1:
120
121 VS1: vb = px[i];
122 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
123 S2: a = b;
124
125 To vectorize stmt S2, the vectorizer first finds the stmt that defines
126 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
127 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
128 resulting sequence would be:
129
130 VS1: vb = px[i];
131 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
132 VS2: va = vb;
133 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
134
135 Operands that are not SSA_NAMEs are data-refs that appear in
136 load/store operations (like 'x[i]' in S1), and are handled differently.
137
138 Target modeling:
139 =================
140 Currently the only target-specific information that is used is the
141 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
142 Targets that can support different vector sizes will, for now, need to
143 specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
144 flexibility will be added in the future.
145
146 Since we only vectorize operations whose vector form can be
147 expressed using existing tree codes, to verify that an operation is
148 supported, the vectorizer checks the relevant optab at the relevant
149 machine_mode (e.g, optab_handler (add_optab, V8HImode)). If
150 the value found is CODE_FOR_nothing, then there's no target support, and
151 we can't vectorize the stmt.
152
153 For additional information on this project see:
154 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
155 */
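
/* Illustration only (a sketch, not code used by the vectorizer itself):
   under the optab-based scheme described above, checking whether the target
   supports addition of eight shorts amounts roughly to

     optab_handler (add_optab, V8HImode) != CODE_FOR_nothing

   the per-stmt analysis routines perform the real checks.  */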
156
157 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
158
159 /* Function vect_determine_vectorization_factor
160
161 Determine the vectorization factor (VF). VF is the number of data elements
162 that are operated upon in parallel in a single iteration of the vectorized
163 loop. For example, when vectorizing a loop that operates on 4-byte elements,
164 on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
165 elements can fit in a single vector register.
166
167 We currently support vectorization of loops in which all types operated upon
168 are of the same size. Therefore this function currently sets VF according to
169 the size of the types operated upon, and fails if there are multiple sizes
170 in the loop.
171
172 VF is also the factor by which the loop iterations are strip-mined, e.g.:
173 original loop:
174 for (i=0; i<N; i++){
175 a[i] = b[i] + c[i];
176 }
177
178 vectorized loop:
179 for (i=0; i<N; i+=VF){
180 a[i:VF] = b[i:VF] + c[i:VF];
181 }
182 */
183
184 static bool
185 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
186 {
187 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
188 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
189 int nbbs = loop->num_nodes;
190 gimple_stmt_iterator si;
191 unsigned int vectorization_factor = 0;
192 tree scalar_type;
193 gimple phi;
194 tree vectype;
195 unsigned int nunits;
196 stmt_vec_info stmt_info;
197 int i;
198 HOST_WIDE_INT dummy;
199 gimple stmt, pattern_stmt = NULL;
200 gimple_seq pattern_def_seq = NULL;
201 gimple_stmt_iterator pattern_def_si = gsi_none ();
202 bool analyze_pattern_stmt = false;
203
204 if (dump_enabled_p ())
205 dump_printf_loc (MSG_NOTE, vect_location,
206 "=== vect_determine_vectorization_factor ===\n");
207
208 for (i = 0; i < nbbs; i++)
209 {
210 basic_block bb = bbs[i];
211
212 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
213 {
214 phi = gsi_stmt (si);
215 stmt_info = vinfo_for_stmt (phi);
216 if (dump_enabled_p ())
217 {
218 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
219 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
220 dump_printf (MSG_NOTE, "\n");
221 }
222
223 gcc_assert (stmt_info);
224
225 if (STMT_VINFO_RELEVANT_P (stmt_info))
226 {
227 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
228 scalar_type = TREE_TYPE (PHI_RESULT (phi));
229
230 if (dump_enabled_p ())
231 {
232 dump_printf_loc (MSG_NOTE, vect_location,
233 "get vectype for scalar type: ");
234 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
235 dump_printf (MSG_NOTE, "\n");
236 }
237
238 vectype = get_vectype_for_scalar_type (scalar_type);
239 if (!vectype)
240 {
241 if (dump_enabled_p ())
242 {
243 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
244 "not vectorized: unsupported "
245 "data-type ");
246 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
247 scalar_type);
248 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
249 }
250 return false;
251 }
252 STMT_VINFO_VECTYPE (stmt_info) = vectype;
253
254 if (dump_enabled_p ())
255 {
256 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
257 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
258 dump_printf (MSG_NOTE, "\n");
259 }
260
261 nunits = TYPE_VECTOR_SUBPARTS (vectype);
262 if (dump_enabled_p ())
263 dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
264 nunits);
265
266 if (!vectorization_factor
267 || (nunits > vectorization_factor))
268 vectorization_factor = nunits;
269 }
270 }
271
272 for (si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;)
273 {
274 tree vf_vectype;
275
276 if (analyze_pattern_stmt)
277 stmt = pattern_stmt;
278 else
279 stmt = gsi_stmt (si);
280
281 stmt_info = vinfo_for_stmt (stmt);
282
283 if (dump_enabled_p ())
284 {
285 dump_printf_loc (MSG_NOTE, vect_location,
286 "==> examining statement: ");
287 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
288 dump_printf (MSG_NOTE, "\n");
289 }
290
291 gcc_assert (stmt_info);
292
293 /* Skip stmts which do not need to be vectorized. */
294 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
295 && !STMT_VINFO_LIVE_P (stmt_info))
296 || gimple_clobber_p (stmt))
297 {
298 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
299 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
300 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
301 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
302 {
303 stmt = pattern_stmt;
304 stmt_info = vinfo_for_stmt (pattern_stmt);
305 if (dump_enabled_p ())
306 {
307 dump_printf_loc (MSG_NOTE, vect_location,
308 "==> examining pattern statement: ");
309 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
310 dump_printf (MSG_NOTE, "\n");
311 }
312 }
313 else
314 {
315 if (dump_enabled_p ())
316 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
317 gsi_next (&si);
318 continue;
319 }
320 }
321 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
322 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
323 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
324 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
325 analyze_pattern_stmt = true;
326
327 /* If a pattern statement has def stmts, analyze them too. */
328 if (is_pattern_stmt_p (stmt_info))
329 {
330 if (pattern_def_seq == NULL)
331 {
332 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
333 pattern_def_si = gsi_start (pattern_def_seq);
334 }
335 else if (!gsi_end_p (pattern_def_si))
336 gsi_next (&pattern_def_si);
337 if (pattern_def_seq != NULL)
338 {
339 gimple pattern_def_stmt = NULL;
340 stmt_vec_info pattern_def_stmt_info = NULL;
341
342 while (!gsi_end_p (pattern_def_si))
343 {
344 pattern_def_stmt = gsi_stmt (pattern_def_si);
345 pattern_def_stmt_info
346 = vinfo_for_stmt (pattern_def_stmt);
347 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
348 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
349 break;
350 gsi_next (&pattern_def_si);
351 }
352
353 if (!gsi_end_p (pattern_def_si))
354 {
355 if (dump_enabled_p ())
356 {
357 dump_printf_loc (MSG_NOTE, vect_location,
358 "==> examining pattern def stmt: ");
359 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
360 pattern_def_stmt, 0);
361 dump_printf (MSG_NOTE, "\n");
362 }
363
364 stmt = pattern_def_stmt;
365 stmt_info = pattern_def_stmt_info;
366 }
367 else
368 {
369 pattern_def_si = gsi_none ();
370 analyze_pattern_stmt = false;
371 }
372 }
373 else
374 analyze_pattern_stmt = false;
375 }
376
377 if (gimple_get_lhs (stmt) == NULL_TREE)
378 {
379 if (dump_enabled_p ())
380 {
381 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
382 "not vectorized: irregular stmt.");
383 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
384 0);
385 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
386 }
387 return false;
388 }
389
390 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
391 {
392 if (dump_enabled_p ())
393 {
394 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
395 "not vectorized: vector stmt in loop:");
396 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
397 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
398 }
399 return false;
400 }
401
402 if (STMT_VINFO_VECTYPE (stmt_info))
403 {
404 /* The only case in which a vectype has already been set is for stmts
405 that contain a data-ref, or for "pattern-stmts" (stmts
406 generated by the vectorizer to represent/replace a certain
407 idiom). */
408 gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
409 || is_pattern_stmt_p (stmt_info)
410 || !gsi_end_p (pattern_def_si));
411 vectype = STMT_VINFO_VECTYPE (stmt_info);
412 }
413 else
414 {
415 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
416 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
417 if (dump_enabled_p ())
418 {
419 dump_printf_loc (MSG_NOTE, vect_location,
420 "get vectype for scalar type: ");
421 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
422 dump_printf (MSG_NOTE, "\n");
423 }
424 vectype = get_vectype_for_scalar_type (scalar_type);
425 if (!vectype)
426 {
427 if (dump_enabled_p ())
428 {
429 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
430 "not vectorized: unsupported "
431 "data-type ");
432 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
433 scalar_type);
434 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
435 }
436 return false;
437 }
438
439 STMT_VINFO_VECTYPE (stmt_info) = vectype;
440
441 if (dump_enabled_p ())
442 {
443 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
444 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
445 dump_printf (MSG_NOTE, "\n");
446 }
447 }
448
449 /* The vectorization factor is determined by the smallest
450 scalar type (or the largest vector size, but we only
451 support one vector size per loop). */
452 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
453 &dummy);
454 if (dump_enabled_p ())
455 {
456 dump_printf_loc (MSG_NOTE, vect_location,
457 "get vectype for scalar type: ");
458 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
459 dump_printf (MSG_NOTE, "\n");
460 }
461 vf_vectype = get_vectype_for_scalar_type (scalar_type);
462 if (!vf_vectype)
463 {
464 if (dump_enabled_p ())
465 {
466 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
467 "not vectorized: unsupported data-type ");
468 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
469 scalar_type);
470 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
471 }
472 return false;
473 }
474
475 if ((GET_MODE_SIZE (TYPE_MODE (vectype))
476 != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
477 {
478 if (dump_enabled_p ())
479 {
480 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
481 "not vectorized: different sized vector "
482 "types in statement, ");
483 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
484 vectype);
485 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
486 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
487 vf_vectype);
488 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
489 }
490 return false;
491 }
492
493 if (dump_enabled_p ())
494 {
495 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
496 dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
497 dump_printf (MSG_NOTE, "\n");
498 }
499
500 nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
501 if (dump_enabled_p ())
502 dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
503 if (!vectorization_factor
504 || (nunits > vectorization_factor))
505 vectorization_factor = nunits;
506
507 if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
508 {
509 pattern_def_seq = NULL;
510 gsi_next (&si);
511 }
512 }
513 }
514
515 /* TODO: Analyze cost. Decide if worthwhile to vectorize. */
516 if (dump_enabled_p ())
517 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
518 vectorization_factor);
519 if (vectorization_factor <= 1)
520 {
521 if (dump_enabled_p ())
522 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
523 "not vectorized: unsupported data-type\n");
524 return false;
525 }
526 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
527
528 return true;
529 }
530
531
532 /* Function vect_is_simple_iv_evolution.
533
534 FORNOW: A simple evolution of an induction variable in the loop is
535 considered a polynomial evolution. */
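
/* For example (an illustrative sketch): for an IV updated as  i = i + 4
   in the loop, the access function is the chrec  {i_init, +, 4}_loop, so
   *INIT is set to i_init and *STEP to 4.  */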
536
537 static bool
538 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
539 tree * step)
540 {
541 tree init_expr;
542 tree step_expr;
543 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
544 basic_block bb;
545
546 /* When there is no evolution in this loop, the evolution function
547 is not "simple". */
548 if (evolution_part == NULL_TREE)
549 return false;
550
551 /* When the evolution is a polynomial of degree >= 2
552 the evolution function is not "simple". */
553 if (tree_is_chrec (evolution_part))
554 return false;
555
556 step_expr = evolution_part;
557 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
558
559 if (dump_enabled_p ())
560 {
561 dump_printf_loc (MSG_NOTE, vect_location, "step: ");
562 dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
563 dump_printf (MSG_NOTE, ", init: ");
564 dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
565 dump_printf (MSG_NOTE, "\n");
566 }
567
568 *init = init_expr;
569 *step = step_expr;
570
571 if (TREE_CODE (step_expr) != INTEGER_CST
572 && (TREE_CODE (step_expr) != SSA_NAME
573 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
574 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
575 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
576 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
577 || !flag_associative_math)))
578 && (TREE_CODE (step_expr) != REAL_CST
579 || !flag_associative_math))
580 {
581 if (dump_enabled_p ())
582 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
583 "step unknown.\n");
584 return false;
585 }
586
587 return true;
588 }
589
590 /* Function vect_analyze_scalar_cycles_1.
591
592 Examine the cross iteration def-use cycles of scalar variables
593 in LOOP. LOOP_VINFO represents the loop that is now being
594 considered for vectorization (can be LOOP, or an outer-loop
595 enclosing LOOP). */
596
597 static void
598 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
599 {
600 basic_block bb = loop->header;
601 tree init, step;
602 stack_vec<gimple, 64> worklist;
603 gimple_stmt_iterator gsi;
604 bool double_reduc;
605
606 if (dump_enabled_p ())
607 dump_printf_loc (MSG_NOTE, vect_location,
608 "=== vect_analyze_scalar_cycles ===\n");
609
610 /* First - identify all inductions. Reduction detection assumes that all the
611 inductions have been identified; therefore, this order must not be
612 changed. */
613 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
614 {
615 gimple phi = gsi_stmt (gsi);
616 tree access_fn = NULL;
617 tree def = PHI_RESULT (phi);
618 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
619
620 if (dump_enabled_p ())
621 {
622 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
623 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
624 dump_printf (MSG_NOTE, "\n");
625 }
626
627 /* Skip virtual phi's. The data dependences that are associated with
628 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
629 if (virtual_operand_p (def))
630 continue;
631
632 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
633
634 /* Analyze the evolution function. */
635 access_fn = analyze_scalar_evolution (loop, def);
636 if (access_fn)
637 {
638 STRIP_NOPS (access_fn);
639 if (dump_enabled_p ())
640 {
641 dump_printf_loc (MSG_NOTE, vect_location,
642 "Access function of PHI: ");
643 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
644 dump_printf (MSG_NOTE, "\n");
645 }
646 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
647 = evolution_part_in_loop_num (access_fn, loop->num);
648 }
649
650 if (!access_fn
651 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
652 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
653 && TREE_CODE (step) != INTEGER_CST))
654 {
655 worklist.safe_push (phi);
656 continue;
657 }
658
659 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
660
661 if (dump_enabled_p ())
662 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
663 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
664 }
665
666
667 /* Second - identify all reductions and nested cycles. */
668 while (worklist.length () > 0)
669 {
670 gimple phi = worklist.pop ();
671 tree def = PHI_RESULT (phi);
672 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
673 gimple reduc_stmt;
674 bool nested_cycle;
675
676 if (dump_enabled_p ())
677 {
678 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
679 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
680 dump_printf (MSG_NOTE, "\n");
681 }
682
683 gcc_assert (!virtual_operand_p (def)
684 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
685
686 nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
687 reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
688 &double_reduc);
689 if (reduc_stmt)
690 {
691 if (double_reduc)
692 {
693 if (dump_enabled_p ())
694 dump_printf_loc (MSG_NOTE, vect_location,
695 "Detected double reduction.\n");
696
697 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
698 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
699 vect_double_reduction_def;
700 }
701 else
702 {
703 if (nested_cycle)
704 {
705 if (dump_enabled_p ())
706 dump_printf_loc (MSG_NOTE, vect_location,
707 "Detected vectorizable nested cycle.\n");
708
709 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
710 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
711 vect_nested_cycle;
712 }
713 else
714 {
715 if (dump_enabled_p ())
716 dump_printf_loc (MSG_NOTE, vect_location,
717 "Detected reduction.\n");
718
719 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
720 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
721 vect_reduction_def;
722 /* Store the reduction cycles for possible vectorization in
723 loop-aware SLP. */
724 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
725 }
726 }
727 }
728 else
729 if (dump_enabled_p ())
730 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
731 "Unknown def-use cycle pattern.\n");
732 }
733 }
734
735
736 /* Function vect_analyze_scalar_cycles.
737
738 Examine the cross iteration def-use cycles of scalar variables, by
739 analyzing the loop-header PHIs of scalar variables. Classify each
740 cycle as one of the following: invariant, induction, reduction, unknown.
741 We do that for the loop represented by LOOP_VINFO, and also for its
742 inner-loop, if it exists.
743 Examples of scalar cycles:
744
745 Example1: reduction:
746
747 loop1:
748 for (i=0; i<N; i++)
749 sum += a[i];
750
751 Example2: induction:
752
753 loop2:
754 for (i=0; i<N; i++)
755 a[i] = i; */
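
/* Example3 (an illustrative sketch): double reduction, when the outer loop
   of the nest below is the one being vectorized:

   loop3:
   for (i=0; i<N; i++)
     for (j=0; j<M; j++)
       sum += a[i][j];  */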
756
757 static void
758 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
759 {
760 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
761
762 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
763
764 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
765 Reductions in such an inner-loop therefore have different properties from
766 the reductions in the nest that gets vectorized:
767 1. When vectorized, they are executed in the same order as in the original
768 scalar loop, so we can't change the order of computation when
769 vectorizing them.
770 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
771 current checks are too strict. */
772
773 if (loop->inner)
774 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
775 }
776
777
778 /* Function vect_get_loop_niters.
779
780 Determine the number of iterations the loop executes and place it
781 in NUMBER_OF_ITERATIONS.
782
783 Return the loop exit condition. */
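
/* For example (illustration only): for a loop whose body executes 4 times,
   number_of_latch_executions returns 3, so NUMBER_OF_ITERATIONS is set to
   3 + 1 = 4, the number of times the loop header executes.  */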
784
785 static gimple
786 vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
787 {
788 tree niters;
789
790 if (dump_enabled_p ())
791 dump_printf_loc (MSG_NOTE, vect_location,
792 "=== get_loop_niters ===\n");
793
794 niters = number_of_latch_executions (loop);
795 /* We want the number of loop header executions which is the number
796 of latch executions plus one.
797 ??? For UINT_MAX latch executions this number overflows to zero
798 for loops like do { n++; } while (n != 0); */
799 if (niters && !chrec_contains_undetermined (niters))
800 niters = fold_build2 (PLUS_EXPR, TREE_TYPE (niters), niters,
801 build_int_cst (TREE_TYPE (niters), 1));
802 *number_of_iterations = niters;
803
804 return get_loop_exit_condition (loop);
805 }
806
807
808 /* Function bb_in_loop_p
809
810 Used as predicate for dfs order traversal of the loop bbs. */
811
812 static bool
813 bb_in_loop_p (const_basic_block bb, const void *data)
814 {
815 const struct loop *const loop = (const struct loop *)data;
816 if (flow_bb_inside_loop_p (loop, bb))
817 return true;
818 return false;
819 }
820
821
822 /* Function new_loop_vec_info.
823
824 Create and initialize a new loop_vec_info struct for LOOP, as well as
825 stmt_vec_info structs for all the stmts in LOOP. */
826
827 static loop_vec_info
828 new_loop_vec_info (struct loop *loop)
829 {
830 loop_vec_info res;
831 basic_block *bbs;
832 gimple_stmt_iterator si;
833 unsigned int i, nbbs;
834
835 res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
836 LOOP_VINFO_LOOP (res) = loop;
837
838 bbs = get_loop_body (loop);
839
840 /* Create/Update stmt_info for all stmts in the loop. */
841 for (i = 0; i < loop->num_nodes; i++)
842 {
843 basic_block bb = bbs[i];
844
845 /* BBs in a nested inner-loop will already have been processed (because
846 we will have called vect_analyze_loop_form for any nested inner-loop).
847 Therefore, for stmts in an inner-loop we just want to update the
848 STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
849 loop_info of the outer-loop we are currently considering to vectorize
850 (instead of the loop_info of the inner-loop).
851 For stmts in other BBs we need to create a stmt_info from scratch. */
852 if (bb->loop_father != loop)
853 {
854 /* Inner-loop bb. */
855 gcc_assert (loop->inner && bb->loop_father == loop->inner);
856 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
857 {
858 gimple phi = gsi_stmt (si);
859 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
860 loop_vec_info inner_loop_vinfo =
861 STMT_VINFO_LOOP_VINFO (stmt_info);
862 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
863 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
864 }
865 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
866 {
867 gimple stmt = gsi_stmt (si);
868 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
869 loop_vec_info inner_loop_vinfo =
870 STMT_VINFO_LOOP_VINFO (stmt_info);
871 gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
872 STMT_VINFO_LOOP_VINFO (stmt_info) = res;
873 }
874 }
875 else
876 {
877 /* bb in current nest. */
878 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
879 {
880 gimple phi = gsi_stmt (si);
881 gimple_set_uid (phi, 0);
882 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
883 }
884
885 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
886 {
887 gimple stmt = gsi_stmt (si);
888 gimple_set_uid (stmt, 0);
889 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
890 }
891 }
892 }
893
894 /* CHECKME: We want to visit all BBs before their successors (except for
895 latch blocks, for which this assertion wouldn't hold). In the simple
896 case of the loop forms we allow, a dfs order of the BBs would be the same
897 as reversed postorder traversal, so we are safe. */
898
899 free (bbs);
900 bbs = XCNEWVEC (basic_block, loop->num_nodes);
901 nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
902 bbs, loop->num_nodes, loop);
903 gcc_assert (nbbs == loop->num_nodes);
904
905 LOOP_VINFO_BBS (res) = bbs;
906 LOOP_VINFO_NITERS (res) = NULL;
907 LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
908 LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
909 LOOP_VINFO_VECTORIZABLE_P (res) = 0;
910 LOOP_VINFO_PEELING_FOR_ALIGNMENT (res) = 0;
911 LOOP_VINFO_VECT_FACTOR (res) = 0;
912 LOOP_VINFO_LOOP_NEST (res).create (3);
913 LOOP_VINFO_DATAREFS (res).create (10);
914 LOOP_VINFO_DDRS (res).create (10 * 10);
915 LOOP_VINFO_UNALIGNED_DR (res) = NULL;
916 LOOP_VINFO_MAY_MISALIGN_STMTS (res).create (
917 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
918 LOOP_VINFO_MAY_ALIAS_DDRS (res).create (
919 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
920 LOOP_VINFO_GROUPED_STORES (res).create (10);
921 LOOP_VINFO_REDUCTIONS (res).create (10);
922 LOOP_VINFO_REDUCTION_CHAINS (res).create (10);
923 LOOP_VINFO_SLP_INSTANCES (res).create (10);
924 LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
925 LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
926 LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
927 LOOP_VINFO_PEELING_FOR_NITER (res) = false;
928 LOOP_VINFO_OPERANDS_SWAPPED (res) = false;
929
930 return res;
931 }
932
933
934 /* Function destroy_loop_vec_info.
935
936 Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
937 stmts in the loop. */
938
939 void
940 destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
941 {
942 struct loop *loop;
943 basic_block *bbs;
944 int nbbs;
945 gimple_stmt_iterator si;
946 int j;
947 vec<slp_instance> slp_instances;
948 slp_instance instance;
949 bool swapped;
950
951 if (!loop_vinfo)
952 return;
953
954 loop = LOOP_VINFO_LOOP (loop_vinfo);
955
956 bbs = LOOP_VINFO_BBS (loop_vinfo);
957 nbbs = clean_stmts ? loop->num_nodes : 0;
958 swapped = LOOP_VINFO_OPERANDS_SWAPPED (loop_vinfo);
959
960 for (j = 0; j < nbbs; j++)
961 {
962 basic_block bb = bbs[j];
963 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
964 free_stmt_vec_info (gsi_stmt (si));
965
966 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
967 {
968 gimple stmt = gsi_stmt (si);
969
970 /* We may have broken canonical form by moving a constant
971 into RHS1 of a commutative op. Fix such occurrences. */
972 if (swapped && is_gimple_assign (stmt))
973 {
974 enum tree_code code = gimple_assign_rhs_code (stmt);
975
976 if ((code == PLUS_EXPR
977 || code == POINTER_PLUS_EXPR
978 || code == MULT_EXPR)
979 && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
980 swap_ssa_operands (stmt,
981 gimple_assign_rhs1_ptr (stmt),
982 gimple_assign_rhs2_ptr (stmt));
983 }
984
985 /* Free stmt_vec_info. */
986 free_stmt_vec_info (stmt);
987 gsi_next (&si);
988 }
989 }
990
991 free (LOOP_VINFO_BBS (loop_vinfo));
992 vect_destroy_datarefs (loop_vinfo, NULL);
993 free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
994 LOOP_VINFO_LOOP_NEST (loop_vinfo).release ();
995 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release ();
996 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release ();
997 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
998 FOR_EACH_VEC_ELT (slp_instances, j, instance)
999 vect_free_slp_instance (instance);
1000
1001 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
1002 LOOP_VINFO_GROUPED_STORES (loop_vinfo).release ();
1003 LOOP_VINFO_REDUCTIONS (loop_vinfo).release ();
1004 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release ();
1005
1006 if (LOOP_VINFO_PEELING_HTAB (loop_vinfo).is_created ())
1007 LOOP_VINFO_PEELING_HTAB (loop_vinfo).dispose ();
1008
1009 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
1010
1011 free (loop_vinfo);
1012 loop->aux = NULL;
1013 }
1014
1015
1016 /* Function vect_analyze_loop_1.
1017
1018 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1019 for it. The different analyses will record information in the
1020 loop_vec_info struct. This is a subset of the analyses applied in
1021 vect_analyze_loop, to be applied on an inner-loop nested in the loop
1022 that is now considered for (outer-loop) vectorization. */
1023
1024 static loop_vec_info
1025 vect_analyze_loop_1 (struct loop *loop)
1026 {
1027 loop_vec_info loop_vinfo;
1028
1029 if (dump_enabled_p ())
1030 dump_printf_loc (MSG_NOTE, vect_location,
1031 "===== analyze_loop_nest_1 =====\n");
1032
1033 /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.). */
1034
1035 loop_vinfo = vect_analyze_loop_form (loop);
1036 if (!loop_vinfo)
1037 {
1038 if (dump_enabled_p ())
1039 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1040 "bad inner-loop form.\n");
1041 return NULL;
1042 }
1043
1044 return loop_vinfo;
1045 }
1046
1047
1048 /* Function vect_analyze_loop_form.
1049
1050 Verify that certain CFG restrictions hold, including:
1051 - the loop has a pre-header
1052 - the loop has a single entry and exit
1053 - the loop exit condition is simple enough, and the number of iterations
1054 can be analyzed (a countable loop). */
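
/* For example (illustration only): a loop such as
     for (i = 0; i < n; i++)  a[i] = b[i];
   is countable (the number of iterations is n), whereas a loop whose exit
   depends on a value computed inside the loop body, e.g. a search loop that
   breaks on a loaded value, generally is not.  */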
1055
1056 loop_vec_info
1057 vect_analyze_loop_form (struct loop *loop)
1058 {
1059 loop_vec_info loop_vinfo;
1060 gimple loop_cond;
1061 tree number_of_iterations = NULL;
1062 loop_vec_info inner_loop_vinfo = NULL;
1063
1064 if (dump_enabled_p ())
1065 dump_printf_loc (MSG_NOTE, vect_location,
1066 "=== vect_analyze_loop_form ===\n");
1067
1068 /* Different restrictions apply when we are considering an inner-most loop,
1069 vs. an outer (nested) loop.
1070 (FORNOW. May want to relax some of these restrictions in the future). */
1071
1072 if (!loop->inner)
1073 {
1074 /* Inner-most loop. We currently require that the number of BBs is
1075 exactly 2 (the header and latch). Vectorizable inner-most loops
1076 look like this:
1077
1078 (pre-header)
1079 |
1080 header <--------+
1081 | | |
1082 | +--> latch --+
1083 |
1084 (exit-bb) */
1085
1086 if (loop->num_nodes != 2)
1087 {
1088 if (dump_enabled_p ())
1089 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1090 "not vectorized: control flow in loop.\n");
1091 return NULL;
1092 }
1093
1094 if (empty_block_p (loop->header))
1095 {
1096 if (dump_enabled_p ())
1097 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1098 "not vectorized: empty loop.\n");
1099 return NULL;
1100 }
1101 }
1102 else
1103 {
1104 struct loop *innerloop = loop->inner;
1105 edge entryedge;
1106
1107 /* Nested loop. We currently require that the loop is doubly-nested,
1108 contains a single inner loop, and the number of BBs is exactly 5.
1109 Vectorizable outer-loops look like this:
1110
1111 (pre-header)
1112 |
1113 header <---+
1114 | |
1115 inner-loop |
1116 | |
1117 tail ------+
1118 |
1119 (exit-bb)
1120
1121 The inner-loop has the properties expected of inner-most loops
1122 as described above. */
1123
1124 if ((loop->inner)->inner || (loop->inner)->next)
1125 {
1126 if (dump_enabled_p ())
1127 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1128 "not vectorized: multiple nested loops.\n");
1129 return NULL;
1130 }
1131
1132 /* Analyze the inner-loop. */
1133 inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
1134 if (!inner_loop_vinfo)
1135 {
1136 if (dump_enabled_p ())
1137 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1138 "not vectorized: Bad inner loop.\n");
1139 return NULL;
1140 }
1141
1142 if (!expr_invariant_in_loop_p (loop,
1143 LOOP_VINFO_NITERS (inner_loop_vinfo)))
1144 {
1145 if (dump_enabled_p ())
1146 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1147 "not vectorized: inner-loop count not"
1148 " invariant.\n");
1149 destroy_loop_vec_info (inner_loop_vinfo, true);
1150 return NULL;
1151 }
1152
1153 if (loop->num_nodes != 5)
1154 {
1155 if (dump_enabled_p ())
1156 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1157 "not vectorized: control flow in loop.\n");
1158 destroy_loop_vec_info (inner_loop_vinfo, true);
1159 return NULL;
1160 }
1161
1162 gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
1163 entryedge = EDGE_PRED (innerloop->header, 0);
1164 if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
1165 entryedge = EDGE_PRED (innerloop->header, 1);
1166
1167 if (entryedge->src != loop->header
1168 || !single_exit (innerloop)
1169 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1170 {
1171 if (dump_enabled_p ())
1172 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1173 "not vectorized: unsupported outerloop form.\n");
1174 destroy_loop_vec_info (inner_loop_vinfo, true);
1175 return NULL;
1176 }
1177
1178 if (dump_enabled_p ())
1179 dump_printf_loc (MSG_NOTE, vect_location,
1180 "Considering outer-loop vectorization.\n");
1181 }
1182
1183 if (!single_exit (loop)
1184 || EDGE_COUNT (loop->header->preds) != 2)
1185 {
1186 if (dump_enabled_p ())
1187 {
1188 if (!single_exit (loop))
1189 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1190 "not vectorized: multiple exits.\n");
1191 else if (EDGE_COUNT (loop->header->preds) != 2)
1192 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1193 "not vectorized: too many incoming edges.\n");
1194 }
1195 if (inner_loop_vinfo)
1196 destroy_loop_vec_info (inner_loop_vinfo, true);
1197 return NULL;
1198 }
1199
1200 /* We assume that the loop exit condition is at the end of the loop, i.e.,
1201 that the loop is represented as a do-while (with a proper if-guard
1202 before the loop if needed), where the loop header contains all the
1203 executable statements, and the latch is empty. */
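
  /* For instance (an illustrative sketch), a loop written as
       for (i = 0; i < n; i++)  a[i] = b[i];
     is expected here in the equivalent guarded do-while form
       if (n > 0)
         do { a[i] = b[i]; i++; } while (i < n);
     where the exit test is the last stmt of the header and the latch block
     contains no statements.  */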
1204 if (!empty_block_p (loop->latch)
1205 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1206 {
1207 if (dump_enabled_p ())
1208 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1209 "not vectorized: latch block not empty.\n");
1210 if (inner_loop_vinfo)
1211 destroy_loop_vec_info (inner_loop_vinfo, true);
1212 return NULL;
1213 }
1214
1215 /* Make sure there exists a single-predecessor exit bb: */
1216 if (!single_pred_p (single_exit (loop)->dest))
1217 {
1218 edge e = single_exit (loop);
1219 if (!(e->flags & EDGE_ABNORMAL))
1220 {
1221 split_loop_exit_edge (e);
1222 if (dump_enabled_p ())
1223 dump_printf (MSG_NOTE, "split exit edge.\n");
1224 }
1225 else
1226 {
1227 if (dump_enabled_p ())
1228 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1229 "not vectorized: abnormal loop exit edge.\n");
1230 if (inner_loop_vinfo)
1231 destroy_loop_vec_info (inner_loop_vinfo, true);
1232 return NULL;
1233 }
1234 }
1235
1236 loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
1237 if (!loop_cond)
1238 {
1239 if (dump_enabled_p ())
1240 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1241 "not vectorized: complicated exit condition.\n");
1242 if (inner_loop_vinfo)
1243 destroy_loop_vec_info (inner_loop_vinfo, true);
1244 return NULL;
1245 }
1246
1247 if (!number_of_iterations
1248 || chrec_contains_undetermined (number_of_iterations))
1249 {
1250 if (dump_enabled_p ())
1251 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1252 "not vectorized: number of iterations cannot be "
1253 "computed.\n");
1254 if (inner_loop_vinfo)
1255 destroy_loop_vec_info (inner_loop_vinfo, true);
1256 return NULL;
1257 }
1258
1259 if (integer_zerop (number_of_iterations))
1260 {
1261 if (dump_enabled_p ())
1262 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1263 "not vectorized: number of iterations = 0.\n");
1264 if (inner_loop_vinfo)
1265 destroy_loop_vec_info (inner_loop_vinfo, true);
1266 return NULL;
1267 }
1268
1269 loop_vinfo = new_loop_vec_info (loop);
1270 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1271 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1272
1273 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1274 {
1275 if (dump_enabled_p ())
1276 {
1277 dump_printf_loc (MSG_NOTE, vect_location,
1278 "Symbolic number of iterations is ");
1279 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1280 dump_printf (MSG_NOTE, "\n");
1281 }
1282 }
1283
1284 STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
1285
1286 /* CHECKME: May want to keep it around in the future. */
1287 if (inner_loop_vinfo)
1288 destroy_loop_vec_info (inner_loop_vinfo, false);
1289
1290 gcc_assert (!loop->aux);
1291 loop->aux = loop_vinfo;
1292 return loop_vinfo;
1293 }
1294
1295
1296 /* Function vect_analyze_loop_operations.
1297
1298 Scan the loop stmts and make sure they are all vectorizable. */
1299
1300 static bool
1301 vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
1302 {
1303 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1304 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1305 int nbbs = loop->num_nodes;
1306 gimple_stmt_iterator si;
1307 unsigned int vectorization_factor = 0;
1308 int i;
1309 gimple phi;
1310 stmt_vec_info stmt_info;
1311 bool need_to_vectorize = false;
1312 int min_profitable_iters;
1313 int min_scalar_loop_bound;
1314 unsigned int th;
1315 bool only_slp_in_loop = true, ok;
1316 HOST_WIDE_INT max_niter;
1317 HOST_WIDE_INT estimated_niter;
1318 int min_profitable_estimate;
1319
1320 if (dump_enabled_p ())
1321 dump_printf_loc (MSG_NOTE, vect_location,
1322 "=== vect_analyze_loop_operations ===\n");
1323
1324 gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
1325 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1326 if (slp)
1327 {
1328 /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
1329 vectorization factor of the loop is the unrolling factor required by
1330 the SLP instances. If that unrolling factor is 1, we say that we
1331 perform pure SLP on the loop - cross-iteration parallelism is not
1332 exploited. */
1333 for (i = 0; i < nbbs; i++)
1334 {
1335 basic_block bb = bbs[i];
1336 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1337 {
1338 gimple stmt = gsi_stmt (si);
1339 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1340 gcc_assert (stmt_info);
1341 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1342 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1343 && !PURE_SLP_STMT (stmt_info))
1344 /* STMT needs both SLP and loop-based vectorization. */
1345 only_slp_in_loop = false;
1346 }
1347 }
1348
1349 if (only_slp_in_loop)
1350 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1351 else
1352 vectorization_factor = least_common_multiple (vectorization_factor,
1353 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1354
1355 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1356 if (dump_enabled_p ())
1357 dump_printf_loc (MSG_NOTE, vect_location,
1358 "Updating vectorization factor to %d\n",
1359 vectorization_factor);
1360 }
1361
1362 for (i = 0; i < nbbs; i++)
1363 {
1364 basic_block bb = bbs[i];
1365
1366 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1367 {
1368 phi = gsi_stmt (si);
1369 ok = true;
1370
1371 stmt_info = vinfo_for_stmt (phi);
1372 if (dump_enabled_p ())
1373 {
1374 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
1375 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1376 dump_printf (MSG_NOTE, "\n");
1377 }
1378
1379 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1380 (i.e., a phi in the tail of the outer-loop). */
1381 if (! is_loop_header_bb_p (bb))
1382 {
1383 /* FORNOW: we currently don't support the case that these phis
1384 are not used in the outer-loop (unless it is a double reduction,
1385 i.e., this phi is vect_reduction_def), because this case
1386 would require us to actually do something here. */
1387 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
1388 || STMT_VINFO_LIVE_P (stmt_info))
1389 && STMT_VINFO_DEF_TYPE (stmt_info)
1390 != vect_double_reduction_def)
1391 {
1392 if (dump_enabled_p ())
1393 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1394 "Unsupported loop-closed phi in "
1395 "outer-loop.\n");
1396 return false;
1397 }
1398
1399 /* If PHI is used in the outer loop, we check that its operand
1400 is defined in the inner loop. */
1401 if (STMT_VINFO_RELEVANT_P (stmt_info))
1402 {
1403 tree phi_op;
1404 gimple op_def_stmt;
1405
1406 if (gimple_phi_num_args (phi) != 1)
1407 return false;
1408
1409 phi_op = PHI_ARG_DEF (phi, 0);
1410 if (TREE_CODE (phi_op) != SSA_NAME)
1411 return false;
1412
1413 op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
1414 if (gimple_nop_p (op_def_stmt)
1415 || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
1416 || !vinfo_for_stmt (op_def_stmt))
1417 return false;
1418
1419 if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1420 != vect_used_in_outer
1421 && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1422 != vect_used_in_outer_by_reduction)
1423 return false;
1424 }
1425
1426 continue;
1427 }
1428
1429 gcc_assert (stmt_info);
1430
1431 if (STMT_VINFO_LIVE_P (stmt_info))
1432 {
1433 /* FORNOW: not yet supported. */
1434 if (dump_enabled_p ())
1435 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1436 "not vectorized: value used after loop.\n");
1437 return false;
1438 }
1439
1440 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1441 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1442 {
1443 /* A scalar-dependence cycle that we don't support. */
1444 if (dump_enabled_p ())
1445 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1446 "not vectorized: scalar dependence cycle.\n");
1447 return false;
1448 }
1449
1450 if (STMT_VINFO_RELEVANT_P (stmt_info))
1451 {
1452 need_to_vectorize = true;
1453 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
1454 ok = vectorizable_induction (phi, NULL, NULL);
1455 }
1456
1457 if (!ok)
1458 {
1459 if (dump_enabled_p ())
1460 {
1461 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1462 "not vectorized: relevant phi not "
1463 "supported: ");
1464 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
1465 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
1466 }
1467 return false;
1468 }
1469 }
1470
1471 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1472 {
1473 gimple stmt = gsi_stmt (si);
1474 if (!gimple_clobber_p (stmt)
1475 && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
1476 return false;
1477 }
1478 } /* bbs */
1479
1480 /* All operations in the loop are either irrelevant (they deal with loop
1481 control, or are dead), or only used outside the loop and can be moved
1482 out of the loop (e.g. invariants, inductions). The loop can be
1483 optimized away by scalar optimizations. We're better off not
1484 touching this loop. */
1485 if (!need_to_vectorize)
1486 {
1487 if (dump_enabled_p ())
1488 dump_printf_loc (MSG_NOTE, vect_location,
1489 "All the computation can be taken out of the loop.\n");
1490 if (dump_enabled_p ())
1491 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1492 "not vectorized: redundant loop. no profit to "
1493 "vectorize.\n");
1494 return false;
1495 }
1496
1497 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1498 dump_printf_loc (MSG_NOTE, vect_location,
1499 "vectorization_factor = %d, niters = "
1500 HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor,
1501 LOOP_VINFO_INT_NITERS (loop_vinfo));
1502
1503 if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1504 && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
1505 || ((max_niter = max_stmt_executions_int (loop)) != -1
1506 && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
1507 {
1508 if (dump_enabled_p ())
1509 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1510 "not vectorized: iteration count too small.\n");
1511 if (dump_enabled_p ())
1512 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1513 "not vectorized: iteration count smaller than "
1514 "vectorization factor.\n");
1515 return false;
1516 }
1517
1518 /* Analyze cost. Decide if worthwhile to vectorize. */
1519
1520 /* Once VF is set, SLP costs should be updated since the number of created
1521 vector stmts depends on VF. */
1522 vect_update_slp_costs_according_to_vf (loop_vinfo);
1523
1524 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1525 &min_profitable_estimate);
1526 LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;
1527
1528 if (min_profitable_iters < 0)
1529 {
1530 if (dump_enabled_p ())
1531 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1532 "not vectorized: vectorization not profitable.\n");
1533 if (dump_enabled_p ())
1534 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1535 "not vectorized: vector version will never be "
1536 "profitable.\n");
1537 return false;
1538 }
1539
1540 min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1541 * vectorization_factor) - 1);
1542
1543
1544 /* Use the cost model only if it is more conservative than the user-specified
1545 threshold. */
1546
1547 th = (unsigned) min_scalar_loop_bound;
1548 if (min_profitable_iters
1549 && (!min_scalar_loop_bound
1550 || min_profitable_iters > min_scalar_loop_bound))
1551 th = (unsigned) min_profitable_iters;
1552
1553 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1554 && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
1555 {
1556 if (dump_enabled_p ())
1557 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1558 "not vectorized: vectorization not profitable.\n");
1559 if (dump_enabled_p ())
1560 dump_printf_loc (MSG_NOTE, vect_location,
1561 "not vectorized: iteration count smaller than user "
1562 "specified loop bound parameter or minimum profitable "
1563 "iterations (whichever is more conservative).\n");
1564 return false;
1565 }
1566
1567 if ((estimated_niter = estimated_stmt_executions_int (loop)) != -1
1568 && ((unsigned HOST_WIDE_INT) estimated_niter
1569 <= MAX (th, (unsigned)min_profitable_estimate)))
1570 {
1571 if (dump_enabled_p ())
1572 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1573 "not vectorized: estimated iteration count too "
1574 "small.\n");
1575 if (dump_enabled_p ())
1576 dump_printf_loc (MSG_NOTE, vect_location,
1577 "not vectorized: estimated iteration count smaller "
1578 "than specified loop bound parameter or minimum "
1579 "profitable iterations (whichever is more "
1580 "conservative).\n");
1581 return false;
1582 }
1583
1584 return true;
1585 }
1586
1587
1588 /* Function vect_analyze_loop_2.
1589
1590 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1591 for it. The different analyses will record information in the
1592 loop_vec_info struct. */
1593 static bool
1594 vect_analyze_loop_2 (loop_vec_info loop_vinfo)
1595 {
1596 bool ok, slp = false;
1597 int max_vf = MAX_VECTORIZATION_FACTOR;
1598 int min_vf = 2;
1599
1600 /* Find all data references in the loop (which correspond to vdefs/vuses)
1601 and analyze their evolution in the loop. Also adjust the minimal
1602 vectorization factor according to the loads and stores.
1603
1604 FORNOW: Handle only simple array references whose
1605 alignment can be forced, and aligned pointer-references. */
1606
1607 ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
1608 if (!ok)
1609 {
1610 if (dump_enabled_p ())
1611 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1612 "bad data references.\n");
1613 return false;
1614 }
1615
1616 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1617 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1618
1619 ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
1620 if (!ok)
1621 {
1622 if (dump_enabled_p ())
1623 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1624 "bad data access.\n");
1625 return false;
1626 }
1627
1628 /* Classify all cross-iteration scalar data-flow cycles.
1629 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1630
1631 vect_analyze_scalar_cycles (loop_vinfo);
1632
1633 vect_pattern_recog (loop_vinfo, NULL);
1634
1635 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1636
1637 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1638 if (!ok)
1639 {
1640 if (dump_enabled_p ())
1641 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1642 "unexpected pattern.\n");
1643 return false;
1644 }
1645
1646 /* Analyze data dependences between the data-refs in the loop
1647 and adjust the maximum vectorization factor according to
1648 the dependences.
1649 FORNOW: fail at the first data dependence that we encounter. */
1650
1651 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1652 if (!ok
1653 || max_vf < min_vf)
1654 {
1655 if (dump_enabled_p ())
1656 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1657 "bad data dependence.\n");
1658 return false;
1659 }
1660
1661 ok = vect_determine_vectorization_factor (loop_vinfo);
1662 if (!ok)
1663 {
1664 if (dump_enabled_p ())
1665 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1666 "can't determine vectorization factor.\n");
1667 return false;
1668 }
1669 if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
1670 {
1671 if (dump_enabled_p ())
1672 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1673 "bad data dependence.\n");
1674 return false;
1675 }
1676
1677 /* Analyze the alignment of the data-refs in the loop.
1678 Fail if a data reference is found that cannot be vectorized. */
1679
1680 ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
1681 if (!ok)
1682 {
1683 if (dump_enabled_p ())
1684 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1685 "bad data alignment.\n");
1686 return false;
1687 }
1688
1689 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1690 It is important to call pruning after vect_analyze_data_ref_accesses,
1691 since we use grouping information gathered by interleaving analysis. */
1692 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1693 if (!ok)
1694 {
1695 if (dump_enabled_p ())
1696 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1697 "too long list of versioning for alias "
1698 "run-time tests.\n");
1699 return false;
1700 }
1701
1702 /* This pass will decide on using loop versioning and/or loop peeling in
1703 order to enhance the alignment of data references in the loop. */
1704
1705 ok = vect_enhance_data_refs_alignment (loop_vinfo);
1706 if (!ok)
1707 {
1708 if (dump_enabled_p ())
1709 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1710 "bad data alignment.\n");
1711 return false;
1712 }
1713
1714 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1715 ok = vect_analyze_slp (loop_vinfo, NULL);
1716 if (ok)
1717 {
1718 /* Decide which possible SLP instances to SLP. */
1719 slp = vect_make_slp_decision (loop_vinfo);
1720
1721 /* Find stmts that need to be both vectorized and SLPed. */
1722 vect_detect_hybrid_slp (loop_vinfo);
1723 }
1724 else
1725 return false;
1726
1727 /* Scan all the operations in the loop and make sure they are
1728 vectorizable. */
1729
1730 ok = vect_analyze_loop_operations (loop_vinfo, slp);
1731 if (!ok)
1732 {
1733 if (dump_enabled_p ())
1734 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1735 "bad operation or unsupported loop bound.\n");
1736 return false;
1737 }
1738
1739 /* Decide whether we need to create an epilogue loop to handle
1740 remaining scalar iterations. */
1741 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1742 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
1743 {
1744 if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo)
1745 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
1746 < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
1747 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
1748 }
1749 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1750 || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
1751 < (unsigned)exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))))
1752 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
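
  /* Illustration only: with VF 4, no alignment peeling, and a known iteration
     count of 100, the count is a multiple of VF and no epilogue is needed;
     with 103 iterations an epilogue loop handles the remaining 3 scalar
     iterations.  */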
1753
1754 /* If an epilogue loop is required, make sure we can create one. */
1755 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
1756 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
1757 {
1758 if (dump_enabled_p ())
1759 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
1760 if (!vect_can_advance_ivs_p (loop_vinfo)
1761 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
1762 single_exit (LOOP_VINFO_LOOP
1763 (loop_vinfo))))
1764 {
1765 if (dump_enabled_p ())
1766 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1767 "not vectorized: can't create required "
1768 "epilog loop\n");
1769 return false;
1770 }
1771 }
1772
1773 return true;
1774 }
1775
1776 /* Function vect_analyze_loop.
1777
1778 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1779 for it. The different analyses will record information in the
1780 loop_vec_info struct. */
1781 loop_vec_info
1782 vect_analyze_loop (struct loop *loop)
1783 {
1784 loop_vec_info loop_vinfo;
1785 unsigned int vector_sizes;
1786
1787 /* Autodetect first vector size we try. */
1788 current_vector_size = 0;
1789 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
1790
1791 if (dump_enabled_p ())
1792 dump_printf_loc (MSG_NOTE, vect_location,
1793 "===== analyze_loop_nest =====\n");
1794
1795 if (loop_outer (loop)
1796 && loop_vec_info_for_loop (loop_outer (loop))
1797 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
1798 {
1799 if (dump_enabled_p ())
1800 dump_printf_loc (MSG_NOTE, vect_location,
1801 "outer-loop already vectorized.\n");
1802 return NULL;
1803 }
1804
1805 while (1)
1806 {
1807 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
1808 loop_vinfo = vect_analyze_loop_form (loop);
1809 if (!loop_vinfo)
1810 {
1811 if (dump_enabled_p ())
1812 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1813 "bad loop form.\n");
1814 return NULL;
1815 }
1816
1817 if (vect_analyze_loop_2 (loop_vinfo))
1818 {
1819 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
1820
1821 return loop_vinfo;
1822 }
1823
1824 destroy_loop_vec_info (loop_vinfo, true);
1825
1826 vector_sizes &= ~current_vector_size;
1827 if (vector_sizes == 0
1828 || current_vector_size == 0)
1829 return NULL;
1830
1831 /* Try the next biggest vector size. */
1832 current_vector_size = 1 << floor_log2 (vector_sizes);
1833 if (dump_enabled_p ())
1834 dump_printf_loc (MSG_NOTE, vect_location,
1835 "***** Re-trying analysis with "
1836 "vector size %d\n", current_vector_size);
1837 }
1838 }
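/* To make the retry loop in vect_analyze_loop above concrete (the sizes
below are only an assumed example, not a statement about any particular
target): suppose targetm.vectorize.autovectorize_vector_sizes () returns
16 | 8 and the first, autodetecting attempt (current_vector_size == 0)
settles on 16-byte vectors but fails to vectorize. That size is then
masked out of vector_sizes, 1 << floor_log2 (vector_sizes) selects the
largest remaining size, and the whole analysis is re-run with 8-byte
vectors before the function finally gives up and returns NULL. */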
1839
1840
1841 /* Function reduction_code_for_scalar_code
1842
1843 Input:
1844 CODE - tree_code of a reduction operations.
1845
1846 Output:
1847 REDUC_CODE - the corresponding tree-code to be used to reduce the
1848 vector of partial results into a single scalar result (which
1849 will also reside in a vector) or ERROR_MARK if the operation is
1850 a supported reduction operation, but does not have such a tree-code.
1851
1852 Return FALSE if CODE currently cannot be vectorized as reduction. */
1853
1854 static bool
1855 reduction_code_for_scalar_code (enum tree_code code,
1856 enum tree_code *reduc_code)
1857 {
1858 switch (code)
1859 {
1860 case MAX_EXPR:
1861 *reduc_code = REDUC_MAX_EXPR;
1862 return true;
1863
1864 case MIN_EXPR:
1865 *reduc_code = REDUC_MIN_EXPR;
1866 return true;
1867
1868 case PLUS_EXPR:
1869 *reduc_code = REDUC_PLUS_EXPR;
1870 return true;
1871
1872 case MULT_EXPR:
1873 case MINUS_EXPR:
1874 case BIT_IOR_EXPR:
1875 case BIT_XOR_EXPR:
1876 case BIT_AND_EXPR:
1877 *reduc_code = ERROR_MARK;
1878 return true;
1879
1880 default:
1881 return false;
1882 }
1883 }
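/* For instance, a hypothetical caller of reduction_code_for_scalar_code
would observe the following (a sketch only; "rc" is just a local name):

enum tree_code rc;
reduction_code_for_scalar_code (PLUS_EXPR, &rc);
=> returns true, rc == REDUC_PLUS_EXPR: the vector of partial sums
can be reduced by a single target operation.
reduction_code_for_scalar_code (BIT_XOR_EXPR, &rc);
=> returns true, but rc == ERROR_MARK: the reduction is still
vectorizable, the epilogue just combines the elements with
shifts/extracts instead.
reduction_code_for_scalar_code (RDIV_EXPR, &rc);
=> returns false: division is not supported as a reduction. */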
1884
1885
1886 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
1887 STMT is printed with a message MSG. */
1888
1889 static void
1890 report_vect_op (int msg_type, gimple stmt, const char *msg)
1891 {
1892 dump_printf_loc (msg_type, vect_location, "%s", msg);
1893 dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
1894 dump_printf (msg_type, "\n");
1895 }
1896
1897
1898 /* Detect SLP reduction of the form:
1899
1900 #a1 = phi <a5, a0>
1901 a2 = operation (a1)
1902 a3 = operation (a2)
1903 a4 = operation (a3)
1904 a5 = operation (a4)
1905
1906 #a = phi <a5>
1907
1908 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
1909 FIRST_STMT is the first reduction stmt in the chain
1910 (a2 = operation (a1)).
1911
1912 Return TRUE if a reduction chain was detected. */
1913
1914 static bool
1915 vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt)
1916 {
1917 struct loop *loop = (gimple_bb (phi))->loop_father;
1918 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1919 enum tree_code code;
1920 gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt;
1921 stmt_vec_info use_stmt_info, current_stmt_info;
1922 tree lhs;
1923 imm_use_iterator imm_iter;
1924 use_operand_p use_p;
1925 int nloop_uses, size = 0, n_out_of_loop_uses;
1926 bool found = false;
1927
1928 if (loop != vect_loop)
1929 return false;
1930
1931 lhs = PHI_RESULT (phi);
1932 code = gimple_assign_rhs_code (first_stmt);
1933 while (1)
1934 {
1935 nloop_uses = 0;
1936 n_out_of_loop_uses = 0;
1937 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
1938 {
1939 gimple use_stmt = USE_STMT (use_p);
1940 if (is_gimple_debug (use_stmt))
1941 continue;
1942
1943 use_stmt = USE_STMT (use_p);
1944
1945 /* Check if we got back to the reduction phi. */
1946 if (use_stmt == phi)
1947 {
1948 loop_use_stmt = use_stmt;
1949 found = true;
1950 break;
1951 }
1952
1953 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
1954 {
1955 if (vinfo_for_stmt (use_stmt)
1956 && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
1957 {
1958 loop_use_stmt = use_stmt;
1959 nloop_uses++;
1960 }
1961 }
1962 else
1963 n_out_of_loop_uses++;
1964
1965 /* There can be either a single use in the loop or two uses in
1966 phi nodes. */
1967 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
1968 return false;
1969 }
1970
1971 if (found)
1972 break;
1973
1974 /* We reached a statement with no loop uses. */
1975 if (nloop_uses == 0)
1976 return false;
1977
1978 /* This is a loop exit phi, and we haven't reached the reduction phi. */
1979 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
1980 return false;
1981
1982 if (!is_gimple_assign (loop_use_stmt)
1983 || code != gimple_assign_rhs_code (loop_use_stmt)
1984 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
1985 return false;
1986
1987 /* Insert USE_STMT into reduction chain. */
1988 use_stmt_info = vinfo_for_stmt (loop_use_stmt);
1989 if (current_stmt)
1990 {
1991 current_stmt_info = vinfo_for_stmt (current_stmt);
1992 GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
1993 GROUP_FIRST_ELEMENT (use_stmt_info)
1994 = GROUP_FIRST_ELEMENT (current_stmt_info);
1995 }
1996 else
1997 GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
1998
1999 lhs = gimple_assign_lhs (loop_use_stmt);
2000 current_stmt = loop_use_stmt;
2001 size++;
2002 }
2003
2004 if (!found || loop_use_stmt != phi || size < 2)
2005 return false;
2006
2007 /* Swap the operands, if needed, to make the reduction operand be the second
2008 operand. */
2009 lhs = PHI_RESULT (phi);
2010 next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
2011 while (next_stmt)
2012 {
2013 if (gimple_assign_rhs2 (next_stmt) == lhs)
2014 {
2015 tree op = gimple_assign_rhs1 (next_stmt);
2016 gimple def_stmt = NULL;
2017
2018 if (TREE_CODE (op) == SSA_NAME)
2019 def_stmt = SSA_NAME_DEF_STMT (op);
2020
2021 /* Check that the other def is either defined in the loop
2022 ("vect_internal_def"), or it's an induction (defined by a
2023 loop-header phi-node). */
2024 if (def_stmt
2025 && gimple_bb (def_stmt)
2026 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2027 && (is_gimple_assign (def_stmt)
2028 || is_gimple_call (def_stmt)
2029 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2030 == vect_induction_def
2031 || (gimple_code (def_stmt) == GIMPLE_PHI
2032 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2033 == vect_internal_def
2034 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
2035 {
2036 lhs = gimple_assign_lhs (next_stmt);
2037 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2038 continue;
2039 }
2040
2041 return false;
2042 }
2043 else
2044 {
2045 tree op = gimple_assign_rhs2 (next_stmt);
2046 gimple def_stmt = NULL;
2047
2048 if (TREE_CODE (op) == SSA_NAME)
2049 def_stmt = SSA_NAME_DEF_STMT (op);
2050
2051 /* Check that the other def is either defined in the loop
2052 ("vect_internal_def"), or it's an induction (defined by a
2053 loop-header phi-node). */
2054 if (def_stmt
2055 && gimple_bb (def_stmt)
2056 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2057 && (is_gimple_assign (def_stmt)
2058 || is_gimple_call (def_stmt)
2059 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2060 == vect_induction_def
2061 || (gimple_code (def_stmt) == GIMPLE_PHI
2062 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2063 == vect_internal_def
2064 && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
2065 {
2066 if (dump_enabled_p ())
2067 {
2068 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2069 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2070 dump_printf (MSG_NOTE, "\n");
2071 }
2072
2073 swap_ssa_operands (next_stmt,
2074 gimple_assign_rhs1_ptr (next_stmt),
2075 gimple_assign_rhs2_ptr (next_stmt));
2076 update_stmt (next_stmt);
2077
2078 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2079 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2080 }
2081 else
2082 return false;
2083 }
2084
2085 lhs = gimple_assign_lhs (next_stmt);
2086 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2087 }
2088
2089 /* Save the chain for further analysis in SLP detection. */
2090 first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
2091 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
2092 GROUP_SIZE (vinfo_for_stmt (first)) = size;
2093
2094 return true;
2095 }
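/* As an illustration of the reduction chains detected above (an example
loop, not taken from any testcase):

s = 0;
for (i = 0; i < n; i += 2)
{
s += a[i];
s += a[i + 1];
}

produces a chain of size 2 hanging off the reduction phi for s: the first
statement adds a[i] to the phi result, the second adds a[i + 1] to the
first result, and only that last value feeds back into the phi.
GROUP_FIRST_ELEMENT/GROUP_NEXT_ELEMENT link the statements so that SLP
analysis can later vectorize the chain as a group. */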
2096
2097
2098 /* Function vect_is_simple_reduction_1
2099
2100 (1) Detect a cross-iteration def-use cycle that represents a simple
2101 reduction computation. We look for the following pattern:
2102
2103 loop_header:
2104 a1 = phi < a0, a2 >
2105 a3 = ...
2106 a2 = operation (a3, a1)
2107
2108 or
2109
2110 a3 = ...
2111 loop_header:
2112 a1 = phi < a0, a2 >
2113 a2 = operation (a3, a1)
2114
2115 such that:
2116 1. operation is commutative and associative and it is safe to
2117 change the order of the computation (if CHECK_REDUCTION is true)
2118 2. no uses for a2 in the loop (a2 is used out of the loop)
2119 3. no uses of a1 in the loop besides the reduction operation
2120 4. no uses of a1 outside the loop.
2121
2122 Conditions 1,4 are tested here.
2123 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2124
2125 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2126 nested cycles, if CHECK_REDUCTION is false.
2127
2128 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2129 reductions:
2130
2131 a1 = phi < a0, a2 >
2132 inner loop (def of a3)
2133 a2 = phi < a3 >
2134
2135 If MODIFY is true it also tries to rework the code in-place to enable
2136 detection of more reduction patterns. For the time being we rewrite
2137 "res -= RHS" into "res += -RHS" when it seems worthwhile.
2138 */
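/* As an illustrative source-level example of case (3) above (double
reduction; the loop is only a sketch):

s = 0;
for (j = 0; j < m; j++)   <-- outer loop, the one being vectorized
for (i = 0; i < n; i++)   <-- inner loop
s += a[j][i];

Here a1 = phi <a0, a2> is the phi of s in the outer-loop header, the
inner loop accumulates into a3, and a2 = phi <a3> is the loop-closed phi
on the inner-loop exit, which is what is reported below as a detected
double reduction. */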
2139
2140 static gimple
2141 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
2142 bool check_reduction, bool *double_reduc,
2143 bool modify)
2144 {
2145 struct loop *loop = (gimple_bb (phi))->loop_father;
2146 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2147 edge latch_e = loop_latch_edge (loop);
2148 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2149 gimple def_stmt, def1 = NULL, def2 = NULL;
2150 enum tree_code orig_code, code;
2151 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2152 tree type;
2153 int nloop_uses;
2154 tree name;
2155 imm_use_iterator imm_iter;
2156 use_operand_p use_p;
2157 bool phi_def;
2158
2159 *double_reduc = false;
2160
2161 /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
2162 otherwise, we assume outer loop vectorization. */
2163 gcc_assert ((check_reduction && loop == vect_loop)
2164 || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
2165
2166 name = PHI_RESULT (phi);
2167 nloop_uses = 0;
2168 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2169 {
2170 gimple use_stmt = USE_STMT (use_p);
2171 if (is_gimple_debug (use_stmt))
2172 continue;
2173
2174 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2175 {
2176 if (dump_enabled_p ())
2177 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2178 "intermediate value used outside loop.\n");
2179
2180 return NULL;
2181 }
2182
2183 if (vinfo_for_stmt (use_stmt)
2184 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2185 nloop_uses++;
2186 if (nloop_uses > 1)
2187 {
2188 if (dump_enabled_p ())
2189 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2190 "reduction used in loop.\n");
2191 return NULL;
2192 }
2193 }
2194
2195 if (TREE_CODE (loop_arg) != SSA_NAME)
2196 {
2197 if (dump_enabled_p ())
2198 {
2199 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2200 "reduction: not ssa_name: ");
2201 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2202 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2203 }
2204 return NULL;
2205 }
2206
2207 def_stmt = SSA_NAME_DEF_STMT (loop_arg);
2208 if (!def_stmt)
2209 {
2210 if (dump_enabled_p ())
2211 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2212 "reduction: no def_stmt.\n");
2213 return NULL;
2214 }
2215
2216 if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
2217 {
2218 if (dump_enabled_p ())
2219 {
2220 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
2221 dump_printf (MSG_NOTE, "\n");
2222 }
2223 return NULL;
2224 }
2225
2226 if (is_gimple_assign (def_stmt))
2227 {
2228 name = gimple_assign_lhs (def_stmt);
2229 phi_def = false;
2230 }
2231 else
2232 {
2233 name = PHI_RESULT (def_stmt);
2234 phi_def = true;
2235 }
2236
2237 nloop_uses = 0;
2238 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2239 {
2240 gimple use_stmt = USE_STMT (use_p);
2241 if (is_gimple_debug (use_stmt))
2242 continue;
2243 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2244 && vinfo_for_stmt (use_stmt)
2245 && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2246 nloop_uses++;
2247 if (nloop_uses > 1)
2248 {
2249 if (dump_enabled_p ())
2250 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2251 "reduction used in loop.\n");
2252 return NULL;
2253 }
2254 }
2255
2256 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2257 defined in the inner loop. */
2258 if (phi_def)
2259 {
2260 op1 = PHI_ARG_DEF (def_stmt, 0);
2261
2262 if (gimple_phi_num_args (def_stmt) != 1
2263 || TREE_CODE (op1) != SSA_NAME)
2264 {
2265 if (dump_enabled_p ())
2266 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2267 "unsupported phi node definition.\n");
2268
2269 return NULL;
2270 }
2271
2272 def1 = SSA_NAME_DEF_STMT (op1);
2273 if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2274 && loop->inner
2275 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2276 && is_gimple_assign (def1))
2277 {
2278 if (dump_enabled_p ())
2279 report_vect_op (MSG_NOTE, def_stmt,
2280 "detected double reduction: ");
2281
2282 *double_reduc = true;
2283 return def_stmt;
2284 }
2285
2286 return NULL;
2287 }
2288
2289 code = orig_code = gimple_assign_rhs_code (def_stmt);
2290
2291 /* We can handle "res -= x[i]", which is non-associative, by
2292 simply rewriting it into "res += -x[i]". Avoid changing the
2293 gimple instruction for the first simple tests and only do this
2294 if we're allowed to change code at all. */
2295 if (code == MINUS_EXPR
2296 && modify
2297 && (op1 = gimple_assign_rhs1 (def_stmt))
2298 && TREE_CODE (op1) == SSA_NAME
2299 && SSA_NAME_DEF_STMT (op1) == phi)
2300 code = PLUS_EXPR;
2301
2302 if (check_reduction
2303 && (!commutative_tree_code (code) || !associative_tree_code (code)))
2304 {
2305 if (dump_enabled_p ())
2306 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2307 "reduction: not commutative/associative: ");
2308 return NULL;
2309 }
2310
2311 if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
2312 {
2313 if (code != COND_EXPR)
2314 {
2315 if (dump_enabled_p ())
2316 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2317 "reduction: not binary operation: ");
2318
2319 return NULL;
2320 }
2321
2322 op3 = gimple_assign_rhs1 (def_stmt);
2323 if (COMPARISON_CLASS_P (op3))
2324 {
2325 op4 = TREE_OPERAND (op3, 1);
2326 op3 = TREE_OPERAND (op3, 0);
2327 }
2328
2329 op1 = gimple_assign_rhs2 (def_stmt);
2330 op2 = gimple_assign_rhs3 (def_stmt);
2331
2332 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2333 {
2334 if (dump_enabled_p ())
2335 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2336 "reduction: uses not ssa_names: ");
2337
2338 return NULL;
2339 }
2340 }
2341 else
2342 {
2343 op1 = gimple_assign_rhs1 (def_stmt);
2344 op2 = gimple_assign_rhs2 (def_stmt);
2345
2346 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2347 {
2348 if (dump_enabled_p ())
2349 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2350 "reduction: uses not ssa_names: ");
2351
2352 return NULL;
2353 }
2354 }
2355
2356 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
2357 if ((TREE_CODE (op1) == SSA_NAME
2358 && !types_compatible_p (type, TREE_TYPE (op1)))
2359 || (TREE_CODE (op2) == SSA_NAME
2360 && !types_compatible_p (type, TREE_TYPE (op2)))
2361 || (op3 && TREE_CODE (op3) == SSA_NAME
2362 && !types_compatible_p (type, TREE_TYPE (op3)))
2363 || (op4 && TREE_CODE (op4) == SSA_NAME
2364 && !types_compatible_p (type, TREE_TYPE (op4))))
2365 {
2366 if (dump_enabled_p ())
2367 {
2368 dump_printf_loc (MSG_NOTE, vect_location,
2369 "reduction: multiple types: operation type: ");
2370 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
2371 dump_printf (MSG_NOTE, ", operands types: ");
2372 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2373 TREE_TYPE (op1));
2374 dump_printf (MSG_NOTE, ",");
2375 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2376 TREE_TYPE (op2));
2377 if (op3)
2378 {
2379 dump_printf (MSG_NOTE, ",");
2380 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2381 TREE_TYPE (op3));
2382 }
2383
2384 if (op4)
2385 {
2386 dump_printf (MSG_NOTE, ",");
2387 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2388 TREE_TYPE (op4));
2389 }
2390 dump_printf (MSG_NOTE, "\n");
2391 }
2392
2393 return NULL;
2394 }
2395
2396 /* Check that it's ok to change the order of the computation.
2397 Generally, when vectorizing a reduction we change the order of the
2398 computation. This may change the behavior of the program in some
2399 cases, so we need to check that this is ok. One exception is when
2400 vectorizing an outer-loop: the inner-loop is executed sequentially,
2401 and therefore vectorizing reductions in the inner-loop during
2402 outer-loop vectorization is safe. */
2403
2404 /* CHECKME: check for !flag_finite_math_only too? */
2405 if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
2406 && check_reduction)
2407 {
2408 /* Changing the order of operations changes the semantics. */
2409 if (dump_enabled_p ())
2410 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2411 "reduction: unsafe fp math optimization: ");
2412 return NULL;
2413 }
2414 else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
2415 && check_reduction)
2416 {
2417 /* Changing the order of operations changes the semantics. */
2418 if (dump_enabled_p ())
2419 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2420 "reduction: unsafe int math optimization: ");
2421 return NULL;
2422 }
2423 else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
2424 {
2425 /* Changing the order of operations changes the semantics. */
2426 if (dump_enabled_p ())
2427 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2428 "reduction: unsafe fixed-point math optimization: ");
2429 return NULL;
2430 }
2431
2432 /* If we detected "res -= x[i]" earlier, rewrite it into
2433 "res += -x[i]" now. If this turns out to be useless reassoc
2434 will clean it up again. */
2435 if (orig_code == MINUS_EXPR)
2436 {
2437 tree rhs = gimple_assign_rhs2 (def_stmt);
2438 tree negrhs = make_ssa_name (TREE_TYPE (rhs), NULL);
2439 gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
2440 rhs, NULL);
2441 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
2442 set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
2443 loop_info, NULL));
2444 gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
2445 gimple_assign_set_rhs2 (def_stmt, negrhs);
2446 gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
2447 update_stmt (def_stmt);
2448 }
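/* As an illustration of the rewrite just performed (the SSA names are
made up for the example): a statement

s_2 = s_1 - x_3;

becomes

t_4 = -x_3;
s_2 = s_1 + t_4;

so that from here on the reduction is treated as a PLUS_EXPR reduction;
if the vectorization attempt is abandoned, the reassoc pass can clean the
extra negation up again, as noted above. */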
2449
2450 /* Reduction is safe. We're dealing with one of the following:
2451 1) integer arithmetic and no trapv
2452 2) floating point arithmetic, and special flags permit this optimization
2453 3) nested cycle (i.e., outer loop vectorization). */
2454 if (TREE_CODE (op1) == SSA_NAME)
2455 def1 = SSA_NAME_DEF_STMT (op1);
2456
2457 if (TREE_CODE (op2) == SSA_NAME)
2458 def2 = SSA_NAME_DEF_STMT (op2);
2459
2460 if (code != COND_EXPR
2461 && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
2462 {
2463 if (dump_enabled_p ())
2464 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
2465 return NULL;
2466 }
2467
2468 /* Check that one def is the reduction def, defined by PHI,
2469 the other def is either defined in the loop ("vect_internal_def"),
2470 or it's an induction (defined by a loop-header phi-node). */
2471
2472 if (def2 && def2 == phi
2473 && (code == COND_EXPR
2474 || !def1 || gimple_nop_p (def1)
2475 || !flow_bb_inside_loop_p (loop, gimple_bb (def1))
2476 || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
2477 && (is_gimple_assign (def1)
2478 || is_gimple_call (def1)
2479 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2480 == vect_induction_def
2481 || (gimple_code (def1) == GIMPLE_PHI
2482 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2483 == vect_internal_def
2484 && !is_loop_header_bb_p (gimple_bb (def1)))))))
2485 {
2486 if (dump_enabled_p ())
2487 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
2488 return def_stmt;
2489 }
2490
2491 if (def1 && def1 == phi
2492 && (code == COND_EXPR
2493 || !def2 || gimple_nop_p (def2)
2494 || !flow_bb_inside_loop_p (loop, gimple_bb (def2))
2495 || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
2496 && (is_gimple_assign (def2)
2497 || is_gimple_call (def2)
2498 || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2499 == vect_induction_def
2500 || (gimple_code (def2) == GIMPLE_PHI
2501 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2502 == vect_internal_def
2503 && !is_loop_header_bb_p (gimple_bb (def2)))))))
2504 {
2505 if (check_reduction)
2506 {
2507 /* Swap operands (just for simplicity - so that the rest of the code
2508 can assume that the reduction variable is always the last (second)
2509 argument). */
2510 if (dump_enabled_p ())
2511 report_vect_op (MSG_NOTE, def_stmt,
2512 "detected reduction: need to swap operands: ");
2513
2514 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
2515 gimple_assign_rhs2_ptr (def_stmt));
2516
2517 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
2518 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2519 }
2520 else
2521 {
2522 if (dump_enabled_p ())
2523 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
2524 }
2525
2526 return def_stmt;
2527 }
2528
2529 /* Try to find SLP reduction chain. */
2530 if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt))
2531 {
2532 if (dump_enabled_p ())
2533 report_vect_op (MSG_NOTE, def_stmt,
2534 "reduction: detected reduction chain: ");
2535
2536 return def_stmt;
2537 }
2538
2539 if (dump_enabled_p ())
2540 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2541 "reduction: unknown pattern: ");
2542
2543 return NULL;
2544 }
2545
2546 /* Wrapper around vect_is_simple_reduction_1, that won't modify code
2547 in-place. Arguments as there. */
2548
2549 static gimple
2550 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
2551 bool check_reduction, bool *double_reduc)
2552 {
2553 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2554 double_reduc, false);
2555 }
2556
2557 /* Wrapper around vect_is_simple_reduction_1, which will modify code
2558 in-place if it enables detection of more reductions. Arguments
2559 as there. */
2560
2561 gimple
2562 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
2563 bool check_reduction, bool *double_reduc)
2564 {
2565 return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2566 double_reduc, true);
2567 }
2568
2569 /* Calculate the cost of one scalar iteration of the loop. */
2570 int
2571 vect_get_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
2572 {
2573 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2574 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2575 int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
2576 int innerloop_iters, i, stmt_cost;
2577
2578 /* Count statements in scalar loop. Using this as scalar cost for a single
2579 iteration for now.
2580
2581 TODO: Add outer loop support.
2582
2583 TODO: Consider assigning different costs to different scalar
2584 statements. */
2585
2586 /* FORNOW. */
2587 innerloop_iters = 1;
2588 if (loop->inner)
2589 innerloop_iters = 50; /* FIXME */
2590
2591 for (i = 0; i < nbbs; i++)
2592 {
2593 gimple_stmt_iterator si;
2594 basic_block bb = bbs[i];
2595
2596 if (bb->loop_father == loop->inner)
2597 factor = innerloop_iters;
2598 else
2599 factor = 1;
2600
2601 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2602 {
2603 gimple stmt = gsi_stmt (si);
2604 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2605
2606 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
2607 continue;
2608
2609 /* Skip stmts that are not vectorized inside the loop. */
2610 if (stmt_info
2611 && !STMT_VINFO_RELEVANT_P (stmt_info)
2612 && (!STMT_VINFO_LIVE_P (stmt_info)
2613 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
2614 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
2615 continue;
2616
2617 if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
2618 {
2619 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
2620 stmt_cost = vect_get_stmt_cost (scalar_load);
2621 else
2622 stmt_cost = vect_get_stmt_cost (scalar_store);
2623 }
2624 else
2625 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2626
2627 scalar_single_iter_cost += stmt_cost * factor;
2628 }
2629 }
2630 return scalar_single_iter_cost;
2631 }
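/* For example (with made-up per-statement costs; the real numbers come
from vect_get_stmt_cost and hence from the target), a loop body

a[i] = b[i] + c[i];

contributes two scalar_load statements, one scalar_store and one
scalar_stmt for the addition. With each of these costing 1 the function
returns 4 for a single scalar iteration; statements sitting in an inner
loop would additionally be weighted by the FIXME factor of 50 used
above. */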
2632
2633 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
2634 int
2635 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
2636 int *peel_iters_epilogue,
2637 int scalar_single_iter_cost,
2638 stmt_vector_for_cost *prologue_cost_vec,
2639 stmt_vector_for_cost *epilogue_cost_vec)
2640 {
2641 int retval = 0;
2642 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2643
2644 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2645 {
2646 *peel_iters_epilogue = vf/2;
2647 if (dump_enabled_p ())
2648 dump_printf_loc (MSG_NOTE, vect_location,
2649 "cost model: epilogue peel iters set to vf/2 "
2650 "because loop iterations are unknown .\n");
2651
2652 /* If peeled iterations are known but the number of scalar loop
2653 iterations is unknown, count a taken branch per peeled loop. */
2654 retval = record_stmt_cost (prologue_cost_vec, 2, cond_branch_taken,
2655 NULL, 0, vect_prologue);
2656 }
2657 else
2658 {
2659 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2660 peel_iters_prologue = niters < peel_iters_prologue ?
2661 niters : peel_iters_prologue;
2662 *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
2663 /* If we need to peel for gaps, but no peeling is required, we have to
2664 peel VF iterations. */
2665 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
2666 *peel_iters_epilogue = vf;
2667 }
2668
2669 if (peel_iters_prologue)
2670 retval += record_stmt_cost (prologue_cost_vec,
2671 peel_iters_prologue * scalar_single_iter_cost,
2672 scalar_stmt, NULL, 0, vect_prologue);
2673 if (*peel_iters_epilogue)
2674 retval += record_stmt_cost (epilogue_cost_vec,
2675 *peel_iters_epilogue * scalar_single_iter_cost,
2676 scalar_stmt, NULL, 0, vect_epilogue);
2677 return retval;
2678 }
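/* A small worked example for the function above (all numbers are
illustrative): with VF = 4, a known iteration count of 103 and
PEEL_ITERS_PROLOGUE = 3, the epilogue gets (103 - 3) % 4 = 0 iterations,
unless LOOP_VINFO_PEELING_FOR_GAPS forces a full VF = 4 iterations; with
SCALAR_SINGLE_ITER_COST = 4 the prologue then adds 3 * 4 = 12 units of
scalar_stmt cost to PROLOGUE_COST_VEC and the gap epilogue, if needed,
4 * 4 = 16 units to EPILOGUE_COST_VEC. When the iteration count is not
known at compile time the epilogue is simply assumed to run VF/2 = 2
iterations and two taken branches are additionally charged to the
prologue. */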
2679
2680 /* Function vect_estimate_min_profitable_iters
2681
2682 Return the number of iterations required for the vector version of the
2683 loop to be profitable relative to the cost of the scalar version of the
2684 loop. */
2685
2686 static void
2687 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
2688 int *ret_min_profitable_niters,
2689 int *ret_min_profitable_estimate)
2690 {
2691 int min_profitable_iters;
2692 int min_profitable_estimate;
2693 int peel_iters_prologue;
2694 int peel_iters_epilogue;
2695 unsigned vec_inside_cost = 0;
2696 int vec_outside_cost = 0;
2697 unsigned vec_prologue_cost = 0;
2698 unsigned vec_epilogue_cost = 0;
2699 int scalar_single_iter_cost = 0;
2700 int scalar_outside_cost = 0;
2701 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2702 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2703 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2704
2705 /* Cost model disabled. */
2706 if (unlimited_cost_model ())
2707 {
2708 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
2709 *ret_min_profitable_niters = 0;
2710 *ret_min_profitable_estimate = 0;
2711 return;
2712 }
2713
2714 /* Requires loop versioning tests to handle misalignment. */
2715 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2716 {
2717 /* FIXME: Make cost depend on complexity of individual check. */
2718 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
2719 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
2720 vect_prologue);
2721 dump_printf (MSG_NOTE,
2722 "cost model: Adding cost of checks for loop "
2723 "versioning to treat misalignment.\n");
2724 }
2725
2726 /* Requires loop versioning with alias checks. */
2727 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2728 {
2729 /* FIXME: Make cost depend on complexity of individual check. */
2730 unsigned len = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).length ();
2731 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
2732 vect_prologue);
2733 dump_printf (MSG_NOTE,
2734 "cost model: Adding cost of checks for loop "
2735 "versioning aliasing.\n");
2736 }
2737
2738 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2739 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2740 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
2741 vect_prologue);
2742
2743 /* Count statements in scalar loop. Using this as scalar cost for a single
2744 iteration for now.
2745
2746 TODO: Add outer loop support.
2747
2748 TODO: Consider assigning different costs to different scalar
2749 statements. */
2750
2751 scalar_single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
2752
2753 /* Add additional cost for the peeled instructions in prologue and epilogue
2754 loop.
2755
2756 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2757 at compile-time - we assume it's vf/2 (the worst would be vf-1).
2758
2759 TODO: Build an expression that represents peel_iters for prologue and
2760 epilogue to be used in a run-time test. */
2761
2762 if (npeel < 0)
2763 {
2764 peel_iters_prologue = vf/2;
2765 dump_printf (MSG_NOTE, "cost model: "
2766 "prologue peel iters set to vf/2.\n");
2767
2768 /* If peeling for alignment is unknown, loop bound of main loop becomes
2769 unknown. */
2770 peel_iters_epilogue = vf/2;
2771 dump_printf (MSG_NOTE, "cost model: "
2772 "epilogue peel iters set to vf/2 because "
2773 "peeling for alignment is unknown.\n");
2774
2775 /* If peeled iterations are unknown, count a taken branch and a not taken
2776 branch per peeled loop. Even if scalar loop iterations are known,
2777 vector iterations are not known since peeled prologue iterations are
2778 not known. Hence guards remain the same. */
2779 (void) add_stmt_cost (target_cost_data, 2, cond_branch_taken,
2780 NULL, 0, vect_prologue);
2781 (void) add_stmt_cost (target_cost_data, 2, cond_branch_not_taken,
2782 NULL, 0, vect_prologue);
2783 /* FORNOW: Don't attempt to pass individual scalar instructions to
2784 the model; just assume linear cost for scalar iterations. */
2785 (void) add_stmt_cost (target_cost_data,
2786 peel_iters_prologue * scalar_single_iter_cost,
2787 scalar_stmt, NULL, 0, vect_prologue);
2788 (void) add_stmt_cost (target_cost_data,
2789 peel_iters_epilogue * scalar_single_iter_cost,
2790 scalar_stmt, NULL, 0, vect_epilogue);
2791 }
2792 else
2793 {
2794 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
2795 stmt_info_for_cost *si;
2796 int j;
2797 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2798
2799 prologue_cost_vec.create (2);
2800 epilogue_cost_vec.create (2);
2801 peel_iters_prologue = npeel;
2802
2803 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
2804 &peel_iters_epilogue,
2805 scalar_single_iter_cost,
2806 &prologue_cost_vec,
2807 &epilogue_cost_vec);
2808
2809 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
2810 {
2811 struct _stmt_vec_info *stmt_info
2812 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
2813 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
2814 si->misalign, vect_prologue);
2815 }
2816
2817 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
2818 {
2819 struct _stmt_vec_info *stmt_info
2820 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
2821 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
2822 si->misalign, vect_epilogue);
2823 }
2824
2825 prologue_cost_vec.release ();
2826 epilogue_cost_vec.release ();
2827 }
2828
2829 /* FORNOW: The scalar outside cost is incremented in one of the
2830 following ways:
2831
2832 1. The vectorizer checks for alignment and aliasing and generates
2833 a condition that allows dynamic vectorization. A cost model
2834 check is ANDED with the versioning condition. Hence scalar code
2835 path now has the added cost of the versioning check.
2836
2837 if (cost > th & versioning_check)
2838 jmp to vector code
2839
2840 Hence the run-time scalar cost is incremented by a not-taken branch cost.
2841
2842 2. The vectorizer then checks if a prologue is required. If the
2843 cost model check was not done before during versioning, it has to
2844 be done before the prologue check.
2845
2846 if (cost <= th)
2847 prologue = scalar_iters
2848 if (prologue == 0)
2849 jmp to vector code
2850 else
2851 execute prologue
2852 if (prologue == num_iters)
2853 go to exit
2854
2855 Hence the run-time scalar cost is incremented by a taken branch,
2856 plus a not-taken branch, plus a taken branch cost.
2857
2858 3. The vectorizer then checks if an epilogue is required. If the
2859 cost model check was not done before during prologue check, it
2860 has to be done with the epilogue check.
2861
2862 if (prologue == 0)
2863 jmp to vector code
2864 else
2865 execute prologue
2866 if (prologue == num_iters)
2867 go to exit
2868 vector code:
2869 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2870 jmp to epilogue
2871
2872 Hence the run-time scalar cost should be incremented by 2 taken
2873 branches.
2874
2875 TODO: The back end may reorder the BBS's differently and reverse
2876 conditions/branch directions. Change the estimates below to
2877 something more reasonable. */
2878
2879 /* If the number of iterations is known and we do not do versioning, we can
2880 decide whether to vectorize at compile time. Hence the scalar version
2881 does not carry cost model guard costs. */
2882 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2883 || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2884 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2885 {
2886 /* Cost model check occurs at versioning. */
2887 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2888 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2889 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
2890 else
2891 {
2892 /* Cost model check occurs at prologue generation. */
2893 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2894 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
2895 + vect_get_stmt_cost (cond_branch_not_taken);
2896 /* Cost model check occurs at epilogue generation. */
2897 else
2898 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
2899 }
2900 }
2901
2902 /* Complete the target-specific cost calculations. */
2903 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
2904 &vec_inside_cost, &vec_epilogue_cost);
2905
2906 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
2907
2908 /* Calculate number of iterations required to make the vector version
2909 profitable, relative to the loop bodies only. The following condition
2910 must hold true:
2911 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2912 where
2913 SIC = scalar iteration cost, VIC = vector iteration cost,
2914 VOC = vector outside cost, VF = vectorization factor,
2915 PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations
2916 SOC = scalar outside cost for run time cost model check. */
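/* Solving the condition above with illustrative numbers: take SIC = 4,
VIC = 8, VF = 4, VOC = 20, SOC = 6 and no peeling (PL_ITERS = EP_ITERS
= 0). The code below computes

min_profitable_iters = ((20 - 6) * 4) / (4 * 4 - 8) = 56 / 8 = 7

and, since the two sides of the condition tie at 7 iterations, bumps the
result to 8: indeed 8 is the smallest niters for which 4 * niters + 6
exceeds 8 * (niters / 4) + 20. */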
2917
2918 if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost)
2919 {
2920 if (vec_outside_cost <= 0)
2921 min_profitable_iters = 1;
2922 else
2923 {
2924 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2925 - vec_inside_cost * peel_iters_prologue
2926 - vec_inside_cost * peel_iters_epilogue)
2927 / ((scalar_single_iter_cost * vf)
2928 - vec_inside_cost);
2929
2930 if ((scalar_single_iter_cost * vf * min_profitable_iters)
2931 <= (((int) vec_inside_cost * min_profitable_iters)
2932 + (((int) vec_outside_cost - scalar_outside_cost) * vf)))
2933 min_profitable_iters++;
2934 }
2935 }
2936 /* vector version will never be profitable. */
2937 else
2938 {
2939 if (dump_enabled_p ())
2940 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2941 "cost model: the vector iteration cost = %d "
2942 "divided by the scalar iteration cost = %d "
2943 "is greater or equal to the vectorization factor = %d"
2944 ".\n",
2945 vec_inside_cost, scalar_single_iter_cost, vf);
2946 *ret_min_profitable_niters = -1;
2947 *ret_min_profitable_estimate = -1;
2948 return;
2949 }
2950
2951 if (dump_enabled_p ())
2952 {
2953 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2954 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
2955 vec_inside_cost);
2956 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
2957 vec_prologue_cost);
2958 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
2959 vec_epilogue_cost);
2960 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
2961 scalar_single_iter_cost);
2962 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
2963 scalar_outside_cost);
2964 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
2965 vec_outside_cost);
2966 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
2967 peel_iters_prologue);
2968 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
2969 peel_iters_epilogue);
2970 dump_printf (MSG_NOTE,
2971 " Calculated minimum iters for profitability: %d\n",
2972 min_profitable_iters);
2973 dump_printf (MSG_NOTE, "\n");
2974 }
2975
2976 min_profitable_iters =
2977 min_profitable_iters < vf ? vf : min_profitable_iters;
2978
2979 /* Because the condition we create is:
2980 if (niters <= min_profitable_iters)
2981 then skip the vectorized loop. */
2982 min_profitable_iters--;
2983
2984 if (dump_enabled_p ())
2985 dump_printf_loc (MSG_NOTE, vect_location,
2986 " Runtime profitability threshold = %d\n",
2987 min_profitable_iters);
2988
2989 *ret_min_profitable_niters = min_profitable_iters;
2990
2991 /* Calculate number of iterations required to make the vector version
2992 profitable, relative to the loop bodies only.
2993
2994 The non-vectorized variant costs SIC * niters and it must win over the
2995 vector variant on the expected loop trip count. The following condition must hold true:
2996 SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
2997
2998 if (vec_outside_cost <= 0)
2999 min_profitable_estimate = 1;
3000 else
3001 {
3002 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf
3003 - vec_inside_cost * peel_iters_prologue
3004 - vec_inside_cost * peel_iters_epilogue)
3005 / ((scalar_single_iter_cost * vf)
3006 - vec_inside_cost);
3007 }
3008 min_profitable_estimate --;
3009 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3010 if (dump_enabled_p ())
3011 dump_printf_loc (MSG_NOTE, vect_location,
3012 " Static estimate profitability threshold = %d\n",
3013 min_profitable_estimate);
3014
3015 *ret_min_profitable_estimate = min_profitable_estimate;
3016 }
3017
3018
3019 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
3020 functions. Design better to avoid maintenance issues. */
3021
3022 /* Function vect_model_reduction_cost.
3023
3024 Models cost for a reduction operation, including the vector ops
3025 generated within the strip-mine loop, the initial definition before
3026 the loop, and the epilogue code that must be generated. */
3027
3028 static bool
3029 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
3030 int ncopies)
3031 {
3032 int prologue_cost = 0, epilogue_cost = 0;
3033 enum tree_code code;
3034 optab optab;
3035 tree vectype;
3036 gimple stmt, orig_stmt;
3037 tree reduction_op;
3038 enum machine_mode mode;
3039 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3040 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3041 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3042
3043 /* Cost of reduction op inside loop. */
3044 unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
3045 stmt_info, 0, vect_body);
3046 stmt = STMT_VINFO_STMT (stmt_info);
3047
3048 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3049 {
3050 case GIMPLE_SINGLE_RHS:
3051 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
3052 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
3053 break;
3054 case GIMPLE_UNARY_RHS:
3055 reduction_op = gimple_assign_rhs1 (stmt);
3056 break;
3057 case GIMPLE_BINARY_RHS:
3058 reduction_op = gimple_assign_rhs2 (stmt);
3059 break;
3060 case GIMPLE_TERNARY_RHS:
3061 reduction_op = gimple_assign_rhs3 (stmt);
3062 break;
3063 default:
3064 gcc_unreachable ();
3065 }
3066
3067 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3068 if (!vectype)
3069 {
3070 if (dump_enabled_p ())
3071 {
3072 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3073 "unsupported data-type ");
3074 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
3075 TREE_TYPE (reduction_op));
3076 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3077 }
3078 return false;
3079 }
3080
3081 mode = TYPE_MODE (vectype);
3082 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3083
3084 if (!orig_stmt)
3085 orig_stmt = STMT_VINFO_STMT (stmt_info);
3086
3087 code = gimple_assign_rhs_code (orig_stmt);
3088
3089 /* Add in cost for initial definition. */
3090 prologue_cost += add_stmt_cost (target_cost_data, 1, scalar_to_vec,
3091 stmt_info, 0, vect_prologue);
3092
3093 /* Determine cost of epilogue code.
3094
3095 We have a reduction operator that will reduce the vector in one statement.
3096 Also requires scalar extract. */
3097
3098 if (!nested_in_vect_loop_p (loop, orig_stmt))
3099 {
3100 if (reduc_code != ERROR_MARK)
3101 {
3102 epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
3103 stmt_info, 0, vect_epilogue);
3104 epilogue_cost += add_stmt_cost (target_cost_data, 1, vec_to_scalar,
3105 stmt_info, 0, vect_epilogue);
3106 }
3107 else
3108 {
3109 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3110 tree bitsize =
3111 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
3112 int element_bitsize = tree_to_uhwi (bitsize);
3113 int nelements = vec_size_in_bits / element_bitsize;
3114
3115 optab = optab_for_tree_code (code, vectype, optab_default);
3116
3117 /* We have a whole vector shift available. */
3118 if (VECTOR_MODE_P (mode)
3119 && optab_handler (optab, mode) != CODE_FOR_nothing
3120 && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3121 {
3122 /* Final reduction via vector shifts and the reduction operator.
3123 Also requires scalar extract. */
3124 epilogue_cost += add_stmt_cost (target_cost_data,
3125 exact_log2 (nelements) * 2,
3126 vector_stmt, stmt_info, 0,
3127 vect_epilogue);
3128 epilogue_cost += add_stmt_cost (target_cost_data, 1,
3129 vec_to_scalar, stmt_info, 0,
3130 vect_epilogue);
3131 }
3132 else
3133 /* Use extracts and reduction op for final reduction. For N
3134 elements, we have N extracts and N-1 reduction ops. */
3135 epilogue_cost += add_stmt_cost (target_cost_data,
3136 nelements + nelements - 1,
3137 vector_stmt, stmt_info, 0,
3138 vect_epilogue);
3139 }
3140 }
3141
3142 if (dump_enabled_p ())
3143 dump_printf (MSG_NOTE,
3144 "vect_model_reduction_cost: inside_cost = %d, "
3145 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3146 prologue_cost, epilogue_cost);
3147
3148 return true;
3149 }
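/* To illustrate the epilogue costing above (statement counts only; the
weights of vector_stmt and vec_to_scalar come from the target): for a
plus-reduction over a 4-element vector, a target that provides the
REDUC_PLUS_EXPR pattern pays one vector_stmt plus one vec_to_scalar in
the epilogue. Without that pattern but with a whole-vector shift, the
fallback costs exact_log2 (4) * 2 = 4 vector statements plus the final
extract; with neither, it degenerates to 4 + 4 - 1 = 7 statements
(N extracts and N - 1 reduction operations). */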
3150
3151
3152 /* Function vect_model_induction_cost.
3153
3154 Models cost for induction operations. */
3155
3156 static void
3157 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
3158 {
3159 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3160 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3161 unsigned inside_cost, prologue_cost;
3162
3163 /* loop cost for vec_loop. */
3164 inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
3165 stmt_info, 0, vect_body);
3166
3167 /* prologue cost for vec_init and vec_step. */
3168 prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
3169 stmt_info, 0, vect_prologue);
3170
3171 if (dump_enabled_p ())
3172 dump_printf_loc (MSG_NOTE, vect_location,
3173 "vect_model_induction_cost: inside_cost = %d, "
3174 "prologue_cost = %d .\n", inside_cost, prologue_cost);
3175 }
3176
3177
3178 /* Function get_initial_def_for_induction
3179
3180 Input:
3181 STMT - a stmt that performs an induction operation in the loop.
3182 IV_PHI - the initial value of the induction variable
3183
3184 Output:
3185 Return a vector variable, initialized with the first VF values of
3186 the induction variable. E.g., for an iv with IV_PHI='X' and
3187 evolution S, for a vector of 4 units, we want to return:
3188 [X, X + S, X + 2*S, X + 3*S]. */
3189
3190 static tree
3191 get_initial_def_for_induction (gimple iv_phi)
3192 {
3193 stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
3194 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3195 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3196 tree vectype;
3197 int nunits;
3198 edge pe = loop_preheader_edge (loop);
3199 struct loop *iv_loop;
3200 basic_block new_bb;
3201 tree new_vec, vec_init, vec_step, t;
3202 tree new_var;
3203 tree new_name;
3204 gimple init_stmt, induction_phi, new_stmt;
3205 tree induc_def, vec_def, vec_dest;
3206 tree init_expr, step_expr;
3207 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3208 int i;
3209 int ncopies;
3210 tree expr;
3211 stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
3212 bool nested_in_vect_loop = false;
3213 gimple_seq stmts = NULL;
3214 imm_use_iterator imm_iter;
3215 use_operand_p use_p;
3216 gimple exit_phi;
3217 edge latch_e;
3218 tree loop_arg;
3219 gimple_stmt_iterator si;
3220 basic_block bb = gimple_bb (iv_phi);
3221 tree stepvectype;
3222 tree resvectype;
3223
3224 /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop? */
3225 if (nested_in_vect_loop_p (loop, iv_phi))
3226 {
3227 nested_in_vect_loop = true;
3228 iv_loop = loop->inner;
3229 }
3230 else
3231 iv_loop = loop;
3232 gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
3233
3234 latch_e = loop_latch_edge (iv_loop);
3235 loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
3236
3237 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (phi_info);
3238 gcc_assert (step_expr != NULL_TREE);
3239
3240 pe = loop_preheader_edge (iv_loop);
3241 init_expr = PHI_ARG_DEF_FROM_EDGE (iv_phi,
3242 loop_preheader_edge (iv_loop));
3243
3244 vectype = get_vectype_for_scalar_type (TREE_TYPE (init_expr));
3245 resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
3246 gcc_assert (vectype);
3247 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3248 ncopies = vf / nunits;
3249
3250 gcc_assert (phi_info);
3251 gcc_assert (ncopies >= 1);
3252
3253 /* Convert the step to the desired type. */
3254 step_expr = force_gimple_operand (fold_convert (TREE_TYPE (vectype),
3255 step_expr),
3256 &stmts, true, NULL_TREE);
3257 if (stmts)
3258 {
3259 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3260 gcc_assert (!new_bb);
3261 }
3262
3263 /* Find the first insertion point in the BB. */
3264 si = gsi_after_labels (bb);
3265
3266 /* Create the vector that holds the initial_value of the induction. */
3267 if (nested_in_vect_loop)
3268 {
3269 /* iv_loop is nested in the loop to be vectorized. init_expr had already
3270 been created during vectorization of previous stmts. We obtain it
3271 from the STMT_VINFO_VEC_STMT of the defining stmt. */
3272 vec_init = vect_get_vec_def_for_operand (init_expr, iv_phi, NULL);
3273 /* If the initial value is not of proper type, convert it. */
3274 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
3275 {
3276 new_stmt = gimple_build_assign_with_ops
3277 (VIEW_CONVERT_EXPR,
3278 vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"),
3279 build1 (VIEW_CONVERT_EXPR, vectype, vec_init), NULL_TREE);
3280 vec_init = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3281 gimple_assign_set_lhs (new_stmt, vec_init);
3282 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
3283 new_stmt);
3284 gcc_assert (!new_bb);
3285 set_vinfo_for_stmt (new_stmt,
3286 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3287 }
3288 }
3289 else
3290 {
3291 vec<constructor_elt, va_gc> *v;
3292
3293 /* iv_loop is the loop to be vectorized. Create:
3294 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
3295 new_var = vect_get_new_vect_var (TREE_TYPE (vectype),
3296 vect_scalar_var, "var_");
3297 new_name = force_gimple_operand (fold_convert (TREE_TYPE (vectype),
3298 init_expr),
3299 &stmts, false, new_var);
3300 if (stmts)
3301 {
3302 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3303 gcc_assert (!new_bb);
3304 }
3305
3306 vec_alloc (v, nunits);
3307 bool constant_p = is_gimple_min_invariant (new_name);
3308 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3309 for (i = 1; i < nunits; i++)
3310 {
3311 /* Create: new_name_i = new_name + step_expr */
3312 new_name = fold_build2 (PLUS_EXPR, TREE_TYPE (new_name),
3313 new_name, step_expr);
3314 if (!is_gimple_min_invariant (new_name))
3315 {
3316 init_stmt = gimple_build_assign (new_var, new_name);
3317 new_name = make_ssa_name (new_var, init_stmt);
3318 gimple_assign_set_lhs (init_stmt, new_name);
3319 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
3320 gcc_assert (!new_bb);
3321 if (dump_enabled_p ())
3322 {
3323 dump_printf_loc (MSG_NOTE, vect_location,
3324 "created new init_stmt: ");
3325 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, init_stmt, 0);
3326 dump_printf (MSG_NOTE, "\n");
3327 }
3328 constant_p = false;
3329 }
3330 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3331 }
3332 /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */
3333 if (constant_p)
3334 new_vec = build_vector_from_ctor (vectype, v);
3335 else
3336 new_vec = build_constructor (vectype, v);
3337 vec_init = vect_init_vector (iv_phi, new_vec, vectype, NULL);
3338 }
3339
3340
3341 /* Create the vector that holds the step of the induction. */
3342 if (nested_in_vect_loop)
3343 /* iv_loop is nested in the loop to be vectorized. Generate:
3344 vec_step = [S, S, S, S] */
3345 new_name = step_expr;
3346 else
3347 {
3348 /* iv_loop is the loop to be vectorized. Generate:
3349 vec_step = [VF*S, VF*S, VF*S, VF*S] */
3350 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
3351 {
3352 expr = build_int_cst (integer_type_node, vf);
3353 expr = fold_convert (TREE_TYPE (step_expr), expr);
3354 }
3355 else
3356 expr = build_int_cst (TREE_TYPE (step_expr), vf);
3357 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3358 expr, step_expr);
3359 if (TREE_CODE (step_expr) == SSA_NAME)
3360 new_name = vect_init_vector (iv_phi, new_name,
3361 TREE_TYPE (step_expr), NULL);
3362 }
3363
3364 t = unshare_expr (new_name);
3365 gcc_assert (CONSTANT_CLASS_P (new_name)
3366 || TREE_CODE (new_name) == SSA_NAME);
3367 stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
3368 gcc_assert (stepvectype);
3369 new_vec = build_vector_from_val (stepvectype, t);
3370 vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);
3371
3372
3373 /* Create the following def-use cycle:
3374 loop prolog:
3375 vec_init = ...
3376 vec_step = ...
3377 loop:
3378 vec_iv = PHI <vec_init, vec_loop>
3379 ...
3380 STMT
3381 ...
3382 vec_loop = vec_iv + vec_step; */
3383
3384 /* Create the induction-phi that defines the induction-operand. */
3385 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
3386 induction_phi = create_phi_node (vec_dest, iv_loop->header);
3387 set_vinfo_for_stmt (induction_phi,
3388 new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
3389 induc_def = PHI_RESULT (induction_phi);
3390
3391 /* Create the iv update inside the loop */
3392 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3393 induc_def, vec_step);
3394 vec_def = make_ssa_name (vec_dest, new_stmt);
3395 gimple_assign_set_lhs (new_stmt, vec_def);
3396 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3397 set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
3398 NULL));
3399
3400 /* Set the arguments of the phi node: */
3401 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
3402 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
3403 UNKNOWN_LOCATION);
3404
3405
3406 /* In case the vectorization factor (VF) is bigger than the number
3407 of elements that we can fit in a vectype (nunits), we have to generate
3408 more than one vector stmt - i.e., we need to "unroll" the
3409 vector stmt by a factor VF/nunits. For more details see the documentation
3410 in vectorizable_operation. */
3411
3412 if (ncopies > 1)
3413 {
3414 stmt_vec_info prev_stmt_vinfo;
3415 /* FORNOW. This restriction should be relaxed. */
3416 gcc_assert (!nested_in_vect_loop);
3417
3418 /* Create the vector that holds the step of the induction. */
3419 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
3420 {
3421 expr = build_int_cst (integer_type_node, nunits);
3422 expr = fold_convert (TREE_TYPE (step_expr), expr);
3423 }
3424 else
3425 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
3426 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3427 expr, step_expr);
3428 if (TREE_CODE (step_expr) == SSA_NAME)
3429 new_name = vect_init_vector (iv_phi, new_name,
3430 TREE_TYPE (step_expr), NULL);
3431 t = unshare_expr (new_name);
3432 gcc_assert (CONSTANT_CLASS_P (new_name)
3433 || TREE_CODE (new_name) == SSA_NAME);
3434 new_vec = build_vector_from_val (stepvectype, t);
3435 vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);
3436
3437 vec_def = induc_def;
3438 prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
3439 for (i = 1; i < ncopies; i++)
3440 {
3441 /* vec_i = vec_prev + vec_step */
3442 new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3443 vec_def, vec_step);
3444 vec_def = make_ssa_name (vec_dest, new_stmt);
3445 gimple_assign_set_lhs (new_stmt, vec_def);
3446
3447 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3448 if (!useless_type_conversion_p (resvectype, vectype))
3449 {
3450 new_stmt = gimple_build_assign_with_ops
3451 (VIEW_CONVERT_EXPR,
3452 vect_get_new_vect_var (resvectype, vect_simple_var,
3453 "vec_iv_"),
3454 build1 (VIEW_CONVERT_EXPR, resvectype,
3455 gimple_assign_lhs (new_stmt)), NULL_TREE);
3456 gimple_assign_set_lhs (new_stmt,
3457 make_ssa_name
3458 (gimple_assign_lhs (new_stmt), new_stmt));
3459 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3460 }
3461 set_vinfo_for_stmt (new_stmt,
3462 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3463 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
3464 prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
3465 }
3466 }
3467
3468 if (nested_in_vect_loop)
3469 {
3470 /* Find the loop-closed exit-phi of the induction, and record
3471 the final vector of induction results: */
3472 exit_phi = NULL;
3473 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
3474 {
3475 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
3476 {
3477 exit_phi = USE_STMT (use_p);
3478 break;
3479 }
3480 }
3481 if (exit_phi)
3482 {
3483 stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3484 /* FORNOW. We do not yet support the case in which an inner-loop induction
3485 is used only outside the outer-loop (i.e. not used in the outer-loop at all). */
3486 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
3487 && !STMT_VINFO_LIVE_P (stmt_vinfo));
3488
3489 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
3490 if (dump_enabled_p ())
3491 {
3492 dump_printf_loc (MSG_NOTE, vect_location,
3493 "vector of inductions after inner-loop:");
3494 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
3495 dump_printf (MSG_NOTE, "\n");
3496 }
3497 }
3498 }
3499
3500
3501 if (dump_enabled_p ())
3502 {
3503 dump_printf_loc (MSG_NOTE, vect_location,
3504 "transform induction: created def-use cycle: ");
3505 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
3506 dump_printf (MSG_NOTE, "\n");
3507 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
3508 SSA_NAME_DEF_STMT (vec_def), 0);
3509 dump_printf (MSG_NOTE, "\n");
3510 }
3511
3512 STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
3513 if (!useless_type_conversion_p (resvectype, vectype))
3514 {
3515 new_stmt = gimple_build_assign_with_ops
3516 (VIEW_CONVERT_EXPR,
3517 vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
3518 build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
3519 induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3520 gimple_assign_set_lhs (new_stmt, induc_def);
3521 si = gsi_after_labels (bb);
3522 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3523 set_vinfo_for_stmt (new_stmt,
3524 new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3525 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
3526 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
3527 }
3528
3529 return induc_def;
3530 }
3531
3532
3533 /* Function get_initial_def_for_reduction
3534
3535 Input:
3536 STMT - a stmt that performs a reduction operation in the loop.
3537 INIT_VAL - the initial value of the reduction variable
3538
3539 Output:
3540 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3541 of the reduction (used for adjusting the epilog - see below).
3542 Return a vector variable, initialized according to the operation that STMT
3543 performs. This vector will be used as the initial value of the
3544 vector of partial results.
3545
3546 Option1 (adjust in epilog): Initialize the vector as follows:
3547 add/bit or/xor: [0,0,...,0,0]
3548 mult/bit and: [1,1,...,1,1]
3549 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
3550 and when necessary (e.g. add/mult case) let the caller know
3551 that it needs to adjust the result by init_val.
3552
3553 Option2: Initialize the vector as follows:
3554 add/bit or/xor: [init_val,0,0,...,0]
3555 mult/bit and: [init_val,1,1,...,1]
3556 min/max/cond_expr: [init_val,init_val,...,init_val]
3557 and no adjustments are needed.
3558
3559 For example, for the following code:
3560
3561 s = init_val;
3562 for (i=0;i<n;i++)
3563 s = s + a[i];
3564
3565 STMT is 's = s + a[i]', and the reduction variable is 's'.
3566 For a vector of 4 units, we want to return either [0,0,0,init_val],
3567 or [0,0,0,0] and let the caller know that it needs to adjust
3568 the result at the end by 'init_val'.
3569
3570 FORNOW, we use Option1 (the 'adjust in epilog' scheme) when ADJUSTMENT_DEF
3571 is not NULL, because this way the initialization vector is simpler (the
3572 same element in all entries), and Option2 otherwise.
3573
3574 A cost model should help decide between these two schemes. */
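/* Illustrative sketch, assuming the sum example above with init_val = 5 and
   a 4-unit vector: Option1 starts the vector of partial sums as {0,0,0,0}
   and reports ADJUSTMENT_DEF = 5 so the caller adds 5 back in the epilog,
   while Option2 places 5 in one lane and the identity 0 in the remaining
   lanes so that no adjustment is needed; either way the final scalar result
   is 5 + a[0] + ... + a[n-1].  */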
3575
3576 tree
3577 get_initial_def_for_reduction (gimple stmt, tree init_val,
3578 tree *adjustment_def)
3579 {
3580 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
3581 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3582 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3583 tree scalar_type = TREE_TYPE (init_val);
3584 tree vectype = get_vectype_for_scalar_type (scalar_type);
3585 int nunits;
3586 enum tree_code code = gimple_assign_rhs_code (stmt);
3587 tree def_for_init;
3588 tree init_def;
3589 tree *elts;
3590 int i;
3591 bool nested_in_vect_loop = false;
3592 tree init_value;
3593 REAL_VALUE_TYPE real_init_val = dconst0;
3594 int int_init_val = 0;
3595 gimple def_stmt = NULL;
3596
3597 gcc_assert (vectype);
3598 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3599
3600 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
3601 || SCALAR_FLOAT_TYPE_P (scalar_type));
3602
3603 if (nested_in_vect_loop_p (loop, stmt))
3604 nested_in_vect_loop = true;
3605 else
3606 gcc_assert (loop == (gimple_bb (stmt))->loop_father);
3607
3608 /* In case of double reduction we only create a vector variable to be put
3609 in the reduction phi node. The actual statement creation is done in
3610 vect_create_epilog_for_reduction. */
3611 if (adjustment_def && nested_in_vect_loop
3612 && TREE_CODE (init_val) == SSA_NAME
3613 && (def_stmt = SSA_NAME_DEF_STMT (init_val))
3614 && gimple_code (def_stmt) == GIMPLE_PHI
3615 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3616 && vinfo_for_stmt (def_stmt)
3617 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
3618 == vect_double_reduction_def)
3619 {
3620 *adjustment_def = NULL;
3621 return vect_create_destination_var (init_val, vectype);
3622 }
3623
3624 if (TREE_CONSTANT (init_val))
3625 {
3626 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3627 init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
3628 else
3629 init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
3630 }
3631 else
3632 init_value = init_val;
3633
3634 switch (code)
3635 {
3636 case WIDEN_SUM_EXPR:
3637 case DOT_PROD_EXPR:
3638 case PLUS_EXPR:
3639 case MINUS_EXPR:
3640 case BIT_IOR_EXPR:
3641 case BIT_XOR_EXPR:
3642 case MULT_EXPR:
3643 case BIT_AND_EXPR:
3644 /* ADJUSTMENT_DEF is NULL when called from
3645 vect_create_epilog_for_reduction to vectorize double reduction. */
3646 if (adjustment_def)
3647 {
3648 if (nested_in_vect_loop)
3649 *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
3650 NULL);
3651 else
3652 *adjustment_def = init_val;
3653 }
3654
3655 if (code == MULT_EXPR)
3656 {
3657 real_init_val = dconst1;
3658 int_init_val = 1;
3659 }
3660
3661 if (code == BIT_AND_EXPR)
3662 int_init_val = -1;
3663
3664 if (SCALAR_FLOAT_TYPE_P (scalar_type))
3665 def_for_init = build_real (scalar_type, real_init_val);
3666 else
3667 def_for_init = build_int_cst (scalar_type, int_init_val);
3668
3669 /* Create a vector of '0' or '1' in all elements except the first. */
3670 elts = XALLOCAVEC (tree, nunits);
3671 for (i = nunits - 2; i >= 0; --i)
3672 elts[i + 1] = def_for_init;
3673
3674 /* Option1: the first element is '0' or '1' as well. */
3675 if (adjustment_def)
3676 {
3677 elts[0] = def_for_init;
3678 init_def = build_vector (vectype, elts);
3679 break;
3680 }
3681
3682 /* Option2: the first element is INIT_VAL. */
3683 elts[0] = init_val;
3684 if (TREE_CONSTANT (init_val))
3685 init_def = build_vector (vectype, elts);
3686 else
3687 {
3688 vec<constructor_elt, va_gc> *v;
3689 vec_alloc (v, nunits);
3690 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
3691 for (i = 1; i < nunits; ++i)
3692 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
3693 init_def = build_constructor (vectype, v);
3694 }
3695
3696 break;
3697
3698 case MIN_EXPR:
3699 case MAX_EXPR:
3700 case COND_EXPR:
3701 if (adjustment_def)
3702 {
3703 *adjustment_def = NULL_TREE;
3704 init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
3705 break;
3706 }
3707
3708 init_def = build_vector_from_val (vectype, init_value);
3709 break;
3710
3711 default:
3712 gcc_unreachable ();
3713 }
3714
3715 return init_def;
3716 }
3717
3718
3719 /* Function vect_create_epilog_for_reduction
3720
3721 Create code at the loop-epilog to finalize the result of a reduction
3722 computation.
3723
3724 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
3725 reduction statements.
3726 STMT is the scalar reduction stmt that is being vectorized.
3727 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
3728 number of elements that we can fit in a vectype (nunits). In this case
3729 we have to generate more than one vector stmt - i.e - we need to "unroll"
3730 the vector stmt by a factor VF/nunits. For more details see documentation
3731 in vectorizable_operation.
3732 REDUC_CODE is the tree-code for the epilog reduction.
3733 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
3734 computation.
3735 REDUC_INDEX is the index of the operand in the right hand side of the
3736 statement that is defined by REDUCTION_PHI.
3737 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
3738 SLP_NODE is an SLP node containing a group of reduction statements. The
3739 first one in this group is STMT.
3740
3741 This function:
3742 1. Creates the reduction def-use cycles: sets the arguments for
3743 REDUCTION_PHIS:
3744 The loop-entry argument is the vectorized initial-value of the reduction.
3745 The loop-latch argument is taken from VECT_DEFS - the vector of partial
3746 sums.
3747 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
3748 by applying the operation specified by REDUC_CODE if available, or by
3749 other means (whole-vector shifts or a scalar loop).
3750 The function also creates a new phi node at the loop exit to preserve
3751 loop-closed form, as illustrated below.
3752
3753 The flow at the entry to this function:
3754
3755 loop:
3756 vec_def = phi <null, null> # REDUCTION_PHI
3757 VECT_DEF = vector_stmt # vectorized form of STMT
3758 s_loop = scalar_stmt # (scalar) STMT
3759 loop_exit:
3760 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3761 use <s_out0>
3762 use <s_out0>
3763
3764 The above is transformed by this function into:
3765
3766 loop:
3767 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3768 VECT_DEF = vector_stmt # vectorized form of STMT
3769 s_loop = scalar_stmt # (scalar) STMT
3770 loop_exit:
3771 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3772 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3773 v_out2 = reduce <v_out1>
3774 s_out3 = extract_field <v_out2, 0>
3775 s_out4 = adjust_result <s_out3>
3776 use <s_out4>
3777 use <s_out4>
3778 */
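/* Illustrative sketch, assuming a plain sum reduction vectorized with V4SI:
   v_out1 holds four partial sums {s0, s1, s2, s3}; "v_out2 = reduce <v_out1>"
   collapses them into a single lane (via a target reduc pattern, whole-vector
   shifts, or scalar code), "s_out3 = extract_field <v_out2, 0>" reads that
   lane out, and "s_out4 = adjust_result <s_out3>" adds back the initial value
   whenever the 'adjust in epilog' scheme of get_initial_def_for_reduction
   was used.  */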
3779
3780 static void
3781 vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt,
3782 int ncopies, enum tree_code reduc_code,
3783 vec<gimple> reduction_phis,
3784 int reduc_index, bool double_reduc,
3785 slp_tree slp_node)
3786 {
3787 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3788 stmt_vec_info prev_phi_info;
3789 tree vectype;
3790 enum machine_mode mode;
3791 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3792 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
3793 basic_block exit_bb;
3794 tree scalar_dest;
3795 tree scalar_type;
3796 gimple new_phi = NULL, phi;
3797 gimple_stmt_iterator exit_gsi;
3798 tree vec_dest;
3799 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3800 gimple epilog_stmt = NULL;
3801 enum tree_code code = gimple_assign_rhs_code (stmt);
3802 gimple exit_phi;
3803 tree bitsize, bitpos;
3804 tree adjustment_def = NULL;
3805 tree vec_initial_def = NULL;
3806 tree reduction_op, expr, def;
3807 tree orig_name, scalar_result;
3808 imm_use_iterator imm_iter, phi_imm_iter;
3809 use_operand_p use_p, phi_use_p;
3810 bool extract_scalar_result = false;
3811 gimple use_stmt, orig_stmt, reduction_phi = NULL;
3812 bool nested_in_vect_loop = false;
3813 auto_vec<gimple> new_phis;
3814 auto_vec<gimple> inner_phis;
3815 enum vect_def_type dt = vect_unknown_def_type;
3816 int j, i;
3817 auto_vec<tree> scalar_results;
3818 unsigned int group_size = 1, k, ratio;
3819 auto_vec<tree> vec_initial_defs;
3820 auto_vec<gimple> phis;
3821 bool slp_reduc = false;
3822 tree new_phi_result;
3823 gimple inner_phi = NULL;
3824
3825 if (slp_node)
3826 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
3827
3828 if (nested_in_vect_loop_p (loop, stmt))
3829 {
3830 outer_loop = loop;
3831 loop = loop->inner;
3832 nested_in_vect_loop = true;
3833 gcc_assert (!slp_node);
3834 }
3835
3836 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3837 {
3838 case GIMPLE_SINGLE_RHS:
3839 gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3840 == ternary_op);
3841 reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3842 break;
3843 case GIMPLE_UNARY_RHS:
3844 reduction_op = gimple_assign_rhs1 (stmt);
3845 break;
3846 case GIMPLE_BINARY_RHS:
3847 reduction_op = reduc_index ?
3848 gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3849 break;
3850 case GIMPLE_TERNARY_RHS:
3851 reduction_op = gimple_op (stmt, reduc_index + 1);
3852 break;
3853 default:
3854 gcc_unreachable ();
3855 }
3856
3857 vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3858 gcc_assert (vectype);
3859 mode = TYPE_MODE (vectype);
3860
3861 /* 1. Create the reduction def-use cycle:
3862 Set the arguments of REDUCTION_PHIS, i.e., transform
3863
3864 loop:
3865 vec_def = phi <null, null> # REDUCTION_PHI
3866 VECT_DEF = vector_stmt # vectorized form of STMT
3867 ...
3868
3869 into:
3870
3871 loop:
3872 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3873 VECT_DEF = vector_stmt # vectorized form of STMT
3874 ...
3875
3876 (in case of SLP, do it for all the phis). */
3877
3878 /* Get the loop-entry arguments. */
3879 if (slp_node)
3880 vect_get_vec_defs (reduction_op, NULL_TREE, stmt, &vec_initial_defs,
3881 NULL, slp_node, reduc_index);
3882 else
3883 {
3884 vec_initial_defs.create (1);
3885 /* For the case of reduction, vect_get_vec_def_for_operand returns
3886 the scalar def before the loop, that defines the initial value
3887 of the reduction variable. */
3888 vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3889 &adjustment_def);
3890 vec_initial_defs.quick_push (vec_initial_def);
3891 }
3892
3893 /* Set phi nodes arguments. */
3894 FOR_EACH_VEC_ELT (reduction_phis, i, phi)
3895 {
3896 tree vec_init_def = vec_initial_defs[i];
3897 tree def = vect_defs[i];
3898 for (j = 0; j < ncopies; j++)
3899 {
3900 /* Set the loop-entry arg of the reduction-phi. */
3901 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3902 UNKNOWN_LOCATION);
3903
3904 /* Set the loop-latch arg for the reduction-phi. */
3905 if (j > 0)
3906 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3907
3908 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3909
3910 if (dump_enabled_p ())
3911 {
3912 dump_printf_loc (MSG_NOTE, vect_location,
3913 "transform reduction: created def-use cycle: ");
3914 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
3915 dump_printf (MSG_NOTE, "\n");
3916 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
3917 dump_printf (MSG_NOTE, "\n");
3918 }
3919
3920 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3921 }
3922 }
3923
3924 /* 2. Create epilog code.
3925 The reduction epilog code operates across the elements of the vector
3926 of partial results computed by the vectorized loop.
3927 The reduction epilog code consists of:
3928
3929 step 1: compute the scalar result in a vector (v_out2)
3930 step 2: extract the scalar result (s_out3) from the vector (v_out2)
3931 step 3: adjust the scalar result (s_out3) if needed.
3932
3933 Step 1 can be accomplished using one of the following three schemes:
3934 (scheme 1) using reduc_code, if available.
3935 (scheme 2) using whole-vector shifts, if available.
3936 (scheme 3) using a scalar loop. In this case steps 1+2 above are
3937 combined.
3938
3939 The overall epilog code looks like this:
3940
3941 s_out0 = phi <s_loop> # original EXIT_PHI
3942 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3943 v_out2 = reduce <v_out1> # step 1
3944 s_out3 = extract_field <v_out2, 0> # step 2
3945 s_out4 = adjust_result <s_out3> # step 3
3946
3947 (step 3 is optional, and steps 1 and 2 may be combined).
3948 Lastly, the uses of s_out0 are replaced by s_out4. */
3949
3950
3951 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
3952 v_out1 = phi <VECT_DEF>
3953 Store them in NEW_PHIS. */
3954
3955 exit_bb = single_exit (loop)->dest;
3956 prev_phi_info = NULL;
3957 new_phis.create (vect_defs.length ());
3958 FOR_EACH_VEC_ELT (vect_defs, i, def)
3959 {
3960 for (j = 0; j < ncopies; j++)
3961 {
3962 tree new_def = copy_ssa_name (def, NULL);
3963 phi = create_phi_node (new_def, exit_bb);
3964 set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3965 if (j == 0)
3966 new_phis.quick_push (phi);
3967 else
3968 {
3969 def = vect_get_vec_def_for_stmt_copy (dt, def);
3970 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3971 }
3972
3973 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3974 prev_phi_info = vinfo_for_stmt (phi);
3975 }
3976 }
3977
3978 /* The epilogue is created for the outer-loop, i.e., for the loop being
3979 vectorized. Create exit phis for the outer loop. */
3980 if (double_reduc)
3981 {
3982 loop = outer_loop;
3983 exit_bb = single_exit (loop)->dest;
3984 inner_phis.create (vect_defs.length ());
3985 FOR_EACH_VEC_ELT (new_phis, i, phi)
3986 {
3987 tree new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
3988 gimple outer_phi = create_phi_node (new_result, exit_bb);
3989 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
3990 PHI_RESULT (phi));
3991 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
3992 loop_vinfo, NULL));
3993 inner_phis.quick_push (phi);
3994 new_phis[i] = outer_phi;
3995 prev_phi_info = vinfo_for_stmt (outer_phi);
3996 while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
3997 {
3998 phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3999 new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
4000 outer_phi = create_phi_node (new_result, exit_bb);
4001 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4002 PHI_RESULT (phi));
4003 set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
4004 loop_vinfo, NULL));
4005 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
4006 prev_phi_info = vinfo_for_stmt (outer_phi);
4007 }
4008 }
4009 }
4010
4011 exit_gsi = gsi_after_labels (exit_bb);
4012
4013 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4014 (i.e. when reduc_code is not available) and in the final adjustment
4015 code (if needed). Also get the original scalar reduction variable as
4016 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4017 represents a reduction pattern), the tree-code and scalar-def are
4018 taken from the original stmt that the pattern-stmt (STMT) replaces.
4019 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4020 are taken from STMT. */
4021
4022 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4023 if (!orig_stmt)
4024 {
4025 /* Regular reduction */
4026 orig_stmt = stmt;
4027 }
4028 else
4029 {
4030 /* Reduction pattern */
4031 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
4032 gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
4033 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4034 }
4035
4036 code = gimple_assign_rhs_code (orig_stmt);
4037 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4038 partial results are added and not subtracted. */
4039 if (code == MINUS_EXPR)
4040 code = PLUS_EXPR;
4041
4042 scalar_dest = gimple_assign_lhs (orig_stmt);
4043 scalar_type = TREE_TYPE (scalar_dest);
4044 scalar_results.create (group_size);
4045 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4046 bitsize = TYPE_SIZE (scalar_type);
4047
4048 /* In case this is a reduction in an inner-loop while vectorizing an outer
4049 loop - we don't need to extract a single scalar result at the end of the
4050 inner-loop (unless it is double reduction, i.e., the use of reduction is
4051 outside the outer-loop). The final vector of partial results will be used
4052 in the vectorized outer-loop, or reduced to a scalar result at the end of
4053 the outer-loop. */
4054 if (nested_in_vect_loop && !double_reduc)
4055 goto vect_finalize_reduction;
4056
4057 /* SLP reduction without reduction chain, e.g.,
4058 # a1 = phi <a2, a0>
4059 # b1 = phi <b2, b0>
4060 a2 = operation (a1)
4061 b2 = operation (b1) */
4062 slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
4063
4064 /* In case of reduction chain, e.g.,
4065 # a1 = phi <a3, a0>
4066 a2 = operation (a1)
4067 a3 = operation (a2),
4068
4069 we may end up with more than one vector result. Here we reduce them to
4070 one vector. */
4071 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4072 {
4073 tree first_vect = PHI_RESULT (new_phis[0]);
4074 tree tmp;
4075 gimple new_vec_stmt = NULL;
4076
4077 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4078 for (k = 1; k < new_phis.length (); k++)
4079 {
4080 gimple next_phi = new_phis[k];
4081 tree second_vect = PHI_RESULT (next_phi);
4082
4083 tmp = build2 (code, vectype, first_vect, second_vect);
4084 new_vec_stmt = gimple_build_assign (vec_dest, tmp);
4085 first_vect = make_ssa_name (vec_dest, new_vec_stmt);
4086 gimple_assign_set_lhs (new_vec_stmt, first_vect);
4087 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4088 }
4089
4090 new_phi_result = first_vect;
4091 if (new_vec_stmt)
4092 {
4093 new_phis.truncate (0);
4094 new_phis.safe_push (new_vec_stmt);
4095 }
4096 }
4097 else
4098 new_phi_result = PHI_RESULT (new_phis[0]);
4099
4100 /* 2.3 Create the reduction code, using one of the three schemes described
4101 above. In SLP we simply need to extract all the elements from the
4102 vector (without reducing them), so we use scalar shifts. */
4103 if (reduc_code != ERROR_MARK && !slp_reduc)
4104 {
4105 tree tmp;
4106
4107 /*** Case 1: Create:
4108 v_out2 = reduc_expr <v_out1> */
4109
4110 if (dump_enabled_p ())
4111 dump_printf_loc (MSG_NOTE, vect_location,
4112 "Reduce using direct vector reduction.\n");
4113
4114 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4115 tmp = build1 (reduc_code, vectype, new_phi_result);
4116 epilog_stmt = gimple_build_assign (vec_dest, tmp);
4117 new_temp = make_ssa_name (vec_dest, epilog_stmt);
4118 gimple_assign_set_lhs (epilog_stmt, new_temp);
4119 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4120
4121 extract_scalar_result = true;
4122 }
4123 else
4124 {
4125 enum tree_code shift_code = ERROR_MARK;
4126 bool have_whole_vector_shift = true;
4127 int bit_offset;
4128 int element_bitsize = tree_to_uhwi (bitsize);
4129 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
4130 tree vec_temp;
4131
4132 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
4133 shift_code = VEC_RSHIFT_EXPR;
4134 else
4135 have_whole_vector_shift = false;
4136
4137 /* Regardless of whether we have a whole vector shift, if we're
4138 emulating the operation via tree-vect-generic, we don't want
4139 to use it. Only the first round of the reduction is likely
4140 to still be profitable via emulation. */
4141 /* ??? It might be better to emit a reduction tree code here, so that
4142 tree-vect-generic can expand the first round via bit tricks. */
4143 if (!VECTOR_MODE_P (mode))
4144 have_whole_vector_shift = false;
4145 else
4146 {
4147 optab optab = optab_for_tree_code (code, vectype, optab_default);
4148 if (optab_handler (optab, mode) == CODE_FOR_nothing)
4149 have_whole_vector_shift = false;
4150 }
4151
4152 if (have_whole_vector_shift && !slp_reduc)
4153 {
4154 /*** Case 2: Create:
4155 for (offset = VS/2; offset >= element_size; offset/=2)
4156 {
4157 Create: va' = vec_shift <va, offset>
4158 Create: va = vop <va, va'>
4159 } */
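/* Illustrative sketch, assuming a 128-bit V4SI vector holding the
   partial sums {s0, s1, s2, s3}: the first iteration shifts the whole
   vector by 64 bits and adds, producing the two sums s0 + s2 and
   s1 + s3; the second iteration shifts by 32 bits and adds, leaving
   the complete sum in the lane that step 2.4 below extracts; the
   other lanes hold don't-care values.  */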
4160
4161 if (dump_enabled_p ())
4162 dump_printf_loc (MSG_NOTE, vect_location,
4163 "Reduce using vector shifts\n");
4164
4165 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4166 new_temp = new_phi_result;
4167 for (bit_offset = vec_size_in_bits/2;
4168 bit_offset >= element_bitsize;
4169 bit_offset /= 2)
4170 {
4171 tree bitpos = size_int (bit_offset);
4172
4173 epilog_stmt = gimple_build_assign_with_ops (shift_code,
4174 vec_dest, new_temp, bitpos);
4175 new_name = make_ssa_name (vec_dest, epilog_stmt);
4176 gimple_assign_set_lhs (epilog_stmt, new_name);
4177 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4178
4179 epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
4180 new_name, new_temp);
4181 new_temp = make_ssa_name (vec_dest, epilog_stmt);
4182 gimple_assign_set_lhs (epilog_stmt, new_temp);
4183 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4184 }
4185
4186 extract_scalar_result = true;
4187 }
4188 else
4189 {
4190 tree rhs;
4191
4192 /*** Case 3: Create:
4193 s = extract_field <v_out2, 0>
4194 for (offset = element_size;
4195 offset < vector_size;
4196 offset += element_size;)
4197 {
4198 Create: s' = extract_field <v_out2, offset>
4199 Create: s = op <s, s'> // For non SLP cases
4200 } */
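/* Illustrative sketch, assuming 4 elements; this emits roughly
     s  = v_out2[0];
     s' = v_out2[1];  s = op (s, s');
     s' = v_out2[2];  s = op (s, s');
     s' = v_out2[3];  s = op (s, s');
   via BIT_FIELD_REFs, except that in SLP the extracted values are
   collected in SCALAR_RESULTS instead of being combined.  */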
4201
4202 if (dump_enabled_p ())
4203 dump_printf_loc (MSG_NOTE, vect_location,
4204 "Reduce using scalar code.\n");
4205
4206 vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
4207 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
4208 {
4209 if (gimple_code (new_phi) == GIMPLE_PHI)
4210 vec_temp = PHI_RESULT (new_phi);
4211 else
4212 vec_temp = gimple_assign_lhs (new_phi);
4213 rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
4214 bitsize_zero_node);
4215 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4216 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4217 gimple_assign_set_lhs (epilog_stmt, new_temp);
4218 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4219
4220 /* In SLP we don't need to apply the reduction operation, so we just
4221 collect s' values in SCALAR_RESULTS. */
4222 if (slp_reduc)
4223 scalar_results.safe_push (new_temp);
4224
4225 for (bit_offset = element_bitsize;
4226 bit_offset < vec_size_in_bits;
4227 bit_offset += element_bitsize)
4228 {
4229 tree bitpos = bitsize_int (bit_offset);
4230 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
4231 bitsize, bitpos);
4232
4233 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4234 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
4235 gimple_assign_set_lhs (epilog_stmt, new_name);
4236 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4237
4238 if (slp_reduc)
4239 {
4240 /* In SLP we don't need to apply the reduction operation, so
4241 we just collect s' values in SCALAR_RESULTS. */
4242 new_temp = new_name;
4243 scalar_results.safe_push (new_name);
4244 }
4245 else
4246 {
4247 epilog_stmt = gimple_build_assign_with_ops (code,
4248 new_scalar_dest, new_name, new_temp);
4249 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4250 gimple_assign_set_lhs (epilog_stmt, new_temp);
4251 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4252 }
4253 }
4254 }
4255
4256 /* The only case in which we need to reduce scalar results in SLP is
4257 unrolling. If the size of SCALAR_RESULTS is greater than
4258 GROUP_SIZE, we reduce them by combining elements modulo
4259 GROUP_SIZE. */
4260 if (slp_reduc)
4261 {
4262 tree res, first_res, new_res;
4263 gimple new_stmt;
4264
4265 /* Reduce multiple scalar results in case of SLP unrolling. */
4266 for (j = group_size; scalar_results.iterate (j, &res);
4267 j++)
4268 {
4269 first_res = scalar_results[j % group_size];
4270 new_stmt = gimple_build_assign_with_ops (code,
4271 new_scalar_dest, first_res, res);
4272 new_res = make_ssa_name (new_scalar_dest, new_stmt);
4273 gimple_assign_set_lhs (new_stmt, new_res);
4274 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
4275 scalar_results[j % group_size] = new_res;
4276 }
4277 }
4278 else
4279 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
4280 scalar_results.safe_push (new_temp);
4281
4282 extract_scalar_result = false;
4283 }
4284 }
4285
4286 /* 2.4 Extract the final scalar result. Create:
4287 s_out3 = extract_field <v_out2, bitpos> */
4288
4289 if (extract_scalar_result)
4290 {
4291 tree rhs;
4292
4293 if (dump_enabled_p ())
4294 dump_printf_loc (MSG_NOTE, vect_location,
4295 "extract scalar result\n");
4296
4297 if (BYTES_BIG_ENDIAN)
4298 bitpos = size_binop (MULT_EXPR,
4299 bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
4300 TYPE_SIZE (scalar_type));
4301 else
4302 bitpos = bitsize_zero_node;
4303
4304 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
4305 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4306 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4307 gimple_assign_set_lhs (epilog_stmt, new_temp);
4308 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4309 scalar_results.safe_push (new_temp);
4310 }
4311
4312 vect_finalize_reduction:
4313
4314 if (double_reduc)
4315 loop = loop->inner;
4316
4317 /* 2.5 Adjust the final result by the initial value of the reduction
4318 variable. (When such adjustment is not needed, then
4319 'adjustment_def' is zero). For example, if code is PLUS we create:
4320 new_temp = loop_exit_def + adjustment_def */
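/* Illustrative sketch, assuming the sum example with init_val = 5 and
   Option1 of get_initial_def_for_reduction: ADJUSTMENT_DEF is 5 and the
   scalar (non-nested) case below emits
     s_out4 = s_out3 + 5;
   in the nested case the adjustment is a vector and the addition is
   applied to the vector of partial results instead.  */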
4321
4322 if (adjustment_def)
4323 {
4324 gcc_assert (!slp_reduc);
4325 if (nested_in_vect_loop)
4326 {
4327 new_phi = new_phis[0];
4328 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
4329 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
4330 new_dest = vect_create_destination_var (scalar_dest, vectype);
4331 }
4332 else
4333 {
4334 new_temp = scalar_results[0];
4335 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
4336 expr = build2 (code, scalar_type, new_temp, adjustment_def);
4337 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
4338 }
4339
4340 epilog_stmt = gimple_build_assign (new_dest, expr);
4341 new_temp = make_ssa_name (new_dest, epilog_stmt);
4342 gimple_assign_set_lhs (epilog_stmt, new_temp);
4343 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4344 if (nested_in_vect_loop)
4345 {
4346 set_vinfo_for_stmt (epilog_stmt,
4347 new_stmt_vec_info (epilog_stmt, loop_vinfo,
4348 NULL));
4349 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
4350 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
4351
4352 if (!double_reduc)
4353 scalar_results.quick_push (new_temp);
4354 else
4355 scalar_results[0] = new_temp;
4356 }
4357 else
4358 scalar_results[0] = new_temp;
4359
4360 new_phis[0] = epilog_stmt;
4361 }
4362
4363 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
4364 phis with new adjusted scalar results, i.e., replace use <s_out0>
4365 with use <s_out4>.
4366
4367 Transform:
4368 loop_exit:
4369 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4370 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4371 v_out2 = reduce <v_out1>
4372 s_out3 = extract_field <v_out2, 0>
4373 s_out4 = adjust_result <s_out3>
4374 use <s_out0>
4375 use <s_out0>
4376
4377 into:
4378
4379 loop_exit:
4380 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4381 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4382 v_out2 = reduce <v_out1>
4383 s_out3 = extract_field <v_out2, 0>
4384 s_out4 = adjust_result <s_out3>
4385 use <s_out4>
4386 use <s_out4> */
4387
4388
4389 /* In an SLP reduction chain we reduce the vector results into one vector if
4390 necessary, hence we set GROUP_SIZE to 1 here. SCALAR_DEST is the LHS of
4391 the last stmt in the reduction chain, since we are looking for the loop
4392 exit phi node. */
4393 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4394 {
4395 scalar_dest = gimple_assign_lhs (
4396 SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
4397 group_size = 1;
4398 }
4399
4400 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
4401 case GROUP_SIZE is greater than the vectorization factor). Therefore, we
4402 need to match SCALAR_RESULTS with corresponding statements. The first
4403 (GROUP_SIZE / number of new vector stmts) scalar results correspond to
4404 the first vector stmt, etc.
4405 (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
4406 if (group_size > new_phis.length ())
4407 {
4408 ratio = group_size / new_phis.length ();
4409 gcc_assert (!(group_size % new_phis.length ()));
4410 }
4411 else
4412 ratio = 1;
4413
4414 for (k = 0; k < group_size; k++)
4415 {
4416 if (k % ratio == 0)
4417 {
4418 epilog_stmt = new_phis[k / ratio];
4419 reduction_phi = reduction_phis[k / ratio];
4420 if (double_reduc)
4421 inner_phi = inner_phis[k / ratio];
4422 }
4423
4424 if (slp_reduc)
4425 {
4426 gimple current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];
4427
4428 orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
4429 /* SLP statements can't participate in patterns. */
4430 gcc_assert (!orig_stmt);
4431 scalar_dest = gimple_assign_lhs (current_stmt);
4432 }
4433
4434 phis.create (3);
4435 /* Find the loop-closed-use at the loop exit of the original scalar
4436 result. (The reduction result is expected to have two immediate uses -
4437 one at the latch block, and one at the loop exit). */
4438 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4439 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
4440 && !is_gimple_debug (USE_STMT (use_p)))
4441 phis.safe_push (USE_STMT (use_p));
4442
4443 /* While we expect to have found an exit_phi because of loop-closed-ssa
4444 form we can end up without one if the scalar cycle is dead. */
4445
4446 FOR_EACH_VEC_ELT (phis, i, exit_phi)
4447 {
4448 if (outer_loop)
4449 {
4450 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
4451 gimple vect_phi;
4452
4453 /* FORNOW. Currently not supporting the case that an inner-loop
4454 reduction is not used in the outer-loop (but only outside the
4455 outer-loop), unless it is double reduction. */
4456 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
4457 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
4458 || double_reduc);
4459
4460 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
4461 if (!double_reduc
4462 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
4463 != vect_double_reduction_def)
4464 continue;
4465
4466 /* Handle double reduction:
4467
4468 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
4469 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
4470 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
4471 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
4472
4473 At that point the regular reduction (stmt2 and stmt3) is
4474 already vectorized, as well as the exit phi node, stmt4.
4475 Here we vectorize the phi node of double reduction, stmt1, and
4476 update all relevant statements. */
4477
4478 /* Go through all the uses of s2 to find double reduction phi
4479 node, i.e., stmt1 above. */
4480 orig_name = PHI_RESULT (exit_phi);
4481 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4482 {
4483 stmt_vec_info use_stmt_vinfo;
4484 stmt_vec_info new_phi_vinfo;
4485 tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
4486 basic_block bb = gimple_bb (use_stmt);
4487 gimple use;
4488
4489 /* Check that USE_STMT is really double reduction phi
4490 node. */
4491 if (gimple_code (use_stmt) != GIMPLE_PHI
4492 || gimple_phi_num_args (use_stmt) != 2
4493 || bb->loop_father != outer_loop)
4494 continue;
4495 use_stmt_vinfo = vinfo_for_stmt (use_stmt);
4496 if (!use_stmt_vinfo
4497 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
4498 != vect_double_reduction_def)
4499 continue;
4500
4501 /* Create vector phi node for double reduction:
4502 vs1 = phi <vs0, vs2>
4503 vs1 was created previously in this function by a call to
4504 vect_get_vec_def_for_operand and is stored in
4505 vec_initial_def;
4506 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
4507 vs0 is created here. */
4508
4509 /* Create vector phi node. */
4510 vect_phi = create_phi_node (vec_initial_def, bb);
4511 new_phi_vinfo = new_stmt_vec_info (vect_phi,
4512 loop_vec_info_for_loop (outer_loop), NULL);
4513 set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
4514
4515 /* Create vs0 - initial def of the double reduction phi. */
4516 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
4517 loop_preheader_edge (outer_loop));
4518 init_def = get_initial_def_for_reduction (stmt,
4519 preheader_arg, NULL);
4520 vect_phi_init = vect_init_vector (use_stmt, init_def,
4521 vectype, NULL);
4522
4523 /* Update phi node arguments with vs0 and vs2. */
4524 add_phi_arg (vect_phi, vect_phi_init,
4525 loop_preheader_edge (outer_loop),
4526 UNKNOWN_LOCATION);
4527 add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
4528 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
4529 if (dump_enabled_p ())
4530 {
4531 dump_printf_loc (MSG_NOTE, vect_location,
4532 "created double reduction phi node: ");
4533 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
4534 dump_printf (MSG_NOTE, "\n");
4535 }
4536
4537 vect_phi_res = PHI_RESULT (vect_phi);
4538
4539 /* Replace the use, i.e., set the correct vs1 in the regular
4540 reduction phi node. FORNOW, NCOPIES is always 1, so the
4541 loop is redundant. */
4542 use = reduction_phi;
4543 for (j = 0; j < ncopies; j++)
4544 {
4545 edge pr_edge = loop_preheader_edge (loop);
4546 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
4547 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
4548 }
4549 }
4550 }
4551 }
4552
4553 phis.release ();
4554 if (nested_in_vect_loop)
4555 {
4556 if (double_reduc)
4557 loop = outer_loop;
4558 else
4559 continue;
4560 }
4561
4562 phis.create (3);
4563 /* Find the loop-closed-use at the loop exit of the original scalar
4564 result. (The reduction result is expected to have two immediate uses,
4565 one at the latch block, and one at the loop exit). For double
4566 reductions we are looking for exit phis of the outer loop. */
4567 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4568 {
4569 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4570 {
4571 if (!is_gimple_debug (USE_STMT (use_p)))
4572 phis.safe_push (USE_STMT (use_p));
4573 }
4574 else
4575 {
4576 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
4577 {
4578 tree phi_res = PHI_RESULT (USE_STMT (use_p));
4579
4580 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
4581 {
4582 if (!flow_bb_inside_loop_p (loop,
4583 gimple_bb (USE_STMT (phi_use_p)))
4584 && !is_gimple_debug (USE_STMT (phi_use_p)))
4585 phis.safe_push (USE_STMT (phi_use_p));
4586 }
4587 }
4588 }
4589 }
4590
4591 FOR_EACH_VEC_ELT (phis, i, exit_phi)
4592 {
4593 /* Replace the uses: */
4594 orig_name = PHI_RESULT (exit_phi);
4595 scalar_result = scalar_results[k];
4596 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4597 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
4598 SET_USE (use_p, scalar_result);
4599 }
4600
4601 phis.release ();
4602 }
4603 }
4604
4605
4606 /* Function vectorizable_reduction.
4607
4608 Check if STMT performs a reduction operation that can be vectorized.
4609 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4610 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4611 Return FALSE if not a vectorizable STMT, TRUE otherwise.
4612
4613 This function also handles reduction idioms (patterns) that have been
4614 recognized in advance during vect_pattern_recog. In this case, STMT may be
4615 of this form:
4616 X = pattern_expr (arg0, arg1, ..., X)
4617 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
4618 sequence that had been detected and replaced by the pattern-stmt (STMT).
4619
4620 In some cases of reduction patterns, the type of the reduction variable X is
4621 different than the type of the other arguments of STMT.
4622 In such cases, the vectype that is used when transforming STMT into a vector
4623 stmt is different than the vectype that is used to determine the
4624 vectorization factor, because it consists of a different number of elements
4625 than the actual number of elements that are being operated upon in parallel.
4626
4627 For example, consider an accumulation of shorts into an int accumulator.
4628 On some targets it's possible to vectorize this pattern operating on 8
4629 shorts at a time (hence, the vectype for purposes of determining the
4630 vectorization factor should be V8HI); on the other hand, the vectype that
4631 is used to create the vector form is actually V4SI (the type of the result).
4632
4633 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
4634 indicates the actual level of parallelism (V8HI in the example), so
4635 that the right vectorization factor would be derived. This vectype
4636 corresponds to the type of arguments to the reduction stmt, and should *NOT*
4637 be used to create the vectorized stmt. The right vectype for the vectorized
4638 stmt is obtained from the type of the result X:
4639 get_vectype_for_scalar_type (TREE_TYPE (X))
4640
4641 This means that, contrary to "regular" reductions (or "regular" stmts in
4642 general), the following equation:
4643 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
4644 does *NOT* necessarily hold for reduction patterns. */
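/* Illustrative sketch, assuming a target with V8HI/V4SI and the widen_sum
   pattern for the accumulation of shorts described above:

     short a[N];  int sum = 0;
     for (i = 0; i < N; i++)
       sum += a[i];

   Here STMT_VINFO_VECTYPE is V8HI (eight shorts are processed in parallel,
   which determines the vectorization factor), while the vectorized stmt
   itself is created with the vectype of the result X,
   get_vectype_for_scalar_type (int), i.e. V4SI.  */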
4645
4646 bool
4647 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
4648 gimple *vec_stmt, slp_tree slp_node)
4649 {
4650 tree vec_dest;
4651 tree scalar_dest;
4652 tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
4653 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4654 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4655 tree vectype_in = NULL_TREE;
4656 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4657 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4658 enum tree_code code, orig_code, epilog_reduc_code;
4659 enum machine_mode vec_mode;
4660 int op_type;
4661 optab optab, reduc_optab;
4662 tree new_temp = NULL_TREE;
4663 tree def;
4664 gimple def_stmt;
4665 enum vect_def_type dt;
4666 gimple new_phi = NULL;
4667 tree scalar_type;
4668 bool is_simple_use;
4669 gimple orig_stmt;
4670 stmt_vec_info orig_stmt_info;
4671 tree expr = NULL_TREE;
4672 int i;
4673 int ncopies;
4674 int epilog_copies;
4675 stmt_vec_info prev_stmt_info, prev_phi_info;
4676 bool single_defuse_cycle = false;
4677 tree reduc_def = NULL_TREE;
4678 gimple new_stmt = NULL;
4679 int j;
4680 tree ops[3];
4681 bool nested_cycle = false, found_nested_cycle_def = false;
4682 gimple reduc_def_stmt = NULL;
4683 /* The default is that the reduction variable is the last in statement. */
4684 int reduc_index = 2;
4685 bool double_reduc = false, dummy;
4686 basic_block def_bb;
4687 struct loop * def_stmt_loop, *outer_loop = NULL;
4688 tree def_arg;
4689 gimple def_arg_stmt;
4690 auto_vec<tree> vec_oprnds0;
4691 auto_vec<tree> vec_oprnds1;
4692 auto_vec<tree> vect_defs;
4693 auto_vec<gimple> phis;
4694 int vec_num;
4695 tree def0, def1, tem, op0, op1 = NULL_TREE;
4696
4697 /* In case of reduction chain we switch to the first stmt in the chain, but
4698 we don't update STMT_INFO, since only the last stmt is marked as reduction
4699 and has reduction properties. */
4700 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4701 stmt = GROUP_FIRST_ELEMENT (stmt_info);
4702
4703 if (nested_in_vect_loop_p (loop, stmt))
4704 {
4705 outer_loop = loop;
4706 loop = loop->inner;
4707 nested_cycle = true;
4708 }
4709
4710 /* 1. Is vectorizable reduction? */
4711 /* Not supportable if the reduction variable is used in the loop, unless
4712 it's a reduction chain. */
4713 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
4714 && !GROUP_FIRST_ELEMENT (stmt_info))
4715 return false;
4716
4717 /* Reductions that are not used even in an enclosing outer-loop
4718 are expected to be "live" (used out of the loop). */
4719 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
4720 && !STMT_VINFO_LIVE_P (stmt_info))
4721 return false;
4722
4723 /* Make sure it was already recognized as a reduction computation. */
4724 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
4725 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
4726 return false;
4727
4728 /* 2. Has this been recognized as a reduction pattern?
4729
4730 Check if STMT represents a pattern that has been recognized
4731 in earlier analysis stages. For stmts that represent a pattern,
4732 the STMT_VINFO_RELATED_STMT field records the last stmt in
4733 the original sequence that constitutes the pattern. */
4734
4735 orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4736 if (orig_stmt)
4737 {
4738 orig_stmt_info = vinfo_for_stmt (orig_stmt);
4739 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4740 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
4741 }
4742
4743 /* 3. Check the operands of the operation. The first operands are defined
4744 inside the loop body. The last operand is the reduction variable,
4745 which is defined by the loop-header-phi. */
4746
4747 gcc_assert (is_gimple_assign (stmt));
4748
4749 /* Flatten RHS. */
4750 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
4751 {
4752 case GIMPLE_SINGLE_RHS:
4753 op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
4754 if (op_type == ternary_op)
4755 {
4756 tree rhs = gimple_assign_rhs1 (stmt);
4757 ops[0] = TREE_OPERAND (rhs, 0);
4758 ops[1] = TREE_OPERAND (rhs, 1);
4759 ops[2] = TREE_OPERAND (rhs, 2);
4760 code = TREE_CODE (rhs);
4761 }
4762 else
4763 return false;
4764 break;
4765
4766 case GIMPLE_BINARY_RHS:
4767 code = gimple_assign_rhs_code (stmt);
4768 op_type = TREE_CODE_LENGTH (code);
4769 gcc_assert (op_type == binary_op);
4770 ops[0] = gimple_assign_rhs1 (stmt);
4771 ops[1] = gimple_assign_rhs2 (stmt);
4772 break;
4773
4774 case GIMPLE_TERNARY_RHS:
4775 code = gimple_assign_rhs_code (stmt);
4776 op_type = TREE_CODE_LENGTH (code);
4777 gcc_assert (op_type == ternary_op);
4778 ops[0] = gimple_assign_rhs1 (stmt);
4779 ops[1] = gimple_assign_rhs2 (stmt);
4780 ops[2] = gimple_assign_rhs3 (stmt);
4781 break;
4782
4783 case GIMPLE_UNARY_RHS:
4784 return false;
4785
4786 default:
4787 gcc_unreachable ();
4788 }
4789
4790 if (code == COND_EXPR && slp_node)
4791 return false;
4792
4793 scalar_dest = gimple_assign_lhs (stmt);
4794 scalar_type = TREE_TYPE (scalar_dest);
4795 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
4796 && !SCALAR_FLOAT_TYPE_P (scalar_type))
4797 return false;
4798
4799 /* Do not try to vectorize bit-precision reductions. */
4800 if ((TYPE_PRECISION (scalar_type)
4801 != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
4802 return false;
4803
4804 /* All operands but the last are expected to be defined in the loop.
4805 The last operand is the reduction variable. In case of a nested cycle this
4806 assumption is not true: we use reduc_index to record the index of the
4807 reduction variable. */
4808 for (i = 0; i < op_type - 1; i++)
4809 {
4810 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
4811 if (i == 0 && code == COND_EXPR)
4812 continue;
4813
4814 is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4815 &def_stmt, &def, &dt, &tem);
4816 if (!vectype_in)
4817 vectype_in = tem;
4818 gcc_assert (is_simple_use);
4819
4820 if (dt != vect_internal_def
4821 && dt != vect_external_def
4822 && dt != vect_constant_def
4823 && dt != vect_induction_def
4824 && !(dt == vect_nested_cycle && nested_cycle))
4825 return false;
4826
4827 if (dt == vect_nested_cycle)
4828 {
4829 found_nested_cycle_def = true;
4830 reduc_def_stmt = def_stmt;
4831 reduc_index = i;
4832 }
4833 }
4834
4835 is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4836 &def_stmt, &def, &dt, &tem);
4837 if (!vectype_in)
4838 vectype_in = tem;
4839 gcc_assert (is_simple_use);
4840 if (!(dt == vect_reduction_def
4841 || dt == vect_nested_cycle
4842 || ((dt == vect_internal_def || dt == vect_external_def
4843 || dt == vect_constant_def || dt == vect_induction_def)
4844 && nested_cycle && found_nested_cycle_def)))
4845 {
4846 /* For pattern recognized stmts, orig_stmt might be a reduction,
4847 but some helper statements for the pattern might not, or
4848 might be COND_EXPRs with reduction uses in the condition. */
4849 gcc_assert (orig_stmt);
4850 return false;
4851 }
4852 if (!found_nested_cycle_def)
4853 reduc_def_stmt = def_stmt;
4854
4855 gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
4856 if (orig_stmt)
4857 gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
4858 reduc_def_stmt,
4859 !nested_cycle,
4860 &dummy));
4861 else
4862 {
4863 gimple tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
4864 !nested_cycle, &dummy);
4865 /* We changed STMT to be the first stmt in reduction chain, hence we
4866 check that in this case the first element in the chain is STMT. */
4867 gcc_assert (stmt == tmp
4868 || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
4869 }
4870
4871 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
4872 return false;
4873
4874 if (slp_node || PURE_SLP_STMT (stmt_info))
4875 ncopies = 1;
4876 else
4877 ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4878 / TYPE_VECTOR_SUBPARTS (vectype_in));
4879
4880 gcc_assert (ncopies >= 1);
4881
4882 vec_mode = TYPE_MODE (vectype_in);
4883
4884 if (code == COND_EXPR)
4885 {
4886 if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL))
4887 {
4888 if (dump_enabled_p ())
4889 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4890 "unsupported condition in reduction\n");
4891
4892 return false;
4893 }
4894 }
4895 else
4896 {
4897 /* 4. Supportable by target? */
4898
4899 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
4900 || code == LROTATE_EXPR || code == RROTATE_EXPR)
4901 {
4902 /* Shifts and rotates are only supported by vectorizable_shifts,
4903 not vectorizable_reduction. */
4904 if (dump_enabled_p ())
4905 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4906 "unsupported shift or rotation.\n");
4907 return false;
4908 }
4909
4910 /* 4.1. check support for the operation in the loop */
4911 optab = optab_for_tree_code (code, vectype_in, optab_default);
4912 if (!optab)
4913 {
4914 if (dump_enabled_p ())
4915 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4916 "no optab.\n");
4917
4918 return false;
4919 }
4920
4921 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
4922 {
4923 if (dump_enabled_p ())
4924 dump_printf (MSG_NOTE, "op not supported by target.\n");
4925
4926 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4927 || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4928 < vect_min_worthwhile_factor (code))
4929 return false;
4930
4931 if (dump_enabled_p ())
4932 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
4933 }
4934
4935 /* Worthwhile without SIMD support? */
4936 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
4937 && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4938 < vect_min_worthwhile_factor (code))
4939 {
4940 if (dump_enabled_p ())
4941 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4942 "not worthwhile without SIMD support.\n");
4943
4944 return false;
4945 }
4946 }
4947
4948 /* 4.2. Check support for the epilog operation.
4949
4950 If STMT represents a reduction pattern, then the type of the
4951 reduction variable may be different than the type of the rest
4952 of the arguments. For example, consider the case of accumulation
4953 of shorts into an int accumulator; The original code:
4954 S1: int_a = (int) short_a;
4955 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
4956
4957 was replaced with:
4958 STMT: int_acc = widen_sum <short_a, int_acc>
4959
4960 This means that:
4961 1. The tree-code that is used to create the vector operation in the
4962 epilog code (that reduces the partial results) is not the
4963 tree-code of STMT, but is rather the tree-code of the original
4964 stmt from the pattern that STMT is replacing. I.e., in the example
4965 above we want to use 'widen_sum' in the loop, but 'plus' in the
4966 epilog.
4967 2. The type (mode) we use to check available target support
4968 for the vector operation to be created in the *epilog*, is
4969 determined by the type of the reduction variable (in the example
4970 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
4971 However the type (mode) we use to check available target support
4972 for the vector operation to be created *inside the loop*, is
4973 determined by the type of the other arguments to STMT (in the
4974 example we'd check this: optab_handler (widen_sum_optab,
4975 vect_short_mode)).
4976
4977 This is contrary to "regular" reductions, in which the types of all
4978 the arguments are the same as the type of the reduction variable.
4979 For "regular" reductions we can therefore use the same vector type
4980 (and also the same tree-code) when generating the epilog code and
4981 when generating the code inside the loop. */
4982
4983 if (orig_stmt)
4984 {
4985 /* This is a reduction pattern: get the vectype from the type of the
4986 reduction variable, and get the tree-code from orig_stmt. */
4987 orig_code = gimple_assign_rhs_code (orig_stmt);
4988 gcc_assert (vectype_out);
4989 vec_mode = TYPE_MODE (vectype_out);
4990 }
4991 else
4992 {
4993 /* Regular reduction: the same vectype and tree-code that are used for
4994 the vector code inside the loop can also be used for the epilog code. */
4995 orig_code = code;
4996 }
4997
4998 if (nested_cycle)
4999 {
5000 def_bb = gimple_bb (reduc_def_stmt);
5001 def_stmt_loop = def_bb->loop_father;
5002 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
5003 loop_preheader_edge (def_stmt_loop));
5004 if (TREE_CODE (def_arg) == SSA_NAME
5005 && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
5006 && gimple_code (def_arg_stmt) == GIMPLE_PHI
5007 && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
5008 && vinfo_for_stmt (def_arg_stmt)
5009 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
5010 == vect_double_reduction_def)
5011 double_reduc = true;
5012 }
5013
5014 epilog_reduc_code = ERROR_MARK;
5015 if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
5016 {
5017 reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
5018 optab_default);
5019 if (!reduc_optab)
5020 {
5021 if (dump_enabled_p ())
5022 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5023 "no optab for reduction.\n");
5024
5025 epilog_reduc_code = ERROR_MARK;
5026 }
5027
5028 if (reduc_optab
5029 && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
5030 {
5031 if (dump_enabled_p ())
5032 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5033 "reduc op not supported by target.\n");
5034
5035 epilog_reduc_code = ERROR_MARK;
5036 }
5037 }
5038 else
5039 {
5040 if (!nested_cycle || double_reduc)
5041 {
5042 if (dump_enabled_p ())
5043 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5044 "no reduc code for scalar code.\n");
5045
5046 return false;
5047 }
5048 }
5049
5050 if (double_reduc && ncopies > 1)
5051 {
5052 if (dump_enabled_p ())
5053 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5054 "multiple types in double reduction\n");
5055
5056 return false;
5057 }
5058
5059 /* In case of widening multiplication by a constant, we update the type
5060 of the constant to be the type of the other operand. We check that the
5061 constant fits the type in the pattern recognition pass. */
5062 if (code == DOT_PROD_EXPR
5063 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
5064 {
5065 if (TREE_CODE (ops[0]) == INTEGER_CST)
5066 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
5067 else if (TREE_CODE (ops[1]) == INTEGER_CST)
5068 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
5069 else
5070 {
5071 if (dump_enabled_p ())
5072 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5073 "invalid types in dot-prod\n");
5074
5075 return false;
5076 }
5077 }
5078
5079 if (!vec_stmt) /* transformation not required. */
5080 {
5081 if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
5082 return false;
5083 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
5084 return true;
5085 }
5086
5087 /** Transform. **/
5088
5089 if (dump_enabled_p ())
5090 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
5091
5092 /* FORNOW: Multiple types are not supported for condition. */
5093 if (code == COND_EXPR)
5094 gcc_assert (ncopies == 1);
5095
5096 /* Create the destination vector */
5097 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
5098
5099 /* In case the vectorization factor (VF) is bigger than the number
5100 of elements that we can fit in a vectype (nunits), we have to generate
5101 more than one vector stmt - i.e - we need to "unroll" the
5102 vector stmt by a factor VF/nunits. For more details see documentation
5103 in vectorizable_operation. */
5104
5105 /* If the reduction is used in an outer loop we need to generate
5106 VF intermediate results, like so (e.g. for ncopies=2):
5107 r0 = phi (init, r0)
5108 r1 = phi (init, r1)
5109 r0 = x0 + r0;
5110 r1 = x1 + r1;
5111 (i.e. we generate VF results in 2 registers).
5112 In this case we have a separate def-use cycle for each copy, and therefore
5113 for each copy we get the vector def for the reduction variable from the
5114 respective phi node created for this copy.
5115
5116 Otherwise (the reduction is unused in the loop nest), we can combine
5117 together intermediate results, like so (e.g. for ncopies=2):
5118 r = phi (init, r)
5119 r = x0 + r;
5120 r = x1 + r;
5121 (i.e. we generate VF/2 results in a single register).
5122 In this case for each copy we get the vector def for the reduction variable
5123 from the vectorized reduction operation generated in the previous iteration.
5124 */
5125
5126 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
5127 {
5128 single_defuse_cycle = true;
5129 epilog_copies = 1;
5130 }
5131 else
5132 epilog_copies = ncopies;
5133
5134 prev_stmt_info = NULL;
5135 prev_phi_info = NULL;
5136 if (slp_node)
5137 {
5138 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5139 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
5140 == TYPE_VECTOR_SUBPARTS (vectype_in));
5141 }
5142 else
5143 {
5144 vec_num = 1;
5145 vec_oprnds0.create (1);
5146 if (op_type == ternary_op)
5147 vec_oprnds1.create (1);
5148 }
5149
5150 phis.create (vec_num);
5151 vect_defs.create (vec_num);
5152 if (!slp_node)
5153 vect_defs.quick_push (NULL_TREE);
5154
5155 for (j = 0; j < ncopies; j++)
5156 {
5157 if (j == 0 || !single_defuse_cycle)
5158 {
5159 for (i = 0; i < vec_num; i++)
5160 {
5161 /* Create the reduction-phi that defines the reduction
5162 operand. */
5163 new_phi = create_phi_node (vec_dest, loop->header);
5164 set_vinfo_for_stmt (new_phi,
5165 new_stmt_vec_info (new_phi, loop_vinfo,
5166 NULL));
5167 if (j == 0 || slp_node)
5168 phis.quick_push (new_phi);
5169 }
5170 }
5171
5172 if (code == COND_EXPR)
5173 {
5174 gcc_assert (!slp_node);
5175 vectorizable_condition (stmt, gsi, vec_stmt,
5176 PHI_RESULT (phis[0]),
5177 reduc_index, NULL);
5178 /* Multiple types are not supported for condition. */
5179 break;
5180 }
5181
5182 /* Handle uses. */
5183 if (j == 0)
5184 {
5185 op0 = ops[!reduc_index];
5186 if (op_type == ternary_op)
5187 {
5188 if (reduc_index == 0)
5189 op1 = ops[2];
5190 else
5191 op1 = ops[1];
5192 }
5193
5194 if (slp_node)
5195 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5196 slp_node, -1);
5197 else
5198 {
5199 loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
5200 stmt, NULL);
5201 vec_oprnds0.quick_push (loop_vec_def0);
5202 if (op_type == ternary_op)
5203 {
5204 loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
5205 NULL);
5206 vec_oprnds1.quick_push (loop_vec_def1);
5207 }
5208 }
5209 }
5210 else
5211 {
5212 if (!slp_node)
5213 {
5214 enum vect_def_type dt;
5215 gimple dummy_stmt;
5216 tree dummy;
5217
5218 vect_is_simple_use (ops[!reduc_index], stmt, loop_vinfo, NULL,
5219 &dummy_stmt, &dummy, &dt);
5220 loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt,
5221 loop_vec_def0);
5222 vec_oprnds0[0] = loop_vec_def0;
5223 if (op_type == ternary_op)
5224 {
5225 vect_is_simple_use (op1, stmt, loop_vinfo, NULL, &dummy_stmt,
5226 &dummy, &dt);
5227 loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
5228 loop_vec_def1);
5229 vec_oprnds1[0] = loop_vec_def1;
5230 }
5231 }
5232
5233 if (single_defuse_cycle)
5234 reduc_def = gimple_assign_lhs (new_stmt);
5235
5236 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
5237 }
5238
5239 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5240 {
5241 if (slp_node)
5242 reduc_def = PHI_RESULT (phis[i]);
5243 else
5244 {
5245 if (!single_defuse_cycle || j == 0)
5246 reduc_def = PHI_RESULT (new_phi);
5247 }
5248
5249 def1 = ((op_type == ternary_op)
5250 ? vec_oprnds1[i] : NULL);
5251 if (op_type == binary_op)
5252 {
5253 if (reduc_index == 0)
5254 expr = build2 (code, vectype_out, reduc_def, def0);
5255 else
5256 expr = build2 (code, vectype_out, def0, reduc_def);
5257 }
5258 else
5259 {
5260 if (reduc_index == 0)
5261 expr = build3 (code, vectype_out, reduc_def, def0, def1);
5262 else
5263 {
5264 if (reduc_index == 1)
5265 expr = build3 (code, vectype_out, def0, reduc_def, def1);
5266 else
5267 expr = build3 (code, vectype_out, def0, def1, reduc_def);
5268 }
5269 }
5270
5271 new_stmt = gimple_build_assign (vec_dest, expr);
5272 new_temp = make_ssa_name (vec_dest, new_stmt);
5273 gimple_assign_set_lhs (new_stmt, new_temp);
5274 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5275
5276 if (slp_node)
5277 {
5278 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5279 vect_defs.quick_push (new_temp);
5280 }
5281 else
5282 vect_defs[0] = new_temp;
5283 }
5284
5285 if (slp_node)
5286 continue;
5287
5288 if (j == 0)
5289 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5290 else
5291 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5292
5293 prev_stmt_info = vinfo_for_stmt (new_stmt);
5294 prev_phi_info = vinfo_for_stmt (new_phi);
5295 }
5296
5297 /* Finalize the reduction-phi (set its arguments) and create the
5298 epilog reduction code. */
5299 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
5300 {
5301 new_temp = gimple_assign_lhs (*vec_stmt);
5302 vect_defs[0] = new_temp;
5303 }
5304
5305 vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
5306 epilog_reduc_code, phis, reduc_index,
5307 double_reduc, slp_node);
5308
5309 return true;
5310 }
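/* For reference, a hedged source-level sketch (an editor's illustration,
   not literal output of the pass) of what vectorizing a simple sum
   reduction amounts to, assuming a 4-element vector type:

     typedef int __attribute__((mode(V4SI))) v4si;

     int s = 0;                        v4si vs = {0, 0, 0, 0};
     for (i = 0; i < n; i++)    ==>    for (i = 0; i < n/4; i++)
       s += a[i];                        vs += pa[i];
                                       s = vs[0] + vs[1] + vs[2] + vs[3];

   The trailing horizontal add is what vect_create_epilog_for_reduction
   emits after the vectorized loop.  */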
5311
5312 /* Function vect_min_worthwhile_factor.
5313
5314 For a loop where we could vectorize the operation indicated by CODE,
5315 return the minimum vectorization factor that makes it worthwhile
5316 to use generic vectors. */
5317 int
5318 vect_min_worthwhile_factor (enum tree_code code)
5319 {
5320 switch (code)
5321 {
5322 case PLUS_EXPR:
5323 case MINUS_EXPR:
5324 case NEGATE_EXPR:
5325 return 4;
5326
5327 case BIT_AND_EXPR:
5328 case BIT_IOR_EXPR:
5329 case BIT_XOR_EXPR:
5330 case BIT_NOT_EXPR:
5331 return 2;
5332
5333 default:
5334 return INT_MAX;
5335 }
5336 }
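/* A hypothetical caller-side sketch (not code from this file) of how the
   factor above is intended to be used when only generic vectors are
   available:

     if (vf < vect_min_worthwhile_factor (code))
       return false;   // too few lanes for the operation to pay off

   i.e. PLUS/MINUS/NEGATE want at least 4 lanes, the bitwise operations at
   least 2, and every other code is effectively rejected via INT_MAX.  */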
5337
5338
5339 /* Function vectorizable_induction
5340
5341 Check if PHI performs an induction computation that can be vectorized.
5342 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
5343 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
5344 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5345
5346 bool
5347 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5348 gimple *vec_stmt)
5349 {
5350 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
5351 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5352 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5353 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5354 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5355 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5356 tree vec_def;
5357
5358 gcc_assert (ncopies >= 1);
5359 /* FORNOW. These restrictions should be relaxed. */
5360 if (nested_in_vect_loop_p (loop, phi))
5361 {
5362 imm_use_iterator imm_iter;
5363 use_operand_p use_p;
5364 gimple exit_phi;
5365 edge latch_e;
5366 tree loop_arg;
5367
5368 if (ncopies > 1)
5369 {
5370 if (dump_enabled_p ())
5371 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5372 "multiple types in nested loop.\n");
5373 return false;
5374 }
5375
5376 exit_phi = NULL;
5377 latch_e = loop_latch_edge (loop->inner);
5378 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
5379 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
5380 {
5381 if (!flow_bb_inside_loop_p (loop->inner,
5382 gimple_bb (USE_STMT (use_p))))
5383 {
5384 exit_phi = USE_STMT (use_p);
5385 break;
5386 }
5387 }
5388 if (exit_phi)
5389 {
5390 stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
5391 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5392 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
5393 {
5394 if (dump_enabled_p ())
5395 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5396 "inner-loop induction only used outside "
5397 "of the outer vectorized loop.\n");
5398 return false;
5399 }
5400 }
5401 }
5402
5403 if (!STMT_VINFO_RELEVANT_P (stmt_info))
5404 return false;
5405
5406 /* FORNOW: SLP not supported. */
5407 if (STMT_SLP_TYPE (stmt_info))
5408 return false;
5409
5410 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
5411
5412 if (gimple_code (phi) != GIMPLE_PHI)
5413 return false;
5414
5415 if (!vec_stmt) /* transformation not required. */
5416 {
5417 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
5418 if (dump_enabled_p ())
5419 dump_printf_loc (MSG_NOTE, vect_location,
5420 "=== vectorizable_induction ===\n");
5421 vect_model_induction_cost (stmt_info, ncopies);
5422 return true;
5423 }
5424
5425 /** Transform. **/
5426
5427 if (dump_enabled_p ())
5428 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
5429
5430 vec_def = get_initial_def_for_induction (phi);
5431 *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
5432 return true;
5433 }
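/* Illustrative sketch (hedged; the real construction happens in
   get_initial_def_for_induction): for the scalar induction

     i = PHI <0 (preheader), i + 1 (latch)>

   and a 4-element vector type, the vectorized PHI is seeded with the
   vector {0, 1, 2, 3} and stepped by the invariant vector {4, 4, 4, 4}:

     vi = PHI <{0,1,2,3} (preheader), vi + {4,4,4,4} (latch)>  */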
5434
5435 /* Function vectorizable_live_operation.
5436
5437 STMT computes a value that is used outside the loop. Check if
5438 it can be supported. */
5439
5440 bool
5441 vectorizable_live_operation (gimple stmt,
5442 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5443 gimple *vec_stmt)
5444 {
5445 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5446 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5447 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5448 int i;
5449 int op_type;
5450 tree op;
5451 tree def;
5452 gimple def_stmt;
5453 enum vect_def_type dt;
5454 enum tree_code code;
5455 enum gimple_rhs_class rhs_class;
5456
5457 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
5458
5459 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
5460 return false;
5461
5462 if (!is_gimple_assign (stmt))
5463 {
5464 if (gimple_call_internal_p (stmt)
5465 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
5466 && gimple_call_lhs (stmt)
5467 && loop->simduid
5468 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
5469 && loop->simduid
5470 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
5471 {
5472 edge e = single_exit (loop);
5473 basic_block merge_bb = e->dest;
5474 imm_use_iterator imm_iter;
5475 use_operand_p use_p;
5476 tree lhs = gimple_call_lhs (stmt);
5477
5478 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
5479 {
5480 gimple use_stmt = USE_STMT (use_p);
5481 if (gimple_code (use_stmt) == GIMPLE_PHI
5482 || gimple_bb (use_stmt) == merge_bb)
5483 {
5484 if (vec_stmt)
5485 {
5486 tree vfm1
5487 = build_int_cst (unsigned_type_node,
5488 loop_vinfo->vectorization_factor - 1);
5489 SET_PHI_ARG_DEF (use_stmt, e->dest_idx, vfm1);
5490 }
5491 return true;
5492 }
5493 }
5494 }
5495
5496 return false;
5497 }
5498
5499 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5500 return false;
5501
5502 /* FORNOW. CHECKME. */
5503 if (nested_in_vect_loop_p (loop, stmt))
5504 return false;
5505
5506 code = gimple_assign_rhs_code (stmt);
5507 op_type = TREE_CODE_LENGTH (code);
5508 rhs_class = get_gimple_rhs_class (code);
5509 gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
5510 gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
5511
5512 /* FORNOW: support only if all uses are invariant. This means
5513 that the scalar operations can remain in place, unvectorized.
5514 The original last scalar value that they compute will be used. */
5515
5516 for (i = 0; i < op_type; i++)
5517 {
5518 if (rhs_class == GIMPLE_SINGLE_RHS)
5519 op = TREE_OPERAND (gimple_op (stmt, 1), i);
5520 else
5521 op = gimple_op (stmt, i + 1);
5522 if (op
5523 && !vect_is_simple_use (op, stmt, loop_vinfo, NULL, &def_stmt, &def,
5524 &dt))
5525 {
5526 if (dump_enabled_p ())
5527 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5528 "use not simple.\n");
5529 return false;
5530 }
5531
5532 if (dt != vect_external_def && dt != vect_constant_def)
5533 return false;
5534 }
5535
5536 /* No transformation is required for the cases we currently support. */
5537 return true;
5538 }
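/* A hypothetical example of the only case currently accepted above:

     for (i = 0; i < n; i++)
       { t = base + offset;  a[i] = t; }
     ... = t;   // use after the loop

   "t" is live after the loop, but both of its operands are loop invariant,
   so the scalar statement can stay in place unvectorized and the use after
   the loop keeps seeing its last scalar value.  */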
5539
5540 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
5541
5542 static void
5543 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
5544 {
5545 ssa_op_iter op_iter;
5546 imm_use_iterator imm_iter;
5547 def_operand_p def_p;
5548 gimple ustmt;
5549
5550 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
5551 {
5552 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
5553 {
5554 basic_block bb;
5555
5556 if (!is_gimple_debug (ustmt))
5557 continue;
5558
5559 bb = gimple_bb (ustmt);
5560
5561 if (!flow_bb_inside_loop_p (loop, bb))
5562 {
5563 if (gimple_debug_bind_p (ustmt))
5564 {
5565 if (dump_enabled_p ())
5566 dump_printf_loc (MSG_NOTE, vect_location,
5567 "killing debug use\n");
5568
5569 gimple_debug_bind_reset_value (ustmt);
5570 update_stmt (ustmt);
5571 }
5572 else
5573 gcc_unreachable ();
5574 }
5575 }
5576 }
5577 }
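/* For illustration (a hedged GIMPLE-like sketch): if i_7 is defined inside
   the loop and a debug bind outside the loop still refers to it,

     # DEBUG i => i_7

   gimple_debug_bind_reset_value turns it into the equivalent of

     # DEBUG i => NULL

   so that no debug statement keeps referencing SSA names of the scalar
   loop once the loop is rewritten.  */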
5578
5579
5580 /* This function builds ni_name = number of iterations. Statements
5581 are emitted on the loop preheader edge. */
5582
5583 static tree
5584 vect_build_loop_niters (loop_vec_info loop_vinfo)
5585 {
5586 tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));
5587 if (TREE_CODE (ni) == INTEGER_CST)
5588 return ni;
5589 else
5590 {
5591 tree ni_name, var;
5592 gimple_seq stmts = NULL;
5593 edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
5594
5595 var = create_tmp_var (TREE_TYPE (ni), "niters");
5596 ni_name = force_gimple_operand (ni, &stmts, false, var);
5597 if (stmts)
5598 gsi_insert_seq_on_edge_immediate (pe, stmts);
5599
5600 return ni_name;
5601 }
5602 }
5603
5604
5605 /* This function generates the following statements:
5606
5607 ni_name = number of iterations the loop executes
5608 ratio = ni_name / vf
5609 ratio_mult_vf_name = ratio * vf
5610
5611 and places them on the loop preheader edge. */
5612
5613 static void
5614 vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
5615 tree ni_name,
5616 tree *ratio_mult_vf_name_ptr,
5617 tree *ratio_name_ptr)
5618 {
5619 tree ni_minus_gap_name;
5620 tree var;
5621 tree ratio_name;
5622 tree ratio_mult_vf_name;
5623 tree ni = LOOP_VINFO_NITERS (loop_vinfo);
5624 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5625 edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
5626 tree log_vf;
5627
5628 log_vf = build_int_cst (TREE_TYPE (ni), exact_log2 (vf));
5629
5630 /* If an epilogue loop is required because of data accesses with gaps, we
5631 subtract one iteration from the total number of iterations here for
5632 correct calculation of RATIO. */
5633 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
5634 {
5635 ni_minus_gap_name = fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
5636 ni_name,
5637 build_one_cst (TREE_TYPE (ni_name)));
5638 if (!is_gimple_val (ni_minus_gap_name))
5639 {
5640 var = create_tmp_var (TREE_TYPE (ni), "ni_gap");
5641 gimple stmts = NULL;
5642 ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts,
5643 true, var);
5644 gsi_insert_seq_on_edge_immediate (pe, stmts);
5645 }
5646 }
5647 else
5648 ni_minus_gap_name = ni_name;
5649
5650 /* Create: ratio = ni >> log2(vf) */
5651
5652 ratio_name = fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_minus_gap_name),
5653 ni_minus_gap_name, log_vf);
5654 if (!is_gimple_val (ratio_name))
5655 {
5656 var = create_tmp_var (TREE_TYPE (ni), "bnd");
5657 gimple stmts = NULL;
5658 ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
5659 gsi_insert_seq_on_edge_immediate (pe, stmts);
5660 }
5661 *ratio_name_ptr = ratio_name;
5662
5663 /* Create: ratio_mult_vf = ratio << log2 (vf). */
5664
5665 if (ratio_mult_vf_name_ptr)
5666 {
5667 ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
5668 ratio_name, log_vf);
5669 if (!is_gimple_val (ratio_mult_vf_name))
5670 {
5671 var = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf");
5672 gimple stmts = NULL;
5673 ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmts,
5674 true, var);
5675 gsi_insert_seq_on_edge_immediate (pe, stmts);
5676 }
5677 *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
5678 }
5679
5680 return;
5681 }
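/* A hedged sketch of the preheader statements the function above produces
   for vf = 4 (names are illustrative, not literal SSA names):

     niters_1 = n;               // ni_name from vect_build_loop_niters
     bnd_2    = niters_1 >> 2;   // ratio = ni >> log2 (vf)
     rmvf_3   = bnd_2 << 2;      // ratio_mult_vf = ratio << log2 (vf)

   with an additional subtraction of 1 from niters beforehand when
   LOOP_VINFO_PEELING_FOR_GAPS is set.  */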
5682
5683
5684 /* Function vect_transform_loop.
5685
5686 The analysis phase has determined that the loop is vectorizable.
5687 Vectorize the loop - create vectorized stmts to replace the scalar
5688 stmts in the loop, and update the loop exit condition. */
5689
5690 void
5691 vect_transform_loop (loop_vec_info loop_vinfo)
5692 {
5693 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5694 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
5695 int nbbs = loop->num_nodes;
5696 gimple_stmt_iterator si;
5697 int i;
5698 tree ratio = NULL;
5699 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5700 bool grouped_store;
5701 bool slp_scheduled = false;
5702 unsigned int nunits;
5703 gimple stmt, pattern_stmt;
5704 gimple_seq pattern_def_seq = NULL;
5705 gimple_stmt_iterator pattern_def_si = gsi_none ();
5706 bool transform_pattern_stmt = false;
5707 bool check_profitability = false;
5708 int th;
5709 /* Record the number of iterations before we start tampering with the profile. */
5710 gcov_type expected_iterations = expected_loop_iterations_unbounded (loop);
5711
5712 if (dump_enabled_p ())
5713 dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");
5714
5715 /* If the profile is imprecise, we have a chance to fix it up. */
5716 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
5717 expected_iterations = LOOP_VINFO_INT_NITERS (loop_vinfo);
5718
5719 /* Use the more conservative vectorization threshold. If the number
5720 of iterations is constant, assume the cost check has been performed
5721 by our caller. If the threshold makes all loops profitable that
5722 run at least the vectorization factor number of times, checking
5723 is pointless, too. */
5724 th = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
5725 * LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 1);
5726 th = MAX (th, LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo));
5727 if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1
5728 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
5729 {
5730 if (dump_enabled_p ())
5731 dump_printf_loc (MSG_NOTE, vect_location,
5732 "Profitability threshold is %d loop iterations.\n",
5733 th);
5734 check_profitability = true;
5735 }
5736
5737 /* Version the loop first, if required, so the profitability check
5738 comes first. */
5739
5740 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
5741 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
5742 {
5743 vect_loop_versioning (loop_vinfo, th, check_profitability);
5744 check_profitability = false;
5745 }
5746
5747 tree ni_name = vect_build_loop_niters (loop_vinfo);
5748 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = ni_name;
5749
5750 /* Peel the loop if there are data refs with unknown alignment.
5751 Only one data ref with unknown alignment is allowed. */
5752
5753 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
5754 {
5755 vect_do_peeling_for_alignment (loop_vinfo, ni_name,
5756 th, check_profitability);
5757 check_profitability = false;
5758 /* The above adjusts LOOP_VINFO_NITERS, so ni_name has to
5759 be recomputed. */
5760 ni_name = NULL_TREE;
5761 }
5762
5763 /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
5764 compile-time constant), or it is a constant that is not a multiple of the
5765 vectorization factor, then an epilog loop needs to be created.
5766 We therefore duplicate the loop: the original loop will be vectorized,
5767 and will compute the first (n/VF) iterations. The second copy of the loop
5768 will remain scalar and will compute the remaining (n%VF) iterations.
5769 (VF is the vectorization factor). */
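  /* For example (a hedged sketch by the editor, not literal output of the
     pass), with VF = 4:

       for (i = 0; i < n; i++)          for (i = 0; i < (n/4)*4; i += 4)
         x[i] = y[i] + 1;        ==>      px[i/4] = py[i/4] + {1,1,1,1};
                                        for ( ; i < n; i++)      <- epilog
                                          x[i] = y[i] + 1;

     The first loop is the one vectorized below; the second, scalar copy is
     the epilog produced by vect_do_peeling_for_loop_bound.  */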
5770
5771 if (LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)
5772 || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
5773 {
5774 tree ratio_mult_vf;
5775 if (!ni_name)
5776 ni_name = vect_build_loop_niters (loop_vinfo);
5777 vect_generate_tmps_on_preheader (loop_vinfo, ni_name, &ratio_mult_vf,
5778 &ratio);
5779 vect_do_peeling_for_loop_bound (loop_vinfo, ni_name, ratio_mult_vf,
5780 th, check_profitability);
5781 }
5782 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
5783 ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
5784 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
5785 else
5786 {
5787 if (!ni_name)
5788 ni_name = vect_build_loop_niters (loop_vinfo);
5789 vect_generate_tmps_on_preheader (loop_vinfo, ni_name, NULL, &ratio);
5790 }
5791
5792 /* 1) Make sure the loop header has exactly two entries
5793 2) Make sure we have a preheader basic block. */
5794
5795 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
5796
5797 split_edge (loop_preheader_edge (loop));
5798
5799 /* FORNOW: the vectorizer supports only loops whose body consists
5800 of one basic block (header + empty latch). When the vectorizer
5801 supports more involved loop forms, the order in which the BBs are
5802 traversed needs to be reconsidered. */
5803
5804 for (i = 0; i < nbbs; i++)
5805 {
5806 basic_block bb = bbs[i];
5807 stmt_vec_info stmt_info;
5808 gimple phi;
5809
5810 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5811 {
5812 phi = gsi_stmt (si);
5813 if (dump_enabled_p ())
5814 {
5815 dump_printf_loc (MSG_NOTE, vect_location,
5816 "------>vectorizing phi: ");
5817 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
5818 dump_printf (MSG_NOTE, "\n");
5819 }
5820 stmt_info = vinfo_for_stmt (phi);
5821 if (!stmt_info)
5822 continue;
5823
5824 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5825 vect_loop_kill_debug_uses (loop, phi);
5826
5827 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5828 && !STMT_VINFO_LIVE_P (stmt_info))
5829 continue;
5830
5831 if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
5832 != (unsigned HOST_WIDE_INT) vectorization_factor)
5833 && dump_enabled_p ())
5834 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
5835
5836 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
5837 {
5838 if (dump_enabled_p ())
5839 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
5840 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
5841 }
5842 }
5843
5844 pattern_stmt = NULL;
5845 for (si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;)
5846 {
5847 bool is_store;
5848
5849 if (transform_pattern_stmt)
5850 stmt = pattern_stmt;
5851 else
5852 {
5853 stmt = gsi_stmt (si);
5854 /* During vectorization remove existing clobber stmts. */
5855 if (gimple_clobber_p (stmt))
5856 {
5857 unlink_stmt_vdef (stmt);
5858 gsi_remove (&si, true);
5859 release_defs (stmt);
5860 continue;
5861 }
5862 }
5863
5864 if (dump_enabled_p ())
5865 {
5866 dump_printf_loc (MSG_NOTE, vect_location,
5867 "------>vectorizing statement: ");
5868 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
5869 dump_printf (MSG_NOTE, "\n");
5870 }
5871
5872 stmt_info = vinfo_for_stmt (stmt);
5873
5874 /* vector stmts created in the outer-loop during vectorization of
5875 stmts in an inner-loop may not have a stmt_info, and do not
5876 need to be vectorized. */
5877 if (!stmt_info)
5878 {
5879 gsi_next (&si);
5880 continue;
5881 }
5882
5883 if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5884 vect_loop_kill_debug_uses (loop, stmt);
5885
5886 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5887 && !STMT_VINFO_LIVE_P (stmt_info))
5888 {
5889 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5890 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5891 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5892 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5893 {
5894 stmt = pattern_stmt;
5895 stmt_info = vinfo_for_stmt (stmt);
5896 }
5897 else
5898 {
5899 gsi_next (&si);
5900 continue;
5901 }
5902 }
5903 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5904 && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5905 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5906 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5907 transform_pattern_stmt = true;
5908
5909 /* If pattern statement has def stmts, vectorize them too. */
5910 if (is_pattern_stmt_p (stmt_info))
5911 {
5912 if (pattern_def_seq == NULL)
5913 {
5914 pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
5915 pattern_def_si = gsi_start (pattern_def_seq);
5916 }
5917 else if (!gsi_end_p (pattern_def_si))
5918 gsi_next (&pattern_def_si);
5919 if (pattern_def_seq != NULL)
5920 {
5921 gimple pattern_def_stmt = NULL;
5922 stmt_vec_info pattern_def_stmt_info = NULL;
5923
5924 while (!gsi_end_p (pattern_def_si))
5925 {
5926 pattern_def_stmt = gsi_stmt (pattern_def_si);
5927 pattern_def_stmt_info
5928 = vinfo_for_stmt (pattern_def_stmt);
5929 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
5930 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
5931 break;
5932 gsi_next (&pattern_def_si);
5933 }
5934
5935 if (!gsi_end_p (pattern_def_si))
5936 {
5937 if (dump_enabled_p ())
5938 {
5939 dump_printf_loc (MSG_NOTE, vect_location,
5940 "==> vectorizing pattern def "
5941 "stmt: ");
5942 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
5943 pattern_def_stmt, 0);
5944 dump_printf (MSG_NOTE, "\n");
5945 }
5946
5947 stmt = pattern_def_stmt;
5948 stmt_info = pattern_def_stmt_info;
5949 }
5950 else
5951 {
5952 pattern_def_si = gsi_none ();
5953 transform_pattern_stmt = false;
5954 }
5955 }
5956 else
5957 transform_pattern_stmt = false;
5958 }
5959
5960 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
5961 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (
5962 STMT_VINFO_VECTYPE (stmt_info));
5963 if (!STMT_SLP_TYPE (stmt_info)
5964 && nunits != (unsigned int) vectorization_factor
5965 && dump_enabled_p ())
5966 /* For SLP, VF is set according to the unrolling factor, not the
5967 vector size; hence for SLP this print is not valid. */
5968 dump_printf_loc (MSG_NOTE, vect_location,
5969 "multiple-types.\n");
5970
5971 /* SLP. Schedule all the SLP instances when the first SLP stmt is
5972 reached. */
5973 if (STMT_SLP_TYPE (stmt_info))
5974 {
5975 if (!slp_scheduled)
5976 {
5977 slp_scheduled = true;
5978
5979 if (dump_enabled_p ())
5980 dump_printf_loc (MSG_NOTE, vect_location,
5981 "=== scheduling SLP instances ===\n");
5982
5983 vect_schedule_slp (loop_vinfo, NULL);
5984 }
5985
5986 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
5987 if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
5988 {
5989 if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
5990 {
5991 pattern_def_seq = NULL;
5992 gsi_next (&si);
5993 }
5994 continue;
5995 }
5996 }
5997
5998 /* -------- vectorize statement ------------ */
5999 if (dump_enabled_p ())
6000 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
6001
6002 grouped_store = false;
6003 is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
6004 if (is_store)
6005 {
6006 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6007 {
6008 /* Interleaving. If IS_STORE is TRUE, the vectorization of the
6009 interleaving chain was completed - free all the stores in
6010 the chain. */
6011 gsi_next (&si);
6012 vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
6013 continue;
6014 }
6015 else
6016 {
6017 /* Free the attached stmt_vec_info and remove the stmt. */
6018 gimple store = gsi_stmt (si);
6019 free_stmt_vec_info (store);
6020 unlink_stmt_vdef (store);
6021 gsi_remove (&si, true);
6022 release_defs (store);
6023 continue;
6024 }
6025 }
6026
6027 if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
6028 {
6029 pattern_def_seq = NULL;
6030 gsi_next (&si);
6031 }
6032 } /* stmts in BB */
6033 } /* BBs in loop */
6034
6035 slpeel_make_loop_iterate_ntimes (loop, ratio);
6036
6037 /* Reduce loop iterations by the vectorization factor. */
6038 scale_loop_profile (loop, GCOV_COMPUTE_SCALE (1, vectorization_factor),
6039 expected_iterations / vectorization_factor);
6040 loop->nb_iterations_upper_bound
6041 = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (vectorization_factor),
6042 FLOOR_DIV_EXPR);
6043 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
6044 && loop->nb_iterations_upper_bound != double_int_zero)
6045 loop->nb_iterations_upper_bound = loop->nb_iterations_upper_bound - double_int_one;
6046 if (loop->any_estimate)
6047 {
6048 loop->nb_iterations_estimate
6049 = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (vectorization_factor),
6050 FLOOR_DIV_EXPR);
6051 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
6052 && loop->nb_iterations_estimate != double_int_zero)
6053 loop->nb_iterations_estimate = loop->nb_iterations_estimate - double_int_one;
6054 }
6055
6056 if (dump_enabled_p ())
6057 {
6058 dump_printf_loc (MSG_NOTE, vect_location,
6059 "LOOP VECTORIZED\n");
6060 if (loop->inner)
6061 dump_printf_loc (MSG_NOTE, vect_location,
6062 "OUTER LOOP VECTORIZED\n");
6063 dump_printf (MSG_NOTE, "\n");
6064 }
6065 }