Allow inner-loop reductions with variable-length vectors
[gcc.git] / gcc / tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-ssa-loop-ivopts.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "tree-ssa-loop.h"
45 #include "cfgloop.h"
46 #include "params.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "gimple-fold.h"
50 #include "cgraph.h"
51 #include "tree-cfg.h"
52 #include "tree-if-conv.h"
53 #include "internal-fn.h"
54 #include "tree-vector-builder.h"
55 #include "vec-perm-indices.h"
56 #include "tree-eh.h"
57
58 /* Loop Vectorization Pass.
59
60 This pass tries to vectorize loops.
61
62 For example, the vectorizer transforms the following simple loop:
63
64 short a[N]; short b[N]; short c[N]; int i;
65
66 for (i=0; i<N; i++){
67 a[i] = b[i] + c[i];
68 }
69
 70 as if it were manually vectorized by rewriting the source code into:
71
72 typedef int __attribute__((mode(V8HI))) v8hi;
73 short a[N]; short b[N]; short c[N]; int i;
74 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
75 v8hi va, vb, vc;
76
77 for (i=0; i<N/8; i++){
78 vb = pb[i];
79 vc = pc[i];
80 va = vb + vc;
81 pa[i] = va;
82 }
83
84 The main entry to this pass is vectorize_loops(), in which
85 the vectorizer applies a set of analyses on a given set of loops,
86 followed by the actual vectorization transformation for the loops that
 87 have successfully passed the analysis phase.
88 Throughout this pass we make a distinction between two types of
89 data: scalars (which are represented by SSA_NAMES), and memory references
90 ("data-refs"). These two types of data require different handling both
91 during analysis and transformation. The types of data-refs that the
 92 vectorizer currently supports are ARRAY_REFS whose base is an array DECL
93 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
94 accesses are required to have a simple (consecutive) access pattern.
95
96 Analysis phase:
97 ===============
98 The driver for the analysis phase is vect_analyze_loop().
99 It applies a set of analyses, some of which rely on the scalar evolution
100 analyzer (scev) developed by Sebastian Pop.
101
102 During the analysis phase the vectorizer records some information
103 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
104 loop, as well as general information about the loop as a whole, which is
105 recorded in a "loop_vec_info" struct attached to each loop.
106
107 Transformation phase:
108 =====================
109 The loop transformation phase scans all the stmts in the loop, and
110 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
111 the loop that needs to be vectorized. It inserts the vector code sequence
112 just before the scalar stmt S, and records a pointer to the vector code
113 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
114 attached to S). This pointer will be used for the vectorization of following
115 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
116 otherwise, we rely on dead code elimination for removing it.
117
118 For example, say stmt S1 was vectorized into stmt VS1:
119
120 VS1: vb = px[i];
121 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
122 S2: a = b;
123
124 To vectorize stmt S2, the vectorizer first finds the stmt that defines
125 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
126 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
127 resulting sequence would be:
128
129 VS1: vb = px[i];
130 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
131 VS2: va = vb;
132 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
133
 134 Operands that are not SSA_NAMEs are data-refs that appear in
135 load/store operations (like 'x[i]' in S1), and are handled differently.
136
137 Target modeling:
138 =================
 139 Currently the only target-specific information that is used is the
 140 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
 141 Targets that support different vector sizes will, for now, need to
 142 specify a single value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
 143 flexibility will be added in the future.
144
 145 Since we only vectorize operations whose vector form can be
 146 expressed using existing tree codes, to verify that an operation is
 147 supported, the vectorizer checks the relevant optab at the relevant
 148 machine_mode (e.g., optab_handler (add_optab, V8HImode)). If
149 the value found is CODE_FOR_nothing, then there's no target support, and
150 we can't vectorize the stmt.
151
152 For additional information on this project see:
153 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
154 */
155
156 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
157
158 /* Subroutine of vect_determine_vf_for_stmt that handles only one
159 statement. VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
160 may already be set for general statements (not just data refs). */
161
162 static bool
163 vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
164 bool vectype_maybe_set_p,
165 poly_uint64 *vf,
166 vec<stmt_vec_info > *mask_producers)
167 {
168 gimple *stmt = stmt_info->stmt;
169
170 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
171 && !STMT_VINFO_LIVE_P (stmt_info))
172 || gimple_clobber_p (stmt))
173 {
174 if (dump_enabled_p ())
175 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
176 return true;
177 }
178
179 tree stmt_vectype, nunits_vectype;
180 if (!vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
181 &nunits_vectype))
182 return false;
183
184 if (stmt_vectype)
185 {
186 if (STMT_VINFO_VECTYPE (stmt_info))
 187 /* The only case in which a vectype has already been set is for stmts
188 that contain a data ref, or for "pattern-stmts" (stmts generated
189 by the vectorizer to represent/replace a certain idiom). */
190 gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
191 || vectype_maybe_set_p)
192 && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
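 /* A statement that produces a boolean (mask) result cannot be given a
    vector type yet: the appropriate mask type typically depends on the
    vector types of the values being compared, which may not all be known
    at this point.  Such statements are therefore collected in
    MASK_PRODUCERS and given their vector type once the rest of the loop
    has been analyzed (see vect_determine_vectorization_factor).  */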
193 else if (stmt_vectype == boolean_type_node)
194 mask_producers->safe_push (stmt_info);
195 else
196 STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
197 }
198
199 if (nunits_vectype)
200 vect_update_max_nunits (vf, nunits_vectype);
201
202 return true;
203 }
204
205 /* Subroutine of vect_determine_vectorization_factor. Set the vector
206 types of STMT_INFO and all attached pattern statements and update
207 the vectorization factor VF accordingly. If some of the statements
208 produce a mask result whose vector type can only be calculated later,
209 add them to MASK_PRODUCERS. Return true on success or false if
210 something prevented vectorization. */
211
212 static bool
213 vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
214 vec<stmt_vec_info > *mask_producers)
215 {
216 vec_info *vinfo = stmt_info->vinfo;
217 if (dump_enabled_p ())
218 {
219 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
220 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
221 }
222 if (!vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers))
223 return false;
224
225 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
226 && STMT_VINFO_RELATED_STMT (stmt_info))
227 {
228 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
229 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
230
231 /* If a pattern statement has def stmts, analyze them too. */
232 for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
233 !gsi_end_p (si); gsi_next (&si))
234 {
235 stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
236 if (dump_enabled_p ())
237 {
238 dump_printf_loc (MSG_NOTE, vect_location,
239 "==> examining pattern def stmt: ");
240 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
241 def_stmt_info->stmt, 0);
242 }
243 if (!vect_determine_vf_for_stmt_1 (def_stmt_info, true,
244 vf, mask_producers))
245 return false;
246 }
247
248 if (dump_enabled_p ())
249 {
250 dump_printf_loc (MSG_NOTE, vect_location,
251 "==> examining pattern statement: ");
252 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
253 }
254 if (!vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers))
255 return false;
256 }
257
258 return true;
259 }
260
261 /* Function vect_determine_vectorization_factor
262
263 Determine the vectorization factor (VF). VF is the number of data elements
264 that are operated upon in parallel in a single iteration of the vectorized
 265 loop. For example, when vectorizing a loop that operates on 4-byte elements,
 266 on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
267 elements can fit in a single vector register.
268
269 We currently support vectorization of loops in which all types operated upon
270 are of the same size. Therefore this function currently sets VF according to
271 the size of the types operated upon, and fails if there are multiple sizes
272 in the loop.
273
274 VF is also the factor by which the loop iterations are strip-mined, e.g.:
275 original loop:
276 for (i=0; i<N; i++){
277 a[i] = b[i] + c[i];
278 }
279
280 vectorized loop:
281 for (i=0; i<N; i+=VF){
282 a[i:VF] = b[i:VF] + c[i:VF];
283 }
284 */
285
286 static bool
287 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
288 {
289 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
290 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
291 unsigned nbbs = loop->num_nodes;
292 poly_uint64 vectorization_factor = 1;
293 tree scalar_type = NULL_TREE;
294 gphi *phi;
295 tree vectype;
296 stmt_vec_info stmt_info;
297 unsigned i;
298 auto_vec<stmt_vec_info> mask_producers;
299
300 DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
301
302 for (i = 0; i < nbbs; i++)
303 {
304 basic_block bb = bbs[i];
305
306 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
307 gsi_next (&si))
308 {
309 phi = si.phi ();
310 stmt_info = loop_vinfo->lookup_stmt (phi);
311 if (dump_enabled_p ())
312 {
313 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
314 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
315 }
316
317 gcc_assert (stmt_info);
318
319 if (STMT_VINFO_RELEVANT_P (stmt_info)
320 || STMT_VINFO_LIVE_P (stmt_info))
321 {
322 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
323 scalar_type = TREE_TYPE (PHI_RESULT (phi));
324
325 if (dump_enabled_p ())
326 {
327 dump_printf_loc (MSG_NOTE, vect_location,
328 "get vectype for scalar type: ");
329 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
330 dump_printf (MSG_NOTE, "\n");
331 }
332
333 vectype = get_vectype_for_scalar_type (scalar_type);
334 if (!vectype)
335 {
336 if (dump_enabled_p ())
337 {
338 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
339 "not vectorized: unsupported "
340 "data-type ");
341 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
342 scalar_type);
343 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
344 }
345 return false;
346 }
347 STMT_VINFO_VECTYPE (stmt_info) = vectype;
348
349 if (dump_enabled_p ())
350 {
351 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
352 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
353 dump_printf (MSG_NOTE, "\n");
354 }
355
356 if (dump_enabled_p ())
357 {
358 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
359 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
360 dump_printf (MSG_NOTE, "\n");
361 }
362
363 vect_update_max_nunits (&vectorization_factor, vectype);
364 }
365 }
366
367 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
368 gsi_next (&si))
369 {
370 stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
371 if (!vect_determine_vf_for_stmt (stmt_info, &vectorization_factor,
372 &mask_producers))
373 return false;
374 }
375 }
376
377 /* TODO: Analyze cost. Decide if worth while to vectorize. */
378 if (dump_enabled_p ())
379 {
380 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
381 dump_dec (MSG_NOTE, vectorization_factor);
382 dump_printf (MSG_NOTE, "\n");
383 }
384
385 if (known_le (vectorization_factor, 1U))
386 {
387 if (dump_enabled_p ())
388 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
389 "not vectorized: unsupported data-type\n");
390 return false;
391 }
392 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
393
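 /* Resolve the vector types that were deferred above: boolean (mask)
    producers get their type from vect_get_mask_type_for_stmt, which can
    only be computed now that the other vector types in the loop are
    known.  */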
394 for (i = 0; i < mask_producers.length (); i++)
395 {
396 stmt_info = mask_producers[i];
397 tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
398 if (!mask_type)
399 return false;
400 STMT_VINFO_VECTYPE (stmt_info) = mask_type;
401 }
402
403 return true;
404 }
405
406
407 /* Function vect_is_simple_iv_evolution.
408
 409 FORNOW: A simple evolution of an induction variable in the loop is
410 considered a polynomial evolution. */
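 /* For instance (a rough illustration), for

      for (i = 0; i < n; i++)
        p = p + 4;

    the evolution of p is the chrec {p_0, +, 4}_1, giving INIT p_0 and
    STEP 4.  An evolution whose step is itself a chrec, such as
    {0, +, {0, +, 1}_1}_1, is a polynomial of degree >= 2 and is
    rejected.  */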
411
412 static bool
413 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
414 tree * step)
415 {
416 tree init_expr;
417 tree step_expr;
418 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
419 basic_block bb;
420
421 /* When there is no evolution in this loop, the evolution function
422 is not "simple". */
423 if (evolution_part == NULL_TREE)
424 return false;
425
 426 /* When the evolution is a polynomial of degree >= 2,
427 the evolution function is not "simple". */
428 if (tree_is_chrec (evolution_part))
429 return false;
430
431 step_expr = evolution_part;
432 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
433
434 if (dump_enabled_p ())
435 {
436 dump_printf_loc (MSG_NOTE, vect_location, "step: ");
437 dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
438 dump_printf (MSG_NOTE, ", init: ");
439 dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
440 dump_printf (MSG_NOTE, "\n");
441 }
442
443 *init = init_expr;
444 *step = step_expr;
445
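 /* The step must be something we can reason about at compile time: an
    integer constant; an SSA name of integral type (or of floating-point
    type when -fassociative-math is in effect) defined outside the loop;
    or, with -fassociative-math, a floating-point constant.  Anything
    else is reported as an unknown step.  */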
446 if (TREE_CODE (step_expr) != INTEGER_CST
447 && (TREE_CODE (step_expr) != SSA_NAME
448 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
449 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
450 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
451 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
452 || !flag_associative_math)))
453 && (TREE_CODE (step_expr) != REAL_CST
454 || !flag_associative_math))
455 {
456 if (dump_enabled_p ())
457 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
458 "step unknown.\n");
459 return false;
460 }
461
462 return true;
463 }
464
465 /* Function vect_analyze_scalar_cycles_1.
466
467 Examine the cross iteration def-use cycles of scalar variables
468 in LOOP. LOOP_VINFO represents the loop that is now being
469 considered for vectorization (can be LOOP, or an outer-loop
470 enclosing LOOP). */
471
472 static void
473 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
474 {
475 basic_block bb = loop->header;
476 tree init, step;
477 auto_vec<stmt_vec_info, 64> worklist;
478 gphi_iterator gsi;
479 bool double_reduc;
480
481 DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
482
483 /* First - identify all inductions. Reduction detection assumes that all the
 484 inductions have been identified; therefore, this order must not be
485 changed. */
486 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
487 {
488 gphi *phi = gsi.phi ();
489 tree access_fn = NULL;
490 tree def = PHI_RESULT (phi);
491 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);
492
493 if (dump_enabled_p ())
494 {
495 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
496 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
497 }
498
499 /* Skip virtual phi's. The data dependences that are associated with
500 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
501 if (virtual_operand_p (def))
502 continue;
503
504 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
505
506 /* Analyze the evolution function. */
507 access_fn = analyze_scalar_evolution (loop, def);
508 if (access_fn)
509 {
510 STRIP_NOPS (access_fn);
511 if (dump_enabled_p ())
512 {
513 dump_printf_loc (MSG_NOTE, vect_location,
514 "Access function of PHI: ");
515 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
516 dump_printf (MSG_NOTE, "\n");
517 }
518 STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
519 = initial_condition_in_loop_num (access_fn, loop->num);
520 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
521 = evolution_part_in_loop_num (access_fn, loop->num);
522 }
523
524 if (!access_fn
525 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
526 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
527 && TREE_CODE (step) != INTEGER_CST))
528 {
529 worklist.safe_push (stmt_vinfo);
530 continue;
531 }
532
533 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
534 != NULL_TREE);
535 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
536
537 if (dump_enabled_p ())
538 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
539 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
540 }
541
542
543 /* Second - identify all reductions and nested cycles. */
544 while (worklist.length () > 0)
545 {
546 stmt_vec_info stmt_vinfo = worklist.pop ();
547 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
548 tree def = PHI_RESULT (phi);
549
550 if (dump_enabled_p ())
551 {
552 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
553 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
554 }
555
556 gcc_assert (!virtual_operand_p (def)
557 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
558
559 stmt_vec_info reduc_stmt_info
560 = vect_force_simple_reduction (loop_vinfo, stmt_vinfo,
561 &double_reduc, false);
562 if (reduc_stmt_info)
563 {
564 if (double_reduc)
565 {
566 if (dump_enabled_p ())
567 dump_printf_loc (MSG_NOTE, vect_location,
568 "Detected double reduction.\n");
569
570 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
571 STMT_VINFO_DEF_TYPE (reduc_stmt_info)
572 = vect_double_reduction_def;
573 }
574 else
575 {
576 if (loop != LOOP_VINFO_LOOP (loop_vinfo))
577 {
578 if (dump_enabled_p ())
579 dump_printf_loc (MSG_NOTE, vect_location,
580 "Detected vectorizable nested cycle.\n");
581
582 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
583 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_nested_cycle;
584 }
585 else
586 {
587 if (dump_enabled_p ())
588 dump_printf_loc (MSG_NOTE, vect_location,
589 "Detected reduction.\n");
590
591 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
592 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def;
593 /* Store the reduction cycles for possible vectorization in
 594 loop-aware SLP if it was not detected as a reduction
595 chain. */
596 if (! REDUC_GROUP_FIRST_ELEMENT (reduc_stmt_info))
597 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push
598 (reduc_stmt_info);
599 }
600 }
601 }
602 else
603 if (dump_enabled_p ())
604 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
605 "Unknown def-use cycle pattern.\n");
606 }
607 }
608
609
610 /* Function vect_analyze_scalar_cycles.
611
612 Examine the cross iteration def-use cycles of scalar variables, by
613 analyzing the loop-header PHIs of scalar variables. Classify each
614 cycle as one of the following: invariant, induction, reduction, unknown.
 615 We do that for the loop represented by LOOP_VINFO, and also for its
 616 inner-loop, if it exists.
617 Examples for scalar cycles:
618
619 Example1: reduction:
620
621 loop1:
622 for (i=0; i<N; i++)
623 sum += a[i];
624
625 Example2: induction:
626
627 loop2:
628 for (i=0; i<N; i++)
629 a[i] = i; */
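 /* Example3 (for illustration): double reduction, when vectorizing the
    outer loop of

      loop3:
      for (i = 0; i < N; i++)
        for (j = 0; j < M; j++)
          sum += a[i][j];

    Here the summation is a reduction in the inner loop whose result is
    itself carried around the outer loop.  */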
630
631 static void
632 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
633 {
634 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
635
636 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
637
638 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
 639 Reductions in such an inner-loop therefore have different properties than
640 the reductions in the nest that gets vectorized:
641 1. When vectorized, they are executed in the same order as in the original
642 scalar loop, so we can't change the order of computation when
643 vectorizing them.
644 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
645 current checks are too strict. */
646
647 if (loop->inner)
648 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
649 }
650
651 /* Transfer group and reduction information from STMT_INFO to its
652 pattern stmt. */
653
654 static void
655 vect_fixup_reduc_chain (stmt_vec_info stmt_info)
656 {
657 stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
658 stmt_vec_info stmtp;
659 gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
660 && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
661 REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
662 do
663 {
664 stmtp = STMT_VINFO_RELATED_STMT (stmt_info);
665 REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
666 stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info);
667 if (stmt_info)
668 REDUC_GROUP_NEXT_ELEMENT (stmtp)
669 = STMT_VINFO_RELATED_STMT (stmt_info);
670 }
671 while (stmt_info);
672 STMT_VINFO_DEF_TYPE (stmtp) = vect_reduction_def;
673 }
674
675 /* Fixup scalar cycles that now have their stmts detected as patterns. */
676
677 static void
678 vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
679 {
680 stmt_vec_info first;
681 unsigned i;
682
683 FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
684 if (STMT_VINFO_IN_PATTERN_P (first))
685 {
686 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
687 while (next)
688 {
689 if (! STMT_VINFO_IN_PATTERN_P (next))
690 break;
691 next = REDUC_GROUP_NEXT_ELEMENT (next);
692 }
 693 /* If not all stmts in the chain are patterns, try to handle
694 the chain without patterns. */
695 if (! next)
696 {
697 vect_fixup_reduc_chain (first);
698 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
699 = STMT_VINFO_RELATED_STMT (first);
700 }
701 }
702 }
703
704 /* Function vect_get_loop_niters.
705
 706 Determine how many times the loop is executed and place it
707 in NUMBER_OF_ITERATIONS. Place the number of latch iterations
708 in NUMBER_OF_ITERATIONSM1. Place the condition under which the
709 niter information holds in ASSUMPTIONS.
710
711 Return the loop exit condition. */
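 /* As a rough example: for a loop of the form

      i = 0;
      do { ...; i++; } while (i < n);

    with n known to be positive, niter analysis counts n - 1 latch
    executions, so NUMBER_OF_ITERATIONSM1 is n - 1 and
    NUMBER_OF_ITERATIONS (the number of header executions) is n.  */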
712
713
714 static gcond *
715 vect_get_loop_niters (struct loop *loop, tree *assumptions,
716 tree *number_of_iterations, tree *number_of_iterationsm1)
717 {
718 edge exit = single_exit (loop);
719 struct tree_niter_desc niter_desc;
720 tree niter_assumptions, niter, may_be_zero;
721 gcond *cond = get_loop_exit_condition (loop);
722
723 *assumptions = boolean_true_node;
724 *number_of_iterationsm1 = chrec_dont_know;
725 *number_of_iterations = chrec_dont_know;
726 DUMP_VECT_SCOPE ("get_loop_niters");
727
728 if (!exit)
729 return cond;
730
731 niter = chrec_dont_know;
732 may_be_zero = NULL_TREE;
733 niter_assumptions = boolean_true_node;
734 if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
735 || chrec_contains_undetermined (niter_desc.niter))
736 return cond;
737
738 niter_assumptions = niter_desc.assumptions;
739 may_be_zero = niter_desc.may_be_zero;
740 niter = niter_desc.niter;
741
742 if (may_be_zero && integer_zerop (may_be_zero))
743 may_be_zero = NULL_TREE;
744
745 if (may_be_zero)
746 {
747 if (COMPARISON_CLASS_P (may_be_zero))
748 {
 749 /* Try to combine may_be_zero with assumptions; this can simplify
 750 the computation of the niter expression. */
751 if (niter_assumptions && !integer_nonzerop (niter_assumptions))
752 niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
753 niter_assumptions,
754 fold_build1 (TRUTH_NOT_EXPR,
755 boolean_type_node,
756 may_be_zero));
757 else
758 niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
759 build_int_cst (TREE_TYPE (niter), 0),
760 rewrite_to_non_trapping_overflow (niter));
761
762 may_be_zero = NULL_TREE;
763 }
764 else if (integer_nonzerop (may_be_zero))
765 {
766 *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
767 *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
768 return cond;
769 }
770 else
771 return cond;
772 }
773
774 *assumptions = niter_assumptions;
775 *number_of_iterationsm1 = niter;
776
777 /* We want the number of loop header executions which is the number
778 of latch executions plus one.
779 ??? For UINT_MAX latch executions this number overflows to zero
780 for loops like do { n++; } while (n != 0); */
781 if (niter && !chrec_contains_undetermined (niter))
782 niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
783 build_int_cst (TREE_TYPE (niter), 1));
784 *number_of_iterations = niter;
785
786 return cond;
787 }
788
789 /* Function bb_in_loop_p
790
791 Used as predicate for dfs order traversal of the loop bbs. */
792
793 static bool
794 bb_in_loop_p (const_basic_block bb, const void *data)
795 {
796 const struct loop *const loop = (const struct loop *)data;
797 if (flow_bb_inside_loop_p (loop, bb))
798 return true;
799 return false;
800 }
801
802
803 /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
804 stmt_vec_info structs for all the stmts in LOOP_IN. */
805
806 _loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
807 : vec_info (vec_info::loop, init_cost (loop_in), shared),
808 loop (loop_in),
809 bbs (XCNEWVEC (basic_block, loop->num_nodes)),
810 num_itersm1 (NULL_TREE),
811 num_iters (NULL_TREE),
812 num_iters_unchanged (NULL_TREE),
813 num_iters_assumptions (NULL_TREE),
814 th (0),
815 versioning_threshold (0),
816 vectorization_factor (0),
817 max_vectorization_factor (0),
818 mask_skip_niters (NULL_TREE),
819 mask_compare_type (NULL_TREE),
820 unaligned_dr (NULL),
821 peeling_for_alignment (0),
822 ptr_mask (0),
823 ivexpr_map (NULL),
824 slp_unrolling_factor (1),
825 single_scalar_iteration_cost (0),
826 vectorizable (false),
827 can_fully_mask_p (true),
828 fully_masked_p (false),
829 peeling_for_gaps (false),
830 peeling_for_niter (false),
831 operands_swapped (false),
832 no_data_dependencies (false),
833 has_mask_store (false),
834 scalar_loop (NULL),
835 orig_loop_info (NULL)
836 {
837 /* CHECKME: We want to visit all BBs before their successors (except for
838 latch blocks, for which this assertion wouldn't hold). In the simple
 839 case of the loop forms we allow, a dfs order of the BBs would be the same
840 as reversed postorder traversal, so we are safe. */
841
842 unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
843 bbs, loop->num_nodes, loop);
844 gcc_assert (nbbs == loop->num_nodes);
845
846 for (unsigned int i = 0; i < nbbs; i++)
847 {
848 basic_block bb = bbs[i];
849 gimple_stmt_iterator si;
850
851 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
852 {
853 gimple *phi = gsi_stmt (si);
854 gimple_set_uid (phi, 0);
855 add_stmt (phi);
856 }
857
858 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
859 {
860 gimple *stmt = gsi_stmt (si);
861 gimple_set_uid (stmt, 0);
862 add_stmt (stmt);
863 }
864 }
865 }
866
867 /* Free all levels of MASKS. */
868
869 void
870 release_vec_loop_masks (vec_loop_masks *masks)
871 {
872 rgroup_masks *rgm;
873 unsigned int i;
874 FOR_EACH_VEC_ELT (*masks, i, rgm)
875 rgm->masks.release ();
876 masks->release ();
877 }
878
879 /* Free all memory used by the _loop_vec_info, as well as all the
880 stmt_vec_info structs of all the stmts in the loop. */
881
882 _loop_vec_info::~_loop_vec_info ()
883 {
884 int nbbs;
885 gimple_stmt_iterator si;
886 int j;
887
888 nbbs = loop->num_nodes;
889 for (j = 0; j < nbbs; j++)
890 {
891 basic_block bb = bbs[j];
892 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
893 {
894 gimple *stmt = gsi_stmt (si);
895
896 /* We may have broken canonical form by moving a constant
897 into RHS1 of a commutative op. Fix such occurrences. */
898 if (operands_swapped && is_gimple_assign (stmt))
899 {
900 enum tree_code code = gimple_assign_rhs_code (stmt);
901
902 if ((code == PLUS_EXPR
903 || code == POINTER_PLUS_EXPR
904 || code == MULT_EXPR)
905 && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
906 swap_ssa_operands (stmt,
907 gimple_assign_rhs1_ptr (stmt),
908 gimple_assign_rhs2_ptr (stmt));
909 else if (code == COND_EXPR
910 && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
911 {
912 tree cond_expr = gimple_assign_rhs1 (stmt);
913 enum tree_code cond_code = TREE_CODE (cond_expr);
914
915 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
916 {
917 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
918 0));
919 cond_code = invert_tree_comparison (cond_code,
920 honor_nans);
921 if (cond_code != ERROR_MARK)
922 {
923 TREE_SET_CODE (cond_expr, cond_code);
924 swap_ssa_operands (stmt,
925 gimple_assign_rhs2_ptr (stmt),
926 gimple_assign_rhs3_ptr (stmt));
927 }
928 }
929 }
930 }
931 gsi_next (&si);
932 }
933 }
934
935 free (bbs);
936
937 release_vec_loop_masks (&masks);
938 delete ivexpr_map;
939
940 loop->aux = NULL;
941 }
942
943 /* Return an invariant or register for EXPR and emit necessary
944 computations in the LOOP_VINFO loop preheader. */
945
946 tree
947 cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
948 {
949 if (is_gimple_reg (expr)
950 || is_gimple_min_invariant (expr))
951 return expr;
952
953 if (! loop_vinfo->ivexpr_map)
954 loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
955 tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
956 if (! cached)
957 {
958 gimple_seq stmts = NULL;
959 cached = force_gimple_operand (unshare_expr (expr),
960 &stmts, true, NULL_TREE);
961 if (stmts)
962 {
963 edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
964 gsi_insert_seq_on_edge_immediate (e, stmts);
965 }
966 }
967 return cached;
968 }
969
970 /* Return true if we can use CMP_TYPE as the comparison type to produce
971 all masks required to mask LOOP_VINFO. */
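 /* The masks themselves are expected to come from IFN_WHILE_ULT, where,
    roughly, element K of WHILE_ULT (START, END) is true iff
    START + K < END (unsigned comparison); CMP_TYPE is the type used for
    START and END.  */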
972
973 static bool
974 can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
975 {
976 rgroup_masks *rgm;
977 unsigned int i;
978 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
979 if (rgm->mask_type != NULL_TREE
980 && !direct_internal_fn_supported_p (IFN_WHILE_ULT,
981 cmp_type, rgm->mask_type,
982 OPTIMIZE_FOR_SPEED))
983 return false;
984 return true;
985 }
986
987 /* Calculate the maximum number of scalars per iteration for every
988 rgroup in LOOP_VINFO. */
989
990 static unsigned int
991 vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
992 {
993 unsigned int res = 1;
994 unsigned int i;
995 rgroup_masks *rgm;
996 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
997 res = MAX (res, rgm->max_nscalars_per_iter);
998 return res;
999 }
1000
1001 /* Each statement in LOOP_VINFO can be masked where necessary. Check
1002 whether we can actually generate the masks required. Return true if so,
1003 storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. */
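 /* Sketch of the idea (not literal output of this function): a
    fully-masked version of

      for (i = 0; i < n; i++)
        a[i] = b[i] + c[i];

    processes each vector of elements under a governing mask computed as
    WHILE_ULT (i, n), so the final, partial vector iteration needs no
    scalar epilogue.  The job here is only to pick a comparison type wide
    enough for the index and limit values that feed those WHILE_ULTs.  */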
1004
1005 static bool
1006 vect_verify_full_masking (loop_vec_info loop_vinfo)
1007 {
1008 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1009 unsigned int min_ni_width;
1010
1011 /* Use a normal loop if there are no statements that need masking.
1012 This only happens in rare degenerate cases: it means that the loop
1013 has no loads, no stores, and no live-out values. */
1014 if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
1015 return false;
1016
1017 /* Get the maximum number of iterations that is representable
1018 in the counter type. */
1019 tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
1020 widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
1021
1022 /* Get a more refined estimate for the number of iterations. */
1023 widest_int max_back_edges;
1024 if (max_loop_iterations (loop, &max_back_edges))
1025 max_ni = wi::smin (max_ni, max_back_edges + 1);
1026
1027 /* Account for rgroup masks, in which each bit is replicated N times. */
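 /* For example, an rgroup whose masks control two scalar values per
    iteration needs twice as many mask bits, so the count we must be able
    to represent doubles.  */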
1028 max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);
1029
1030 /* Work out how many bits we need to represent the limit. */
1031 min_ni_width = wi::min_precision (max_ni, UNSIGNED);
1032
1033 /* Find a scalar mode for which WHILE_ULT is supported. */
1034 opt_scalar_int_mode cmp_mode_iter;
1035 tree cmp_type = NULL_TREE;
1036 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
1037 {
1038 unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
1039 if (cmp_bits >= min_ni_width
1040 && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
1041 {
1042 tree this_type = build_nonstandard_integer_type (cmp_bits, true);
1043 if (this_type
1044 && can_produce_all_loop_masks_p (loop_vinfo, this_type))
1045 {
1046 /* Although we could stop as soon as we find a valid mode,
1047 it's often better to continue until we hit Pmode, since the
1048 operands to the WHILE are more likely to be reusable in
1049 address calculations. */
1050 cmp_type = this_type;
1051 if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
1052 break;
1053 }
1054 }
1055 }
1056
1057 if (!cmp_type)
1058 return false;
1059
1060 LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
1061 return true;
1062 }
1063
1064 /* Calculate the cost of one scalar iteration of the loop. */
1065 static void
1066 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
1067 {
1068 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1069 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1070 int nbbs = loop->num_nodes, factor;
1071 int innerloop_iters, i;
1072
1073 /* Gather costs for statements in the scalar loop. */
1074
1075 /* FORNOW. */
1076 innerloop_iters = 1;
1077 if (loop->inner)
1078 innerloop_iters = 50; /* FIXME */
1079
1080 for (i = 0; i < nbbs; i++)
1081 {
1082 gimple_stmt_iterator si;
1083 basic_block bb = bbs[i];
1084
1085 if (bb->loop_father == loop->inner)
1086 factor = innerloop_iters;
1087 else
1088 factor = 1;
1089
1090 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1091 {
1092 gimple *stmt = gsi_stmt (si);
1093 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
1094
1095 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
1096 continue;
1097
1098 /* Skip stmts that are not vectorized inside the loop. */
1099 if (stmt_info
1100 && !STMT_VINFO_RELEVANT_P (stmt_info)
1101 && (!STMT_VINFO_LIVE_P (stmt_info)
1102 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1103 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
1104 continue;
1105
1106 vect_cost_for_stmt kind;
1107 if (STMT_VINFO_DATA_REF (stmt_info))
1108 {
1109 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
1110 kind = scalar_load;
1111 else
1112 kind = scalar_store;
1113 }
1114 else
1115 kind = scalar_stmt;
1116
1117 record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1118 factor, kind, stmt_info, 0, vect_prologue);
1119 }
1120 }
1121
1122 /* Now accumulate cost. */
1123 void *target_cost_data = init_cost (loop);
1124 stmt_info_for_cost *si;
1125 int j;
1126 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1127 j, si)
1128 (void) add_stmt_cost (target_cost_data, si->count,
1129 si->kind, si->stmt_info, si->misalign,
1130 vect_body);
1131 unsigned dummy, body_cost = 0;
1132 finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
1133 destroy_cost_data (target_cost_data);
1134 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
1135 }
1136
1137
1138 /* Function vect_analyze_loop_form_1.
1139
1140 Verify that certain CFG restrictions hold, including:
1141 - the loop has a pre-header
1142 - the loop has a single entry and exit
1143 - the loop exit condition is simple enough
 1144 - the number of iterations can be analyzed, i.e., the loop is countable.
 1145 The niter may be analyzable only under some assumptions. */
1146
1147 bool
1148 vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
1149 tree *assumptions, tree *number_of_iterationsm1,
1150 tree *number_of_iterations, gcond **inner_loop_cond)
1151 {
1152 DUMP_VECT_SCOPE ("vect_analyze_loop_form");
1153
1154 /* Different restrictions apply when we are considering an inner-most loop,
1155 vs. an outer (nested) loop.
1156 (FORNOW. May want to relax some of these restrictions in the future). */
1157
1158 if (!loop->inner)
1159 {
1160 /* Inner-most loop. We currently require that the number of BBs is
1161 exactly 2 (the header and latch). Vectorizable inner-most loops
1162 look like this:
1163
1164 (pre-header)
1165 |
1166 header <--------+
1167 | | |
1168 | +--> latch --+
1169 |
1170 (exit-bb) */
1171
1172 if (loop->num_nodes != 2)
1173 {
1174 if (dump_enabled_p ())
1175 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1176 "not vectorized: control flow in loop.\n");
1177 return false;
1178 }
1179
1180 if (empty_block_p (loop->header))
1181 {
1182 if (dump_enabled_p ())
1183 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1184 "not vectorized: empty loop.\n");
1185 return false;
1186 }
1187 }
1188 else
1189 {
1190 struct loop *innerloop = loop->inner;
1191 edge entryedge;
1192
1193 /* Nested loop. We currently require that the loop is doubly-nested,
1194 contains a single inner loop, and the number of BBs is exactly 5.
1195 Vectorizable outer-loops look like this:
1196
1197 (pre-header)
1198 |
1199 header <---+
1200 | |
1201 inner-loop |
1202 | |
1203 tail ------+
1204 |
1205 (exit-bb)
1206
1207 The inner-loop has the properties expected of inner-most loops
1208 as described above. */
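 /* A typical candidate (illustrative only) is a nest such as

      for (i = 0; i < n; i++)
        for (j = 0; j < m; j++)
          x[i] += a[j][i];

    where the outer loop over i is the one considered for
    vectorization.  */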
1209
1210 if ((loop->inner)->inner || (loop->inner)->next)
1211 {
1212 if (dump_enabled_p ())
1213 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1214 "not vectorized: multiple nested loops.\n");
1215 return false;
1216 }
1217
1218 if (loop->num_nodes != 5)
1219 {
1220 if (dump_enabled_p ())
1221 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1222 "not vectorized: control flow in loop.\n");
1223 return false;
1224 }
1225
1226 entryedge = loop_preheader_edge (innerloop);
1227 if (entryedge->src != loop->header
1228 || !single_exit (innerloop)
1229 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1230 {
1231 if (dump_enabled_p ())
1232 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1233 "not vectorized: unsupported outerloop form.\n");
1234 return false;
1235 }
1236
1237 /* Analyze the inner-loop. */
1238 tree inner_niterm1, inner_niter, inner_assumptions;
1239 if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
1240 &inner_assumptions, &inner_niterm1,
1241 &inner_niter, NULL)
1242 /* Don't support analyzing niter under assumptions for inner
1243 loop. */
1244 || !integer_onep (inner_assumptions))
1245 {
1246 if (dump_enabled_p ())
1247 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1248 "not vectorized: Bad inner loop.\n");
1249 return false;
1250 }
1251
1252 if (!expr_invariant_in_loop_p (loop, inner_niter))
1253 {
1254 if (dump_enabled_p ())
1255 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1256 "not vectorized: inner-loop count not"
1257 " invariant.\n");
1258 return false;
1259 }
1260
1261 if (dump_enabled_p ())
1262 dump_printf_loc (MSG_NOTE, vect_location,
1263 "Considering outer-loop vectorization.\n");
1264 }
1265
1266 if (!single_exit (loop)
1267 || EDGE_COUNT (loop->header->preds) != 2)
1268 {
1269 if (dump_enabled_p ())
1270 {
1271 if (!single_exit (loop))
1272 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1273 "not vectorized: multiple exits.\n");
1274 else if (EDGE_COUNT (loop->header->preds) != 2)
1275 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1276 "not vectorized: too many incoming edges.\n");
1277 }
1278 return false;
1279 }
1280
 1281 /* We assume that the loop exit condition is at the end of the loop, i.e.,
1282 that the loop is represented as a do-while (with a proper if-guard
1283 before the loop if needed), where the loop header contains all the
1284 executable statements, and the latch is empty. */
1285 if (!empty_block_p (loop->latch)
1286 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1287 {
1288 if (dump_enabled_p ())
1289 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1290 "not vectorized: latch block not empty.\n");
1291 return false;
1292 }
1293
1294 /* Make sure the exit is not abnormal. */
1295 edge e = single_exit (loop);
1296 if (e->flags & EDGE_ABNORMAL)
1297 {
1298 if (dump_enabled_p ())
1299 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1300 "not vectorized: abnormal loop exit edge.\n");
1301 return false;
1302 }
1303
1304 *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
1305 number_of_iterationsm1);
1306 if (!*loop_cond)
1307 {
1308 if (dump_enabled_p ())
1309 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1310 "not vectorized: complicated exit condition.\n");
1311 return false;
1312 }
1313
1314 if (integer_zerop (*assumptions)
1315 || !*number_of_iterations
1316 || chrec_contains_undetermined (*number_of_iterations))
1317 {
1318 if (dump_enabled_p ())
1319 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1320 "not vectorized: number of iterations cannot be "
1321 "computed.\n");
1322 return false;
1323 }
1324
1325 if (integer_zerop (*number_of_iterations))
1326 {
1327 if (dump_enabled_p ())
1328 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1329 "not vectorized: number of iterations = 0.\n");
1330 return false;
1331 }
1332
1333 return true;
1334 }
1335
1336 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1337
1338 loop_vec_info
1339 vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
1340 {
1341 tree assumptions, number_of_iterations, number_of_iterationsm1;
1342 gcond *loop_cond, *inner_loop_cond = NULL;
1343
1344 if (! vect_analyze_loop_form_1 (loop, &loop_cond,
1345 &assumptions, &number_of_iterationsm1,
1346 &number_of_iterations, &inner_loop_cond))
1347 return NULL;
1348
1349 loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
1350 LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
1351 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1352 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1353 if (!integer_onep (assumptions))
1354 {
 1355 /* We consider vectorizing this loop by versioning it under
1356 some assumptions. In order to do this, we need to clear
1357 existing information computed by scev and niter analyzer. */
1358 scev_reset_htab ();
1359 free_numbers_of_iterations_estimates (loop);
1360 /* Also set flag for this loop so that following scev and niter
1361 analysis are done under the assumptions. */
1362 loop_constraint_set (loop, LOOP_C_FINITE);
1363 /* Also record the assumptions for versioning. */
1364 LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
1365 }
1366
1367 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1368 {
1369 if (dump_enabled_p ())
1370 {
1371 dump_printf_loc (MSG_NOTE, vect_location,
1372 "Symbolic number of iterations is ");
1373 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1374 dump_printf (MSG_NOTE, "\n");
1375 }
1376 }
1377
1378 stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
1379 STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
1380 if (inner_loop_cond)
1381 {
1382 stmt_vec_info inner_loop_cond_info
1383 = loop_vinfo->lookup_stmt (inner_loop_cond);
1384 STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
1385 }
1386
1387 gcc_assert (!loop->aux);
1388 loop->aux = loop_vinfo;
1389 return loop_vinfo;
1390 }
1391
1392
1393
 1394 /* Scan the loop stmts and, depending on whether there are any (non-)SLP
 1395 statements, update the vectorization factor. */
1396
1397 static void
1398 vect_update_vf_for_slp (loop_vec_info loop_vinfo)
1399 {
1400 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1401 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1402 int nbbs = loop->num_nodes;
1403 poly_uint64 vectorization_factor;
1404 int i;
1405
1406 DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
1407
1408 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1409 gcc_assert (known_ne (vectorization_factor, 0U));
1410
 1411 /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
 1412 vectorization factor of the loop is the unrolling factor required by
 1413 the SLP instances. If that unrolling factor is 1, we say that we
 1414 perform pure SLP on the loop - cross-iteration parallelism is not
 1415 exploited. */
1416 bool only_slp_in_loop = true;
1417 for (i = 0; i < nbbs; i++)
1418 {
1419 basic_block bb = bbs[i];
1420 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1421 gsi_next (&si))
1422 {
1423 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
1424 stmt_info = vect_stmt_to_vectorize (stmt_info);
1425 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1426 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1427 && !PURE_SLP_STMT (stmt_info))
1428 /* STMT needs both SLP and loop-based vectorization. */
1429 only_slp_in_loop = false;
1430 }
1431 }
1432
1433 if (only_slp_in_loop)
1434 {
1435 dump_printf_loc (MSG_NOTE, vect_location,
1436 "Loop contains only SLP stmts\n");
1437 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1438 }
1439 else
1440 {
1441 dump_printf_loc (MSG_NOTE, vect_location,
1442 "Loop contains SLP and non-SLP stmts\n");
1443 /* Both the vectorization factor and unroll factor have the form
1444 current_vector_size * X for some rational X, so they must have
1445 a common multiple. */
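 /* E.g. combining a loop vectorization factor of 4 with an SLP unrolling
    factor of 6 would give 12.  */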
1446 vectorization_factor
1447 = force_common_multiple (vectorization_factor,
1448 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1449 }
1450
1451 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1452 if (dump_enabled_p ())
1453 {
1454 dump_printf_loc (MSG_NOTE, vect_location,
1455 "Updating vectorization factor to ");
1456 dump_dec (MSG_NOTE, vectorization_factor);
1457 dump_printf (MSG_NOTE, ".\n");
1458 }
1459 }
1460
1461 /* Return true if STMT_INFO describes a double reduction phi and if
1462 the other phi in the reduction is also relevant for vectorization.
1463 This rejects cases such as:
1464
1465 outer1:
1466 x_1 = PHI <x_3(outer2), ...>;
1467 ...
1468
1469 inner:
1470 x_2 = ...;
1471 ...
1472
1473 outer2:
1474 x_3 = PHI <x_2(inner)>;
1475
1476 if nothing in x_2 or elsewhere makes x_1 relevant. */
1477
1478 static bool
1479 vect_active_double_reduction_p (stmt_vec_info stmt_info)
1480 {
1481 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
1482 return false;
1483
1484 return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info));
1485 }
1486
1487 /* Function vect_analyze_loop_operations.
1488
1489 Scan the loop stmts and make sure they are all vectorizable. */
1490
1491 static bool
1492 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1493 {
1494 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1495 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1496 int nbbs = loop->num_nodes;
1497 int i;
1498 stmt_vec_info stmt_info;
1499 bool need_to_vectorize = false;
1500 bool ok;
1501
1502 DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
1503
1504 stmt_vector_for_cost cost_vec;
1505 cost_vec.create (2);
1506
1507 for (i = 0; i < nbbs; i++)
1508 {
1509 basic_block bb = bbs[i];
1510
1511 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1512 gsi_next (&si))
1513 {
1514 gphi *phi = si.phi ();
1515 ok = true;
1516
1517 stmt_info = loop_vinfo->lookup_stmt (phi);
1518 if (dump_enabled_p ())
1519 {
1520 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
1521 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1522 }
1523 if (virtual_operand_p (gimple_phi_result (phi)))
1524 continue;
1525
1526 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1527 (i.e., a phi in the tail of the outer-loop). */
1528 if (! is_loop_header_bb_p (bb))
1529 {
 1530 /* FORNOW: we currently don't support the case that these phis
 1531 are not used in the outer loop (unless it is a double reduction,
 1532 i.e., this phi is vect_reduction_def), because this case
 1533 would require us to actually do something here. */
1534 if (STMT_VINFO_LIVE_P (stmt_info)
1535 && !vect_active_double_reduction_p (stmt_info))
1536 {
1537 if (dump_enabled_p ())
1538 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1539 "Unsupported loop-closed phi in "
1540 "outer-loop.\n");
1541 return false;
1542 }
1543
1544 /* If PHI is used in the outer loop, we check that its operand
1545 is defined in the inner loop. */
1546 if (STMT_VINFO_RELEVANT_P (stmt_info))
1547 {
1548 tree phi_op;
1549
1550 if (gimple_phi_num_args (phi) != 1)
1551 return false;
1552
1553 phi_op = PHI_ARG_DEF (phi, 0);
1554 stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
1555 if (!op_def_info)
1556 return false;
1557
1558 if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
1559 && (STMT_VINFO_RELEVANT (op_def_info)
1560 != vect_used_in_outer_by_reduction))
1561 return false;
1562 }
1563
1564 continue;
1565 }
1566
1567 gcc_assert (stmt_info);
1568
1569 if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1570 || STMT_VINFO_LIVE_P (stmt_info))
1571 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1572 {
1573 /* A scalar-dependence cycle that we don't support. */
1574 if (dump_enabled_p ())
1575 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1576 "not vectorized: scalar dependence cycle.\n");
1577 return false;
1578 }
1579
1580 if (STMT_VINFO_RELEVANT_P (stmt_info))
1581 {
1582 need_to_vectorize = true;
1583 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
1584 && ! PURE_SLP_STMT (stmt_info))
1585 ok = vectorizable_induction (stmt_info, NULL, NULL, NULL,
1586 &cost_vec);
1587 else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
1588 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
1589 && ! PURE_SLP_STMT (stmt_info))
1590 ok = vectorizable_reduction (stmt_info, NULL, NULL, NULL, NULL,
1591 &cost_vec);
1592 }
1593
1594 /* SLP PHIs are tested by vect_slp_analyze_node_operations. */
1595 if (ok
1596 && STMT_VINFO_LIVE_P (stmt_info)
1597 && !PURE_SLP_STMT (stmt_info))
1598 ok = vectorizable_live_operation (stmt_info, NULL, NULL, -1, NULL,
1599 &cost_vec);
1600
1601 if (!ok)
1602 {
1603 if (dump_enabled_p ())
1604 {
1605 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1606 "not vectorized: relevant phi not "
1607 "supported: ");
1608 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
1609 }
1610 return false;
1611 }
1612 }
1613
1614 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1615 gsi_next (&si))
1616 {
1617 gimple *stmt = gsi_stmt (si);
1618 if (!gimple_clobber_p (stmt)
1619 && !vect_analyze_stmt (loop_vinfo->lookup_stmt (stmt),
1620 &need_to_vectorize,
1621 NULL, NULL, &cost_vec))
1622 return false;
1623 }
1624 } /* bbs */
1625
1626 add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec);
1627 cost_vec.release ();
1628
 1629 /* All operations in the loop are either irrelevant (they deal with loop
 1630 control, or are dead), or only used outside the loop and can be moved
1631 out of the loop (e.g. invariants, inductions). The loop can be
1632 optimized away by scalar optimizations. We're better off not
1633 touching this loop. */
1634 if (!need_to_vectorize)
1635 {
1636 if (dump_enabled_p ())
1637 dump_printf_loc (MSG_NOTE, vect_location,
1638 "All the computation can be taken out of the loop.\n");
1639 if (dump_enabled_p ())
1640 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1641 "not vectorized: redundant loop. no profit to "
1642 "vectorize.\n");
1643 return false;
1644 }
1645
1646 return true;
1647 }
1648
1649 /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
1650 is worthwhile to vectorize. Return 1 if definitely yes, 0 if
1651 definitely no, or -1 if it's worth retrying. */
1652
1653 static int
1654 vect_analyze_loop_costing (loop_vec_info loop_vinfo)
1655 {
1656 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1657 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
1658
1659 /* Only fully-masked loops can have iteration counts less than the
1660 vectorization factor. */
1661 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
1662 {
1663 HOST_WIDE_INT max_niter;
1664
1665 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1666 max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
1667 else
1668 max_niter = max_stmt_executions_int (loop);
1669
1670 if (max_niter != -1
1671 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
1672 {
1673 if (dump_enabled_p ())
1674 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1675 "not vectorized: iteration count smaller than "
1676 "vectorization factor.\n");
1677 return 0;
1678 }
1679 }
1680
1681 int min_profitable_iters, min_profitable_estimate;
1682 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1683 &min_profitable_estimate);
1684
1685 if (min_profitable_iters < 0)
1686 {
1687 if (dump_enabled_p ())
1688 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1689 "not vectorized: vectorization not profitable.\n");
1690 if (dump_enabled_p ())
1691 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1692 "not vectorized: vector version will never be "
1693 "profitable.\n");
1694 return -1;
1695 }
1696
1697 int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1698 * assumed_vf);
1699
 1700 /* Use the cost model only if it is more conservative than the
 1701 user-specified threshold. */
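 /* For instance (illustrative numbers): with --param min-vect-loop-bound=2
    and an assumed VF of 4, min_scalar_loop_bound above is 8; if the cost
    model then asks for at least 11 iterations, the threshold TH below
    becomes 11.  */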
1702 unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
1703 min_profitable_iters);
1704
1705 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
1706
1707 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1708 && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
1709 {
1710 if (dump_enabled_p ())
1711 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1712 "not vectorized: vectorization not profitable.\n");
1713 if (dump_enabled_p ())
1714 dump_printf_loc (MSG_NOTE, vect_location,
1715 "not vectorized: iteration count smaller than user "
1716 "specified loop bound parameter or minimum profitable "
1717 "iterations (whichever is more conservative).\n");
1718 return 0;
1719 }
1720
1721 HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
1722 if (estimated_niter == -1)
1723 estimated_niter = likely_max_stmt_executions_int (loop);
1724 if (estimated_niter != -1
1725 && ((unsigned HOST_WIDE_INT) estimated_niter
1726 < MAX (th, (unsigned) min_profitable_estimate)))
1727 {
1728 if (dump_enabled_p ())
1729 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1730 "not vectorized: estimated iteration count too "
1731 "small.\n");
1732 if (dump_enabled_p ())
1733 dump_printf_loc (MSG_NOTE, vect_location,
1734 "not vectorized: estimated iteration count smaller "
1735 "than specified loop bound parameter or minimum "
1736 "profitable iterations (whichever is more "
1737 "conservative).\n");
1738 return -1;
1739 }
1740
1741 return 1;
1742 }
1743
1744 static bool
1745 vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
1746 vec<data_reference_p> *datarefs,
1747 unsigned int *n_stmts)
1748 {
1749 *n_stmts = 0;
1750 for (unsigned i = 0; i < loop->num_nodes; i++)
1751 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
1752 !gsi_end_p (gsi); gsi_next (&gsi))
1753 {
1754 gimple *stmt = gsi_stmt (gsi);
1755 if (is_gimple_debug (stmt))
1756 continue;
1757 ++(*n_stmts);
1758 if (!vect_find_stmt_data_reference (loop, stmt, datarefs))
1759 {
1760 if (is_gimple_call (stmt) && loop->safelen)
1761 {
1762 tree fndecl = gimple_call_fndecl (stmt), op;
1763 if (fndecl != NULL_TREE)
1764 {
1765 cgraph_node *node = cgraph_node::get (fndecl);
1766 if (node != NULL && node->simd_clones != NULL)
1767 {
1768 unsigned int j, n = gimple_call_num_args (stmt);
1769 for (j = 0; j < n; j++)
1770 {
1771 op = gimple_call_arg (stmt, j);
1772 if (DECL_P (op)
1773 || (REFERENCE_CLASS_P (op)
1774 && get_base_address (op)))
1775 break;
1776 }
1777 op = gimple_call_lhs (stmt);
1778 /* Ignore #pragma omp declare simd functions
1779 if they don't have data references in the
1780 call stmt itself. */
1781 if (j == n
1782 && !(op
1783 && (DECL_P (op)
1784 || (REFERENCE_CLASS_P (op)
1785 && get_base_address (op)))))
1786 continue;
1787 }
1788 }
1789 }
1790 return false;
1791 }
1792 	  /* If dependence analysis would give up due to the limit on the
1793 	     number of datarefs, stop here and fail fatally.  */
1794 if (datarefs->length ()
1795 > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
1796 return false;
1797 }
1798 return true;
1799 }
1800
1801 /* Function vect_analyze_loop_2.
1802
1803 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1804 for it. The different analyses will record information in the
1805 loop_vec_info struct. */
1806 static bool
1807 vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts)
1808 {
1809 bool ok;
1810 int res;
1811 unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
1812 poly_uint64 min_vf = 2;
1813
1814 /* The first group of checks is independent of the vector size. */
1815 fatal = true;
1816
1817 /* Find all data references in the loop (which correspond to vdefs/vuses)
1818 and analyze their evolution in the loop. */
1819
1820 loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
1821
1822 /* Gather the data references and count stmts in the loop. */
1823 if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ())
1824 {
1825 if (!vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo),
1826 &LOOP_VINFO_DATAREFS (loop_vinfo),
1827 n_stmts))
1828 {
1829 if (dump_enabled_p ())
1830 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1831 "not vectorized: loop contains function "
1832 "calls or data references that cannot "
1833 "be analyzed\n");
1834 return false;
1835 }
1836 loop_vinfo->shared->save_datarefs ();
1837 }
1838 else
1839 loop_vinfo->shared->check_datarefs ();
1840
1841 /* Analyze the data references and also adjust the minimal
1842 vectorization factor according to the loads and stores. */
1843
1844 ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
1845 if (!ok)
1846 {
1847 if (dump_enabled_p ())
1848 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1849 "bad data references.\n");
1850 return false;
1851 }
1852
1853 /* Classify all cross-iteration scalar data-flow cycles.
1854 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1855 vect_analyze_scalar_cycles (loop_vinfo);
1856
1857 vect_pattern_recog (loop_vinfo);
1858
1859 vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
1860
1861 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1862 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1863
1864 ok = vect_analyze_data_ref_accesses (loop_vinfo);
1865 if (!ok)
1866 {
1867 if (dump_enabled_p ())
1868 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1869 "bad data access.\n");
1870 return false;
1871 }
1872
1873 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1874
1875 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1876 if (!ok)
1877 {
1878 if (dump_enabled_p ())
1879 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1880 "unexpected pattern.\n");
1881 return false;
1882 }
1883
1884   /* The rest of the analysis below, however, depends on the vector size
        in some way, so from here on failures are not treated as fatal.  */
1885 fatal = false;
1886
1887 /* Analyze data dependences between the data-refs in the loop
1888 and adjust the maximum vectorization factor according to
1889 the dependences.
1890 FORNOW: fail at the first data dependence that we encounter. */
1891
1892 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1893 if (!ok
1894 || (max_vf != MAX_VECTORIZATION_FACTOR
1895 && maybe_lt (max_vf, min_vf)))
1896 {
1897 if (dump_enabled_p ())
1898 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1899 "bad data dependence.\n");
1900 return false;
1901 }
1902 LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
1903
1904 ok = vect_determine_vectorization_factor (loop_vinfo);
1905 if (!ok)
1906 {
1907 if (dump_enabled_p ())
1908 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1909 "can't determine vectorization factor.\n");
1910 return false;
1911 }
1912 if (max_vf != MAX_VECTORIZATION_FACTOR
1913 && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
1914 {
1915 if (dump_enabled_p ())
1916 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1917 "bad data dependence.\n");
1918 return false;
1919 }
1920
1921 /* Compute the scalar iteration cost. */
1922 vect_compute_single_scalar_iteration_cost (loop_vinfo);
1923
1924 poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1925 unsigned th;
1926
1927 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1928 ok = vect_analyze_slp (loop_vinfo, *n_stmts);
1929 if (!ok)
1930 return false;
1931
1932 /* If there are any SLP instances mark them as pure_slp. */
1933 bool slp = vect_make_slp_decision (loop_vinfo);
1934 if (slp)
1935 {
1936 /* Find stmts that need to be both vectorized and SLPed. */
1937 vect_detect_hybrid_slp (loop_vinfo);
1938
1939 /* Update the vectorization factor based on the SLP decision. */
1940 vect_update_vf_for_slp (loop_vinfo);
1941 }
1942
1943 bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);
1944
1945 /* We don't expect to have to roll back to anything other than an empty
1946 set of rgroups. */
1947 gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
1948
1949 /* This is the point where we can re-start analysis with SLP forced off. */
1950 start_over:
1951
1952 /* Now the vectorization factor is final. */
1953 poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1954 gcc_assert (known_ne (vectorization_factor, 0U));
1955
1956 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1957 {
1958 dump_printf_loc (MSG_NOTE, vect_location,
1959 "vectorization_factor = ");
1960 dump_dec (MSG_NOTE, vectorization_factor);
1961 dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
1962 LOOP_VINFO_INT_NITERS (loop_vinfo));
1963 }
1964
1965 HOST_WIDE_INT max_niter
1966 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
1967
1968 /* Analyze the alignment of the data-refs in the loop.
1969 Fail if a data reference is found that cannot be vectorized. */
1970
1971 ok = vect_analyze_data_refs_alignment (loop_vinfo);
1972 if (!ok)
1973 {
1974 if (dump_enabled_p ())
1975 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1976 "bad data alignment.\n");
1977 return false;
1978 }
1979
1980 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1981 It is important to call pruning after vect_analyze_data_ref_accesses,
1982 since we use grouping information gathered by interleaving analysis. */
1983 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1984 if (!ok)
1985 return false;
1986
1987   /* Do not invoke vect_enhance_data_refs_alignment for epilogue
1988      vectorization.  */
1989 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
1990 {
1991 /* This pass will decide on using loop versioning and/or loop peeling in
1992 order to enhance the alignment of data references in the loop. */
1993 ok = vect_enhance_data_refs_alignment (loop_vinfo);
1994 if (!ok)
1995 {
1996 if (dump_enabled_p ())
1997 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1998 "bad data alignment.\n");
1999 return false;
2000 }
2001 }
2002
2003 if (slp)
2004 {
2005 /* Analyze operations in the SLP instances. Note this may
2006 remove unsupported SLP instances which makes the above
2007 SLP kind detection invalid. */
2008 unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
2009 vect_slp_analyze_operations (loop_vinfo);
2010 if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
2011 goto again;
2012 }
2013
2014 /* Scan all the remaining operations in the loop that are not subject
2015 to SLP and make sure they are vectorizable. */
2016 ok = vect_analyze_loop_operations (loop_vinfo);
2017 if (!ok)
2018 {
2019 if (dump_enabled_p ())
2020 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2021 "bad operation or unsupported loop bound.\n");
2022 return false;
2023 }
2024
2025 /* Decide whether to use a fully-masked loop for this vectorization
2026 factor. */
2027 LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
2028 = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
2029 && vect_verify_full_masking (loop_vinfo));
2030 if (dump_enabled_p ())
2031 {
2032 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2033 dump_printf_loc (MSG_NOTE, vect_location,
2034 "using a fully-masked loop.\n");
2035 else
2036 dump_printf_loc (MSG_NOTE, vect_location,
2037 "not using a fully-masked loop.\n");
2038 }
2039
2040   /* If an epilogue loop is required because of data accesses with gaps,
2041      one additional iteration needs to be peeled.  Check if there are
2042      enough iterations for vectorization.  */
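  /* For example (illustrative numbers): with VF = 4 and a loop known to run
     exactly 4 iterations, NITERSM1 is 3, which is less than VF; after
     peeling one iteration for the gap there would not be a full vector
     iteration left, so the check below gives up.  */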
2043 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2044 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2045 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2046 {
2047 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2048 tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
2049
2050 if (known_lt (wi::to_widest (scalar_niters), vf))
2051 {
2052 if (dump_enabled_p ())
2053 dump_printf_loc (MSG_NOTE, vect_location,
2054 "loop has no enough iterations to support"
2055 " peeling for gaps.\n");
2056 return false;
2057 }
2058 }
2059
2060   /* Check that the costings of the loop make vectorizing worthwhile.  */
2061 res = vect_analyze_loop_costing (loop_vinfo);
2062 if (res < 0)
2063 goto again;
2064 if (!res)
2065 {
2066 if (dump_enabled_p ())
2067 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2068 "Loop costings not worthwhile.\n");
2069 return false;
2070 }
2071
2072 /* Decide whether we need to create an epilogue loop to handle
2073 remaining scalar iterations. */
2074 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
2075
2076 unsigned HOST_WIDE_INT const_vf;
2077 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2078 /* The main loop handles all iterations. */
2079 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2080 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2081 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
2082 {
2083 if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo)
2084 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo),
2085 LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
2086 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2087 }
2088 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2089 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
2090 || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
2091 < (unsigned) exact_log2 (const_vf))
2092 /* In case of versioning, check if the maximum number of
2093 iterations is greater than th. If they are identical,
2094 the epilogue is unnecessary. */
2095 && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
2096 || ((unsigned HOST_WIDE_INT) max_niter
2097 > (th / const_vf) * const_vf))))
2098 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
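  /* For instance, with a constant VF of 8, no peeling for alignment and
     NITERS known to be 100, tree_ctz (100) = 2 is smaller than
     exact_log2 (8) = 3, so PEELING_FOR_NITER is set above and the remaining
     100 % 8 = 4 iterations are handled by the epilogue loop; if NITERS
     were 96, a multiple of 8, no epilogue would be needed.  */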
2099
2100 /* If an epilogue loop is required make sure we can create one. */
2101 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2102 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
2103 {
2104 if (dump_enabled_p ())
2105 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
2106 if (!vect_can_advance_ivs_p (loop_vinfo)
2107 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
2108 single_exit (LOOP_VINFO_LOOP
2109 (loop_vinfo))))
2110 {
2111 if (dump_enabled_p ())
2112 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2113 "not vectorized: can't create required "
2114 "epilog loop\n");
2115 goto again;
2116 }
2117 }
2118
2119   /* During peeling, we need to check whether the number of loop iterations
2120      is enough for both the peeled prologue loop and the vector loop.  This
2121      check can be merged with the threshold check of loop versioning, so
2122      increase the threshold for this case if necessary.  */
2123 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
2124 {
2125 poly_uint64 niters_th = 0;
2126
2127 if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
2128 {
2129 /* Niters for peeled prolog loop. */
2130 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2131 {
2132 dr_vec_info *dr_info = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2133 tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
2134 niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
2135 }
2136 else
2137 niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2138 }
2139
2140 /* Niters for at least one iteration of vectorized loop. */
2141 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2142 niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2143 /* One additional iteration because of peeling for gap. */
2144 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
2145 niters_th += 1;
2146 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
2147 }
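  /* As an illustration (made-up numbers): if the prologue peels an unknown
     number of iterations for alignment (PEELING_FOR_ALIGNMENT < 0) and the
     unaligned access uses a V8HI vector type, the prologue can peel at most
     8 - 1 = 7 iterations; adding VF = 8 for one full vector iteration and 1
     for peeling for gaps gives a versioning threshold of 16.  */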
2148
2149 gcc_assert (known_eq (vectorization_factor,
2150 LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
2151
2152 /* Ok to vectorize! */
2153 return true;
2154
2155 again:
2156   /* Try again with SLP forced off, but if we didn't do any SLP there is
2157      no point in re-trying.  */
2158 if (!slp)
2159 return false;
2160
2161   /* If there are reduction chains, re-trying will fail anyway.  */
2162 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
2163 return false;
2164
2165 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2166 via interleaving or lane instructions. */
2167 slp_instance instance;
2168 slp_tree node;
2169 unsigned i, j;
2170 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
2171 {
2172 stmt_vec_info vinfo;
2173 vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2174 if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
2175 continue;
2176 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2177 unsigned int size = DR_GROUP_SIZE (vinfo);
2178 tree vectype = STMT_VINFO_VECTYPE (vinfo);
2179 if (! vect_store_lanes_supported (vectype, size, false)
2180 && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
2181 && ! vect_grouped_store_supported (vectype, size))
2182 return false;
2183 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
2184 {
2185 vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
2186 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2187 bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
2188 size = DR_GROUP_SIZE (vinfo);
2189 vectype = STMT_VINFO_VECTYPE (vinfo);
2190 if (! vect_load_lanes_supported (vectype, size, false)
2191 && ! vect_grouped_load_supported (vectype, single_element_p,
2192 size))
2193 return false;
2194 }
2195 }
2196
2197 if (dump_enabled_p ())
2198 dump_printf_loc (MSG_NOTE, vect_location,
2199 "re-trying with SLP disabled\n");
2200
2201 /* Roll back state appropriately. No SLP this time. */
2202 slp = false;
2203   /* Restore the vectorization factor to what it was without SLP.  */
2204 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
2205 /* Free the SLP instances. */
2206 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
2207 vect_free_slp_instance (instance, false);
2208 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
2209 /* Reset SLP type to loop_vect on all stmts. */
2210 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2211 {
2212 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2213 for (gimple_stmt_iterator si = gsi_start_phis (bb);
2214 !gsi_end_p (si); gsi_next (&si))
2215 {
2216 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2217 STMT_SLP_TYPE (stmt_info) = loop_vect;
2218 }
2219 for (gimple_stmt_iterator si = gsi_start_bb (bb);
2220 !gsi_end_p (si); gsi_next (&si))
2221 {
2222 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2223 STMT_SLP_TYPE (stmt_info) = loop_vect;
2224 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2225 {
2226 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
2227 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
2228 STMT_SLP_TYPE (stmt_info) = loop_vect;
2229 for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
2230 !gsi_end_p (pi); gsi_next (&pi))
2231 STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
2232 = loop_vect;
2233 }
2234 }
2235 }
2236 /* Free optimized alias test DDRS. */
2237 LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
2238 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
2239 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
2240 /* Reset target cost data. */
2241 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2242 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
2243 = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
2244 /* Reset accumulated rgroup information. */
2245 release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
2246 /* Reset assorted flags. */
2247 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2248 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
2249 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
2250 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
2251 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;
2252
2253 goto start_over;
2254 }
2255
2256 /* Function vect_analyze_loop.
2257
2258 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2259 for it. The different analyses will record information in the
2260    loop_vec_info struct.  If ORIG_LOOP_VINFO is not NULL, this is an
2261    epilogue loop that must be vectorized.  */
2262 loop_vec_info
2263 vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
2264 vec_info_shared *shared)
2265 {
2266 loop_vec_info loop_vinfo;
2267 auto_vector_sizes vector_sizes;
2268
2269 /* Autodetect first vector size we try. */
2270 current_vector_size = 0;
2271 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
2272 unsigned int next_size = 0;
2273
2274 DUMP_VECT_SCOPE ("analyze_loop_nest");
2275
2276 if (loop_outer (loop)
2277 && loop_vec_info_for_loop (loop_outer (loop))
2278 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
2279 {
2280 if (dump_enabled_p ())
2281 dump_printf_loc (MSG_NOTE, vect_location,
2282 "outer-loop already vectorized.\n");
2283 return NULL;
2284 }
2285
2286 if (!find_loop_nest (loop, &shared->loop_nest))
2287 {
2288 if (dump_enabled_p ())
2289 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2290 "not vectorized: loop nest containing two "
2291 "or more consecutive inner loops cannot be "
2292 "vectorized\n");
2293 return NULL;
2294 }
2295
2296 unsigned n_stmts = 0;
2297 poly_uint64 autodetected_vector_size = 0;
2298 while (1)
2299 {
2300 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2301 loop_vinfo = vect_analyze_loop_form (loop, shared);
2302 if (!loop_vinfo)
2303 {
2304 if (dump_enabled_p ())
2305 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2306 "bad loop form.\n");
2307 return NULL;
2308 }
2309
2310 bool fatal = false;
2311
2312 if (orig_loop_vinfo)
2313 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;
2314
2315 if (vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts))
2316 {
2317 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
2318
2319 return loop_vinfo;
2320 }
2321
2322 delete loop_vinfo;
2323
2324 if (next_size == 0)
2325 autodetected_vector_size = current_vector_size;
2326
2327 if (next_size < vector_sizes.length ()
2328 && known_eq (vector_sizes[next_size], autodetected_vector_size))
2329 next_size += 1;
2330
2331 if (fatal
2332 || next_size == vector_sizes.length ()
2333 || known_eq (current_vector_size, 0U))
2334 return NULL;
2335
2336 /* Try the next biggest vector size. */
2337 current_vector_size = vector_sizes[next_size++];
2338 if (dump_enabled_p ())
2339 {
2340 dump_printf_loc (MSG_NOTE, vect_location,
2341 "***** Re-trying analysis with "
2342 "vector size ");
2343 dump_dec (MSG_NOTE, current_vector_size);
2344 dump_printf (MSG_NOTE, "\n");
2345 }
2346 }
2347 }
2348
2349 /* Return true if there is an in-order reduction function for CODE, storing
2350 it in *REDUC_FN if so. */
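/* A minimal example of a loop that needs such an in-order reduction,
   assuming default (strict) floating-point semantics:

       double s = 0.0;
       for (int i = 0; i < n; i++)
	 s += a[i];

   Without -fassociative-math the additions must be performed in the
   original left-to-right order, so the only way to vectorize the loop is
   with IFN_FOLD_LEFT_PLUS, which folds the vector elements into the
   scalar accumulator one at a time.  */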
2351
2352 static bool
2353 fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
2354 {
2355 switch (code)
2356 {
2357 case PLUS_EXPR:
2358 *reduc_fn = IFN_FOLD_LEFT_PLUS;
2359 return true;
2360
2361 default:
2362 return false;
2363 }
2364 }
2365
2366 /* Function reduction_fn_for_scalar_code
2367
2368 Input:
2369    CODE - tree_code of the reduction operation.
2370
2371 Output:
2372 REDUC_FN - the corresponding internal function to be used to reduce the
2373 vector of partial results into a single scalar result, or IFN_LAST
2374 if the operation is a supported reduction operation, but does not have
2375 such an internal function.
2376
2377    Return FALSE if CODE currently cannot be vectorized as a reduction.  */
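/* For example, a loop computing a maximum, such as

       for (int i = 0; i < n; i++)
	 m = m < a[i] ? a[i] : m;

   is typically folded to a MAX_EXPR reduction and maps to IFN_REDUC_MAX,
   which reduces the vector of partial maxima to a single scalar in the
   loop epilogue.  MULT_EXPR and MINUS_EXPR return IFN_LAST instead: they
   are accepted as reductions but have no direct reduction internal
   function, so generic vector code is emitted for the epilogue
   reduction.  */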
2378
2379 static bool
2380 reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
2381 {
2382 switch (code)
2383 {
2384 case MAX_EXPR:
2385 *reduc_fn = IFN_REDUC_MAX;
2386 return true;
2387
2388 case MIN_EXPR:
2389 *reduc_fn = IFN_REDUC_MIN;
2390 return true;
2391
2392 case PLUS_EXPR:
2393 *reduc_fn = IFN_REDUC_PLUS;
2394 return true;
2395
2396 case BIT_AND_EXPR:
2397 *reduc_fn = IFN_REDUC_AND;
2398 return true;
2399
2400 case BIT_IOR_EXPR:
2401 *reduc_fn = IFN_REDUC_IOR;
2402 return true;
2403
2404 case BIT_XOR_EXPR:
2405 *reduc_fn = IFN_REDUC_XOR;
2406 return true;
2407
2408 case MULT_EXPR:
2409 case MINUS_EXPR:
2410 *reduc_fn = IFN_LAST;
2411 return true;
2412
2413 default:
2414 return false;
2415 }
2416 }
2417
2418 /* If there is a neutral value X such that SLP reduction NODE would not
2419 be affected by the introduction of additional X elements, return that X,
2420 otherwise return null. CODE is the code of the reduction. REDUC_CHAIN
2421 is true if the SLP statements perform a single reduction, false if each
2422 statement performs an independent reduction. */
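/* For example, for a PLUS_EXPR SLP reduction the neutral value is 0:
   padding the vector of partial sums with extra zero elements leaves the
   final sum unchanged.  Likewise 1 is neutral for MULT_EXPR and an
   all-ones value for BIT_AND_EXPR, whereas MIN_EXPR/MAX_EXPR have no
   constant neutral value in general and reuse the single initial value
   of a reduction chain instead.  */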
2423
2424 static tree
2425 neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
2426 bool reduc_chain)
2427 {
2428 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2429 stmt_vec_info stmt_vinfo = stmts[0];
2430 tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
2431 tree scalar_type = TREE_TYPE (vector_type);
2432 struct loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
2433 gcc_assert (loop);
2434
2435 switch (code)
2436 {
2437 case WIDEN_SUM_EXPR:
2438 case DOT_PROD_EXPR:
2439 case SAD_EXPR:
2440 case PLUS_EXPR:
2441 case MINUS_EXPR:
2442 case BIT_IOR_EXPR:
2443 case BIT_XOR_EXPR:
2444 return build_zero_cst (scalar_type);
2445
2446 case MULT_EXPR:
2447 return build_one_cst (scalar_type);
2448
2449 case BIT_AND_EXPR:
2450 return build_all_ones_cst (scalar_type);
2451
2452 case MAX_EXPR:
2453 case MIN_EXPR:
2454 /* For MIN/MAX the initial values are neutral. A reduction chain
2455 has only a single initial value, so that value is neutral for
2456 all statements. */
2457 if (reduc_chain)
2458 return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
2459 loop_preheader_edge (loop));
2460 return NULL_TREE;
2461
2462 default:
2463 return NULL_TREE;
2464 }
2465 }
2466
2467 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
2468 STMT is printed with a message MSG. */
2469
2470 static void
2471 report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
2472 {
2473 dump_printf_loc (msg_type, vect_location, "%s", msg);
2474 dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
2475 }
2476
2477 /* DEF_STMT_INFO occurs in a loop that contains a potential reduction
2478 operation. Return true if the results of DEF_STMT_INFO are something
2479 that can be accumulated by such a reduction. */
2480
2481 static bool
2482 vect_valid_reduction_input_p (stmt_vec_info def_stmt_info)
2483 {
2484 return (is_gimple_assign (def_stmt_info->stmt)
2485 || is_gimple_call (def_stmt_info->stmt)
2486 || STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_induction_def
2487 || (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
2488 && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def
2489 && !is_loop_header_bb_p (gimple_bb (def_stmt_info->stmt))));
2490 }
2491
2492 /* Detect SLP reduction of the form:
2493
2494 #a1 = phi <a5, a0>
2495 a2 = operation (a1)
2496 a3 = operation (a2)
2497 a4 = operation (a3)
2498 a5 = operation (a4)
2499
2500 #a = phi <a5>
2501
2502 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
2503 FIRST_STMT is the first reduction stmt in the chain
2504 (a2 = operation (a1)).
2505
2506 Return TRUE if a reduction chain was detected. */
2507
2508 static bool
2509 vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
2510 gimple *first_stmt)
2511 {
2512 struct loop *loop = (gimple_bb (phi))->loop_father;
2513 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2514 enum tree_code code;
2515 gimple *loop_use_stmt = NULL;
2516 stmt_vec_info use_stmt_info, current_stmt_info = NULL;
2517 tree lhs;
2518 imm_use_iterator imm_iter;
2519 use_operand_p use_p;
2520 int nloop_uses, size = 0, n_out_of_loop_uses;
2521 bool found = false;
2522
2523 if (loop != vect_loop)
2524 return false;
2525
2526 lhs = PHI_RESULT (phi);
2527 code = gimple_assign_rhs_code (first_stmt);
2528 while (1)
2529 {
2530 nloop_uses = 0;
2531 n_out_of_loop_uses = 0;
2532 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2533 {
2534 gimple *use_stmt = USE_STMT (use_p);
2535 if (is_gimple_debug (use_stmt))
2536 continue;
2537
2538 /* Check if we got back to the reduction phi. */
2539 if (use_stmt == phi)
2540 {
2541 loop_use_stmt = use_stmt;
2542 found = true;
2543 break;
2544 }
2545
2546 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2547 {
2548 loop_use_stmt = use_stmt;
2549 nloop_uses++;
2550 }
2551 else
2552 n_out_of_loop_uses++;
2553
2554 	  /* There can be either a single use in the loop or two uses in
2555 	     phi nodes.  */
2556 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
2557 return false;
2558 }
2559
2560 if (found)
2561 break;
2562
2563 /* We reached a statement with no loop uses. */
2564 if (nloop_uses == 0)
2565 return false;
2566
2567 /* This is a loop exit phi, and we haven't reached the reduction phi. */
2568 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
2569 return false;
2570
2571 if (!is_gimple_assign (loop_use_stmt)
2572 || code != gimple_assign_rhs_code (loop_use_stmt)
2573 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
2574 return false;
2575
2576 /* Insert USE_STMT into reduction chain. */
2577 use_stmt_info = loop_info->lookup_stmt (loop_use_stmt);
2578 if (current_stmt_info)
2579 {
2580 REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = use_stmt_info;
2581 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
2582 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2583 }
2584 else
2585 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = use_stmt_info;
2586
2587 lhs = gimple_assign_lhs (loop_use_stmt);
2588 current_stmt_info = use_stmt_info;
2589 size++;
2590 }
2591
2592 if (!found || loop_use_stmt != phi || size < 2)
2593 return false;
2594
2595 /* Swap the operands, if needed, to make the reduction operand be the second
2596 operand. */
2597 lhs = PHI_RESULT (phi);
2598 stmt_vec_info next_stmt_info = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2599 while (next_stmt_info)
2600 {
2601 gassign *next_stmt = as_a <gassign *> (next_stmt_info->stmt);
2602 if (gimple_assign_rhs2 (next_stmt) == lhs)
2603 {
2604 tree op = gimple_assign_rhs1 (next_stmt);
2605 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2606
2607 /* Check that the other def is either defined in the loop
2608 ("vect_internal_def"), or it's an induction (defined by a
2609 loop-header phi-node). */
2610 if (def_stmt_info
2611 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2612 && vect_valid_reduction_input_p (def_stmt_info))
2613 {
2614 lhs = gimple_assign_lhs (next_stmt);
2615 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2616 continue;
2617 }
2618
2619 return false;
2620 }
2621 else
2622 {
2623 tree op = gimple_assign_rhs2 (next_stmt);
2624 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2625
2626 /* Check that the other def is either defined in the loop
2627 ("vect_internal_def"), or it's an induction (defined by a
2628 loop-header phi-node). */
2629 if (def_stmt_info
2630 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2631 && vect_valid_reduction_input_p (def_stmt_info))
2632 {
2633 if (dump_enabled_p ())
2634 {
2635 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2636 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2637 }
2638
2639 swap_ssa_operands (next_stmt,
2640 gimple_assign_rhs1_ptr (next_stmt),
2641 gimple_assign_rhs2_ptr (next_stmt));
2642 update_stmt (next_stmt);
2643
2644 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2645 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2646 }
2647 else
2648 return false;
2649 }
2650
2651 lhs = gimple_assign_lhs (next_stmt);
2652 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2653 }
2654
2655 /* Save the chain for further analysis in SLP detection. */
2656 stmt_vec_info first_stmt_info
2657 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2658 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first_stmt_info);
2659 REDUC_GROUP_SIZE (first_stmt_info) = size;
2660
2661 return true;
2662 }
2663
2664 /* Return true if we need an in-order reduction for operation CODE
2665 on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
2666 overflow must wrap. */
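/* For example, IEEE floating-point addition is not associative: in double
   precision (0.1 + 0.2) + 0.3 and 0.1 + (0.2 + 0.3) round to different
   values, so without -fassociative-math a floating-point add reduction
   has to be evaluated in the original order.  Similarly, a signed integer
   reduction compiled with -ftrapv must not be reassociated, since a
   reordered partial sum may overflow where the original did not.  */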
2667
2668 static bool
2669 needs_fold_left_reduction_p (tree type, tree_code code,
2670 bool need_wrapping_integral_overflow)
2671 {
2672 /* CHECKME: check for !flag_finite_math_only too? */
2673 if (SCALAR_FLOAT_TYPE_P (type))
2674 switch (code)
2675 {
2676 case MIN_EXPR:
2677 case MAX_EXPR:
2678 return false;
2679
2680 default:
2681 return !flag_associative_math;
2682 }
2683
2684 if (INTEGRAL_TYPE_P (type))
2685 {
2686 if (!operation_no_trapping_overflow (type, code))
2687 return true;
2688 if (need_wrapping_integral_overflow
2689 && !TYPE_OVERFLOW_WRAPS (type)
2690 && operation_can_overflow (code))
2691 return true;
2692 return false;
2693 }
2694
2695 if (SAT_FIXED_POINT_TYPE_P (type))
2696 return true;
2697
2698 return false;
2699 }
2700
2701 /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
2702 reduction operation CODE has a handled computation expression. */
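/* As an illustration, for a reduction split across two statements:

       s_1 = PHI <s_0(preheader), s_3(latch)>
       s_2 = s_1 + a[i];
       s_3 = s_2 + b[i];

   starting from the latch argument s_3 the walk below follows the uses
   s_3 -> s_2 -> s_1 back to the PHI result, and every statement on the
   path uses CODE (PLUS_EXPR here), so the path is accepted.  A path that
   negates the running value on each iteration (e.g. s_3 = b[i] - s_2)
   flips the NEG flag and is rejected.  */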
2703
2704 bool
2705 check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
2706 tree loop_arg, enum tree_code code)
2707 {
2708 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
2709 auto_bitmap visited;
2710 tree lookfor = PHI_RESULT (phi);
2711 ssa_op_iter curri;
2712 use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
2713 while (USE_FROM_PTR (curr) != loop_arg)
2714 curr = op_iter_next_use (&curri);
2715 curri.i = curri.numops;
2716 do
2717 {
2718 path.safe_push (std::make_pair (curri, curr));
2719 tree use = USE_FROM_PTR (curr);
2720 if (use == lookfor)
2721 break;
2722 gimple *def = SSA_NAME_DEF_STMT (use);
2723 if (gimple_nop_p (def)
2724 || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
2725 {
2726 pop:
2727 do
2728 {
2729 std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
2730 curri = x.first;
2731 curr = x.second;
2732 do
2733 curr = op_iter_next_use (&curri);
2734 /* Skip already visited or non-SSA operands (from iterating
2735 over PHI args). */
2736 while (curr != NULL_USE_OPERAND_P
2737 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2738 || ! bitmap_set_bit (visited,
2739 SSA_NAME_VERSION
2740 (USE_FROM_PTR (curr)))));
2741 }
2742 while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
2743 if (curr == NULL_USE_OPERAND_P)
2744 break;
2745 }
2746 else
2747 {
2748 if (gimple_code (def) == GIMPLE_PHI)
2749 curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
2750 else
2751 curr = op_iter_init_use (&curri, def, SSA_OP_USE);
2752 while (curr != NULL_USE_OPERAND_P
2753 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2754 || ! bitmap_set_bit (visited,
2755 SSA_NAME_VERSION
2756 (USE_FROM_PTR (curr)))))
2757 curr = op_iter_next_use (&curri);
2758 if (curr == NULL_USE_OPERAND_P)
2759 goto pop;
2760 }
2761 }
2762 while (1);
2763 if (dump_file && (dump_flags & TDF_DETAILS))
2764 {
2765 dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
2766 unsigned i;
2767 std::pair<ssa_op_iter, use_operand_p> *x;
2768 FOR_EACH_VEC_ELT (path, i, x)
2769 {
2770 dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
2771 dump_printf (MSG_NOTE, " ");
2772 }
2773 dump_printf (MSG_NOTE, "\n");
2774 }
2775
2776 /* Check whether the reduction path detected is valid. */
2777 bool fail = path.length () == 0;
2778 bool neg = false;
2779 for (unsigned i = 1; i < path.length (); ++i)
2780 {
2781 gimple *use_stmt = USE_STMT (path[i].second);
2782 tree op = USE_FROM_PTR (path[i].second);
2783 if (! has_single_use (op)
2784 || ! is_gimple_assign (use_stmt))
2785 {
2786 fail = true;
2787 break;
2788 }
2789 if (gimple_assign_rhs_code (use_stmt) != code)
2790 {
2791 if (code == PLUS_EXPR
2792 && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2793 {
2794 /* Track whether we negate the reduction value each iteration. */
2795 if (gimple_assign_rhs2 (use_stmt) == op)
2796 neg = ! neg;
2797 }
2798 else
2799 {
2800 fail = true;
2801 break;
2802 }
2803 }
2804 }
2805 return ! fail && ! neg;
2806 }
2807
2808
2809 /* Function vect_is_simple_reduction
2810
2811 (1) Detect a cross-iteration def-use cycle that represents a simple
2812 reduction computation. We look for the following pattern:
2813
2814 loop_header:
2815 a1 = phi < a0, a2 >
2816 a3 = ...
2817 a2 = operation (a3, a1)
2818
2819 or
2820
2821 a3 = ...
2822 loop_header:
2823 a1 = phi < a0, a2 >
2824 a2 = operation (a3, a1)
2825
2826 such that:
2827 1. operation is commutative and associative and it is safe to
2828 change the order of the computation
2829 2. no uses for a2 in the loop (a2 is used out of the loop)
2830 3. no uses of a1 in the loop besides the reduction operation
2831 4. no uses of a1 outside the loop.
2832
2833 Conditions 1,4 are tested here.
2834 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2835
2836 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2837 nested cycles.
2838
2839 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2840 reductions:
2841
2842 a1 = phi < a0, a2 >
2843 inner loop (def of a3)
2844 a2 = phi < a3 >
2845
2846    (4) Detect condition expressions, i.e.:
2847 for (int i = 0; i < N; i++)
2848 if (a[i] < val)
2849 ret_val = a[i];
2850
2851 */
2852
2853 static stmt_vec_info
2854 vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
2855 bool *double_reduc,
2856 bool need_wrapping_integral_overflow,
2857 enum vect_reduction_type *v_reduc_type)
2858 {
2859 gphi *phi = as_a <gphi *> (phi_info->stmt);
2860 struct loop *loop = (gimple_bb (phi))->loop_father;
2861 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2862 gimple *phi_use_stmt = NULL;
2863 enum tree_code orig_code, code;
2864 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2865 tree type;
2866 int nloop_uses;
2867 tree name;
2868 imm_use_iterator imm_iter;
2869 use_operand_p use_p;
2870 bool phi_def;
2871
2872 *double_reduc = false;
2873 *v_reduc_type = TREE_CODE_REDUCTION;
2874
2875 tree phi_name = PHI_RESULT (phi);
2876 /* ??? If there are no uses of the PHI result the inner loop reduction
2877 won't be detected as possibly double-reduction by vectorizable_reduction
2878 because that tries to walk the PHI arg from the preheader edge which
2879 can be constant. See PR60382. */
2880 if (has_zero_uses (phi_name))
2881 return NULL;
2882 nloop_uses = 0;
2883 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
2884 {
2885 gimple *use_stmt = USE_STMT (use_p);
2886 if (is_gimple_debug (use_stmt))
2887 continue;
2888
2889 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2890 {
2891 if (dump_enabled_p ())
2892 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2893 "intermediate value used outside loop.\n");
2894
2895 return NULL;
2896 }
2897
2898 nloop_uses++;
2899 if (nloop_uses > 1)
2900 {
2901 if (dump_enabled_p ())
2902 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2903 "reduction value used in loop.\n");
2904 return NULL;
2905 }
2906
2907 phi_use_stmt = use_stmt;
2908 }
2909
2910 edge latch_e = loop_latch_edge (loop);
2911 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2912 if (TREE_CODE (loop_arg) != SSA_NAME)
2913 {
2914 if (dump_enabled_p ())
2915 {
2916 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2917 "reduction: not ssa_name: ");
2918 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2919 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2920 }
2921 return NULL;
2922 }
2923
2924 stmt_vec_info def_stmt_info = loop_info->lookup_def (loop_arg);
2925 if (!def_stmt_info
2926 || !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt)))
2927 return NULL;
2928
2929 if (gassign *def_stmt = dyn_cast <gassign *> (def_stmt_info->stmt))
2930 {
2931 name = gimple_assign_lhs (def_stmt);
2932 phi_def = false;
2933 }
2934 else if (gphi *def_stmt = dyn_cast <gphi *> (def_stmt_info->stmt))
2935 {
2936 name = PHI_RESULT (def_stmt);
2937 phi_def = true;
2938 }
2939 else
2940 {
2941 if (dump_enabled_p ())
2942 {
2943 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2944 "reduction: unhandled reduction operation: ");
2945 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2946 def_stmt_info->stmt, 0);
2947 }
2948 return NULL;
2949 }
2950
2951 nloop_uses = 0;
2952 auto_vec<gphi *, 3> lcphis;
2953 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2954 {
2955 gimple *use_stmt = USE_STMT (use_p);
2956 if (is_gimple_debug (use_stmt))
2957 continue;
2958 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2959 nloop_uses++;
2960 else
2961 /* We can have more than one loop-closed PHI. */
2962 lcphis.safe_push (as_a <gphi *> (use_stmt));
2963 if (nloop_uses > 1)
2964 {
2965 if (dump_enabled_p ())
2966 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2967 "reduction used in loop.\n");
2968 return NULL;
2969 }
2970 }
2971
2972 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2973 defined in the inner loop. */
2974 if (phi_def)
2975 {
2976 gphi *def_stmt = as_a <gphi *> (def_stmt_info->stmt);
2977 op1 = PHI_ARG_DEF (def_stmt, 0);
2978
2979 if (gimple_phi_num_args (def_stmt) != 1
2980 || TREE_CODE (op1) != SSA_NAME)
2981 {
2982 if (dump_enabled_p ())
2983 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2984 "unsupported phi node definition.\n");
2985
2986 return NULL;
2987 }
2988
2989 gimple *def1 = SSA_NAME_DEF_STMT (op1);
2990 if (gimple_bb (def1)
2991 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2992 && loop->inner
2993 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2994 && is_gimple_assign (def1)
2995 && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
2996 {
2997 if (dump_enabled_p ())
2998 report_vect_op (MSG_NOTE, def_stmt,
2999 "detected double reduction: ");
3000
3001 *double_reduc = true;
3002 return def_stmt_info;
3003 }
3004
3005 return NULL;
3006 }
3007
3008   /* If we are vectorizing an inner reduction, it is executed in the
3009      original order only when we are not dealing with a double
3010      reduction.  */
3011 bool check_reduction = true;
3012 if (flow_loop_nested_p (vect_loop, loop))
3013 {
3014 gphi *lcphi;
3015 unsigned i;
3016 check_reduction = false;
3017 FOR_EACH_VEC_ELT (lcphis, i, lcphi)
3018 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
3019 {
3020 gimple *use_stmt = USE_STMT (use_p);
3021 if (is_gimple_debug (use_stmt))
3022 continue;
3023 if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
3024 check_reduction = true;
3025 }
3026 }
3027
3028 gassign *def_stmt = as_a <gassign *> (def_stmt_info->stmt);
3029 bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
3030 code = orig_code = gimple_assign_rhs_code (def_stmt);
3031
3032   /* We can handle "res -= x[i]", which is non-associative, by simply
3033      rewriting it as "res += -x[i]".  Avoid changing the gimple statement
3034      during these first simple tests and only do so if we're allowed to
3035      change the code at all.  */
3036 if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name)
3037 code = PLUS_EXPR;
3038
3039 if (code == COND_EXPR)
3040 {
3041 if (! nested_in_vect_loop)
3042 *v_reduc_type = COND_REDUCTION;
3043
3044 op3 = gimple_assign_rhs1 (def_stmt);
3045 if (COMPARISON_CLASS_P (op3))
3046 {
3047 op4 = TREE_OPERAND (op3, 1);
3048 op3 = TREE_OPERAND (op3, 0);
3049 }
3050 if (op3 == phi_name || op4 == phi_name)
3051 {
3052 if (dump_enabled_p ())
3053 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3054 "reduction: condition depends on previous"
3055 " iteration: ");
3056 return NULL;
3057 }
3058
3059 op1 = gimple_assign_rhs2 (def_stmt);
3060 op2 = gimple_assign_rhs3 (def_stmt);
3061 }
3062 else if (!commutative_tree_code (code) || !associative_tree_code (code))
3063 {
3064 if (dump_enabled_p ())
3065 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3066 "reduction: not commutative/associative: ");
3067 return NULL;
3068 }
3069 else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
3070 {
3071 op1 = gimple_assign_rhs1 (def_stmt);
3072 op2 = gimple_assign_rhs2 (def_stmt);
3073 }
3074 else
3075 {
3076 if (dump_enabled_p ())
3077 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3078 "reduction: not handled operation: ");
3079 return NULL;
3080 }
3081
3082 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
3083 {
3084 if (dump_enabled_p ())
3085 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3086 "reduction: both uses not ssa_names: ");
3087
3088 return NULL;
3089 }
3090
3091 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
3092 if ((TREE_CODE (op1) == SSA_NAME
3093 && !types_compatible_p (type,TREE_TYPE (op1)))
3094 || (TREE_CODE (op2) == SSA_NAME
3095 && !types_compatible_p (type, TREE_TYPE (op2)))
3096 || (op3 && TREE_CODE (op3) == SSA_NAME
3097 && !types_compatible_p (type, TREE_TYPE (op3)))
3098 || (op4 && TREE_CODE (op4) == SSA_NAME
3099 && !types_compatible_p (type, TREE_TYPE (op4))))
3100 {
3101 if (dump_enabled_p ())
3102 {
3103 dump_printf_loc (MSG_NOTE, vect_location,
3104 "reduction: multiple types: operation type: ");
3105 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
3106 dump_printf (MSG_NOTE, ", operands types: ");
3107 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3108 TREE_TYPE (op1));
3109 dump_printf (MSG_NOTE, ",");
3110 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3111 TREE_TYPE (op2));
3112 if (op3)
3113 {
3114 dump_printf (MSG_NOTE, ",");
3115 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3116 TREE_TYPE (op3));
3117 }
3118
3119 if (op4)
3120 {
3121 dump_printf (MSG_NOTE, ",");
3122 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3123 TREE_TYPE (op4));
3124 }
3125 dump_printf (MSG_NOTE, "\n");
3126 }
3127
3128 return NULL;
3129 }
3130
3131 /* Check whether it's ok to change the order of the computation.
3132 Generally, when vectorizing a reduction we change the order of the
3133 computation. This may change the behavior of the program in some
3134 cases, so we need to check that this is ok. One exception is when
3135 vectorizing an outer-loop: the inner-loop is executed sequentially,
3136 and therefore vectorizing reductions in the inner-loop during
3137 outer-loop vectorization is safe. */
3138 if (check_reduction
3139 && *v_reduc_type == TREE_CODE_REDUCTION
3140 && needs_fold_left_reduction_p (type, code,
3141 need_wrapping_integral_overflow))
3142 *v_reduc_type = FOLD_LEFT_REDUCTION;
3143
3144 /* Reduction is safe. We're dealing with one of the following:
3145 1) integer arithmetic and no trapv
3146 2) floating point arithmetic, and special flags permit this optimization
3147 3) nested cycle (i.e., outer loop vectorization). */
3148 stmt_vec_info def1_info = loop_info->lookup_def (op1);
3149 stmt_vec_info def2_info = loop_info->lookup_def (op2);
3150 if (code != COND_EXPR && !def1_info && !def2_info)
3151 {
3152 if (dump_enabled_p ())
3153 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
3154 return NULL;
3155 }
3156
3157 /* Check that one def is the reduction def, defined by PHI,
3158 the other def is either defined in the loop ("vect_internal_def"),
3159 or it's an induction (defined by a loop-header phi-node). */
3160
3161 if (def2_info
3162 && def2_info->stmt == phi
3163 && (code == COND_EXPR
3164 || !def1_info
3165 || !flow_bb_inside_loop_p (loop, gimple_bb (def1_info->stmt))
3166 || vect_valid_reduction_input_p (def1_info)))
3167 {
3168 if (dump_enabled_p ())
3169 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3170 return def_stmt_info;
3171 }
3172
3173 if (def1_info
3174 && def1_info->stmt == phi
3175 && (code == COND_EXPR
3176 || !def2_info
3177 || !flow_bb_inside_loop_p (loop, gimple_bb (def2_info->stmt))
3178 || vect_valid_reduction_input_p (def2_info)))
3179 {
3180 if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
3181 {
3182 /* Check if we can swap operands (just for simplicity - so that
3183 the rest of the code can assume that the reduction variable
3184 is always the last (second) argument). */
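	  /* For instance: a reduction written as  x_2 = x_1 + a[i],  with the
	     reduction PHI result x_1 as the first operand, is rewritten here
	     as  x_2 = a[i] + x_1.  For a COND_EXPR reduction such as
	     x_2 = a[i] < b[i] ? x_1 : c[i]  (with integer operands, so that
	     the comparison can be inverted) the same effect is achieved by
	     inverting the condition and swapping the two arms:
	     x_2 = a[i] >= b[i] ? c[i] : x_1.  */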
3185 if (code == COND_EXPR)
3186 {
3187 /* Swap cond_expr by inverting the condition. */
3188 tree cond_expr = gimple_assign_rhs1 (def_stmt);
3189 enum tree_code invert_code = ERROR_MARK;
3190 enum tree_code cond_code = TREE_CODE (cond_expr);
3191
3192 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
3193 {
3194 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
3195 invert_code = invert_tree_comparison (cond_code, honor_nans);
3196 }
3197 if (invert_code != ERROR_MARK)
3198 {
3199 TREE_SET_CODE (cond_expr, invert_code);
3200 swap_ssa_operands (def_stmt,
3201 gimple_assign_rhs2_ptr (def_stmt),
3202 gimple_assign_rhs3_ptr (def_stmt));
3203 }
3204 else
3205 {
3206 if (dump_enabled_p ())
3207 report_vect_op (MSG_NOTE, def_stmt,
3208 "detected reduction: cannot swap operands "
3209 "for cond_expr");
3210 return NULL;
3211 }
3212 }
3213 else
3214 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
3215 gimple_assign_rhs2_ptr (def_stmt));
3216
3217 if (dump_enabled_p ())
3218 report_vect_op (MSG_NOTE, def_stmt,
3219 "detected reduction: need to swap operands: ");
3220
3221 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
3222 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
3223 }
3224 else
3225 {
3226 if (dump_enabled_p ())
3227 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3228 }
3229
3230 return def_stmt_info;
3231 }
3232
3233 /* Try to find SLP reduction chain. */
3234 if (! nested_in_vect_loop
3235 && code != COND_EXPR
3236 && orig_code != MINUS_EXPR
3237 && vect_is_slp_reduction (loop_info, phi, def_stmt))
3238 {
3239 if (dump_enabled_p ())
3240 report_vect_op (MSG_NOTE, def_stmt,
3241 "reduction: detected reduction chain: ");
3242
3243 return def_stmt_info;
3244 }
3245
3246   /* Dissolve a group possibly half-built by vect_is_slp_reduction.  */
3247 stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (def_stmt_info);
3248 while (first)
3249 {
3250 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
3251 REDUC_GROUP_FIRST_ELEMENT (first) = NULL;
3252 REDUC_GROUP_NEXT_ELEMENT (first) = NULL;
3253 first = next;
3254 }
3255
3256 /* Look for the expression computing loop_arg from loop PHI result. */
3257 if (check_reduction_path (vect_location, loop, phi, loop_arg, code))
3258 return def_stmt_info;
3259
3260 if (dump_enabled_p ())
3261 {
3262 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3263 "reduction: unknown pattern: ");
3264 }
3265
3266 return NULL;
3267 }
3268
3269 /* Wrapper around vect_is_simple_reduction, which will modify code
3270    in-place if doing so enables the detection of more reductions.
3271    The arguments are as for vect_is_simple_reduction.  */
3272
3273 stmt_vec_info
3274 vect_force_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
3275 bool *double_reduc,
3276 bool need_wrapping_integral_overflow)
3277 {
3278 enum vect_reduction_type v_reduc_type;
3279 stmt_vec_info def_info
3280 = vect_is_simple_reduction (loop_info, phi_info, double_reduc,
3281 need_wrapping_integral_overflow,
3282 &v_reduc_type);
3283 if (def_info)
3284 {
3285 STMT_VINFO_REDUC_TYPE (phi_info) = v_reduc_type;
3286 STMT_VINFO_REDUC_DEF (phi_info) = def_info;
3287 STMT_VINFO_REDUC_TYPE (def_info) = v_reduc_type;
3288 STMT_VINFO_REDUC_DEF (def_info) = phi_info;
3289 }
3290 return def_info;
3291 }
3292
3293 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
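/* As a worked example (illustrative numbers): with NITERS known to be 100,
   an assumed VF of 8 and PEEL_ITERS_PROLOGUE = 3, the epilogue gets
   (100 - 3) % 8 = 1 iteration, and each prologue/epilogue iteration is
   costed at the scalar single-iteration statement costs.  When NITERS is
   unknown, the epilogue peel count is assumed to be VF/2 and a taken
   branch is costed for each peeled loop.  */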
3294 int
3295 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
3296 int *peel_iters_epilogue,
3297 stmt_vector_for_cost *scalar_cost_vec,
3298 stmt_vector_for_cost *prologue_cost_vec,
3299 stmt_vector_for_cost *epilogue_cost_vec)
3300 {
3301 int retval = 0;
3302 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3303
3304 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
3305 {
3306 *peel_iters_epilogue = assumed_vf / 2;
3307 if (dump_enabled_p ())
3308 dump_printf_loc (MSG_NOTE, vect_location,
3309 "cost model: epilogue peel iters set to vf/2 "
3310 "because loop iterations are unknown .\n");
3311
3312       /* If peeled iterations are known but the number of scalar loop
3313 	 iterations is unknown, count a taken branch per peeled loop.  */
3314 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3315 NULL, 0, vect_prologue);
3316 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3317 NULL, 0, vect_epilogue);
3318 }
3319 else
3320 {
3321 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
3322 peel_iters_prologue = niters < peel_iters_prologue ?
3323 niters : peel_iters_prologue;
3324 *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
3325       /* If we need to peel for gaps but no epilogue peeling would otherwise
3326 	 be required, we have to peel VF iterations.  */
3327 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
3328 *peel_iters_epilogue = assumed_vf;
3329 }
3330
3331 stmt_info_for_cost *si;
3332 int j;
3333 if (peel_iters_prologue)
3334 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3335 retval += record_stmt_cost (prologue_cost_vec,
3336 si->count * peel_iters_prologue,
3337 si->kind, si->stmt_info, si->misalign,
3338 vect_prologue);
3339 if (*peel_iters_epilogue)
3340 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3341 retval += record_stmt_cost (epilogue_cost_vec,
3342 si->count * *peel_iters_epilogue,
3343 si->kind, si->stmt_info, si->misalign,
3344 vect_epilogue);
3345
3346 return retval;
3347 }
3348
3349 /* Function vect_estimate_min_profitable_iters
3350
3351 Return the number of iterations required for the vector version of the
3352 loop to be profitable relative to the cost of the scalar version of the
3353 loop.
3354
3355 *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
3356    of iterations for vectorization.  A value of -1 means that loop
3357    vectorization is not profitable.  This returned value may be used for
3358    a dynamic profitability check.
3359
3360 *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
3361 for static check against estimated number of iterations. */
3362
3363 static void
3364 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
3365 int *ret_min_profitable_niters,
3366 int *ret_min_profitable_estimate)
3367 {
3368 int min_profitable_iters;
3369 int min_profitable_estimate;
3370 int peel_iters_prologue;
3371 int peel_iters_epilogue;
3372 unsigned vec_inside_cost = 0;
3373 int vec_outside_cost = 0;
3374 unsigned vec_prologue_cost = 0;
3375 unsigned vec_epilogue_cost = 0;
3376 int scalar_single_iter_cost = 0;
3377 int scalar_outside_cost = 0;
3378 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3379 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
3380 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3381
3382 /* Cost model disabled. */
3383 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
3384 {
3385 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
3386 *ret_min_profitable_niters = 0;
3387 *ret_min_profitable_estimate = 0;
3388 return;
3389 }
3390
3391 /* Requires loop versioning tests to handle misalignment. */
3392 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3393 {
3394 /* FIXME: Make cost depend on complexity of individual check. */
3395 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
3396 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3397 vect_prologue);
3398 dump_printf (MSG_NOTE,
3399 "cost model: Adding cost of checks for loop "
3400 "versioning to treat misalignment.\n");
3401 }
3402
3403 /* Requires loop versioning with alias checks. */
3404 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3405 {
3406 /* FIXME: Make cost depend on complexity of individual check. */
3407 unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
3408 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3409 vect_prologue);
3410 len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
3411 if (len)
3412 /* Count LEN - 1 ANDs and LEN comparisons. */
3413 (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
3414 NULL, 0, vect_prologue);
3415 len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
3416 if (len)
3417 {
3418 /* Count LEN - 1 ANDs and LEN comparisons. */
3419 unsigned int nstmts = len * 2 - 1;
3420 /* +1 for each bias that needs adding. */
3421 for (unsigned int i = 0; i < len; ++i)
3422 if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
3423 nstmts += 1;
3424 (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
3425 NULL, 0, vect_prologue);
3426 }
3427 dump_printf (MSG_NOTE,
3428 "cost model: Adding cost of checks for loop "
3429 "versioning aliasing.\n");
3430 }
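  /* For example (illustrative counts): 3 compiled alias ddr pairs add 3
     vector_stmt checks; 4 address-inequality checks add 4 * 2 - 1 = 7
     scalar stmts (3 ANDs and 4 comparisons); and 2 lower-bound checks,
     one of them signed, add 2 * 2 - 1 + 1 = 4 scalar stmts.  */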
3431
3432 /* Requires loop versioning with niter checks. */
3433 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
3434 {
3435 /* FIXME: Make cost depend on complexity of individual check. */
3436 (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
3437 vect_prologue);
3438 dump_printf (MSG_NOTE,
3439 "cost model: Adding cost of checks for loop "
3440 "versioning niters.\n");
3441 }
3442
3443 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3444 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
3445 vect_prologue);
3446
3447 /* Count statements in scalar loop. Using this as scalar cost for a single
3448 iteration for now.
3449
3450 TODO: Add outer loop support.
3451
3452 TODO: Consider assigning different costs to different scalar
3453 statements. */
3454
3455 scalar_single_iter_cost
3456 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
3457
3458 /* Add additional cost for the peeled instructions in prologue and epilogue
3459 loop. (For fully-masked loops there will be no peeling.)
3460
3461 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3462 at compile-time - we assume it's vf/2 (the worst would be vf-1).
3463
3464 TODO: Build an expression that represents peel_iters for prologue and
3465 epilogue to be used in a run-time test. */
3466
3467 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
3468 {
3469 peel_iters_prologue = 0;
3470 peel_iters_epilogue = 0;
3471
3472 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
3473 {
3474 /* We need to peel exactly one iteration. */
3475 peel_iters_epilogue += 1;
3476 stmt_info_for_cost *si;
3477 int j;
3478 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
3479 j, si)
3480 (void) add_stmt_cost (target_cost_data, si->count,
3481 si->kind, si->stmt_info, si->misalign,
3482 vect_epilogue);
3483 }
3484 }
3485 else if (npeel < 0)
3486 {
3487 peel_iters_prologue = assumed_vf / 2;
3488 dump_printf (MSG_NOTE, "cost model: "
3489 "prologue peel iters set to vf/2.\n");
3490
3491 /* If peeling for alignment is unknown, loop bound of main loop becomes
3492 unknown. */
3493 peel_iters_epilogue = assumed_vf / 2;
3494 dump_printf (MSG_NOTE, "cost model: "
3495 "epilogue peel iters set to vf/2 because "
3496 "peeling for alignment is unknown.\n");
3497
3498 /* If peeled iterations are unknown, count a taken branch and a not taken
3499 branch per peeled loop. Even if scalar loop iterations are known,
3500 vector iterations are not known since peeled prologue iterations are
3501 not known. Hence guards remain the same. */
3502 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3503 NULL, 0, vect_prologue);
3504 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3505 NULL, 0, vect_prologue);
3506 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3507 NULL, 0, vect_epilogue);
3508 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3509 NULL, 0, vect_epilogue);
3510 stmt_info_for_cost *si;
3511 int j;
3512 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
3513 {
3514 (void) add_stmt_cost (target_cost_data,
3515 si->count * peel_iters_prologue,
3516 si->kind, si->stmt_info, si->misalign,
3517 vect_prologue);
3518 (void) add_stmt_cost (target_cost_data,
3519 si->count * peel_iters_epilogue,
3520 si->kind, si->stmt_info, si->misalign,
3521 vect_epilogue);
3522 }
3523 }
3524 else
3525 {
3526 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
3527 stmt_info_for_cost *si;
3528 int j;
3529 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3530
3531 prologue_cost_vec.create (2);
3532 epilogue_cost_vec.create (2);
3533 peel_iters_prologue = npeel;
3534
3535 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
3536 &peel_iters_epilogue,
3537 &LOOP_VINFO_SCALAR_ITERATION_COST
3538 (loop_vinfo),
3539 &prologue_cost_vec,
3540 &epilogue_cost_vec);
3541
3542 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
3543 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3544 si->misalign, vect_prologue);
3545
3546 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
3547 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3548 si->misalign, vect_epilogue);
3549
3550 prologue_cost_vec.release ();
3551 epilogue_cost_vec.release ();
3552 }
3553
3554 /* FORNOW: The scalar outside cost is incremented in one of the
3555 following ways:
3556
3557 1. The vectorizer checks for alignment and aliasing and generates
3558 a condition that allows dynamic vectorization. A cost model
3559      check is ANDed with the versioning condition.  Hence the scalar code
3560      path now has the added cost of the versioning check.
3561
3562 if (cost > th & versioning_check)
3563 jmp to vector code
3564
3565      Hence the run-time scalar cost is incremented by a not-taken branch cost.
3566
3567 2. The vectorizer then checks if a prologue is required. If the
3568 cost model check was not done before during versioning, it has to
3569 be done before the prologue check.
3570
3571 if (cost <= th)
3572 prologue = scalar_iters
3573 if (prologue == 0)
3574 jmp to vector code
3575 else
3576 execute prologue
3577 if (prologue == num_iters)
3578 go to exit
3579
3580 Hence the run-time scalar cost is incremented by a taken branch,
3581 plus a not-taken branch, plus a taken branch cost.
3582
3583 3. The vectorizer then checks if an epilogue is required. If the
3584 cost model check was not done before during prologue check, it
3585 has to be done with the epilogue check.
3586
3587 if (prologue == 0)
3588 jmp to vector code
3589 else
3590 execute prologue
3591 if (prologue == num_iters)
3592 go to exit
3593 vector code:
3594 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
3595 jmp to epilogue
3596
3597 Hence the run-time scalar cost should be incremented by 2 taken
3598 branches.
3599
3600      TODO: The back end may reorder the BBs differently and reverse
3601 conditions/branch directions. Change the estimates below to
3602 something more reasonable. */
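  /* As a purely hypothetical illustration of the three cases above: with
     branch costs of, say, cond_branch_taken == 3 and cond_branch_not_taken
     == 1, case 1 adds 1 to the scalar outside cost, case 2 adds
     2 * 3 + 1 == 7 and case 3 adds 2 * 3 == 6, which matches the
     accounting done just below.  */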
3603
3604 /* If the number of iterations is known and we do not do versioning, we can
3605 decide whether to vectorize at compile time. Hence the scalar version
3606      does not carry cost model guard costs.  */
3607 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3608 || LOOP_REQUIRES_VERSIONING (loop_vinfo))
3609 {
3610 /* Cost model check occurs at versioning. */
3611 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3612 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
3613 else
3614 {
3615 /* Cost model check occurs at prologue generation. */
3616 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
3617 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
3618 + vect_get_stmt_cost (cond_branch_not_taken);
3619 /* Cost model check occurs at epilogue generation. */
3620 else
3621 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
3622 }
3623 }
3624
3625 /* Complete the target-specific cost calculations. */
3626 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
3627 &vec_inside_cost, &vec_epilogue_cost);
3628
3629 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
3630
3631 if (dump_enabled_p ())
3632 {
3633 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
3634 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
3635 vec_inside_cost);
3636 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
3637 vec_prologue_cost);
3638 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
3639 vec_epilogue_cost);
3640 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
3641 scalar_single_iter_cost);
3642 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
3643 scalar_outside_cost);
3644 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
3645 vec_outside_cost);
3646 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
3647 peel_iters_prologue);
3648 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
3649 peel_iters_epilogue);
3650 }
3651
3652 /* Calculate number of iterations required to make the vector version
3653 profitable, relative to the loop bodies only. The following condition
3654 must hold true:
3655 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
3656 where
3657 SIC = scalar iteration cost, VIC = vector iteration cost,
3658 VOC = vector outside cost, VF = vectorization factor,
3659      PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
3660 SOC = scalar outside cost for run time cost model check. */
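  /* As a worked example with purely illustrative numbers (no real target
     cost model implied): with SIC = 4, VIC = 6, VF = 4, VOC = 24 and
     SOC = 0, and ignoring peeling, the condition becomes
     4 * niters > 6 * (niters / 4) + 24, i.e. 2.5 * niters > 24, so the
     vector version starts to win for niters > 9.6, that is from 10
     iterations onwards.  The code below also folds in the prologue and
     epilogue peel iterations.  */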
3661
3662 if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
3663 {
3664 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
3665 * assumed_vf
3666 - vec_inside_cost * peel_iters_prologue
3667 - vec_inside_cost * peel_iters_epilogue);
3668 if (min_profitable_iters <= 0)
3669 min_profitable_iters = 0;
3670 else
3671 {
3672 min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf)
3673 - vec_inside_cost);
3674
3675 if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
3676 <= (((int) vec_inside_cost * min_profitable_iters)
3677 + (((int) vec_outside_cost - scalar_outside_cost)
3678 * assumed_vf)))
3679 min_profitable_iters++;
3680 }
3681 }
3682    /* The vector version will never be profitable.  */
3683 else
3684 {
3685 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
3686 warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
3687 "vectorization did not happen for a simd loop");
3688
3689 if (dump_enabled_p ())
3690 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3691 "cost model: the vector iteration cost = %d "
3692 "divided by the scalar iteration cost = %d "
3693 "is greater or equal to the vectorization factor = %d"
3694 ".\n",
3695 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
3696 *ret_min_profitable_niters = -1;
3697 *ret_min_profitable_estimate = -1;
3698 return;
3699 }
3700
3701 dump_printf (MSG_NOTE,
3702 " Calculated minimum iters for profitability: %d\n",
3703 min_profitable_iters);
3704
3705 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
3706 && min_profitable_iters < (assumed_vf + peel_iters_prologue))
3707 /* We want the vectorized loop to execute at least once. */
3708 min_profitable_iters = assumed_vf + peel_iters_prologue;
3709
3710 if (dump_enabled_p ())
3711 dump_printf_loc (MSG_NOTE, vect_location,
3712 " Runtime profitability threshold = %d\n",
3713 min_profitable_iters);
3714
3715 *ret_min_profitable_niters = min_profitable_iters;
3716
3717 /* Calculate number of iterations required to make the vector version
3718 profitable, relative to the loop bodies only.
3719
3720      The non-vectorized variant costs SIC * niters and it must win over the
3721      vector variant on the expected loop trip count, i.e. the following must hold:
3722      SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC  */
3723
3724 if (vec_outside_cost <= 0)
3725 min_profitable_estimate = 0;
3726 else
3727 {
3728 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
3729 * assumed_vf
3730 - vec_inside_cost * peel_iters_prologue
3731 - vec_inside_cost * peel_iters_epilogue)
3732 / ((scalar_single_iter_cost * assumed_vf)
3733 - vec_inside_cost);
3734 }
3735 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3736 if (dump_enabled_p ())
3737 dump_printf_loc (MSG_NOTE, vect_location,
3738 " Static estimate profitability threshold = %d\n",
3739 min_profitable_estimate);
3740
3741 *ret_min_profitable_estimate = min_profitable_estimate;
3742 }
3743
3744 /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
3745 vector elements (not bits) for a vector with NELT elements. */
3746 static void
3747 calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
3748 vec_perm_builder *sel)
3749 {
3750 /* The encoding is a single stepped pattern. Any wrap-around is handled
3751 by vec_perm_indices. */
3752 sel->new_vector (nelt, 1, 3);
3753 for (unsigned int i = 0; i < 3; i++)
3754 sel->quick_push (i + offset);
3755 }
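/* For example, with OFFSET == 2 and NELT == 8 the three encoded elements
   are { 2, 3, 4 }; vec_perm_indices extends that single stepped pattern to
   the full selector { 2, 3, 4, 5, 6, 7, 8, 9 }, i.e. a shift of the
   concatenated two-vector input by two elements.  */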
3756
3757 /* Checks whether the target supports whole-vector shifts for vectors of mode
3758 MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
3759 it supports vec_perm_const with masks for all necessary shift amounts. */
3760 static bool
3761 have_whole_vector_shift (machine_mode mode)
3762 {
3763 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3764 return true;
3765
3766 /* Variable-length vectors should be handled via the optab. */
3767 unsigned int nelt;
3768 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
3769 return false;
3770
3771 vec_perm_builder sel;
3772 vec_perm_indices indices;
3773 for (unsigned int i = nelt / 2; i >= 1; i /= 2)
3774 {
3775 calc_vec_perm_mask_for_shift (i, nelt, &sel);
3776 indices.new_vector (sel, 2, nelt);
3777 if (!can_vec_perm_const_p (mode, indices, false))
3778 return false;
3779 }
3780 return true;
3781 }
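/* Note that the loop above only tries the power-of-two shift amounts
   NELT/2, NELT/4, ..., 1.  Those are the amounts used by the shift-based
   epilogue reduction further down in this file, so checking just these
   permutations should be sufficient for its purposes.  */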
3782
3783 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
3784 functions. Design better to avoid maintenance issues. */
3785
3786 /* Function vect_model_reduction_cost.
3787
3788 Models cost for a reduction operation, including the vector ops
3789 generated within the strip-mine loop, the initial definition before
3790 the loop, and the epilogue code that must be generated. */
3791
3792 static void
3793 vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
3794 int ncopies, stmt_vector_for_cost *cost_vec)
3795 {
3796 int prologue_cost = 0, epilogue_cost = 0, inside_cost;
3797 enum tree_code code;
3798 optab optab;
3799 tree vectype;
3800 machine_mode mode;
3801 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3802 struct loop *loop = NULL;
3803
3804 if (loop_vinfo)
3805 loop = LOOP_VINFO_LOOP (loop_vinfo);
3806
3807 /* Condition reductions generate two reductions in the loop. */
3808 vect_reduction_type reduction_type
3809 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
3810 if (reduction_type == COND_REDUCTION)
3811 ncopies *= 2;
3812
3813 vectype = STMT_VINFO_VECTYPE (stmt_info);
3814 mode = TYPE_MODE (vectype);
3815 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
3816
3817 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
3818
3819 if (reduction_type == EXTRACT_LAST_REDUCTION
3820 || reduction_type == FOLD_LEFT_REDUCTION)
3821 {
3822 /* No extra instructions needed in the prologue. */
3823 prologue_cost = 0;
3824
3825 if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
3826 /* Count one reduction-like operation per vector. */
3827 inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
3828 stmt_info, 0, vect_body);
3829 else
3830 {
3831 /* Use NELEMENTS extracts and NELEMENTS scalar ops. */
3832 unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
3833 inside_cost = record_stmt_cost (cost_vec, nelements,
3834 vec_to_scalar, stmt_info, 0,
3835 vect_body);
3836 inside_cost += record_stmt_cost (cost_vec, nelements,
3837 scalar_stmt, stmt_info, 0,
3838 vect_body);
3839 }
3840 }
3841 else
3842 {
3843 /* Add in cost for initial definition.
3844 For cond reduction we have four vectors: initial index, step,
3845 initial result of the data reduction, initial value of the index
3846 reduction. */
3847 int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
3848 prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
3849 scalar_to_vec, stmt_info, 0,
3850 vect_prologue);
3851
3852 /* Cost of reduction op inside loop. */
3853 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3854 stmt_info, 0, vect_body);
3855 }
3856
3857 /* Determine cost of epilogue code.
3858
3859 We have a reduction operator that will reduce the vector in one statement.
3860 Also requires scalar extract. */
3861
3862 if (!loop || !nested_in_vect_loop_p (loop, orig_stmt_info))
3863 {
3864 if (reduc_fn != IFN_LAST)
3865 {
3866 if (reduction_type == COND_REDUCTION)
3867 {
3868 	      /* An EQ stmt and a COND_EXPR stmt.  */
3869 epilogue_cost += record_stmt_cost (cost_vec, 2,
3870 vector_stmt, stmt_info, 0,
3871 vect_epilogue);
3872 /* Reduction of the max index and a reduction of the found
3873 values. */
3874 epilogue_cost += record_stmt_cost (cost_vec, 2,
3875 vec_to_scalar, stmt_info, 0,
3876 vect_epilogue);
3877 /* A broadcast of the max value. */
3878 epilogue_cost += record_stmt_cost (cost_vec, 1,
3879 scalar_to_vec, stmt_info, 0,
3880 vect_epilogue);
3881 }
3882 else
3883 {
3884 epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
3885 stmt_info, 0, vect_epilogue);
3886 epilogue_cost += record_stmt_cost (cost_vec, 1,
3887 vec_to_scalar, stmt_info, 0,
3888 vect_epilogue);
3889 }
3890 }
3891 else if (reduction_type == COND_REDUCTION)
3892 {
3893 unsigned estimated_nunits = vect_nunits_for_cost (vectype);
3894 /* Extraction of scalar elements. */
3895 epilogue_cost += record_stmt_cost (cost_vec,
3896 2 * estimated_nunits,
3897 vec_to_scalar, stmt_info, 0,
3898 vect_epilogue);
3899 /* Scalar max reductions via COND_EXPR / MAX_EXPR. */
3900 epilogue_cost += record_stmt_cost (cost_vec,
3901 2 * estimated_nunits - 3,
3902 scalar_stmt, stmt_info, 0,
3903 vect_epilogue);
3904 }
3905 else if (reduction_type == EXTRACT_LAST_REDUCTION
3906 || reduction_type == FOLD_LEFT_REDUCTION)
3907 	/* No extra instructions are needed in the epilogue.  */
3908 ;
3909 else
3910 {
3911 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3912 	  tree bitsize
3913 	    = TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info->stmt)));
3914 int element_bitsize = tree_to_uhwi (bitsize);
3915 int nelements = vec_size_in_bits / element_bitsize;
3916
3917 if (code == COND_EXPR)
3918 code = MAX_EXPR;
3919
3920 optab = optab_for_tree_code (code, vectype, optab_default);
3921
3922 /* We have a whole vector shift available. */
3923 if (optab != unknown_optab
3924 && VECTOR_MODE_P (mode)
3925 && optab_handler (optab, mode) != CODE_FOR_nothing
3926 && have_whole_vector_shift (mode))
3927 {
3928 /* Final reduction via vector shifts and the reduction operator.
3929 Also requires scalar extract. */
3930 epilogue_cost += record_stmt_cost (cost_vec,
3931 exact_log2 (nelements) * 2,
3932 vector_stmt, stmt_info, 0,
3933 vect_epilogue);
3934 epilogue_cost += record_stmt_cost (cost_vec, 1,
3935 vec_to_scalar, stmt_info, 0,
3936 vect_epilogue);
3937 }
3938 else
3939 /* Use extracts and reduction op for final reduction. For N
3940 elements, we have N extracts and N-1 reduction ops. */
3941 epilogue_cost += record_stmt_cost (cost_vec,
3942 nelements + nelements - 1,
3943 vector_stmt, stmt_info, 0,
3944 vect_epilogue);
3945 }
3946 }
3947
3948 if (dump_enabled_p ())
3949 dump_printf (MSG_NOTE,
3950 "vect_model_reduction_cost: inside_cost = %d, "
3951 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3952 prologue_cost, epilogue_cost);
3953 }
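/* As a concrete (hypothetical) example: for a plain PLUS reduction with
   NCOPIES == 1, a target-supported REDUC_FN and no nesting, the function
   above records one scalar_to_vec in the prologue, one vector_stmt in the
   loop body, and one vector_stmt plus one vec_to_scalar in the epilogue;
   the weights attached to those kinds are still up to the target's cost
   hooks.  */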
3954
3955
3956 /* Function vect_model_induction_cost.
3957
3958 Models cost for induction operations. */
3959
3960 static void
3961 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
3962 stmt_vector_for_cost *cost_vec)
3963 {
3964 unsigned inside_cost, prologue_cost;
3965
3966 if (PURE_SLP_STMT (stmt_info))
3967 return;
3968
3969 /* loop cost for vec_loop. */
3970 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3971 stmt_info, 0, vect_body);
3972
3973 /* prologue cost for vec_init and vec_step. */
3974 prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
3975 stmt_info, 0, vect_prologue);
3976
3977 if (dump_enabled_p ())
3978 dump_printf_loc (MSG_NOTE, vect_location,
3979 "vect_model_induction_cost: inside_cost = %d, "
3980 "prologue_cost = %d .\n", inside_cost, prologue_cost);
3981 }
3982
3983
3984
3985 /* Function get_initial_def_for_reduction
3986
3987 Input:
3988 STMT_VINFO - a stmt that performs a reduction operation in the loop.
3989 INIT_VAL - the initial value of the reduction variable
3990
3991 Output:
3992 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3993 of the reduction (used for adjusting the epilog - see below).
3994 Return a vector variable, initialized according to the operation that
3995 STMT_VINFO performs. This vector will be used as the initial value
3996 of the vector of partial results.
3997
3998 Option1 (adjust in epilog): Initialize the vector as follows:
3999 add/bit or/xor: [0,0,...,0,0]
4000 mult/bit and: [1,1,...,1,1]
4001 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
4002 and when necessary (e.g. add/mult case) let the caller know
4003 that it needs to adjust the result by init_val.
4004
4005 Option2: Initialize the vector as follows:
4006 add/bit or/xor: [init_val,0,0,...,0]
4007 mult/bit and: [init_val,1,1,...,1]
4008 min/max/cond_expr: [init_val,init_val,...,init_val]
4009 and no adjustments are needed.
4010
4011 For example, for the following code:
4012
4013 s = init_val;
4014 for (i=0;i<n;i++)
4015 s = s + a[i];
4016
4017 STMT_VINFO is 's = s + a[i]', and the reduction variable is 's'.
4018 For a vector of 4 units, we want to return either [0,0,0,init_val],
4019 or [0,0,0,0] and let the caller know that it needs to adjust
4020 the result at the end by 'init_val'.
4021
4022    FORNOW, we use the 'adjust in epilog' scheme (Option1) when ADJUSTMENT_DEF
4023    is not NULL, because the initialization vector is then simpler (same
4024    element in all entries), and Option2 otherwise.
4025
4026 A cost model should help decide between these two schemes. */
4027
4028 tree
4029 get_initial_def_for_reduction (stmt_vec_info stmt_vinfo, tree init_val,
4030 tree *adjustment_def)
4031 {
4032 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
4033 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4034 tree scalar_type = TREE_TYPE (init_val);
4035 tree vectype = get_vectype_for_scalar_type (scalar_type);
4036 enum tree_code code = gimple_assign_rhs_code (stmt_vinfo->stmt);
4037 tree def_for_init;
4038 tree init_def;
4039 REAL_VALUE_TYPE real_init_val = dconst0;
4040 int int_init_val = 0;
4041 gimple_seq stmts = NULL;
4042
4043 gcc_assert (vectype);
4044
4045 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
4046 || SCALAR_FLOAT_TYPE_P (scalar_type));
4047
4048 gcc_assert (nested_in_vect_loop_p (loop, stmt_vinfo)
4049 || loop == (gimple_bb (stmt_vinfo->stmt))->loop_father);
4050
4051 vect_reduction_type reduction_type
4052 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);
4053
4054 switch (code)
4055 {
4056 case WIDEN_SUM_EXPR:
4057 case DOT_PROD_EXPR:
4058 case SAD_EXPR:
4059 case PLUS_EXPR:
4060 case MINUS_EXPR:
4061 case BIT_IOR_EXPR:
4062 case BIT_XOR_EXPR:
4063 case MULT_EXPR:
4064 case BIT_AND_EXPR:
4065 {
4066 /* ADJUSTMENT_DEF is NULL when called from
4067 vect_create_epilog_for_reduction to vectorize double reduction. */
4068 if (adjustment_def)
4069 *adjustment_def = init_val;
4070
4071 if (code == MULT_EXPR)
4072 {
4073 real_init_val = dconst1;
4074 int_init_val = 1;
4075 }
4076
4077 if (code == BIT_AND_EXPR)
4078 int_init_val = -1;
4079
4080 if (SCALAR_FLOAT_TYPE_P (scalar_type))
4081 def_for_init = build_real (scalar_type, real_init_val);
4082 else
4083 def_for_init = build_int_cst (scalar_type, int_init_val);
4084
4085 if (adjustment_def)
4086 /* Option1: the first element is '0' or '1' as well. */
4087 init_def = gimple_build_vector_from_val (&stmts, vectype,
4088 def_for_init);
4089 else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
4090 {
4091 /* Option2 (variable length): the first element is INIT_VAL. */
4092 init_def = gimple_build_vector_from_val (&stmts, vectype,
4093 def_for_init);
4094 init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
4095 vectype, init_def, init_val);
4096 }
4097 else
4098 {
4099 /* Option2: the first element is INIT_VAL. */
4100 tree_vector_builder elts (vectype, 1, 2);
4101 elts.quick_push (init_val);
4102 elts.quick_push (def_for_init);
4103 init_def = gimple_build_vector (&stmts, &elts);
4104 }
4105 }
4106 break;
4107
4108 case MIN_EXPR:
4109 case MAX_EXPR:
4110 case COND_EXPR:
4111 {
4112 if (adjustment_def)
4113 {
4114 *adjustment_def = NULL_TREE;
4115 if (reduction_type != COND_REDUCTION
4116 && reduction_type != EXTRACT_LAST_REDUCTION)
4117 {
4118 init_def = vect_get_vec_def_for_operand (init_val, stmt_vinfo);
4119 break;
4120 }
4121 }
4122 init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
4123 init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
4124 }
4125 break;
4126
4127 default:
4128 gcc_unreachable ();
4129 }
4130
4131 if (stmts)
4132 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4133 return init_def;
4134 }
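/* As an illustration of the two options: for the running example
   's = s + a[i]' with init_val == 5 and a 4-element vector, Option1
   returns { 0, 0, 0, 0 } and sets *ADJUSTMENT_DEF to 5, while Option2
   returns { 5, 0, 0, 0 }.  For variable-length vectors Option2 is built
   by splatting the neutral element and shifting INIT_VAL into the first
   lane with CFN_VEC_SHL_INSERT, as done above.  */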
4135
4136 /* Get at the initial defs for the reduction PHIs in SLP_NODE.
4137 NUMBER_OF_VECTORS is the number of vector defs to create.
4138 If NEUTRAL_OP is nonnull, introducing extra elements of that
4139 value will not change the result. */
4140
4141 static void
4142 get_initial_defs_for_reduction (slp_tree slp_node,
4143 vec<tree> *vec_oprnds,
4144 unsigned int number_of_vectors,
4145 bool reduc_chain, tree neutral_op)
4146 {
4147 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4148 stmt_vec_info stmt_vinfo = stmts[0];
4149 unsigned HOST_WIDE_INT nunits;
4150 unsigned j, number_of_places_left_in_vector;
4151 tree vector_type;
4152 tree vop;
4153 int group_size = stmts.length ();
4154 unsigned int vec_num, i;
4155 unsigned number_of_copies = 1;
4156 vec<tree> voprnds;
4157 voprnds.create (number_of_vectors);
4158 struct loop *loop;
4159 auto_vec<tree, 16> permute_results;
4160
4161 vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
4162
4163 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
4164
4165 loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
4166 gcc_assert (loop);
4167 edge pe = loop_preheader_edge (loop);
4168
4169 gcc_assert (!reduc_chain || neutral_op);
4170
4171 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
4172 created vectors. It is greater than 1 if unrolling is performed.
4173
4174 For example, we have two scalar operands, s1 and s2 (e.g., group of
4175 strided accesses of size two), while NUNITS is four (i.e., four scalars
4176 of this type can be packed in a vector). The output vector will contain
4177 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
4178 will be 2).
4179
4180 If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
4181 vectors containing the operands.
4182
4183 For example, NUNITS is four as before, and the group size is 8
4184 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
4185 {s5, s6, s7, s8}. */
4186
4187 if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
4188 nunits = group_size;
4189
4190 number_of_copies = nunits * number_of_vectors / group_size;
4191
4192 number_of_places_left_in_vector = nunits;
4193 bool constant_p = true;
4194 tree_vector_builder elts (vector_type, nunits, 1);
4195 elts.quick_grow (nunits);
4196 for (j = 0; j < number_of_copies; j++)
4197 {
4198 for (i = group_size - 1; stmts.iterate (i, &stmt_vinfo); i--)
4199 {
4200 tree op;
4201           /* Get the def before the loop.  In a reduction chain we have only
4202 one initial value. */
4203 if ((j != (number_of_copies - 1)
4204 || (reduc_chain && i != 0))
4205 && neutral_op)
4206 op = neutral_op;
4207 else
4208 op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
4209
4210 /* Create 'vect_ = {op0,op1,...,opn}'. */
4211 number_of_places_left_in_vector--;
4212 elts[number_of_places_left_in_vector] = op;
4213 if (!CONSTANT_CLASS_P (op))
4214 constant_p = false;
4215
4216 if (number_of_places_left_in_vector == 0)
4217 {
4218 gimple_seq ctor_seq = NULL;
4219 tree init;
4220 if (constant_p && !neutral_op
4221 ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
4222 : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
4223 /* Build the vector directly from ELTS. */
4224 init = gimple_build_vector (&ctor_seq, &elts);
4225 else if (neutral_op)
4226 {
4227 /* Build a vector of the neutral value and shift the
4228 other elements into place. */
4229 init = gimple_build_vector_from_val (&ctor_seq, vector_type,
4230 neutral_op);
4231 int k = nunits;
4232 while (k > 0 && elts[k - 1] == neutral_op)
4233 k -= 1;
4234 while (k > 0)
4235 {
4236 k -= 1;
4237 init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
4238 vector_type, init, elts[k]);
4239 }
4240 }
4241 else
4242 {
4243 /* First time round, duplicate ELTS to fill the
4244 required number of vectors, then cherry pick the
4245 appropriate result for each iteration. */
4246 if (vec_oprnds->is_empty ())
4247 duplicate_and_interleave (&ctor_seq, vector_type, elts,
4248 number_of_vectors,
4249 permute_results);
4250 init = permute_results[number_of_vectors - j - 1];
4251 }
4252 if (ctor_seq != NULL)
4253 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4254 voprnds.quick_push (init);
4255
4256 number_of_places_left_in_vector = nunits;
4257 elts.new_vector (vector_type, nunits, 1);
4258 elts.quick_grow (nunits);
4259 constant_p = true;
4260 }
4261 }
4262 }
4263
4264   /* Since the vectors are created in reverse order, reverse them
4265      back here.  */
4266 vec_num = voprnds.length ();
4267 for (j = vec_num; j != 0; j--)
4268 {
4269 vop = voprnds[j - 1];
4270 vec_oprnds->quick_push (vop);
4271 }
4272
4273 voprnds.release ();
4274
4275   /* If VF is greater than the unrolling factor needed for the SLP
4276      group of stmts, the NUMBER_OF_VECTORS to be created is greater than
4277      NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
4278      to replicate the vectors.  */
4279 tree neutral_vec = NULL;
4280 while (number_of_vectors > vec_oprnds->length ())
4281 {
4282 if (neutral_op)
4283 {
4284 if (!neutral_vec)
4285 {
4286 gimple_seq ctor_seq = NULL;
4287 neutral_vec = gimple_build_vector_from_val
4288 (&ctor_seq, vector_type, neutral_op);
4289 if (ctor_seq != NULL)
4290 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4291 }
4292 vec_oprnds->quick_push (neutral_vec);
4293 }
4294 else
4295 {
4296 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
4297 vec_oprnds->quick_push (vop);
4298 }
4299 }
4300 }
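/* A small (hypothetical) example: a two-statement SLP plus-reduction
   (GROUP_SIZE == 2) with initial values a0 and b0, a 4-element vector
   type and NEUTRAL_OP == 0 yields the single initial vector
   { a0, b0, 0, 0 }; the lanes that feed extra copies of a scalar
   statement start at the neutral value so that they do not affect the
   final result.  */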
4301
4302
4303 /* Function vect_create_epilog_for_reduction
4304
4305 Create code at the loop-epilog to finalize the result of a reduction
4306 computation.
4307
4308 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
4309 reduction statements.
4310 STMT_INFO is the scalar reduction stmt that is being vectorized.
4311 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
4312 number of elements that we can fit in a vectype (nunits). In this case
4313 we have to generate more than one vector stmt - i.e - we need to "unroll"
4314 the vector stmt by a factor VF/nunits. For more details see documentation
4315 in vectorizable_operation.
4316 REDUC_FN is the internal function for the epilog reduction.
4317 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
4318 computation.
4319 REDUC_INDEX is the index of the operand in the right hand side of the
4320 statement that is defined by REDUCTION_PHI.
4321 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
4322 SLP_NODE is an SLP node containing a group of reduction statements. The
4323 first one in this group is STMT_INFO.
4324 INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
4325 when the COND_EXPR is never true in the loop. For MAX_EXPR, it needs to
4326 be smaller than any value of the IV in the loop, for MIN_EXPR larger than
4327 any value of the IV in the loop.
4328 INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
4329 NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
4330      null if this is not an SLP reduction.
4331
4332 This function:
4333 1. Creates the reduction def-use cycles: sets the arguments for
4334 REDUCTION_PHIS:
4335 The loop-entry argument is the vectorized initial-value of the reduction.
4336 The loop-latch argument is taken from VECT_DEFS - the vector of partial
4337 sums.
4338 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
4339 by calling the function specified by REDUC_FN if available, or by
4340 other means (whole-vector shifts or a scalar loop).
4341 The function also creates a new phi node at the loop exit to preserve
4342 loop-closed form, as illustrated below.
4343
4344 The flow at the entry to this function:
4345
4346 loop:
4347 vec_def = phi <null, null> # REDUCTION_PHI
4348 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4349 s_loop = scalar_stmt # (scalar) STMT_INFO
4350 loop_exit:
4351 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4352 use <s_out0>
4353 use <s_out0>
4354
4355 The above is transformed by this function into:
4356
4357 loop:
4358 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4359 VECT_DEF = vector_stmt # vectorized form of STMT_INFO
4360 s_loop = scalar_stmt # (scalar) STMT_INFO
4361 loop_exit:
4362 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4363 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4364 v_out2 = reduce <v_out1>
4365 s_out3 = extract_field <v_out2, 0>
4366 s_out4 = adjust_result <s_out3>
4367 use <s_out4>
4368 use <s_out4>
4369 */
4370
4371 static void
4372 vect_create_epilog_for_reduction (vec<tree> vect_defs,
4373 stmt_vec_info stmt_info,
4374 gimple *reduc_def_stmt,
4375 int ncopies, internal_fn reduc_fn,
4376 vec<stmt_vec_info> reduction_phis,
4377 bool double_reduc,
4378 slp_tree slp_node,
4379 slp_instance slp_node_instance,
4380 tree induc_val, enum tree_code induc_code,
4381 tree neutral_op)
4382 {
4383 stmt_vec_info prev_phi_info;
4384 tree vectype;
4385 machine_mode mode;
4386 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4387 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
4388 basic_block exit_bb;
4389 tree scalar_dest;
4390 tree scalar_type;
4391 gimple *new_phi = NULL, *phi;
4392 stmt_vec_info phi_info;
4393 gimple_stmt_iterator exit_gsi;
4394 tree vec_dest;
4395 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
4396 gimple *epilog_stmt = NULL;
4397 enum tree_code code = gimple_assign_rhs_code (stmt_info->stmt);
4398 gimple *exit_phi;
4399 tree bitsize;
4400 tree adjustment_def = NULL;
4401 tree vec_initial_def = NULL;
4402 tree expr, def, initial_def = NULL;
4403 tree orig_name, scalar_result;
4404 imm_use_iterator imm_iter, phi_imm_iter;
4405 use_operand_p use_p, phi_use_p;
4406 gimple *use_stmt;
4407 stmt_vec_info reduction_phi_info = NULL;
4408 bool nested_in_vect_loop = false;
4409 auto_vec<gimple *> new_phis;
4410 auto_vec<stmt_vec_info> inner_phis;
4411 int j, i;
4412 auto_vec<tree> scalar_results;
4413 unsigned int group_size = 1, k, ratio;
4414 auto_vec<tree> vec_initial_defs;
4415 auto_vec<gimple *> phis;
4416 bool slp_reduc = false;
4417 bool direct_slp_reduc;
4418 tree new_phi_result;
4419 stmt_vec_info inner_phi = NULL;
4420 tree induction_index = NULL_TREE;
4421
4422 if (slp_node)
4423 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
4424
4425 if (nested_in_vect_loop_p (loop, stmt_info))
4426 {
4427 outer_loop = loop;
4428 loop = loop->inner;
4429 nested_in_vect_loop = true;
4430 gcc_assert (!slp_node);
4431 }
4432
4433 vectype = STMT_VINFO_VECTYPE (stmt_info);
4434 gcc_assert (vectype);
4435 mode = TYPE_MODE (vectype);
4436
4437 /* 1. Create the reduction def-use cycle:
4438 Set the arguments of REDUCTION_PHIS, i.e., transform
4439
4440 loop:
4441 vec_def = phi <null, null> # REDUCTION_PHI
4442 VECT_DEF = vector_stmt # vectorized form of STMT
4443 ...
4444
4445 into:
4446
4447 loop:
4448 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4449 VECT_DEF = vector_stmt # vectorized form of STMT
4450 ...
4451
4452 (in case of SLP, do it for all the phis). */
4453
4454 /* Get the loop-entry arguments. */
4455 enum vect_def_type initial_def_dt = vect_unknown_def_type;
4456 if (slp_node)
4457 {
4458 unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4459 vec_initial_defs.reserve (vec_num);
4460 get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
4461 &vec_initial_defs, vec_num,
4462 REDUC_GROUP_FIRST_ELEMENT (stmt_info),
4463 neutral_op);
4464 }
4465 else
4466 {
4467 /* Get at the scalar def before the loop, that defines the initial value
4468 of the reduction variable. */
4469 initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4470 loop_preheader_edge (loop));
4471       /* Optimize: for REDUC_MAX, if initial_def is smaller than the base
4472 	 and we can't use zero for induc_val, use initial_def; similarly
4473 	 for REDUC_MIN with initial_def larger than the base.  */
4474 if (TREE_CODE (initial_def) == INTEGER_CST
4475 && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4476 == INTEGER_INDUC_COND_REDUCTION)
4477 && !integer_zerop (induc_val)
4478 && ((induc_code == MAX_EXPR
4479 && tree_int_cst_lt (initial_def, induc_val))
4480 || (induc_code == MIN_EXPR
4481 && tree_int_cst_lt (induc_val, initial_def))))
4482 induc_val = initial_def;
4483
4484 if (double_reduc)
4485 /* In case of double reduction we only create a vector variable
4486 to be put in the reduction phi node. The actual statement
4487 creation is done later in this function. */
4488 vec_initial_def = vect_create_destination_var (initial_def, vectype);
4489 else if (nested_in_vect_loop)
4490 {
4491 /* Do not use an adjustment def as that case is not supported
4492 correctly if ncopies is not one. */
4493 vect_is_simple_use (initial_def, loop_vinfo, &initial_def_dt);
4494 vec_initial_def = vect_get_vec_def_for_operand (initial_def,
4495 stmt_info);
4496 }
4497 else
4498 vec_initial_def
4499 = get_initial_def_for_reduction (stmt_info, initial_def,
4500 &adjustment_def);
4501 vec_initial_defs.create (1);
4502 vec_initial_defs.quick_push (vec_initial_def);
4503 }
4504
4505 /* Set phi nodes arguments. */
4506 FOR_EACH_VEC_ELT (reduction_phis, i, phi_info)
4507 {
4508 tree vec_init_def = vec_initial_defs[i];
4509 tree def = vect_defs[i];
4510 for (j = 0; j < ncopies; j++)
4511 {
4512 if (j != 0)
4513 {
4514 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4515 if (nested_in_vect_loop)
4516 vec_init_def
4517 = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_init_def);
4518 }
4519
4520 /* Set the loop-entry arg of the reduction-phi. */
4521
4522 gphi *phi = as_a <gphi *> (phi_info->stmt);
4523 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4524 == INTEGER_INDUC_COND_REDUCTION)
4525 {
4526 	      /* Initialise the reduction phi to zero.  This prevents non-zero
4527 		 initial values from interfering with the reduction op.  */
4528 gcc_assert (ncopies == 1);
4529 gcc_assert (i == 0);
4530
4531 tree vec_init_def_type = TREE_TYPE (vec_init_def);
4532 tree induc_val_vec
4533 = build_vector_from_val (vec_init_def_type, induc_val);
4534
4535 add_phi_arg (phi, induc_val_vec, loop_preheader_edge (loop),
4536 UNKNOWN_LOCATION);
4537 }
4538 else
4539 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
4540 UNKNOWN_LOCATION);
4541
4542 /* Set the loop-latch arg for the reduction-phi. */
4543 if (j > 0)
4544 def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
4545
4546 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
4547
4548 if (dump_enabled_p ())
4549 {
4550 dump_printf_loc (MSG_NOTE, vect_location,
4551 "transform reduction: created def-use cycle: ");
4552 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
4553 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
4554 }
4555 }
4556 }
4557
4558 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
4559 which is updated with the current index of the loop for every match of
4560 the original loop's cond_expr (VEC_STMT). This results in a vector
4561 containing the last time the condition passed for that vector lane.
4562 The first match will be a 1 to allow 0 to be used for non-matching
4563 indexes. If there are no matches at all then the vector will be all
4564 zeroes. */
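  /* In other words, the lane that handles scalar iteration S records the
     value S + 1 at the last point where the condition held, and a lane
     that never matched still holds 0; the epilogue code further down then
     picks the data value from the lane with the maximum recorded
     index.  */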
4565 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
4566 {
4567 tree indx_before_incr, indx_after_incr;
4568 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
4569
4570 gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info)->stmt;
4571 gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
4572
4573 int scalar_precision
4574 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
4575 tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
4576 tree cr_index_vector_type = build_vector_type
4577 (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));
4578
4579 /* First we create a simple vector induction variable which starts
4580 with the values {1,2,3,...} (SERIES_VECT) and increments by the
4581 vector size (STEP). */
4582
4583 /* Create a {1,2,3,...} vector. */
4584 tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
4585
4586 /* Create a vector of the step value. */
4587 tree step = build_int_cst (cr_index_scalar_type, nunits_out);
4588 tree vec_step = build_vector_from_val (cr_index_vector_type, step);
4589
4590 /* Create an induction variable. */
4591 gimple_stmt_iterator incr_gsi;
4592 bool insert_after;
4593 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4594 create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
4595 insert_after, &indx_before_incr, &indx_after_incr);
4596
4597 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
4598 filled with zeros (VEC_ZERO). */
4599
4600 /* Create a vector of 0s. */
4601 tree zero = build_zero_cst (cr_index_scalar_type);
4602 tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
4603
4604 /* Create a vector phi node. */
4605 tree new_phi_tree = make_ssa_name (cr_index_vector_type);
4606 new_phi = create_phi_node (new_phi_tree, loop->header);
4607 loop_vinfo->add_stmt (new_phi);
4608 add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
4609 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4610
4611       /* Now take the condition from the loop's original cond_expr
4612 (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
4613 every match uses values from the induction variable
4614 (INDEX_BEFORE_INCR) otherwise uses values from the phi node
4615 (NEW_PHI_TREE).
4616 Finally, we update the phi (NEW_PHI_TREE) to take the value of
4617 the new cond_expr (INDEX_COND_EXPR). */
4618
4619 /* Duplicate the condition from vec_stmt. */
4620 tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));
4621
4622 /* Create a conditional, where the condition is taken from vec_stmt
4623 (CCOMPARE), then is the induction index (INDEX_BEFORE_INCR) and
4624 else is the phi (NEW_PHI_TREE). */
4625 tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
4626 ccompare, indx_before_incr,
4627 new_phi_tree);
4628 induction_index = make_ssa_name (cr_index_vector_type);
4629 gimple *index_condition = gimple_build_assign (induction_index,
4630 index_cond_expr);
4631 gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
4632 stmt_vec_info index_vec_info = loop_vinfo->add_stmt (index_condition);
4633 STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
4634
4635 /* Update the phi with the vec cond. */
4636 add_phi_arg (as_a <gphi *> (new_phi), induction_index,
4637 loop_latch_edge (loop), UNKNOWN_LOCATION);
4638 }
4639
4640 /* 2. Create epilog code.
4641 The reduction epilog code operates across the elements of the vector
4642 of partial results computed by the vectorized loop.
4643 The reduction epilog code consists of:
4644
4645 step 1: compute the scalar result in a vector (v_out2)
4646 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4647 step 3: adjust the scalar result (s_out3) if needed.
4648
4649      Step 1 can be accomplished using one of the following three schemes:
4650 (scheme 1) using reduc_fn, if available.
4651 (scheme 2) using whole-vector shifts, if available.
4652 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4653 combined.
4654
4655 The overall epilog code looks like this:
4656
4657 s_out0 = phi <s_loop> # original EXIT_PHI
4658 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4659 v_out2 = reduce <v_out1> # step 1
4660 s_out3 = extract_field <v_out2, 0> # step 2
4661 s_out4 = adjust_result <s_out3> # step 3
4662
4663 (step 3 is optional, and steps 1 and 2 may be combined).
4664 Lastly, the uses of s_out0 are replaced by s_out4. */
4665
4666
4667 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4668 v_out1 = phi <VECT_DEF>
4669 Store them in NEW_PHIS. */
4670
4671 exit_bb = single_exit (loop)->dest;
4672 prev_phi_info = NULL;
4673 new_phis.create (vect_defs.length ());
4674 FOR_EACH_VEC_ELT (vect_defs, i, def)
4675 {
4676 for (j = 0; j < ncopies; j++)
4677 {
4678 tree new_def = copy_ssa_name (def);
4679 phi = create_phi_node (new_def, exit_bb);
4680 stmt_vec_info phi_info = loop_vinfo->add_stmt (phi);
4681 if (j == 0)
4682 new_phis.quick_push (phi);
4683 else
4684 {
4685 def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
4686 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi_info;
4687 }
4688
4689 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
4690 prev_phi_info = phi_info;
4691 }
4692 }
4693
4694 /* The epilogue is created for the outer-loop, i.e., for the loop being
4695 vectorized. Create exit phis for the outer loop. */
4696 if (double_reduc)
4697 {
4698 loop = outer_loop;
4699 exit_bb = single_exit (loop)->dest;
4700 inner_phis.create (vect_defs.length ());
4701 FOR_EACH_VEC_ELT (new_phis, i, phi)
4702 {
4703 stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi);
4704 tree new_result = copy_ssa_name (PHI_RESULT (phi));
4705 gphi *outer_phi = create_phi_node (new_result, exit_bb);
4706 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4707 PHI_RESULT (phi));
4708 prev_phi_info = loop_vinfo->add_stmt (outer_phi);
4709 inner_phis.quick_push (phi_info);
4710 new_phis[i] = outer_phi;
4711 while (STMT_VINFO_RELATED_STMT (phi_info))
4712 {
4713 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4714 new_result = copy_ssa_name (PHI_RESULT (phi_info->stmt));
4715 outer_phi = create_phi_node (new_result, exit_bb);
4716 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4717 PHI_RESULT (phi_info->stmt));
4718 stmt_vec_info outer_phi_info = loop_vinfo->add_stmt (outer_phi);
4719 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi_info;
4720 prev_phi_info = outer_phi_info;
4721 }
4722 }
4723 }
4724
4725 exit_gsi = gsi_after_labels (exit_bb);
4726
4727 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4728 (i.e. when reduc_fn is not available) and in the final adjustment
4729 code (if needed). Also get the original scalar reduction variable as
4730 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4731 represents a reduction pattern), the tree-code and scalar-def are
4732 taken from the original stmt that the pattern-stmt (STMT) replaces.
4733 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4734 are taken from STMT. */
4735
4736 stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
4737 if (orig_stmt_info != stmt_info)
4738 {
4739 /* Reduction pattern */
4740 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4741 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt_info);
4742 }
4743
4744 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
4745 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4746 partial results are added and not subtracted. */
4747 if (code == MINUS_EXPR)
4748 code = PLUS_EXPR;
4749
4750 scalar_dest = gimple_assign_lhs (orig_stmt_info->stmt);
4751 scalar_type = TREE_TYPE (scalar_dest);
4752 scalar_results.create (group_size);
4753 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4754 bitsize = TYPE_SIZE (scalar_type);
4755
4756 /* In case this is a reduction in an inner-loop while vectorizing an outer
4757 loop - we don't need to extract a single scalar result at the end of the
4758 inner-loop (unless it is double reduction, i.e., the use of reduction is
4759 outside the outer-loop). The final vector of partial results will be used
4760 in the vectorized outer-loop, or reduced to a scalar result at the end of
4761 the outer-loop. */
4762 if (nested_in_vect_loop && !double_reduc)
4763 goto vect_finalize_reduction;
4764
4765 /* SLP reduction without reduction chain, e.g.,
4766 # a1 = phi <a2, a0>
4767 # b1 = phi <b2, b0>
4768 a2 = operation (a1)
4769 b2 = operation (b1) */
4770 slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (stmt_info));
4771
4772 /* True if we should implement SLP_REDUC using native reduction operations
4773 instead of scalar operations. */
4774 direct_slp_reduc = (reduc_fn != IFN_LAST
4775 && slp_reduc
4776 && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
4777
4778 /* In case of reduction chain, e.g.,
4779 # a1 = phi <a3, a0>
4780 a2 = operation (a1)
4781 a3 = operation (a2),
4782
4783 we may end up with more than one vector result. Here we reduce them to
4784 one vector. */
4785 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info) || direct_slp_reduc)
4786 {
4787 tree first_vect = PHI_RESULT (new_phis[0]);
4788 gassign *new_vec_stmt = NULL;
4789 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4790 for (k = 1; k < new_phis.length (); k++)
4791 {
4792 gimple *next_phi = new_phis[k];
4793 tree second_vect = PHI_RESULT (next_phi);
4794 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4795 new_vec_stmt = gimple_build_assign (tem, code,
4796 first_vect, second_vect);
4797 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4798 first_vect = tem;
4799 }
4800
4801 new_phi_result = first_vect;
4802 if (new_vec_stmt)
4803 {
4804 new_phis.truncate (0);
4805 new_phis.safe_push (new_vec_stmt);
4806 }
4807 }
4808   /* Likewise if we couldn't use a single def-use cycle.  */
4809 else if (ncopies > 1)
4810 {
4811 gcc_assert (new_phis.length () == 1);
4812 tree first_vect = PHI_RESULT (new_phis[0]);
4813 gassign *new_vec_stmt = NULL;
4814 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4815 stmt_vec_info next_phi_info = loop_vinfo->lookup_stmt (new_phis[0]);
4816 for (int k = 1; k < ncopies; ++k)
4817 {
4818 next_phi_info = STMT_VINFO_RELATED_STMT (next_phi_info);
4819 tree second_vect = PHI_RESULT (next_phi_info->stmt);
4820 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4821 new_vec_stmt = gimple_build_assign (tem, code,
4822 first_vect, second_vect);
4823 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4824 first_vect = tem;
4825 }
4826 new_phi_result = first_vect;
4827 new_phis.truncate (0);
4828 new_phis.safe_push (new_vec_stmt);
4829 }
4830 else
4831 new_phi_result = PHI_RESULT (new_phis[0]);
4832
4833 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4834 && reduc_fn != IFN_LAST)
4835 {
4836 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
4837 various data values where the condition matched and another vector
4838 (INDUCTION_INDEX) containing all the indexes of those matches. We
4839 	 need to extract the last matching index (which will be the index with
4840 	 the highest value) and use this to index into the data vector.
4841 For the case where there were no matches, the data vector will contain
4842 all default values and the index vector will be all zeros. */
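      /* A hypothetical example: with INDUCTION_INDEX == { 0, 5, 0, 3 } and
	 NEW_PHI_RESULT == { d0, d1, d2, d3 }, the REDUC_MAX below yields 5,
	 the comparison selects only lane 1, and the final unsigned
	 REDUC_MAX over { 0, d1, 0, 0 } recovers d1 as the scalar
	 result.  */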
4843
4844 /* Get various versions of the type of the vector of indexes. */
4845 tree index_vec_type = TREE_TYPE (induction_index);
4846 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
4847 tree index_scalar_type = TREE_TYPE (index_vec_type);
4848 tree index_vec_cmp_type = build_same_sized_truth_vector_type
4849 (index_vec_type);
4850
4851 /* Get an unsigned integer version of the type of the data vector. */
4852 int scalar_precision
4853 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
4854 tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
4855 tree vectype_unsigned = build_vector_type
4856 (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
4857
4858 /* First we need to create a vector (ZERO_VEC) of zeros and another
4859 vector (MAX_INDEX_VEC) filled with the last matching index, which we
4860 can create using a MAX reduction and then expanding.
4861 In the case where the loop never made any matches, the max index will
4862 be zero. */
4863
4864 /* Vector of {0, 0, 0,...}. */
4865 tree zero_vec = make_ssa_name (vectype);
4866 tree zero_vec_rhs = build_zero_cst (vectype);
4867 gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
4868 gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);
4869
4870 /* Find maximum value from the vector of found indexes. */
4871 tree max_index = make_ssa_name (index_scalar_type);
4872 gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4873 1, induction_index);
4874 gimple_call_set_lhs (max_index_stmt, max_index);
4875 gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
4876
4877 /* Vector of {max_index, max_index, max_index,...}. */
4878 tree max_index_vec = make_ssa_name (index_vec_type);
4879 tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
4880 max_index);
4881 gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
4882 max_index_vec_rhs);
4883 gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
4884
4885 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
4886 with the vector (INDUCTION_INDEX) of found indexes, choosing values
4887 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
4888 otherwise. Only one value should match, resulting in a vector
4889 (VEC_COND) with one data value and the rest zeros.
4890 In the case where the loop never made any matches, every index will
4891 match, resulting in a vector with all data values (which will all be
4892 the default value). */
4893
4894 /* Compare the max index vector to the vector of found indexes to find
4895 the position of the max value. */
4896 tree vec_compare = make_ssa_name (index_vec_cmp_type);
4897 gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
4898 induction_index,
4899 max_index_vec);
4900 gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
4901
4902 /* Use the compare to choose either values from the data vector or
4903 zero. */
4904 tree vec_cond = make_ssa_name (vectype);
4905 gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
4906 vec_compare, new_phi_result,
4907 zero_vec);
4908 gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
4909
4910 /* Finally we need to extract the data value from the vector (VEC_COND)
4911 into a scalar (MATCHED_DATA_REDUC). Logically we want to do a OR
4912 reduction, but because this doesn't exist, we can use a MAX reduction
4913 instead. The data value might be signed or a float so we need to cast
4914 it first.
4915 In the case where the loop never made any matches, the data values are
4916 all identical, and so will reduce down correctly. */
4917
4918 /* Make the matched data values unsigned. */
4919 tree vec_cond_cast = make_ssa_name (vectype_unsigned);
4920 tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
4921 vec_cond);
4922 gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
4923 VIEW_CONVERT_EXPR,
4924 vec_cond_cast_rhs);
4925 gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
4926
4927 /* Reduce down to a scalar value. */
4928 tree data_reduc = make_ssa_name (scalar_type_unsigned);
4929 gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4930 1, vec_cond_cast);
4931 gimple_call_set_lhs (data_reduc_stmt, data_reduc);
4932 gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
4933
4934 /* Convert the reduced value back to the result type and set as the
4935 result. */
4936 gimple_seq stmts = NULL;
4937 new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
4938 data_reduc);
4939 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
4940 scalar_results.safe_push (new_temp);
4941 }
4942 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4943 && reduc_fn == IFN_LAST)
4944 {
4945       /* Condition reduction without supported IFN_REDUC_MAX.  Generate
4946 	 the equivalent of:
4947 	 idx_val = induction_index[0];
4948 	 val = data_reduc[0];
4949 	 for (i = 1; i < nelts; ++i)
4950 	   if (induction_index[i] > idx_val)
4951 	     val = data_reduc[i], idx_val = induction_index[i];
4952 	 return val;  */
4953
4954 tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
4955 tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
4956 unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
4957 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
4958 /* Enforced by vectorizable_reduction, which ensures we have target
4959 support before allowing a conditional reduction on variable-length
4960 vectors. */
4961 unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
4962 tree idx_val = NULL_TREE, val = NULL_TREE;
4963 for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
4964 {
4965 tree old_idx_val = idx_val;
4966 tree old_val = val;
4967 idx_val = make_ssa_name (idx_eltype);
4968 epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
4969 build3 (BIT_FIELD_REF, idx_eltype,
4970 induction_index,
4971 bitsize_int (el_size),
4972 bitsize_int (off)));
4973 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4974 val = make_ssa_name (data_eltype);
4975 epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
4976 build3 (BIT_FIELD_REF,
4977 data_eltype,
4978 new_phi_result,
4979 bitsize_int (el_size),
4980 bitsize_int (off)));
4981 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4982 if (off != 0)
4983 {
4984 tree new_idx_val = idx_val;
4985 tree new_val = val;
4986 if (off != v_size - el_size)
4987 {
4988 new_idx_val = make_ssa_name (idx_eltype);
4989 epilog_stmt = gimple_build_assign (new_idx_val,
4990 MAX_EXPR, idx_val,
4991 old_idx_val);
4992 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4993 }
4994 new_val = make_ssa_name (data_eltype);
4995 epilog_stmt = gimple_build_assign (new_val,
4996 COND_EXPR,
4997 build2 (GT_EXPR,
4998 boolean_type_node,
4999 idx_val,
5000 old_idx_val),
5001 val, old_val);
5002 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5003 idx_val = new_idx_val;
5004 val = new_val;
5005 }
5006 }
5007 /* Convert the reduced value back to the result type and set as the
5008 result. */
5009 gimple_seq stmts = NULL;
5010 val = gimple_convert (&stmts, scalar_type, val);
5011 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5012 scalar_results.safe_push (val);
5013 }
5014
5015 /* 2.3 Create the reduction code, using one of the three schemes described
5016 above. In SLP we simply need to extract all the elements from the
5017 vector (without reducing them), so we use scalar shifts. */
5018 else if (reduc_fn != IFN_LAST && !slp_reduc)
5019 {
5020 tree tmp;
5021 tree vec_elem_type;
5022
5023 /* Case 1: Create:
5024 v_out2 = reduc_expr <v_out1> */
5025
5026 if (dump_enabled_p ())
5027 dump_printf_loc (MSG_NOTE, vect_location,
5028 "Reduce using direct vector reduction.\n");
5029
5030 vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
5031 if (!useless_type_conversion_p (scalar_type, vec_elem_type))
5032 {
5033 tree tmp_dest
5034 = vect_create_destination_var (scalar_dest, vec_elem_type);
5035 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5036 new_phi_result);
5037 gimple_set_lhs (epilog_stmt, tmp_dest);
5038 new_temp = make_ssa_name (tmp_dest, epilog_stmt);
5039 gimple_set_lhs (epilog_stmt, new_temp);
5040 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5041
5042 epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
5043 new_temp);
5044 }
5045 else
5046 {
5047 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5048 new_phi_result);
5049 gimple_set_lhs (epilog_stmt, new_scalar_dest);
5050 }
5051
5052 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5053 gimple_set_lhs (epilog_stmt, new_temp);
5054 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5055
5056 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5057 == INTEGER_INDUC_COND_REDUCTION)
5058 && !operand_equal_p (initial_def, induc_val, 0))
5059 {
5060 	  /* Earlier we set the initial value to be a vector of induc_val
5061 	     values.  Check the result and if it is induc_val then replace
5062 	     it with the original initial value, unless induc_val is
5063 	     already the same as initial_def.  */
5064 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5065 induc_val);
5066
5067 tmp = make_ssa_name (new_scalar_dest);
5068 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5069 initial_def, new_temp);
5070 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5071 new_temp = tmp;
5072 }
5073
5074 scalar_results.safe_push (new_temp);
5075 }
5076 else if (direct_slp_reduc)
5077 {
5078 /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
5079 with the elements for other SLP statements replaced with the
5080 neutral value. We can then do a normal reduction on each vector. */
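	 /* An informal sketch of what the loop below does (not a literal
	    trace): with REDUC_GROUP_SIZE == 2 and a four-element vector, the
	    i == 0 iteration keeps the lanes whose index is even (replacing
	    the odd lanes with the neutral value) and reduces them, while the
	    i == 1 iteration keeps the odd lanes, so each SLP statement gets
	    its own scalar result. */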
5081
5082 /* Enforced by vectorizable_reduction. */
5083 gcc_assert (new_phis.length () == 1);
5084 gcc_assert (pow2p_hwi (group_size));
5085
5086 slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
5087 vec<stmt_vec_info> orig_phis
5088 = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
5089 gimple_seq seq = NULL;
5090
5091 /* Build a vector {0, 1, 2, ...}, with the same number of elements
5092 and the same element size as VECTYPE. */
5093 tree index = build_index_vector (vectype, 0, 1);
5094 tree index_type = TREE_TYPE (index);
5095 tree index_elt_type = TREE_TYPE (index_type);
5096 tree mask_type = build_same_sized_truth_vector_type (index_type);
5097
5098 /* Create a vector that, for each element, identifies which of
5099 the REDUC_GROUP_SIZE results should use it. */
5100 tree index_mask = build_int_cst (index_elt_type, group_size - 1);
5101 index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
5102 build_vector_from_val (index_type, index_mask));
5103
5104 /* Get a neutral vector value. This is simply a splat of the neutral
5105 scalar value if we have one, otherwise the initial scalar value
5106 is itself a neutral value. */
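	  /* For instance (informal; the actual choice is made where NEUTRAL_OP
	     is computed): additive reductions can use 0 and multiplicative
	     reductions 1 as the neutral value, whereas MIN and MAX have no
	     universal neutral value and rely on the initial scalar value,
	     handled in the loop below. */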
5107 tree vector_identity = NULL_TREE;
5108 if (neutral_op)
5109 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5110 neutral_op);
5111 for (unsigned int i = 0; i < group_size; ++i)
5112 {
5113 	      /* If there's no universal neutral value, we can use the
5114 initial scalar value from the original PHI. This is used
5115 for MIN and MAX reduction, for example. */
5116 if (!neutral_op)
5117 {
5118 tree scalar_value
5119 = PHI_ARG_DEF_FROM_EDGE (orig_phis[i]->stmt,
5120 loop_preheader_edge (loop));
5121 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5122 scalar_value);
5123 }
5124
5125 /* Calculate the equivalent of:
5126
5127 sel[j] = (index[j] == i);
5128
5129 which selects the elements of NEW_PHI_RESULT that should
5130 be included in the result. */
5131 tree compare_val = build_int_cst (index_elt_type, i);
5132 compare_val = build_vector_from_val (index_type, compare_val);
5133 tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
5134 index, compare_val);
5135
5136 /* Calculate the equivalent of:
5137
5138 		 vec = sel ? new_phi_result : vector_identity;
5139
5140 VEC is now suitable for a full vector reduction. */
5141 tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
5142 sel, new_phi_result, vector_identity);
5143
5144 /* Do the reduction and convert it to the appropriate type. */
5145 tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
5146 TREE_TYPE (vectype), vec);
5147 scalar = gimple_convert (&seq, scalar_type, scalar);
5148 scalar_results.safe_push (scalar);
5149 }
5150 gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
5151 }
5152 else
5153 {
5154 bool reduce_with_shift;
5155 tree vec_temp;
5156
5157 /* COND reductions all do the final reduction with MAX_EXPR
5158 or MIN_EXPR. */
5159 if (code == COND_EXPR)
5160 {
5161 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5162 == INTEGER_INDUC_COND_REDUCTION)
5163 code = induc_code;
5164 else
5165 code = MAX_EXPR;
5166 }
5167
5168 /* See if the target wants to do the final (shift) reduction
5169 in a vector mode of smaller size and first reduce upper/lower
5170 halves against each other. */
5171 enum machine_mode mode1 = mode;
5172 tree vectype1 = vectype;
5173 unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
5174 unsigned sz1 = sz;
5175 if (!slp_reduc
5176 && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
5177 sz1 = GET_MODE_SIZE (mode1).to_constant ();
5178
5179 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1);
5180 reduce_with_shift = have_whole_vector_shift (mode1);
5181 if (!VECTOR_MODE_P (mode1))
5182 reduce_with_shift = false;
5183 else
5184 {
5185 optab optab = optab_for_tree_code (code, vectype1, optab_default);
5186 if (optab_handler (optab, mode1) == CODE_FOR_nothing)
5187 reduce_with_shift = false;
5188 }
5189
5190 	  /* First reduce the vector to the vector size we should do the
5191 	     shift reduction on, by combining upper and lower halves. */
5192 new_temp = new_phi_result;
5193 while (sz > sz1)
5194 {
5195 gcc_assert (!slp_reduc);
5196 sz /= 2;
5197 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz);
5198
5199 /* The target has to make sure we support lowpart/highpart
5200 		 extraction, either via direct vector extraction or through
5201 		 integer mode punning. */
5202 tree dst1, dst2;
5203 if (convert_optab_handler (vec_extract_optab,
5204 TYPE_MODE (TREE_TYPE (new_temp)),
5205 TYPE_MODE (vectype1))
5206 != CODE_FOR_nothing)
5207 {
5208 /* Extract sub-vectors directly once vec_extract becomes
5209 a conversion optab. */
5210 dst1 = make_ssa_name (vectype1);
5211 epilog_stmt
5212 = gimple_build_assign (dst1, BIT_FIELD_REF,
5213 build3 (BIT_FIELD_REF, vectype1,
5214 new_temp, TYPE_SIZE (vectype1),
5215 bitsize_int (0)));
5216 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5217 dst2 = make_ssa_name (vectype1);
5218 epilog_stmt
5219 = gimple_build_assign (dst2, BIT_FIELD_REF,
5220 build3 (BIT_FIELD_REF, vectype1,
5221 new_temp, TYPE_SIZE (vectype1),
5222 bitsize_int (sz * BITS_PER_UNIT)));
5223 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5224 }
5225 else
5226 {
5227 /* Extract via punning to appropriately sized integer mode
5228 vector. */
5229 tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT,
5230 1);
5231 tree etype = build_vector_type (eltype, 2);
5232 gcc_assert (convert_optab_handler (vec_extract_optab,
5233 TYPE_MODE (etype),
5234 TYPE_MODE (eltype))
5235 != CODE_FOR_nothing);
5236 tree tem = make_ssa_name (etype);
5237 epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
5238 build1 (VIEW_CONVERT_EXPR,
5239 etype, new_temp));
5240 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5241 new_temp = tem;
5242 tem = make_ssa_name (eltype);
5243 epilog_stmt
5244 = gimple_build_assign (tem, BIT_FIELD_REF,
5245 build3 (BIT_FIELD_REF, eltype,
5246 new_temp, TYPE_SIZE (eltype),
5247 bitsize_int (0)));
5248 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5249 dst1 = make_ssa_name (vectype1);
5250 epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
5251 build1 (VIEW_CONVERT_EXPR,
5252 vectype1, tem));
5253 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5254 tem = make_ssa_name (eltype);
5255 epilog_stmt
5256 = gimple_build_assign (tem, BIT_FIELD_REF,
5257 build3 (BIT_FIELD_REF, eltype,
5258 new_temp, TYPE_SIZE (eltype),
5259 bitsize_int (sz * BITS_PER_UNIT)));
5260 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5261 dst2 = make_ssa_name (vectype1);
5262 epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
5263 build1 (VIEW_CONVERT_EXPR,
5264 vectype1, tem));
5265 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5266 }
5267
5268 new_temp = make_ssa_name (vectype1);
5269 epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
5270 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5271 }
5272
5273 if (reduce_with_shift && !slp_reduc)
5274 {
5275 int element_bitsize = tree_to_uhwi (bitsize);
5276 /* Enforced by vectorizable_reduction, which disallows SLP reductions
5277 for variable-length vectors and also requires direct target support
5278 for loop reductions. */
5279 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5280 int nelements = vec_size_in_bits / element_bitsize;
5281 vec_perm_builder sel;
5282 vec_perm_indices indices;
5283
5284 int elt_offset;
5285
5286 tree zero_vec = build_zero_cst (vectype1);
5287 /* Case 2: Create:
5288 for (offset = nelements/2; offset >= 1; offset/=2)
5289 {
5290 Create: va' = vec_shift <va, offset>
5291 Create: va = vop <va, va'>
5292 } */
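	  /* An illustrative sketch, assuming a PLUS reduction of a
	     four-element vector va = {a0, a1, a2, a3}: the first iteration
	     shifts in zeros to give {a2, a3, 0, 0} and adds, producing
	     {a0+a2, a1+a3, a2, a3}; the second iteration shifts by one
	     element and adds again, leaving the full sum in element 0,
	     which step 2.4 below then extracts.  The remaining lanes hold
	     partial values that are simply ignored. */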
5293
5294 tree rhs;
5295
5296 if (dump_enabled_p ())
5297 dump_printf_loc (MSG_NOTE, vect_location,
5298 "Reduce using vector shifts\n");
5299
5300 mode1 = TYPE_MODE (vectype1);
5301 vec_dest = vect_create_destination_var (scalar_dest, vectype1);
5302 for (elt_offset = nelements / 2;
5303 elt_offset >= 1;
5304 elt_offset /= 2)
5305 {
5306 calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
5307 indices.new_vector (sel, 2, nelements);
5308 tree mask = vect_gen_perm_mask_any (vectype1, indices);
5309 epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
5310 new_temp, zero_vec, mask);
5311 new_name = make_ssa_name (vec_dest, epilog_stmt);
5312 gimple_assign_set_lhs (epilog_stmt, new_name);
5313 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5314
5315 epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
5316 new_temp);
5317 new_temp = make_ssa_name (vec_dest, epilog_stmt);
5318 gimple_assign_set_lhs (epilog_stmt, new_temp);
5319 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5320 }
5321
5322 /* 2.4 Extract the final scalar result. Create:
5323 s_out3 = extract_field <v_out2, bitpos> */
5324
5325 if (dump_enabled_p ())
5326 dump_printf_loc (MSG_NOTE, vect_location,
5327 "extract scalar result\n");
5328
5329 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
5330 bitsize, bitsize_zero_node);
5331 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5332 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5333 gimple_assign_set_lhs (epilog_stmt, new_temp);
5334 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5335 scalar_results.safe_push (new_temp);
5336 }
5337 else
5338 {
5339 /* Case 3: Create:
5340 s = extract_field <v_out2, 0>
5341 for (offset = element_size;
5342 offset < vector_size;
5343 offset += element_size;)
5344 {
5345 Create: s' = extract_field <v_out2, offset>
5346 Create: s = op <s, s'> // For non SLP cases
5347 } */
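	      /* As an informal example for the non-SLP case, assuming a PLUS
		 reduction of a four-element vector v, the code below emits
		 s = v[0]; s = v[1] + s; s = v[2] + s; s = v[3] + s,
		 i.e. an open-coded scalar reduction over the vector lanes. */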
5348
5349 if (dump_enabled_p ())
5350 dump_printf_loc (MSG_NOTE, vect_location,
5351 "Reduce using scalar code.\n");
5352
5353 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5354 int element_bitsize = tree_to_uhwi (bitsize);
5355 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
5356 {
5357 int bit_offset;
5358 if (gimple_code (new_phi) == GIMPLE_PHI)
5359 vec_temp = PHI_RESULT (new_phi);
5360 else
5361 vec_temp = gimple_assign_lhs (new_phi);
5362 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
5363 bitsize_zero_node);
5364 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5365 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5366 gimple_assign_set_lhs (epilog_stmt, new_temp);
5367 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5368
5369 	      /* In SLP we don't need to apply the reduction operation, so we
5370 		 just collect the s' values in SCALAR_RESULTS. */
5371 if (slp_reduc)
5372 scalar_results.safe_push (new_temp);
5373
5374 for (bit_offset = element_bitsize;
5375 bit_offset < vec_size_in_bits;
5376 bit_offset += element_bitsize)
5377 {
5378 tree bitpos = bitsize_int (bit_offset);
5379 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
5380 bitsize, bitpos);
5381
5382 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5383 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
5384 gimple_assign_set_lhs (epilog_stmt, new_name);
5385 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5386
5387 if (slp_reduc)
5388 {
5389 			  /* In SLP we don't need to apply the reduction operation,
5390 			     so we just collect the s' values in SCALAR_RESULTS. */
5391 new_temp = new_name;
5392 scalar_results.safe_push (new_name);
5393 }
5394 else
5395 {
5396 epilog_stmt = gimple_build_assign (new_scalar_dest, code,
5397 new_name, new_temp);
5398 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5399 gimple_assign_set_lhs (epilog_stmt, new_temp);
5400 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5401 }
5402 }
5403 }
5404
5405 	      /* The only case where we need to reduce scalar results in SLP is
5406 		 unrolling.  If the size of SCALAR_RESULTS is greater than
5407 		 REDUC_GROUP_SIZE, we reduce them by combining elements modulo
5408 		 REDUC_GROUP_SIZE. */
5409 if (slp_reduc)
5410 {
5411 tree res, first_res, new_res;
5412 gimple *new_stmt;
5413
5414 /* Reduce multiple scalar results in case of SLP unrolling. */
5415 for (j = group_size; scalar_results.iterate (j, &res);
5416 j++)
5417 {
5418 first_res = scalar_results[j % group_size];
5419 new_stmt = gimple_build_assign (new_scalar_dest, code,
5420 first_res, res);
5421 new_res = make_ssa_name (new_scalar_dest, new_stmt);
5422 gimple_assign_set_lhs (new_stmt, new_res);
5423 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
5424 scalar_results[j % group_size] = new_res;
5425 }
5426 }
5427 else
5428 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
5429 scalar_results.safe_push (new_temp);
5430 }
5431
5432 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5433 == INTEGER_INDUC_COND_REDUCTION)
5434 && !operand_equal_p (initial_def, induc_val, 0))
5435 {
5436 	  /* Earlier we set the initial value to be a vector of induc_val
5437 	     values.  Check the result and, if it is induc_val, replace it
5438 	     with the original initial value, unless induc_val is already
5439 	     the same as initial_def. */
5440 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5441 induc_val);
5442
5443 tree tmp = make_ssa_name (new_scalar_dest);
5444 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5445 initial_def, new_temp);
5446 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5447 scalar_results[0] = tmp;
5448 }
5449 }
5450
5451 vect_finalize_reduction:
5452
5453 if (double_reduc)
5454 loop = loop->inner;
5455
5456 /* 2.5 Adjust the final result by the initial value of the reduction
5457 variable. (When such adjustment is not needed, then
5458 'adjustment_def' is zero). For example, if code is PLUS we create:
5459 new_temp = loop_exit_def + adjustment_def */
5460
5461 if (adjustment_def)
5462 {
5463 gcc_assert (!slp_reduc);
5464 if (nested_in_vect_loop)
5465 {
5466 new_phi = new_phis[0];
5467 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
5468 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
5469 new_dest = vect_create_destination_var (scalar_dest, vectype);
5470 }
5471 else
5472 {
5473 new_temp = scalar_results[0];
5474 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
5475 expr = build2 (code, scalar_type, new_temp, adjustment_def);
5476 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
5477 }
5478
5479 epilog_stmt = gimple_build_assign (new_dest, expr);
5480 new_temp = make_ssa_name (new_dest, epilog_stmt);
5481 gimple_assign_set_lhs (epilog_stmt, new_temp);
5482 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5483 if (nested_in_vect_loop)
5484 {
5485 stmt_vec_info epilog_stmt_info = loop_vinfo->add_stmt (epilog_stmt);
5486 STMT_VINFO_RELATED_STMT (epilog_stmt_info)
5487 = STMT_VINFO_RELATED_STMT (loop_vinfo->lookup_stmt (new_phi));
5488
5489 if (!double_reduc)
5490 scalar_results.quick_push (new_temp);
5491 else
5492 scalar_results[0] = new_temp;
5493 }
5494 else
5495 scalar_results[0] = new_temp;
5496
5497 new_phis[0] = epilog_stmt;
5498 }
5499
5500 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
5501 phis with new adjusted scalar results, i.e., replace use <s_out0>
5502 with use <s_out4>.
5503
5504 Transform:
5505 loop_exit:
5506 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5507 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5508 v_out2 = reduce <v_out1>
5509 s_out3 = extract_field <v_out2, 0>
5510 s_out4 = adjust_result <s_out3>
5511 use <s_out0>
5512 use <s_out0>
5513
5514 into:
5515
5516 loop_exit:
5517 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5518 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5519 v_out2 = reduce <v_out1>
5520 s_out3 = extract_field <v_out2, 0>
5521 s_out4 = adjust_result <s_out3>
5522 use <s_out4>
5523 use <s_out4> */
5524
5525
5526 /* In an SLP reduction chain we reduce the vector results into one vector
5527 if necessary, hence we set REDUC_GROUP_SIZE to 1 here.  SCALAR_DEST is
5528 the LHS of the last stmt in the reduction chain, since we are looking
5529 for the loop exit phi node. */
5530 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
5531 {
5532 stmt_vec_info dest_stmt_info
5533 = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
5534 scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
5535 group_size = 1;
5536 }
5537
5538 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
5539 case REDUC_GROUP_SIZE is greater than the vectorization factor).
5540 Therefore, we need to match SCALAR_RESULTS with the corresponding statements.
5541 The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
5542 correspond to the first vector stmt, etc.
5543 (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)). */
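/* A hypothetical example: with REDUC_GROUP_SIZE == 4 and two statements in
   NEW_PHIS, RATIO is 2, so scalar results 0 and 1 are matched with the
   first vector stmt and scalar results 2 and 3 with the second. */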
5544 if (group_size > new_phis.length ())
5545 {
5546 ratio = group_size / new_phis.length ();
5547 gcc_assert (!(group_size % new_phis.length ()));
5548 }
5549 else
5550 ratio = 1;
5551
5552 stmt_vec_info epilog_stmt_info = NULL;
5553 for (k = 0; k < group_size; k++)
5554 {
5555 if (k % ratio == 0)
5556 {
5557 epilog_stmt_info = loop_vinfo->lookup_stmt (new_phis[k / ratio]);
5558 reduction_phi_info = reduction_phis[k / ratio];
5559 if (double_reduc)
5560 inner_phi = inner_phis[k / ratio];
5561 }
5562
5563 if (slp_reduc)
5564 {
5565 stmt_vec_info scalar_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[k];
5566
5567 orig_stmt_info = STMT_VINFO_RELATED_STMT (scalar_stmt_info);
5568 /* SLP statements can't participate in patterns. */
5569 gcc_assert (!orig_stmt_info);
5570 scalar_dest = gimple_assign_lhs (scalar_stmt_info->stmt);
5571 }
5572
5573 phis.create (3);
5574 /* Find the loop-closed-use at the loop exit of the original scalar
5575 result. (The reduction result is expected to have two immediate uses -
5576 one at the latch block, and one at the loop exit). */
5577 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5578 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
5579 && !is_gimple_debug (USE_STMT (use_p)))
5580 phis.safe_push (USE_STMT (use_p));
5581
5582 /* While we expect to have found an exit_phi because of loop-closed-ssa
5583 form, we can end up without one if the scalar cycle is dead. */
5584
5585 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5586 {
5587 if (outer_loop)
5588 {
5589 stmt_vec_info exit_phi_vinfo
5590 = loop_vinfo->lookup_stmt (exit_phi);
5591 gphi *vect_phi;
5592
5593 /* FORNOW. Currently not supporting the case that an inner-loop
5594 reduction is not used in the outer-loop (but only outside the
5595 		 outer-loop), unless it is a double reduction. */
5596 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5597 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
5598 || double_reduc);
5599
5600 if (double_reduc)
5601 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
5602 else
5603 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt_info;
5604 if (!double_reduc
5605 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
5606 != vect_double_reduction_def)
5607 continue;
5608
5609 /* Handle double reduction:
5610
5611 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
5612 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
5613 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
5614 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
5615
5616 At that point the regular reduction (stmt2 and stmt3) is
5617 already vectorized, as well as the exit phi node, stmt4.
5618 Here we vectorize the phi node of double reduction, stmt1, and
5619 update all relevant statements. */
5620
5621 /* Go through all the uses of s2 to find double reduction phi
5622 node, i.e., stmt1 above. */
5623 orig_name = PHI_RESULT (exit_phi);
5624 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5625 {
5626 stmt_vec_info use_stmt_vinfo;
5627 tree vect_phi_init, preheader_arg, vect_phi_res;
5628 basic_block bb = gimple_bb (use_stmt);
5629
5630 		      /* Check that USE_STMT is really a double reduction phi
5631 			 node. */
5632 if (gimple_code (use_stmt) != GIMPLE_PHI
5633 || gimple_phi_num_args (use_stmt) != 2
5634 || bb->loop_father != outer_loop)
5635 continue;
5636 use_stmt_vinfo = loop_vinfo->lookup_stmt (use_stmt);
5637 if (!use_stmt_vinfo
5638 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
5639 != vect_double_reduction_def)
5640 continue;
5641
5642 /* Create vector phi node for double reduction:
5643 vs1 = phi <vs0, vs2>
5644 vs1 was created previously in this function by a call to
5645 vect_get_vec_def_for_operand and is stored in
5646 vec_initial_def;
5647 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
5648 vs0 is created here. */
5649
5650 /* Create vector phi node. */
5651 vect_phi = create_phi_node (vec_initial_def, bb);
5652 loop_vec_info_for_loop (outer_loop)->add_stmt (vect_phi);
5653
5654 /* Create vs0 - initial def of the double reduction phi. */
5655 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
5656 loop_preheader_edge (outer_loop));
5657 vect_phi_init = get_initial_def_for_reduction
5658 (stmt_info, preheader_arg, NULL);
5659
5660 /* Update phi node arguments with vs0 and vs2. */
5661 add_phi_arg (vect_phi, vect_phi_init,
5662 loop_preheader_edge (outer_loop),
5663 UNKNOWN_LOCATION);
5664 add_phi_arg (vect_phi, PHI_RESULT (inner_phi->stmt),
5665 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
5666 if (dump_enabled_p ())
5667 {
5668 dump_printf_loc (MSG_NOTE, vect_location,
5669 "created double reduction phi node: ");
5670 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
5671 }
5672
5673 vect_phi_res = PHI_RESULT (vect_phi);
5674
5675 /* Replace the use, i.e., set the correct vs1 in the regular
5676 reduction phi node. FORNOW, NCOPIES is always 1, so the
5677 loop is redundant. */
5678 stmt_vec_info use_info = reduction_phi_info;
5679 for (j = 0; j < ncopies; j++)
5680 {
5681 edge pr_edge = loop_preheader_edge (loop);
5682 SET_PHI_ARG_DEF (as_a <gphi *> (use_info->stmt),
5683 pr_edge->dest_idx, vect_phi_res);
5684 use_info = STMT_VINFO_RELATED_STMT (use_info);
5685 }
5686 }
5687 }
5688 }
5689
5690 phis.release ();
5691 if (nested_in_vect_loop)
5692 {
5693 if (double_reduc)
5694 loop = outer_loop;
5695 else
5696 continue;
5697 }
5698
5699 phis.create (3);
5700 /* Find the loop-closed-use at the loop exit of the original scalar
5701 result. (The reduction result is expected to have two immediate uses,
5702 one at the latch block, and one at the loop exit). For double
5703 reductions we are looking for exit phis of the outer loop. */
5704 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5705 {
5706 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
5707 {
5708 if (!is_gimple_debug (USE_STMT (use_p)))
5709 phis.safe_push (USE_STMT (use_p));
5710 }
5711 else
5712 {
5713 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
5714 {
5715 tree phi_res = PHI_RESULT (USE_STMT (use_p));
5716
5717 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
5718 {
5719 if (!flow_bb_inside_loop_p (loop,
5720 gimple_bb (USE_STMT (phi_use_p)))
5721 && !is_gimple_debug (USE_STMT (phi_use_p)))
5722 phis.safe_push (USE_STMT (phi_use_p));
5723 }
5724 }
5725 }
5726 }
5727
5728 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5729 {
5730 /* Replace the uses: */
5731 orig_name = PHI_RESULT (exit_phi);
5732 scalar_result = scalar_results[k];
5733 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5734 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5735 SET_USE (use_p, scalar_result);
5736 }
5737
5738 phis.release ();
5739 }
5740 }
5741
5742 /* Return a vector of type VECTYPE that is equal to the vector select
5743 operation "MASK ? VEC : IDENTITY". Insert the select statements
5744 before GSI. */
5745
5746 static tree
5747 merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
5748 tree vec, tree identity)
5749 {
5750 tree cond = make_temp_ssa_name (vectype, NULL, "cond");
5751 gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
5752 mask, vec, identity);
5753 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5754 return cond;
5755 }
5756
5757 /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
5758 order, starting with LHS. Insert the extraction statements before GSI and
5759 associate the new scalar SSA names with variable SCALAR_DEST.
5760 Return the SSA name for the result. */
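/* For example (an informal sketch, not a transcript of the generated
   GIMPLE): with CODE == PLUS_EXPR and a four-element VECTOR_RHS v, the
   loop below emits the scalar chain
     s0 = lhs + v[0];  s1 = s0 + v[1];  s2 = s1 + v[2];  s3 = s2 + v[3];
   and returns s3, preserving the left-to-right evaluation order. */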
5761
5762 static tree
5763 vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
5764 tree_code code, tree lhs, tree vector_rhs)
5765 {
5766 tree vectype = TREE_TYPE (vector_rhs);
5767 tree scalar_type = TREE_TYPE (vectype);
5768 tree bitsize = TYPE_SIZE (scalar_type);
5769 unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
5770 unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
5771
5772 for (unsigned HOST_WIDE_INT bit_offset = 0;
5773 bit_offset < vec_size_in_bits;
5774 bit_offset += element_bitsize)
5775 {
5776 tree bitpos = bitsize_int (bit_offset);
5777 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
5778 bitsize, bitpos);
5779
5780 gassign *stmt = gimple_build_assign (scalar_dest, rhs);
5781 rhs = make_ssa_name (scalar_dest, stmt);
5782 gimple_assign_set_lhs (stmt, rhs);
5783 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5784
5785 stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
5786 tree new_name = make_ssa_name (scalar_dest, stmt);
5787 gimple_assign_set_lhs (stmt, new_name);
5788 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5789 lhs = new_name;
5790 }
5791 return lhs;
5792 }
5793
5794 /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT_INFO is the
5795 statement that sets the live-out value. REDUC_DEF_STMT is the phi
5796 statement. CODE is the operation performed by STMT_INFO and OPS are
5797 its scalar operands. REDUC_INDEX is the index of the operand in
5798 OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
5799 implements in-order reduction, or IFN_LAST if we should open-code it.
5800 VECTYPE_IN is the type of the vector input. MASKS specifies the masks
5801 that should be used to control the operation in a fully-masked loop. */
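/* Informally: a fold-left reduction accumulates the vector elements
   strictly in their original order, e.g. r = (((init + v[0]) + v[1]) + ...),
   instead of building per-lane partial results, so it is suitable when the
   scalar reduction must not be reassociated (for example floating-point
   additions under the default FP rules). */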
5802
5803 static bool
5804 vectorize_fold_left_reduction (stmt_vec_info stmt_info,
5805 gimple_stmt_iterator *gsi,
5806 stmt_vec_info *vec_stmt, slp_tree slp_node,
5807 gimple *reduc_def_stmt,
5808 tree_code code, internal_fn reduc_fn,
5809 tree ops[3], tree vectype_in,
5810 int reduc_index, vec_loop_masks *masks)
5811 {
5812 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5813 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5814 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5815 stmt_vec_info new_stmt_info = NULL;
5816
5817 int ncopies;
5818 if (slp_node)
5819 ncopies = 1;
5820 else
5821 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
5822
5823 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
5824 gcc_assert (ncopies == 1);
5825 gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
5826 gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
5827 gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5828 == FOLD_LEFT_REDUCTION);
5829
5830 if (slp_node)
5831 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
5832 TYPE_VECTOR_SUBPARTS (vectype_in)));
5833
5834 tree op0 = ops[1 - reduc_index];
5835
5836 int group_size = 1;
5837 stmt_vec_info scalar_dest_def_info;
5838 auto_vec<tree> vec_oprnds0;
5839 if (slp_node)
5840 {
5841 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5842 slp_node);
5843 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
5844 scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5845 }
5846 else
5847 {
5848 tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt_info);
5849 vec_oprnds0.create (1);
5850 vec_oprnds0.quick_push (loop_vec_def0);
5851 scalar_dest_def_info = stmt_info;
5852 }
5853
5854 tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);
5855 tree scalar_type = TREE_TYPE (scalar_dest);
5856 tree reduc_var = gimple_phi_result (reduc_def_stmt);
5857
5858 int vec_num = vec_oprnds0.length ();
5859 gcc_assert (vec_num == 1 || slp_node);
5860 tree vec_elem_type = TREE_TYPE (vectype_out);
5861 gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
5862
5863 tree vector_identity = NULL_TREE;
5864 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5865 vector_identity = build_zero_cst (vectype_out);
5866
5867 tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
5868 int i;
5869 tree def0;
5870 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5871 {
5872 gimple *new_stmt;
5873 tree mask = NULL_TREE;
5874 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5875 mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
5876
5877 /* Handle MINUS by adding the negative. */
5878 if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
5879 {
5880 tree negated = make_ssa_name (vectype_out);
5881 new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
5882 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5883 def0 = negated;
5884 }
5885
5886 if (mask)
5887 def0 = merge_with_identity (gsi, mask, vectype_out, def0,
5888 vector_identity);
5889
5890 /* On the first iteration the input is simply the scalar phi
5891 result, and for subsequent iterations it is the output of
5892 the preceding operation. */
5893 if (reduc_fn != IFN_LAST)
5894 {
5895 new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0);
5896 /* For chained SLP reductions the output of the previous reduction
5897 operation serves as the input of the next. For the final statement
5898 the output cannot be a temporary - we reuse the original
5899 scalar destination of the last statement. */
5900 if (i != vec_num - 1)
5901 {
5902 gimple_set_lhs (new_stmt, scalar_dest_var);
5903 reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
5904 gimple_set_lhs (new_stmt, reduc_var);
5905 }
5906 }
5907 else
5908 {
5909 reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
5910 reduc_var, def0);
5911 new_stmt = SSA_NAME_DEF_STMT (reduc_var);
5912 /* Remove the statement, so that we can use the same code paths
5913 as for statements that we've just created. */
5914 gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
5915 gsi_remove (&tmp_gsi, false);
5916 }
5917
5918 if (i == vec_num - 1)
5919 {
5920 gimple_set_lhs (new_stmt, scalar_dest);
5921 new_stmt_info = vect_finish_replace_stmt (scalar_dest_def_info,
5922 new_stmt);
5923 }
5924 else
5925 new_stmt_info = vect_finish_stmt_generation (scalar_dest_def_info,
5926 new_stmt, gsi);
5927
5928 if (slp_node)
5929 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5930 }
5931
5932 if (!slp_node)
5933 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5934
5935 return true;
5936 }
5937
5938 /* Function is_nonwrapping_integer_induction.
5939
5940 Check if STMT_VINFO (which is part of loop LOOP) describes an induction
5941 that both increments and does not overflow. */
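/* A hypothetical example of the check below: for an unsigned char induction
   with base 0 and step 1 in a loop that may execute up to 300 iterations,
   the maximum value 0 + 1 * 300 needs more than 8 bits of precision, so the
   induction is treated as potentially wrapping and we return false. */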
5942
5943 static bool
5944 is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, struct loop *loop)
5945 {
5946 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
5947 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
5948 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
5949 tree lhs_type = TREE_TYPE (gimple_phi_result (phi));
5950 widest_int ni, max_loop_value, lhs_max;
5951 wi::overflow_type overflow = wi::OVF_NONE;
5952
5953 /* Make sure the loop is integer based. */
5954 if (TREE_CODE (base) != INTEGER_CST
5955 || TREE_CODE (step) != INTEGER_CST)
5956 return false;
5957
5958 /* Check that the max size of the loop will not wrap. */
5959
5960 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
5961 return true;
5962
5963 if (! max_stmt_executions (loop, &ni))
5964 return false;
5965
5966 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
5967 &overflow);
5968 if (overflow)
5969 return false;
5970
5971 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
5972 TYPE_SIGN (lhs_type), &overflow);
5973 if (overflow)
5974 return false;
5975
5976 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
5977 <= TYPE_PRECISION (lhs_type));
5978 }
5979
5980 /* Function vectorizable_reduction.
5981
5982 Check if STMT_INFO performs a reduction operation that can be vectorized.
5983 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
5984 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5985 Return true if STMT_INFO is vectorizable in this way.
5986
5987 This function also handles reduction idioms (patterns) that have been
5988 recognized in advance during vect_pattern_recog. In this case, STMT_INFO
5989 may be of this form:
5990 X = pattern_expr (arg0, arg1, ..., X)
5991 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
5992 sequence that had been detected and replaced by the pattern-stmt
5993 (STMT_INFO).
5994
5995 This function also handles reduction of condition expressions, for example:
5996 for (int i = 0; i < N; i++)
5997 if (a[i] < value)
5998 last = a[i];
5999 This is handled by vectorising the loop and creating an additional vector
6000 containing the loop indexes for which "a[i] < value" was true. In the
6001 function epilogue this is reduced to a single max value and then used to
6002 index into the vector of results.
6003
6004 In some cases of reduction patterns, the type of the reduction variable X is
6005 different than the type of the other arguments of STMT_INFO.
6006 In such cases, the vectype that is used when transforming STMT_INFO into
6007 a vector stmt is different than the vectype that is used to determine the
6008 vectorization factor, because it consists of a different number of elements
6009 than the actual number of elements that are being operated upon in parallel.
6010
6011 For example, consider an accumulation of shorts into an int accumulator.
6012 On some targets it's possible to vectorize this pattern operating on 8
6013 shorts at a time (hence, the vectype for purposes of determining the
6014 vectorization factor should be V8HI); on the other hand, the vectype that
6015 is used to create the vector form is actually V4SI (the type of the result).
6016
6017 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
6018 indicates what is the actual level of parallelism (V8HI in the example), so
6019 that the right vectorization factor would be derived. This vectype
6020 corresponds to the type of arguments to the reduction stmt, and should *NOT*
6021 be used to create the vectorized stmt. The right vectype for the vectorized
6022 stmt is obtained from the type of the result X:
6023 get_vectype_for_scalar_type (TREE_TYPE (X))
6024
6025 This means that, contrary to "regular" reductions (or "regular" stmts in
6026 general), the following equation:
6027 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
6028 does *NOT* necessarily hold for reduction patterns. */
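/* As an illustration of the shorts-into-int accumulation described above,
   the scalar source would look something like:

     short a[N]; int sum = 0;
     for (int i = 0; i < N; i++)
       sum += a[i];

   where the pattern recognizer replaces the widening add with a widen_sum
   style operation whose result type (int) differs from the type of the
   data being summed (short). */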
6029
6030 bool
6031 vectorizable_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
6032 stmt_vec_info *vec_stmt, slp_tree slp_node,
6033 slp_instance slp_node_instance,
6034 stmt_vector_for_cost *cost_vec)
6035 {
6036 tree vec_dest;
6037 tree scalar_dest;
6038 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
6039 tree vectype_in = NULL_TREE;
6040 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6041 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6042 enum tree_code code, orig_code;
6043 internal_fn reduc_fn;
6044 machine_mode vec_mode;
6045 int op_type;
6046 optab optab;
6047 tree new_temp = NULL_TREE;
6048 enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
6049 stmt_vec_info cond_stmt_vinfo = NULL;
6050 enum tree_code cond_reduc_op_code = ERROR_MARK;
6051 tree scalar_type;
6052 bool is_simple_use;
6053 int i;
6054 int ncopies;
6055 int epilog_copies;
6056 stmt_vec_info prev_stmt_info, prev_phi_info;
6057 bool single_defuse_cycle = false;
6058 stmt_vec_info new_stmt_info = NULL;
6059 int j;
6060 tree ops[3];
6061 enum vect_def_type dts[3];
6062 bool nested_cycle = false, found_nested_cycle_def = false;
6063 bool double_reduc = false;
6064 basic_block def_bb;
6065 struct loop * def_stmt_loop;
6066 tree def_arg;
6067 auto_vec<tree> vec_oprnds0;
6068 auto_vec<tree> vec_oprnds1;
6069 auto_vec<tree> vec_oprnds2;
6070 auto_vec<tree> vect_defs;
6071 auto_vec<stmt_vec_info> phis;
6072 int vec_num;
6073 tree def0, tem;
6074 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
6075 tree cond_reduc_val = NULL_TREE;
6076
6077 /* Make sure it was already recognized as a reduction computation. */
6078 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
6079 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
6080 return false;
6081
6082 if (nested_in_vect_loop_p (loop, stmt_info))
6083 {
6084 loop = loop->inner;
6085 nested_cycle = true;
6086 }
6087
6088 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6089 gcc_assert (slp_node
6090 && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info);
6091
6092 if (gphi *phi = dyn_cast <gphi *> (stmt_info->stmt))
6093 {
6094 tree phi_result = gimple_phi_result (phi);
6095 /* Analysis is fully done on the reduction stmt invocation. */
6096 if (! vec_stmt)
6097 {
6098 if (slp_node)
6099 slp_node_instance->reduc_phis = slp_node;
6100
6101 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6102 return true;
6103 }
6104
6105 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6106 /* Leave the scalar phi in place. Note that checking
6107 STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
6108 for reductions involving a single statement. */
6109 return true;
6110
6111 stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
6112 reduc_stmt_info = vect_stmt_to_vectorize (reduc_stmt_info);
6113
6114 if (STMT_VINFO_VEC_REDUCTION_TYPE (reduc_stmt_info)
6115 == EXTRACT_LAST_REDUCTION)
6116 /* Leave the scalar phi in place. */
6117 return true;
6118
6119 gassign *reduc_stmt = as_a <gassign *> (reduc_stmt_info->stmt);
6120 for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
6121 {
6122 tree op = gimple_op (reduc_stmt, k);
6123 if (op == phi_result)
6124 continue;
6125 if (k == 1
6126 && gimple_assign_rhs_code (reduc_stmt) == COND_EXPR)
6127 continue;
6128 if (!vectype_in
6129 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6130 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op)))))
6131 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op));
6132 break;
6133 }
6134 gcc_assert (vectype_in);
6135
6136 if (slp_node)
6137 ncopies = 1;
6138 else
6139 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6140
6141 stmt_vec_info use_stmt_info;
6142 if (ncopies > 1
6143 && STMT_VINFO_RELEVANT (reduc_stmt_info) <= vect_used_only_live
6144 && (use_stmt_info = loop_vinfo->lookup_single_use (phi_result))
6145 && vect_stmt_to_vectorize (use_stmt_info) == reduc_stmt_info)
6146 single_defuse_cycle = true;
6147
6148 /* Create the destination vector */
6149 scalar_dest = gimple_assign_lhs (reduc_stmt);
6150 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6151
6152 if (slp_node)
6153 /* The size vect_schedule_slp_instance computes is off for us. */
6154 vec_num = vect_get_num_vectors
6155 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6156 * SLP_TREE_SCALAR_STMTS (slp_node).length (),
6157 vectype_in);
6158 else
6159 vec_num = 1;
6160
6161 /* Generate the reduction PHIs upfront. */
6162 prev_phi_info = NULL;
6163 for (j = 0; j < ncopies; j++)
6164 {
6165 if (j == 0 || !single_defuse_cycle)
6166 {
6167 for (i = 0; i < vec_num; i++)
6168 {
6169 /* Create the reduction-phi that defines the reduction
6170 operand. */
6171 gimple *new_phi = create_phi_node (vec_dest, loop->header);
6172 stmt_vec_info new_phi_info = loop_vinfo->add_stmt (new_phi);
6173
6174 if (slp_node)
6175 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi_info);
6176 else
6177 {
6178 if (j == 0)
6179 STMT_VINFO_VEC_STMT (stmt_info)
6180 = *vec_stmt = new_phi_info;
6181 else
6182 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi_info;
6183 prev_phi_info = new_phi_info;
6184 }
6185 }
6186 }
6187 }
6188
6189 return true;
6190 }
6191
6192 /* 1. Is vectorizable reduction? */
6193 /* Not supportable if the reduction variable is used in the loop, unless
6194 it's a reduction chain. */
6195 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
6196 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6197 return false;
6198
6199 /* Reductions that are not used even in an enclosing outer-loop
6200 are expected to be "live" (used out of the loop). */
6201 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
6202 && !STMT_VINFO_LIVE_P (stmt_info))
6203 return false;
6204
6205 /* 2. Has this been recognized as a reduction pattern?
6206
6207 Check if STMT represents a pattern that has been recognized
6208 in earlier analysis stages. For stmts that represent a pattern,
6209 the STMT_VINFO_RELATED_STMT field records the last stmt in
6210 the original sequence that constitutes the pattern. */
6211
6212 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
6213 if (orig_stmt_info)
6214 {
6215 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
6216 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
6217 }
6218
6219 /* 3. Check the operands of the operation. The first operands are defined
6220 inside the loop body. The last operand is the reduction variable,
6221 which is defined by the loop-header-phi. */
6222
6223 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
6224
6225 /* Flatten RHS. */
6226 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
6227 {
6228 case GIMPLE_BINARY_RHS:
6229 code = gimple_assign_rhs_code (stmt);
6230 op_type = TREE_CODE_LENGTH (code);
6231 gcc_assert (op_type == binary_op);
6232 ops[0] = gimple_assign_rhs1 (stmt);
6233 ops[1] = gimple_assign_rhs2 (stmt);
6234 break;
6235
6236 case GIMPLE_TERNARY_RHS:
6237 code = gimple_assign_rhs_code (stmt);
6238 op_type = TREE_CODE_LENGTH (code);
6239 gcc_assert (op_type == ternary_op);
6240 ops[0] = gimple_assign_rhs1 (stmt);
6241 ops[1] = gimple_assign_rhs2 (stmt);
6242 ops[2] = gimple_assign_rhs3 (stmt);
6243 break;
6244
6245 case GIMPLE_UNARY_RHS:
6246 return false;
6247
6248 default:
6249 gcc_unreachable ();
6250 }
6251
6252 if (code == COND_EXPR && slp_node)
6253 return false;
6254
6255 scalar_dest = gimple_assign_lhs (stmt);
6256 scalar_type = TREE_TYPE (scalar_dest);
6257 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
6258 && !SCALAR_FLOAT_TYPE_P (scalar_type))
6259 return false;
6260
6261 /* Do not try to vectorize bit-precision reductions. */
6262 if (!type_has_mode_precision_p (scalar_type))
6263 return false;
6264
6265 /* All uses but the last are expected to be defined in the loop.
6266 The last use is the reduction variable. In case of nested cycle this
6267 assumption is not true: we use reduc_index to record the index of the
6268 reduction variable. */
6269 stmt_vec_info reduc_def_info = NULL;
6270 int reduc_index = -1;
6271 for (i = 0; i < op_type; i++)
6272 {
6273 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
6274 if (i == 0 && code == COND_EXPR)
6275 continue;
6276
6277 stmt_vec_info def_stmt_info;
6278 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &dts[i], &tem,
6279 &def_stmt_info);
6280 dt = dts[i];
6281 gcc_assert (is_simple_use);
6282 if (dt == vect_reduction_def)
6283 {
6284 reduc_def_info = def_stmt_info;
6285 reduc_index = i;
6286 continue;
6287 }
6288 else if (tem)
6289 {
6290 /* To properly compute ncopies we are interested in the widest
6291 input type in case we're looking at a widening accumulation. */
6292 if (!vectype_in
6293 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6294 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))
6295 vectype_in = tem;
6296 }
6297
6298 if (dt != vect_internal_def
6299 && dt != vect_external_def
6300 && dt != vect_constant_def
6301 && dt != vect_induction_def
6302 && !(dt == vect_nested_cycle && nested_cycle))
6303 return false;
6304
6305 if (dt == vect_nested_cycle)
6306 {
6307 found_nested_cycle_def = true;
6308 reduc_def_info = def_stmt_info;
6309 reduc_index = i;
6310 }
6311
6312 if (i == 1 && code == COND_EXPR)
6313 {
6314 /* Record how value of COND_EXPR is defined. */
6315 if (dt == vect_constant_def)
6316 {
6317 cond_reduc_dt = dt;
6318 cond_reduc_val = ops[i];
6319 }
6320 if (dt == vect_induction_def
6321 && def_stmt_info
6322 && is_nonwrapping_integer_induction (def_stmt_info, loop))
6323 {
6324 cond_reduc_dt = dt;
6325 cond_stmt_vinfo = def_stmt_info;
6326 }
6327 }
6328 }
6329
6330 if (!vectype_in)
6331 vectype_in = vectype_out;
6332
6333 /* When vectorizing a reduction chain without SLP the reduction PHI
6334 is not directly used in the stmt. */
6335 if (reduc_index == -1)
6336 {
6337 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6338 {
6339 if (dump_enabled_p ())
6340 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6341 "in-order reduction chain without SLP.\n");
6342 return false;
6343 }
6344
6345 if (orig_stmt_info)
6346 reduc_def_info = STMT_VINFO_REDUC_DEF (orig_stmt_info);
6347 else
6348 reduc_def_info = STMT_VINFO_REDUC_DEF (stmt_info);
6349 }
6350
6351 if (! reduc_def_info)
6352 return false;
6353
6354 gphi *reduc_def_phi = dyn_cast <gphi *> (reduc_def_info->stmt);
6355 if (!reduc_def_phi)
6356 return false;
6357
6358 if (!(reduc_index == -1
6359 || dts[reduc_index] == vect_reduction_def
6360 || dts[reduc_index] == vect_nested_cycle
6361 || ((dts[reduc_index] == vect_internal_def
6362 || dts[reduc_index] == vect_external_def
6363 || dts[reduc_index] == vect_constant_def
6364 || dts[reduc_index] == vect_induction_def)
6365 && nested_cycle && found_nested_cycle_def)))
6366 {
6367 /* For pattern-recognized stmts, orig_stmt might be a reduction,
6368 but some helper statements for the pattern might not be, or
6369 might be COND_EXPRs with reduction uses in the condition. */
6370 gcc_assert (orig_stmt_info);
6371 return false;
6372 }
6373
6374 /* PHIs should not participate in patterns. */
6375 gcc_assert (!STMT_VINFO_RELATED_STMT (reduc_def_info));
6376 enum vect_reduction_type v_reduc_type
6377 = STMT_VINFO_REDUC_TYPE (reduc_def_info);
6378 stmt_vec_info tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);
6379
6380 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
6381 /* If we have a condition reduction, see if we can simplify it further. */
6382 if (v_reduc_type == COND_REDUCTION)
6383 {
6384 /* TODO: We can't yet handle reduction chains, since we need to treat
6385 each COND_EXPR in the chain specially, not just the last one.
6386 E.g. for:
6387
6388 x_1 = PHI <x_3, ...>
6389 x_2 = a_2 ? ... : x_1;
6390 x_3 = a_3 ? ... : x_2;
6391
6392 we're interested in the last element in x_3 for which a_2 || a_3
6393 is true, whereas the current reduction chain handling would
6394 vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
6395 as a reduction operation. */
6396 if (reduc_index == -1)
6397 {
6398 if (dump_enabled_p ())
6399 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6400 "conditional reduction chains not supported\n");
6401 return false;
6402 }
6403
6404 /* vect_is_simple_reduction ensured that operand 2 is the
6405 loop-carried operand. */
6406 gcc_assert (reduc_index == 2);
6407
6408 	  /* Loop peeling modifies the initial value of the reduction PHI,
6409 	     which makes the reduction stmt that is to be transformed differ
6410 	     from the original stmt that was analyzed.  We therefore record
6411 	     the reduction code for a CONST_COND_REDUCTION at the analysis
6412 	     stage, so that it can be used directly at the transform stage. */
6413 if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
6414 || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
6415 {
6416 /* Also set the reduction type to CONST_COND_REDUCTION. */
6417 gcc_assert (cond_reduc_dt == vect_constant_def);
6418 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
6419 }
6420 else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
6421 vectype_in, OPTIMIZE_FOR_SPEED))
6422 {
6423 if (dump_enabled_p ())
6424 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6425 "optimizing condition reduction with"
6426 " FOLD_EXTRACT_LAST.\n");
6427 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
6428 }
6429 else if (cond_reduc_dt == vect_induction_def)
6430 {
6431 tree base
6432 = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
6433 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
6434
6435 gcc_assert (TREE_CODE (base) == INTEGER_CST
6436 && TREE_CODE (step) == INTEGER_CST);
6437 cond_reduc_val = NULL_TREE;
6438 	  /* Find a suitable value: below BASE for MAX_EXPR, above BASE for
6439 	     MIN_EXPR; for now punt if BASE is the minimum value of the type
6440 	     for MAX_EXPR or the maximum value of the type for MIN_EXPR. */
6441 if (tree_int_cst_sgn (step) == -1)
6442 {
6443 cond_reduc_op_code = MIN_EXPR;
6444 if (tree_int_cst_sgn (base) == -1)
6445 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6446 else if (tree_int_cst_lt (base,
6447 TYPE_MAX_VALUE (TREE_TYPE (base))))
6448 cond_reduc_val
6449 = int_const_binop (PLUS_EXPR, base, integer_one_node);
6450 }
6451 else
6452 {
6453 cond_reduc_op_code = MAX_EXPR;
6454 if (tree_int_cst_sgn (base) == 1)
6455 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6456 else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
6457 base))
6458 cond_reduc_val
6459 = int_const_binop (MINUS_EXPR, base, integer_one_node);
6460 }
6461 if (cond_reduc_val)
6462 {
6463 if (dump_enabled_p ())
6464 dump_printf_loc (MSG_NOTE, vect_location,
6465 "condition expression based on "
6466 "integer induction.\n");
6467 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6468 = INTEGER_INDUC_COND_REDUCTION;
6469 }
6470 }
6471 else if (cond_reduc_dt == vect_constant_def)
6472 {
6473 enum vect_def_type cond_initial_dt;
6474 gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
6475 tree cond_initial_val
6476 = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
6477
6478 gcc_assert (cond_reduc_val != NULL_TREE);
6479 vect_is_simple_use (cond_initial_val, loop_vinfo, &cond_initial_dt);
6480 if (cond_initial_dt == vect_constant_def
6481 && types_compatible_p (TREE_TYPE (cond_initial_val),
6482 TREE_TYPE (cond_reduc_val)))
6483 {
6484 tree e = fold_binary (LE_EXPR, boolean_type_node,
6485 cond_initial_val, cond_reduc_val);
6486 if (e && (integer_onep (e) || integer_zerop (e)))
6487 {
6488 if (dump_enabled_p ())
6489 dump_printf_loc (MSG_NOTE, vect_location,
6490 "condition expression based on "
6491 "compile time constant.\n");
6492 /* Record reduction code at analysis stage. */
6493 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
6494 = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
6495 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6496 = CONST_COND_REDUCTION;
6497 }
6498 }
6499 }
6500 }
6501
6502 if (orig_stmt_info)
6503 gcc_assert (tmp == orig_stmt_info
6504 || REDUC_GROUP_FIRST_ELEMENT (tmp) == orig_stmt_info);
6505 else
6506 /* We changed STMT to be the first stmt in the reduction chain, hence
6507 we check that in this case the first element in the chain is STMT. */
6508 gcc_assert (tmp == stmt_info
6509 || REDUC_GROUP_FIRST_ELEMENT (tmp) == stmt_info);
6510
6511 if (STMT_VINFO_LIVE_P (reduc_def_info))
6512 return false;
6513
6514 if (slp_node)
6515 ncopies = 1;
6516 else
6517 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6518
6519 gcc_assert (ncopies >= 1);
6520
6521 vec_mode = TYPE_MODE (vectype_in);
6522 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
6523
6524 if (code == COND_EXPR)
6525 {
6526 /* Only call during the analysis stage, otherwise we'll lose
6527 STMT_VINFO_TYPE. */
6528 if (!vec_stmt && !vectorizable_condition (stmt_info, gsi, NULL,
6529 ops[reduc_index], 0, NULL,
6530 cost_vec))
6531 {
6532 if (dump_enabled_p ())
6533 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6534 "unsupported condition in reduction\n");
6535 return false;
6536 }
6537 }
6538 else
6539 {
6540 /* 4. Supportable by target? */
6541
6542 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
6543 || code == LROTATE_EXPR || code == RROTATE_EXPR)
6544 {
6545 /* Shifts and rotates are only supported by vectorizable_shifts,
6546 not vectorizable_reduction. */
6547 if (dump_enabled_p ())
6548 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6549 "unsupported shift or rotation.\n");
6550 return false;
6551 }
6552
6553 /* 4.1. check support for the operation in the loop */
6554 optab = optab_for_tree_code (code, vectype_in, optab_default);
6555 if (!optab)
6556 {
6557 if (dump_enabled_p ())
6558 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6559 "no optab.\n");
6560
6561 return false;
6562 }
6563
6564 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
6565 {
6566 if (dump_enabled_p ())
6567 dump_printf (MSG_NOTE, "op not supported by target.\n");
6568
6569 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
6570 || !vect_worthwhile_without_simd_p (loop_vinfo, code))
6571 return false;
6572
6573 if (dump_enabled_p ())
6574 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
6575 }
6576
6577 /* Worthwhile without SIMD support? */
6578 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
6579 && !vect_worthwhile_without_simd_p (loop_vinfo, code))
6580 {
6581 if (dump_enabled_p ())
6582 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6583 "not worthwhile without SIMD support.\n");
6584
6585 return false;
6586 }
6587 }
6588
6589 /* 4.2. Check support for the epilog operation.
6590
6591 If STMT represents a reduction pattern, then the type of the
6592 reduction variable may be different than the type of the rest
6593 of the arguments. For example, consider the case of accumulation
6594 of shorts into an int accumulator; the original code:
6595 S1: int_a = (int) short_a;
6596 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
6597
6598 was replaced with:
6599 STMT: int_acc = widen_sum <short_a, int_acc>
6600
6601 This means that:
6602 1. The tree-code that is used to create the vector operation in the
6603 epilog code (that reduces the partial results) is not the
6604 tree-code of STMT, but is rather the tree-code of the original
6605 stmt from the pattern that STMT is replacing.  I.e., in the example
6606 above we want to use 'widen_sum' in the loop, but 'plus' in the
6607 epilog.
6608 2. The type (mode) we use to check available target support
6609 for the vector operation to be created in the *epilog*, is
6610 determined by the type of the reduction variable (in the example
6611 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
6612 However the type (mode) we use to check available target support
6613 for the vector operation to be created *inside the loop*, is
6614 determined by the type of the other arguments to STMT (in the
6615 example we'd check this: optab_handler (widen_sum_optab,
6616 vect_short_mode)).
6617
6618 This is contrary to "regular" reductions, in which the types of all
6619 the arguments are the same as the type of the reduction variable.
6620 For "regular" reductions we can therefore use the same vector type
6621 (and also the same tree-code) when generating the epilog code and
6622 when generating the code inside the loop. */
6623
6624 vect_reduction_type reduction_type
6625 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
6626 if (orig_stmt_info
6627 && (reduction_type == TREE_CODE_REDUCTION
6628 || reduction_type == FOLD_LEFT_REDUCTION))
6629 {
6630 /* This is a reduction pattern: get the vectype from the type of the
6631 reduction variable, and get the tree-code from orig_stmt. */
6632 orig_code = gimple_assign_rhs_code (orig_stmt_info->stmt);
6633 gcc_assert (vectype_out);
6634 vec_mode = TYPE_MODE (vectype_out);
6635 }
6636 else
6637 {
6638 /* Regular reduction: the same vectype and tree-code that are used for
6639 the vector code inside the loop can also be used for the epilog code. */
6640 orig_code = code;
6641
6642 if (code == MINUS_EXPR)
6643 orig_code = PLUS_EXPR;
6644
6645 /* For simple condition reductions, replace with the actual expression
6646 we want to base our reduction around. */
6647 if (reduction_type == CONST_COND_REDUCTION)
6648 {
6649 orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
6650 gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
6651 }
6652 else if (reduction_type == INTEGER_INDUC_COND_REDUCTION)
6653 orig_code = cond_reduc_op_code;
6654 }
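   /* Hedged example of the CONST_COND_REDUCTION case above (assumed source
      shape, not taken from this function):

	 int r = 0;
	 for (i = 0; i < n; i++)
	   r = a[i] > 0 ? 4 : r;

      r can only ever hold 0 or 4, so the per-lane partial results can be
      combined in the epilog with MAX_EXPR; STMT_VINFO_VEC_CONST_COND_REDUC_CODE
      records whether MAX_EXPR or MIN_EXPR is the valid choice.  */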
6655
6656 if (nested_cycle)
6657 {
6658 def_bb = gimple_bb (reduc_def_phi);
6659 def_stmt_loop = def_bb->loop_father;
6660 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi,
6661 loop_preheader_edge (def_stmt_loop));
6662 stmt_vec_info def_arg_stmt_info = loop_vinfo->lookup_def (def_arg);
6663 if (def_arg_stmt_info
6664 && (STMT_VINFO_DEF_TYPE (def_arg_stmt_info)
6665 == vect_double_reduction_def))
6666 double_reduc = true;
6667 }
6668
6669 reduc_fn = IFN_LAST;
6670
6671 if (reduction_type == TREE_CODE_REDUCTION
6672 || reduction_type == FOLD_LEFT_REDUCTION
6673 || reduction_type == INTEGER_INDUC_COND_REDUCTION
6674 || reduction_type == CONST_COND_REDUCTION)
6675 {
6676 if (reduction_type == FOLD_LEFT_REDUCTION
6677 ? fold_left_reduction_fn (orig_code, &reduc_fn)
6678 : reduction_fn_for_scalar_code (orig_code, &reduc_fn))
6679 {
6680 if (reduc_fn != IFN_LAST
6681 && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
6682 OPTIMIZE_FOR_SPEED))
6683 {
6684 if (dump_enabled_p ())
6685 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6686 "reduc op not supported by target.\n");
6687
6688 reduc_fn = IFN_LAST;
6689 }
6690 }
6691 else
6692 {
6693 if (!nested_cycle || double_reduc)
6694 {
6695 if (dump_enabled_p ())
6696 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6697 "no reduc code for scalar code.\n");
6698
6699 return false;
6700 }
6701 }
6702 }
6703 else if (reduction_type == COND_REDUCTION)
6704 {
6705 int scalar_precision
6706 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
6707 cr_index_scalar_type = make_unsigned_type (scalar_precision);
6708 cr_index_vector_type = build_vector_type (cr_index_scalar_type,
6709 nunits_out);
6710
6711 if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
6712 OPTIMIZE_FOR_SPEED))
6713 reduc_fn = IFN_REDUC_MAX;
6714 }
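   /* Rough sketch of the COND_REDUCTION scheme (assumed source shape):

	 for (i = 0; i < n; i++)
	   if (a[i] < b[i])
	     last = c[i];

      besides the data vector we keep a vector of 1-based iteration indices
      for the matching lanes (0 meaning "no match yet"); the epilog applies
      REDUC_MAX to that index vector to find the lane holding the final
      match, which is why IFN_REDUC_MAX is preferred above when available.  */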
6715
6716 if (reduction_type != EXTRACT_LAST_REDUCTION
6717 && (!nested_cycle || double_reduc)
6718 && reduc_fn == IFN_LAST
6719 && !nunits_out.is_constant ())
6720 {
6721 if (dump_enabled_p ())
6722 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6723 "missing target support for reduction on"
6724 " variable-length vectors.\n");
6725 return false;
6726 }
6727
6728 if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
6729 && ncopies > 1)
6730 {
6731 if (dump_enabled_p ())
6732 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6733 "multiple types in double reduction or condition "
6734 "reduction.\n");
6735 return false;
6736 }
6737
6738 /* For SLP reductions, see if there is a neutral value we can use. */
6739 tree neutral_op = NULL_TREE;
6740 if (slp_node)
6741 neutral_op = neutral_op_for_slp_reduction
6742 (slp_node_instance->reduc_phis, code,
6743 REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);
6744
6745 if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
6746 {
6747 /* We can't support in-order reductions of code such as this:
6748
6749 for (int i = 0; i < n1; ++i)
6750 for (int j = 0; j < n2; ++j)
6751 l += a[j];
6752
6753 since GCC effectively transforms the loop when vectorizing:
6754
6755 for (int i = 0; i < n1 / VF; ++i)
6756 for (int j = 0; j < n2; ++j)
6757 for (int k = 0; k < VF; ++k)
6758 l += a[j];
6759
6760 which is a reassociation of the original operation. */
6761 if (dump_enabled_p ())
6762 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6763 "in-order double reduction not supported.\n");
6764
6765 return false;
6766 }
6767
6768 if (reduction_type == FOLD_LEFT_REDUCTION
6769 && slp_node
6770 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6771 {
6772 /* We cannot use in-order reductions in this case because there is
6773 an implicit reassociation of the operations involved. */
6774 if (dump_enabled_p ())
6775 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6776 "in-order unchained SLP reductions not supported.\n");
6777 return false;
6778 }
6779
6780 /* For double reductions, and for SLP reductions with a neutral value,
6781 we construct a variable-length initial vector by loading a vector
6782 full of the neutral value and then shift-and-inserting the start
6783 values into the low-numbered elements. */
6784 if ((double_reduc || neutral_op)
6785 && !nunits_out.is_constant ()
6786 && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
6787 vectype_out, OPTIMIZE_FOR_SPEED))
6788 {
6789 if (dump_enabled_p ())
6790 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6791 "reduction on variable-length vectors requires"
6792 " target support for a vector-shift-and-insert"
6793 " operation.\n");
6794 return false;
6795 }
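   /* For reference, a hedged sketch of how the variable-length initial
      vector is built with IFN_VEC_SHL_INSERT for a summation whose start
      value is INIT:

	 tmp      = { 0, 0, 0, ... }		  <- splat of the neutral value
	 vec_init = VEC_SHL_INSERT <tmp, INIT>	  <- { INIT, 0, 0, ... }

      SLP reductions with several start values repeat the insertion once
      per value.  */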
6796
6797 /* Check extra constraints for variable-length unchained SLP reductions. */
6798 if (STMT_SLP_TYPE (stmt_info)
6799 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info)
6800 && !nunits_out.is_constant ())
6801 {
6802 /* We checked above that we could build the initial vector when
6803 there's a neutral element value. Check here for the case in
6804 which each SLP statement has its own initial value and in which
6805 that value needs to be repeated for every instance of the
6806 statement within the initial vector. */
6807 unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
6808 scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out));
6809 if (!neutral_op
6810 && !can_duplicate_and_interleave_p (group_size, elt_mode))
6811 {
6812 if (dump_enabled_p ())
6813 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6814 "unsupported form of SLP reduction for"
6815 " variable-length vectors: cannot build"
6816 " initial vector.\n");
6817 return false;
6818 }
6819 /* The epilogue code relies on the number of elements being a multiple
6820 of the group size. The duplicate-and-interleave approach to setting
6821 up the initial vector does too. */
6822 if (!multiple_p (nunits_out, group_size))
6823 {
6824 if (dump_enabled_p ())
6825 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6826 "unsupported form of SLP reduction for"
6827 " variable-length vectors: the vector size"
6828 " is not a multiple of the number of results.\n");
6829 return false;
6830 }
6831 }
6832
6833 /* In case of widening multiplication by a constant, we update the type
6834 of the constant to be the type of the other operand. We check that the
6835 constant fits the type in the pattern recognition pass. */
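   /* E.g. (assumed source shape, hedged illustration):

	 acc += (int) short_b[i] * 3;

      is recognized as a DOT_PROD_EXPR whose multiplication operands are
      short_b[i] and 3; the INTEGER_CST 3 is converted below to the type of
      short_b[i] so that both operands share one vectype.  */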
6836 if (code == DOT_PROD_EXPR
6837 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
6838 {
6839 if (TREE_CODE (ops[0]) == INTEGER_CST)
6840 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
6841 else if (TREE_CODE (ops[1]) == INTEGER_CST)
6842 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
6843 else
6844 {
6845 if (dump_enabled_p ())
6846 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6847 "invalid types in dot-prod\n");
6848
6849 return false;
6850 }
6851 }
6852
6853 if (reduction_type == COND_REDUCTION)
6854 {
6855 widest_int ni;
6856
6857 if (! max_loop_iterations (loop, &ni))
6858 {
6859 if (dump_enabled_p ())
6860 dump_printf_loc (MSG_NOTE, vect_location,
6861 "loop count not known, cannot create cond "
6862 "reduction.\n");
6863 return false;
6864 }
6865 /* Convert backedges to iterations. */
6866 ni += 1;
6867
6868 /* The additional index will have the same type as the condition. Check
6869 that the loop iteration count fits into this type less one (because we
6870 use up the zero slot for when there are no matches). */
6871 tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
6872 if (wi::geu_p (ni, wi::to_widest (max_index)))
6873 {
6874 if (dump_enabled_p ())
6875 dump_printf_loc (MSG_NOTE, vect_location,
6876 "loop size is greater than data size.\n");
6877 return false;
6878 }
6879 }
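   /* Worked example for the check just above (illustrative numbers): if
      cr_index_scalar_type turns out to be an 8-bit unsigned type, max_index
      is 255, and any loop that may run 255 or more iterations is rejected,
      so every 1-based match index stays strictly below the type's maximum
      value, with index 0 reserved for "no match".  */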
6880
6881 /* In case the vectorization factor (VF) is bigger than the number
6882 of elements that we can fit in a vectype (nunits), we have to generate
6883 more than one vector stmt - i.e. we need to "unroll" the
6884 vector stmt by a factor VF/nunits. For more details see documentation
6885 in vectorizable_operation. */
6886
6887 /* If the reduction is used in an outer loop we need to generate
6888 VF intermediate results, like so (e.g. for ncopies=2):
6889 r0 = phi (init, r0)
6890 r1 = phi (init, r1)
6891 r0 = x0 + r0;
6892 r1 = x1 + r1;
6893 (i.e. we generate VF results in 2 registers).
6894 In this case we have a separate def-use cycle for each copy, and therefore
6895 for each copy we get the vector def for the reduction variable from the
6896 respective phi node created for this copy.
6897
6898 Otherwise (the reduction is unused in the loop nest), we can combine
6899 together intermediate results, like so (e.g. for ncopies=2):
6900 r = phi (init, r)
6901 r = x0 + r;
6902 r = x1 + r;
6903 (i.e. we generate VF/2 results in a single register).
6904 In this case for each copy we get the vector def for the reduction variable
6905 from the vectorized reduction operation generated in the previous iteration.
6906
6907 This only works when we see both the reduction PHI and its only consumer
6908 in vectorizable_reduction and there are no intermediate stmts
6909 participating. */
6910 stmt_vec_info use_stmt_info;
6911 tree reduc_phi_result = gimple_phi_result (reduc_def_phi);
6912 if (ncopies > 1
6913 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
6914 && (use_stmt_info = loop_vinfo->lookup_single_use (reduc_phi_result))
6915 && vect_stmt_to_vectorize (use_stmt_info) == stmt_info)
6916 {
6917 single_defuse_cycle = true;
6918 epilog_copies = 1;
6919 }
6920 else
6921 epilog_copies = ncopies;
6922
6923 /* If the reduction stmt is one of the patterns that have lane
6924 reduction embedded, we cannot handle the case of ! single_defuse_cycle. */
6925 if ((ncopies > 1
6926 && ! single_defuse_cycle)
6927 && (code == DOT_PROD_EXPR
6928 || code == WIDEN_SUM_EXPR
6929 || code == SAD_EXPR))
6930 {
6931 if (dump_enabled_p ())
6932 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6933 "multi def-use cycle not possible for lane-reducing "
6934 "reduction operation\n");
6935 return false;
6936 }
6937
6938 if (slp_node)
6939 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6940 else
6941 vec_num = 1;
6942
6943 internal_fn cond_fn = get_conditional_internal_fn (code);
6944 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
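   /* Informal note: in a fully-masked loop the in-loop reduction op is
      emitted further down as a conditional internal function that takes the
      loop mask, e.g. roughly

	 acc = IFN_COND_ADD (loop_mask, acc, x, acc);

      so inactive lanes simply pass the accumulator through unchanged.  That
      is why cond_fn support is required before the loop can be fully
      masked.  */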
6945
6946 if (!vec_stmt) /* transformation not required. */
6947 {
6948 vect_model_reduction_cost (stmt_info, reduc_fn, ncopies, cost_vec);
6949 if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6950 {
6951 if (reduction_type != FOLD_LEFT_REDUCTION
6952 && (cond_fn == IFN_LAST
6953 || !direct_internal_fn_supported_p (cond_fn, vectype_in,
6954 OPTIMIZE_FOR_SPEED)))
6955 {
6956 if (dump_enabled_p ())
6957 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6958 "can't use a fully-masked loop because no"
6959 " conditional operation is available.\n");
6960 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
6961 }
6962 else if (reduc_index == -1)
6963 {
6964 if (dump_enabled_p ())
6965 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6966 "can't use a fully-masked loop for chained"
6967 " reductions.\n");
6968 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
6969 }
6970 else
6971 vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
6972 vectype_in);
6973 }
6974 if (dump_enabled_p ()
6975 && reduction_type == FOLD_LEFT_REDUCTION)
6976 dump_printf_loc (MSG_NOTE, vect_location,
6977 "using an in-order (fold-left) reduction.\n");
6978 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6979 return true;
6980 }
6981
6982 /* Transform. */
6983
6984 if (dump_enabled_p ())
6985 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
6986
6987 /* FORNOW: Multiple types are not supported for condition. */
6988 if (code == COND_EXPR)
6989 gcc_assert (ncopies == 1);
6990
6991 bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
6992
6993 if (reduction_type == FOLD_LEFT_REDUCTION)
6994 return vectorize_fold_left_reduction
6995 (stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
6996 reduc_fn, ops, vectype_in, reduc_index, masks);
6997
6998 if (reduction_type == EXTRACT_LAST_REDUCTION)
6999 {
7000 gcc_assert (!slp_node);
7001 return vectorizable_condition (stmt_info, gsi, vec_stmt,
7002 NULL, reduc_index, NULL, NULL);
7003 }
7004
7005 /* Create the destination vector */
7006 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
7007
7008 prev_stmt_info = NULL;
7009 prev_phi_info = NULL;
7010 if (!slp_node)
7011 {
7012 vec_oprnds0.create (1);
7013 vec_oprnds1.create (1);
7014 if (op_type == ternary_op)
7015 vec_oprnds2.create (1);
7016 }
7017
7018 phis.create (vec_num);
7019 vect_defs.create (vec_num);
7020 if (!slp_node)
7021 vect_defs.quick_push (NULL_TREE);
7022
7023 if (slp_node)
7024 phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis));
7025 else
7026 phis.quick_push (STMT_VINFO_VEC_STMT (reduc_def_info));
7027
7028 for (j = 0; j < ncopies; j++)
7029 {
7030 if (code == COND_EXPR)
7031 {
7032 gcc_assert (!slp_node);
7033 vectorizable_condition (stmt_info, gsi, vec_stmt,
7034 PHI_RESULT (phis[0]->stmt),
7035 reduc_index, NULL, NULL);
7036 /* Multiple types are not supported for condition. */
7037 break;
7038 }
7039
7040 /* Handle uses. */
7041 if (j == 0)
7042 {
7043 if (slp_node)
7044 {
7045 /* Get vec defs for all the operands except the reduction index,
7046 ensuring the ordering of the ops in the vector is kept. */
7047 auto_vec<tree, 3> slp_ops;
7048 auto_vec<vec<tree>, 3> vec_defs;
7049
7050 slp_ops.quick_push (ops[0]);
7051 slp_ops.quick_push (ops[1]);
7052 if (op_type == ternary_op)
7053 slp_ops.quick_push (ops[2]);
7054
7055 vect_get_slp_defs (slp_ops, slp_node, &vec_defs);
7056
7057 vec_oprnds0.safe_splice (vec_defs[0]);
7058 vec_defs[0].release ();
7059 vec_oprnds1.safe_splice (vec_defs[1]);
7060 vec_defs[1].release ();
7061 if (op_type == ternary_op)
7062 {
7063 vec_oprnds2.safe_splice (vec_defs[2]);
7064 vec_defs[2].release ();
7065 }
7066 }
7067 else
7068 {
7069 vec_oprnds0.quick_push
7070 (vect_get_vec_def_for_operand (ops[0], stmt_info));
7071 vec_oprnds1.quick_push
7072 (vect_get_vec_def_for_operand (ops[1], stmt_info));
7073 if (op_type == ternary_op)
7074 vec_oprnds2.quick_push
7075 (vect_get_vec_def_for_operand (ops[2], stmt_info));
7076 }
7077 }
7078 else
7079 {
7080 if (!slp_node)
7081 {
7082 gcc_assert (reduc_index != -1 || ! single_defuse_cycle);
7083
7084 if (single_defuse_cycle && reduc_index == 0)
7085 vec_oprnds0[0] = gimple_get_lhs (new_stmt_info->stmt);
7086 else
7087 vec_oprnds0[0]
7088 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7089 vec_oprnds0[0]);
7090 if (single_defuse_cycle && reduc_index == 1)
7091 vec_oprnds1[0] = gimple_get_lhs (new_stmt_info->stmt);
7092 else
7093 vec_oprnds1[0]
7094 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7095 vec_oprnds1[0]);
7096 if (op_type == ternary_op)
7097 {
7098 if (single_defuse_cycle && reduc_index == 2)
7099 vec_oprnds2[0] = gimple_get_lhs (new_stmt_info->stmt);
7100 else
7101 vec_oprnds2[0]
7102 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
7103 vec_oprnds2[0]);
7104 }
7105 }
7106 }
7107
7108 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
7109 {
7110 tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
7111 if (masked_loop_p)
7112 {
7113 /* Make sure that the reduction accumulator is vop[0]. */
7114 if (reduc_index == 1)
7115 {
7116 gcc_assert (commutative_tree_code (code));
7117 std::swap (vop[0], vop[1]);
7118 }
7119 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
7120 vectype_in, i * ncopies + j);
7121 gcall *call = gimple_build_call_internal (cond_fn, 4, mask,
7122 vop[0], vop[1],
7123 vop[0]);
7124 new_temp = make_ssa_name (vec_dest, call);
7125 gimple_call_set_lhs (call, new_temp);
7126 gimple_call_set_nothrow (call, true);
7127 new_stmt_info
7128 = vect_finish_stmt_generation (stmt_info, call, gsi);
7129 }
7130 else
7131 {
7132 if (op_type == ternary_op)
7133 vop[2] = vec_oprnds2[i];
7134
7135 gassign *new_stmt = gimple_build_assign (vec_dest, code,
7136 vop[0], vop[1], vop[2]);
7137 new_temp = make_ssa_name (vec_dest, new_stmt);
7138 gimple_assign_set_lhs (new_stmt, new_temp);
7139 new_stmt_info
7140 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7141 }
7142
7143 if (slp_node)
7144 {
7145 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7146 vect_defs.quick_push (new_temp);
7147 }
7148 else
7149 vect_defs[0] = new_temp;
7150 }
7151
7152 if (slp_node)
7153 continue;
7154
7155 if (j == 0)
7156 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7157 else
7158 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7159
7160 prev_stmt_info = new_stmt_info;
7161 }
7162
7163 /* Finalize the reduction-phi (set its arguments) and create the
7164 epilog reduction code. */
7165 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
7166 vect_defs[0] = gimple_get_lhs ((*vec_stmt)->stmt);
7167
7168 vect_create_epilog_for_reduction (vect_defs, stmt_info, reduc_def_phi,
7169 epilog_copies, reduc_fn, phis,
7170 double_reduc, slp_node, slp_node_instance,
7171 cond_reduc_val, cond_reduc_op_code,
7172 neutral_op);
7173
7174 return true;
7175 }
7176
7177 /* Function vect_min_worthwhile_factor.
7178
7179 For a loop where we could vectorize the operation indicated by CODE,
7180 return the minimum vectorization factor that makes it worthwhile
7181 to use generic vectors. */
7182 static unsigned int
7183 vect_min_worthwhile_factor (enum tree_code code)
7184 {
7185 switch (code)
7186 {
7187 case PLUS_EXPR:
7188 case MINUS_EXPR:
7189 case NEGATE_EXPR:
7190 return 4;
7191
7192 case BIT_AND_EXPR:
7193 case BIT_IOR_EXPR:
7194 case BIT_XOR_EXPR:
7195 case BIT_NOT_EXPR:
7196 return 2;
7197
7198 default:
7199 return INT_MAX;
7200 }
7201 }
7202
7203 /* Return true if VINFO indicates we are doing loop vectorization and if
7204 it is worth decomposing CODE operations into scalar operations for
7205 that loop's vectorization factor. */
7206
7207 bool
7208 vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
7209 {
7210 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7211 unsigned HOST_WIDE_INT value;
7212 return (loop_vinfo
7213 && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
7214 && value >= vect_min_worthwhile_factor (code));
7215 }
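/* For illustration: with a compile-time vectorization factor of 4 the
   PLUS_EXPR/MINUS_EXPR/NEGATE_EXPR codes qualify (threshold 4), with a
   factor of 2 only the bitwise codes do (threshold 2), and a non-constant
   (variable-length) factor never qualifies.  */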
7216
7217 /* Function vectorizable_induction
7218
7219 Check if STMT_INFO performs an induction computation that can be vectorized.
7220 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
7221 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
7222 Return true if STMT_INFO is vectorizable in this way. */
7223
7224 bool
7225 vectorizable_induction (stmt_vec_info stmt_info,
7226 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7227 stmt_vec_info *vec_stmt, slp_tree slp_node,
7228 stmt_vector_for_cost *cost_vec)
7229 {
7230 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7231 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7232 unsigned ncopies;
7233 bool nested_in_vect_loop = false;
7234 struct loop *iv_loop;
7235 tree vec_def;
7236 edge pe = loop_preheader_edge (loop);
7237 basic_block new_bb;
7238 tree new_vec, vec_init, vec_step, t;
7239 tree new_name;
7240 gimple *new_stmt;
7241 gphi *induction_phi;
7242 tree induc_def, vec_dest;
7243 tree init_expr, step_expr;
7244 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7245 unsigned i;
7246 tree expr;
7247 gimple_seq stmts;
7248 imm_use_iterator imm_iter;
7249 use_operand_p use_p;
7250 gimple *exit_phi;
7251 edge latch_e;
7252 tree loop_arg;
7253 gimple_stmt_iterator si;
7254
7255 gphi *phi = dyn_cast <gphi *> (stmt_info->stmt);
7256 if (!phi)
7257 return false;
7258
7259 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7260 return false;
7261
7262 /* Make sure it was recognized as induction computation. */
7263 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
7264 return false;
7265
7266 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7267 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7268
7269 if (slp_node)
7270 ncopies = 1;
7271 else
7272 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7273 gcc_assert (ncopies >= 1);
7274
7275 /* FORNOW. These restrictions should be relaxed. */
7276 if (nested_in_vect_loop_p (loop, stmt_info))
7277 {
7278 imm_use_iterator imm_iter;
7279 use_operand_p use_p;
7280 gimple *exit_phi;
7281 edge latch_e;
7282 tree loop_arg;
7283
7284 if (ncopies > 1)
7285 {
7286 if (dump_enabled_p ())
7287 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7288 "multiple types in nested loop.\n");
7289 return false;
7290 }
7291
7292 /* FORNOW: outer loop induction with SLP not supported. */
7293 if (STMT_SLP_TYPE (stmt_info))
7294 return false;
7295
7296 exit_phi = NULL;
7297 latch_e = loop_latch_edge (loop->inner);
7298 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7299 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7300 {
7301 gimple *use_stmt = USE_STMT (use_p);
7302 if (is_gimple_debug (use_stmt))
7303 continue;
7304
7305 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
7306 {
7307 exit_phi = use_stmt;
7308 break;
7309 }
7310 }
7311 if (exit_phi)
7312 {
7313 stmt_vec_info exit_phi_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7314 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
7315 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
7316 {
7317 if (dump_enabled_p ())
7318 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7319 "inner-loop induction only used outside "
7320 "of the outer vectorized loop.\n");
7321 return false;
7322 }
7323 }
7324
7325 nested_in_vect_loop = true;
7326 iv_loop = loop->inner;
7327 }
7328 else
7329 iv_loop = loop;
7330 gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
7331
7332 if (slp_node && !nunits.is_constant ())
7333 {
7334 /* The current SLP code creates the initial value element-by-element. */
7335 if (dump_enabled_p ())
7336 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7337 "SLP induction not supported for variable-length"
7338 " vectors.\n");
7339 return false;
7340 }
7341
7342 if (!vec_stmt) /* transformation not required. */
7343 {
7344 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
7345 DUMP_VECT_SCOPE ("vectorizable_induction");
7346 vect_model_induction_cost (stmt_info, ncopies, cost_vec);
7347 return true;
7348 }
7349
7350 /* Transform. */
7351
7352 /* Compute a vector variable, initialized with the first VF values of
7353 the induction variable. E.g., for an iv with IV_PHI='X' and
7354 evolution S, for a vector of 4 units, we want to compute:
7355 [X, X + S, X + 2*S, X + 3*S]. */
7356
7357 if (dump_enabled_p ())
7358 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
7359
7360 latch_e = loop_latch_edge (iv_loop);
7361 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7362
7363 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
7364 gcc_assert (step_expr != NULL_TREE);
7365
7366 pe = loop_preheader_edge (iv_loop);
7367 init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
7368 loop_preheader_edge (iv_loop));
7369
7370 stmts = NULL;
7371 if (!nested_in_vect_loop)
7372 {
7373 /* Convert the initial value to the desired type. */
7374 tree new_type = TREE_TYPE (vectype);
7375 init_expr = gimple_convert (&stmts, new_type, init_expr);
7376
7377 /* If we are using the loop mask to "peel" for alignment then we need
7378 to adjust the start value here. */
7379 tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
7380 if (skip_niters != NULL_TREE)
7381 {
7382 if (FLOAT_TYPE_P (vectype))
7383 skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
7384 skip_niters);
7385 else
7386 skip_niters = gimple_convert (&stmts, new_type, skip_niters);
7387 tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
7388 skip_niters, step_expr);
7389 init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
7390 init_expr, skip_step);
7391 }
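	  /* Worked example with made-up numbers: if the mask skips the first
	     2 iterations, the start value is X and the step is S, then
	     init_expr becomes X - 2*S and the initial vector built further
	     down is [X - 2*S, X - S, X, X + S, ...], so the first active
	     lane (lane 2) sees the original start value X.  */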
7392 }
7393
7394 /* Convert the step to the desired type. */
7395 step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
7396
7397 if (stmts)
7398 {
7399 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7400 gcc_assert (!new_bb);
7401 }
7402
7403 /* Find the first insertion point in the BB. */
7404 basic_block bb = gimple_bb (phi);
7405 si = gsi_after_labels (bb);
7406
7407 /* For SLP induction we have to generate several IVs as for example
7408 with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
7409 [i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform
7410 [VF*S, VF*S, VF*S, VF*S] for all. */
7411 if (slp_node)
7412 {
7413 /* Enforced above. */
7414 unsigned int const_nunits = nunits.to_constant ();
7415
7416 /* Generate [VF*S, VF*S, ... ]. */
7417 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7418 {
7419 expr = build_int_cst (integer_type_node, vf);
7420 expr = fold_convert (TREE_TYPE (step_expr), expr);
7421 }
7422 else
7423 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7424 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7425 expr, step_expr);
7426 if (! CONSTANT_CLASS_P (new_name))
7427 new_name = vect_init_vector (stmt_info, new_name,
7428 TREE_TYPE (step_expr), NULL);
7429 new_vec = build_vector_from_val (vectype, new_name);
7430 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7431
7432 /* Now generate the IVs. */
7433 unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7434 unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7435 unsigned elts = const_nunits * nvects;
7436 unsigned nivs = least_common_multiple (group_size,
7437 const_nunits) / const_nunits;
7438 gcc_assert (elts % group_size == 0);
7439 tree elt = init_expr;
7440 unsigned ivn;
7441 for (ivn = 0; ivn < nivs; ++ivn)
7442 {
7443 tree_vector_builder elts (vectype, const_nunits, 1);
7444 stmts = NULL;
7445 for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
7446 {
7447 if (ivn*const_nunits + eltn >= group_size
7448 && (ivn * const_nunits + eltn) % group_size == 0)
7449 elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
7450 elt, step_expr);
7451 elts.quick_push (elt);
7452 }
7453 vec_init = gimple_build_vector (&stmts, &elts);
7454 if (stmts)
7455 {
7456 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7457 gcc_assert (!new_bb);
7458 }
7459
7460 /* Create the induction-phi that defines the induction-operand. */
7461 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7462 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7463 stmt_vec_info induction_phi_info
7464 = loop_vinfo->add_stmt (induction_phi);
7465 induc_def = PHI_RESULT (induction_phi);
7466
7467 /* Create the iv update inside the loop */
7468 vec_def = make_ssa_name (vec_dest);
7469 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7470 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7471 loop_vinfo->add_stmt (new_stmt);
7472
7473 /* Set the arguments of the phi node: */
7474 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7475 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7476 UNKNOWN_LOCATION);
7477
7478 SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi_info);
7479 }
7480
7481 /* Re-use IVs when we can. */
7482 if (ivn < nvects)
7483 {
7484 unsigned vfp
7485 = least_common_multiple (group_size, const_nunits) / group_size;
7486 /* Generate [VF'*S, VF'*S, ... ]. */
7487 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7488 {
7489 expr = build_int_cst (integer_type_node, vfp);
7490 expr = fold_convert (TREE_TYPE (step_expr), expr);
7491 }
7492 else
7493 expr = build_int_cst (TREE_TYPE (step_expr), vfp);
7494 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7495 expr, step_expr);
7496 if (! CONSTANT_CLASS_P (new_name))
7497 new_name = vect_init_vector (stmt_info, new_name,
7498 TREE_TYPE (step_expr), NULL);
7499 new_vec = build_vector_from_val (vectype, new_name);
7500 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7501 for (; ivn < nvects; ++ivn)
7502 {
7503 gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]->stmt;
7504 tree def;
7505 if (gimple_code (iv) == GIMPLE_PHI)
7506 def = gimple_phi_result (iv);
7507 else
7508 def = gimple_assign_lhs (iv);
7509 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7510 PLUS_EXPR,
7511 def, vec_step);
7512 if (gimple_code (iv) == GIMPLE_PHI)
7513 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7514 else
7515 {
7516 gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
7517 gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
7518 }
7519 SLP_TREE_VEC_STMTS (slp_node).quick_push
7520 (loop_vinfo->add_stmt (new_stmt));
7521 }
7522 }
7523
7524 return true;
7525 }
7526
7527 /* Create the vector that holds the initial_value of the induction. */
7528 if (nested_in_vect_loop)
7529 {
7530 /* iv_loop is nested in the loop to be vectorized. init_expr had already
7531 been created during vectorization of previous stmts. We obtain it
7532 from the STMT_VINFO_VEC_STMT of the defining stmt. */
7533 vec_init = vect_get_vec_def_for_operand (init_expr, stmt_info);
7534 /* If the initial value is not of proper type, convert it. */
7535 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
7536 {
7537 new_stmt
7538 = gimple_build_assign (vect_get_new_ssa_name (vectype,
7539 vect_simple_var,
7540 "vec_iv_"),
7541 VIEW_CONVERT_EXPR,
7542 build1 (VIEW_CONVERT_EXPR, vectype,
7543 vec_init));
7544 vec_init = gimple_assign_lhs (new_stmt);
7545 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
7546 new_stmt);
7547 gcc_assert (!new_bb);
7548 loop_vinfo->add_stmt (new_stmt);
7549 }
7550 }
7551 else
7552 {
7553 /* iv_loop is the loop to be vectorized. Create:
7554 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
7555 stmts = NULL;
7556 new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
7557
7558 unsigned HOST_WIDE_INT const_nunits;
7559 if (nunits.is_constant (&const_nunits))
7560 {
7561 tree_vector_builder elts (vectype, const_nunits, 1);
7562 elts.quick_push (new_name);
7563 for (i = 1; i < const_nunits; i++)
7564 {
7565 /* Create: new_name_i = new_name + step_expr */
7566 new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
7567 new_name, step_expr);
7568 elts.quick_push (new_name);
7569 }
7570 /* Create a vector from [new_name_0, new_name_1, ...,
7571 new_name_nunits-1] */
7572 vec_init = gimple_build_vector (&stmts, &elts);
7573 }
7574 else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
7575 /* Build the initial value directly from a VEC_SERIES_EXPR. */
7576 vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
7577 new_name, step_expr);
7578 else
7579 {
7580 /* Build:
7581 [base, base, base, ...]
7582 + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
7583 gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
7584 gcc_assert (flag_associative_math);
7585 tree index = build_index_vector (vectype, 0, 1);
7586 tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
7587 new_name);
7588 tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
7589 step_expr);
7590 vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
7591 vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
7592 vec_init, step_vec);
7593 vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
7594 vec_init, base_vec);
7595 }
7596
7597 if (stmts)
7598 {
7599 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7600 gcc_assert (!new_bb);
7601 }
7602 }
7603
7604
7605 /* Create the vector that holds the step of the induction. */
7606 if (nested_in_vect_loop)
7607 /* iv_loop is nested in the loop to be vectorized. Generate:
7608 vec_step = [S, S, S, S] */
7609 new_name = step_expr;
7610 else
7611 {
7612 /* iv_loop is the loop to be vectorized. Generate:
7613 vec_step = [VF*S, VF*S, VF*S, VF*S] */
7614 gimple_seq seq = NULL;
7615 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7616 {
7617 expr = build_int_cst (integer_type_node, vf);
7618 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7619 }
7620 else
7621 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7622 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7623 expr, step_expr);
7624 if (seq)
7625 {
7626 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7627 gcc_assert (!new_bb);
7628 }
7629 }
7630
7631 t = unshare_expr (new_name);
7632 gcc_assert (CONSTANT_CLASS_P (new_name)
7633 || TREE_CODE (new_name) == SSA_NAME);
7634 new_vec = build_vector_from_val (vectype, t);
7635 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7636
7637
7638 /* Create the following def-use cycle:
7639 loop prolog:
7640 vec_init = ...
7641 vec_step = ...
7642 loop:
7643 vec_iv = PHI <vec_init, vec_loop>
7644 ...
7645 STMT
7646 ...
7647 vec_loop = vec_iv + vec_step; */
7648
7649 /* Create the induction-phi that defines the induction-operand. */
7650 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7651 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7652 stmt_vec_info induction_phi_info = loop_vinfo->add_stmt (induction_phi);
7653 induc_def = PHI_RESULT (induction_phi);
7654
7655 /* Create the iv update inside the loop */
7656 vec_def = make_ssa_name (vec_dest);
7657 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7658 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7659 stmt_vec_info new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7660
7661 /* Set the arguments of the phi node: */
7662 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7663 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7664 UNKNOWN_LOCATION);
7665
7666 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi_info;
7667
7668 /* In case the vectorization factor (VF) is bigger than the number
7669 of elements that we can fit in a vectype (nunits), we have to generate
7670 more than one vector stmt - i.e. we need to "unroll" the
7671 vector stmt by a factor VF/nunits. For more details see documentation
7672 in vectorizable_operation. */
7673
7674 if (ncopies > 1)
7675 {
7676 gimple_seq seq = NULL;
7677 stmt_vec_info prev_stmt_vinfo;
7678 /* FORNOW. This restriction should be relaxed. */
7679 gcc_assert (!nested_in_vect_loop);
7680
7681 /* Create the vector that holds the step of the induction. */
7682 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7683 {
7684 expr = build_int_cst (integer_type_node, nunits);
7685 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7686 }
7687 else
7688 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
7689 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7690 expr, step_expr);
7691 if (seq)
7692 {
7693 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7694 gcc_assert (!new_bb);
7695 }
7696
7697 t = unshare_expr (new_name);
7698 gcc_assert (CONSTANT_CLASS_P (new_name)
7699 || TREE_CODE (new_name) == SSA_NAME);
7700 new_vec = build_vector_from_val (vectype, t);
7701 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7702
7703 vec_def = induc_def;
7704 prev_stmt_vinfo = induction_phi_info;
7705 for (i = 1; i < ncopies; i++)
7706 {
7707 /* vec_i = vec_prev + vec_step */
7708 new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
7709 vec_def, vec_step);
7710 vec_def = make_ssa_name (vec_dest, new_stmt);
7711 gimple_assign_set_lhs (new_stmt, vec_def);
7712
7713 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7714 new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7715 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt_info;
7716 prev_stmt_vinfo = new_stmt_info;
7717 }
7718 }
7719
7720 if (nested_in_vect_loop)
7721 {
7722 /* Find the loop-closed exit-phi of the induction, and record
7723 the final vector of induction results: */
7724 exit_phi = NULL;
7725 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7726 {
7727 gimple *use_stmt = USE_STMT (use_p);
7728 if (is_gimple_debug (use_stmt))
7729 continue;
7730
7731 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
7732 {
7733 exit_phi = use_stmt;
7734 break;
7735 }
7736 }
7737 if (exit_phi)
7738 {
7739 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7740 /* FORNOW. Currently not supporting the case in which an inner-loop induction
7741 is used only outside the outer-loop (i.e. not also inside it). */
7742 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
7743 && !STMT_VINFO_LIVE_P (stmt_vinfo));
7744
7745 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt_info;
7746 if (dump_enabled_p ())
7747 {
7748 dump_printf_loc (MSG_NOTE, vect_location,
7749 "vector of inductions after inner-loop:");
7750 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
7751 }
7752 }
7753 }
7754
7755
7756 if (dump_enabled_p ())
7757 {
7758 dump_printf_loc (MSG_NOTE, vect_location,
7759 "transform induction: created def-use cycle: ");
7760 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
7761 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
7762 SSA_NAME_DEF_STMT (vec_def), 0);
7763 }
7764
7765 return true;
7766 }
7767
7768 /* Function vectorizable_live_operation.
7769
7770 STMT_INFO computes a value that is used outside the loop. Check if
7771 it can be supported. */
7772
7773 bool
7774 vectorizable_live_operation (stmt_vec_info stmt_info,
7775 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7776 slp_tree slp_node, int slp_index,
7777 stmt_vec_info *vec_stmt,
7778 stmt_vector_for_cost *)
7779 {
7780 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7781 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7782 imm_use_iterator imm_iter;
7783 tree lhs, lhs_type, bitsize, vec_bitsize;
7784 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7785 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7786 int ncopies;
7787 gimple *use_stmt;
7788 auto_vec<tree> vec_oprnds;
7789 int vec_entry = 0;
7790 poly_uint64 vec_index = 0;
7791
7792 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
7793
7794 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
7795 return false;
7796
7797 /* FORNOW. CHECKME. */
7798 if (nested_in_vect_loop_p (loop, stmt_info))
7799 return false;
7800
7801 /* If STMT is not relevant and it is a simple assignment and its inputs are
7802 invariant then it can remain in place, unvectorized. The original last
7803 scalar value that it computes will be used. */
7804 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7805 {
7806 gcc_assert (is_simple_and_all_uses_invariant (stmt_info, loop_vinfo));
7807 if (dump_enabled_p ())
7808 dump_printf_loc (MSG_NOTE, vect_location,
7809 "statement is simple and uses invariant. Leaving in "
7810 "place.\n");
7811 return true;
7812 }
7813
7814 if (slp_node)
7815 ncopies = 1;
7816 else
7817 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7818
7819 if (slp_node)
7820 {
7821 gcc_assert (slp_index >= 0);
7822
7823 int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7824 int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7825
7826 /* Get the last occurrence of the scalar index from the concatenation of
7827 all the slp vectors. Calculate which slp vector it is and the index
7828 within. */
7829 poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;
7830
7831 /* Calculate which vector contains the result, and which lane of
7832 that vector we need. */
7833 if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
7834 {
7835 if (dump_enabled_p ())
7836 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7837 "Cannot determine which vector holds the"
7838 " final result.\n");
7839 return false;
7840 }
7841 }
7842
7843 if (!vec_stmt)
7844 {
7845 /* No transformation required. */
7846 if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7847 {
7848 if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
7849 OPTIMIZE_FOR_SPEED))
7850 {
7851 if (dump_enabled_p ())
7852 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7853 "can't use a fully-masked loop because "
7854 "the target doesn't support extract last "
7855 "reduction.\n");
7856 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7857 }
7858 else if (slp_node)
7859 {
7860 if (dump_enabled_p ())
7861 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7862 "can't use a fully-masked loop because an "
7863 "SLP statement is live after the loop.\n");
7864 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7865 }
7866 else if (ncopies > 1)
7867 {
7868 if (dump_enabled_p ())
7869 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7870 "can't use a fully-masked loop because"
7871 " ncopies is greater than 1.\n");
7872 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7873 }
7874 else
7875 {
7876 gcc_assert (ncopies == 1 && !slp_node);
7877 vect_record_loop_mask (loop_vinfo,
7878 &LOOP_VINFO_MASKS (loop_vinfo),
7879 1, vectype);
7880 }
7881 }
7882 return true;
7883 }
7884
7885 /* Use the lhs of the original scalar statement. */
7886 gimple *stmt = vect_orig_stmt (stmt_info)->stmt;
7887
7888 lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
7889 : gimple_get_lhs (stmt);
7890 lhs_type = TREE_TYPE (lhs);
7891
7892 bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
7893 ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
7894 : TYPE_SIZE (TREE_TYPE (vectype)));
7895 vec_bitsize = TYPE_SIZE (vectype);
7896
7897 /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
7898 tree vec_lhs, bitstart;
7899 if (slp_node)
7900 {
7901 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7902
7903 /* Get the correct slp vectorized stmt. */
7904 gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry]->stmt;
7905 if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
7906 vec_lhs = gimple_phi_result (phi);
7907 else
7908 vec_lhs = gimple_get_lhs (vec_stmt);
7909
7910 /* Get entry to use. */
7911 bitstart = bitsize_int (vec_index);
7912 bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
7913 }
7914 else
7915 {
7916 enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
7917 vec_lhs = vect_get_vec_def_for_operand_1 (stmt_info, dt);
7918 gcc_checking_assert (ncopies == 1
7919 || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7920
7921 /* For multiple copies, get the last copy. */
7922 for (int i = 1; i < ncopies; ++i)
7923 vec_lhs = vect_get_vec_def_for_stmt_copy (loop_vinfo, vec_lhs);
7924
7925 /* Get the last lane in the vector. */
7926 bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
7927 }
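  /* Concrete (illustrative) case: for a 4 x 32-bit vector, vec_bitsize is
     128 and bitsize is 32, so bitstart is 96 and, when the loop is not
     fully masked, the BIT_FIELD_REF built below extracts the element at
     bit offset 96, i.e. the final lane.  */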
7928
7929 gimple_seq stmts = NULL;
7930 tree new_tree;
7931 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
7932 {
7933 /* Emit:
7934
7935 SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
7936
7937 where VEC_LHS is the vectorized live-out result and MASK is
7938 the loop mask for the final iteration. */
7939 gcc_assert (ncopies == 1 && !slp_node);
7940 tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
7941 tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
7942 1, vectype, 0);
7943 tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST,
7944 scalar_type, mask, vec_lhs);
7945
7946 /* Convert the extracted vector element to the required scalar type. */
7947 new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
7948 }
7949 else
7950 {
7951 tree bftype = TREE_TYPE (vectype);
7952 if (VECTOR_BOOLEAN_TYPE_P (vectype))
7953 bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
7954 new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
7955 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
7956 &stmts, true, NULL_TREE);
7957 }
7958
7959 if (stmts)
7960 gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);
7961
7962 /* Replace the use of lhs with the newly computed result. If the use stmt is
7963 a single-arg PHI, just replace all uses of the PHI result. This is necessary
7964 because the lcssa PHI defining lhs may come before the newly inserted stmt. */
7965 use_operand_p use_p;
7966 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
7967 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
7968 && !is_gimple_debug (use_stmt))
7969 {
7970 if (gimple_code (use_stmt) == GIMPLE_PHI
7971 && gimple_phi_num_args (use_stmt) == 1)
7972 {
7973 replace_uses_by (gimple_phi_result (use_stmt), new_tree);
7974 }
7975 else
7976 {
7977 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
7978 SET_USE (use_p, new_tree);
7979 }
7980 update_stmt (use_stmt);
7981 }
7982
7983 return true;
7984 }
7985
7986 /* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO. */
7987
7988 static void
7989 vect_loop_kill_debug_uses (struct loop *loop, stmt_vec_info stmt_info)
7990 {
7991 ssa_op_iter op_iter;
7992 imm_use_iterator imm_iter;
7993 def_operand_p def_p;
7994 gimple *ustmt;
7995
7996 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
7997 {
7998 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
7999 {
8000 basic_block bb;
8001
8002 if (!is_gimple_debug (ustmt))
8003 continue;
8004
8005 bb = gimple_bb (ustmt);
8006
8007 if (!flow_bb_inside_loop_p (loop, bb))
8008 {
8009 if (gimple_debug_bind_p (ustmt))
8010 {
8011 if (dump_enabled_p ())
8012 dump_printf_loc (MSG_NOTE, vect_location,
8013 "killing debug use\n");
8014
8015 gimple_debug_bind_reset_value (ustmt);
8016 update_stmt (ustmt);
8017 }
8018 else
8019 gcc_unreachable ();
8020 }
8021 }
8022 }
8023 }
8024
8025 /* Given the loop represented by LOOP_VINFO, return true if the computation
8026 of LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
8027 otherwise. */
8028
8029 static bool
8030 loop_niters_no_overflow (loop_vec_info loop_vinfo)
8031 {
8032 /* Constant case. */
8033 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8034 {
8035 tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
8036 tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
8037
8038 gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
8039 gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
8040 if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
8041 return true;
8042 }
8043
8044 widest_int max;
8045 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8046 /* Check the upper bound of loop niters. */
8047 if (get_max_loop_iterations (loop, &max))
8048 {
8049 tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
8050 signop sgn = TYPE_SIGN (type);
8051 widest_int type_max = widest_int::from (wi::max_value (type), sgn);
8052 if (max < type_max)
8053 return true;
8054 }
8055 return false;
8056 }
8057
8058 /* Return a mask type with half the number of elements as TYPE. */
8059
8060 tree
8061 vect_halve_mask_nunits (tree type)
8062 {
8063 poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
8064 return build_truth_vector_type (nunits, current_vector_size);
8065 }
8066
8067 /* Return a mask type with twice as many elements as TYPE. */
8068
8069 tree
8070 vect_double_mask_nunits (tree type)
8071 {
8072 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
8073 return build_truth_vector_type (nunits, current_vector_size);
8074 }
8075
8076 /* Record that a fully-masked version of LOOP_VINFO would need MASKS to
8077 contain a sequence of NVECTORS masks that each control a vector of type
8078 VECTYPE. */
8079
8080 void
8081 vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
8082 unsigned int nvectors, tree vectype)
8083 {
8084 gcc_assert (nvectors != 0);
8085 if (masks->length () < nvectors)
8086 masks->safe_grow_cleared (nvectors);
8087 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8088 /* The number of scalars per iteration and the number of vectors are
8089 both compile-time constants. */
8090 unsigned int nscalars_per_iter
8091 = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
8092 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
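  /* Arithmetic illustration (made-up numbers): with a vectorization factor
     of 8, recording 2 masks for vectors of 8 elements gives
     nscalars_per_iter = 2 * 8 / 8 = 2, i.e. this rgroup's masks together
     control two scalar values per scalar iteration.  */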
8093 if (rgm->max_nscalars_per_iter < nscalars_per_iter)
8094 {
8095 rgm->max_nscalars_per_iter = nscalars_per_iter;
8096 rgm->mask_type = build_same_sized_truth_vector_type (vectype);
8097 }
8098 }
8099
8100 /* Given a complete set of masks MASKS, extract mask number INDEX
8101 for an rgroup that operates on NVECTORS vectors of type VECTYPE,
8102 where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI.
8103
8104 See the comment above vec_loop_masks for more details about the mask
8105 arrangement. */
8106
8107 tree
8108 vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
8109 unsigned int nvectors, tree vectype, unsigned int index)
8110 {
8111 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8112 tree mask_type = rgm->mask_type;
8113
8114 /* Populate the rgroup's mask array, if this is the first time we've
8115 used it. */
8116 if (rgm->masks.is_empty ())
8117 {
8118 rgm->masks.safe_grow_cleared (nvectors);
8119 for (unsigned int i = 0; i < nvectors; ++i)
8120 {
8121 tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
8122 /* Provide a dummy definition until the real one is available. */
8123 SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
8124 rgm->masks[i] = mask;
8125 }
8126 }
8127
8128 tree mask = rgm->masks[index];
8129 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
8130 TYPE_VECTOR_SUBPARTS (vectype)))
8131 {
8132 /* A loop mask for data type X can be reused for data type Y
8133 if X has N times more elements than Y and if Y's elements
8134 are N times bigger than X's. In this case each sequence
8135 of N elements in the loop mask will be all-zero or all-one.
8136 We can then view-convert the mask so that each sequence of
8137 N elements is replaced by a single element. */
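      /* For instance (illustrative types only): a mask created for a vector
	 of 16 QImode elements can be reused for a vector of 8 HImode
	 elements; each adjacent pair of mask elements is known to be
	 all-zero or all-one, so the VIEW_CONVERT_EXPR below is enough to
	 reinterpret it as an 8-element mask.  */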
8138 gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
8139 TYPE_VECTOR_SUBPARTS (vectype)));
8140 gimple_seq seq = NULL;
8141 mask_type = build_same_sized_truth_vector_type (vectype);
8142 mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
8143 if (seq)
8144 gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
8145 }
8146 return mask;
8147 }
8148
8149 /* Scale the profiling counters of LOOP, which is vectorized by factor VF,
8150 according to the new estimated iteration count. */
8151
8152 static void
8153 scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
8154 {
8155 edge preheader = loop_preheader_edge (loop);
8156 /* Reduce loop iterations by the vectorization factor. */
8157 gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
8158 profile_count freq_h = loop->header->count, freq_e = preheader->count ();
8159
8160 if (freq_h.nonzero_p ())
8161 {
8162 profile_probability p;
8163
8164 /* Avoid dropping loop body profile counter to 0 because of zero count
8165 in loop's preheader. */
8166 if (!(freq_e == profile_count::zero ()))
8167 freq_e = freq_e.force_nonzero ();
8168 p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
8169 scale_loop_frequencies (loop, p);
8170 }
8171
8172 edge exit_e = single_exit (loop);
8173 exit_e->probability = profile_probability::always ()
8174 .apply_scale (1, new_est_niter + 1);
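  /* Numeric illustration (made-up estimate): with new_est_niter = 24 the
     exit edge is given probability 1/25, matching one exit per 25 expected
     executions of the loop header.  */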
8175
8176 edge exit_l = single_pred_edge (loop->latch);
8177 profile_probability prob = exit_l->probability;
8178 exit_l->probability = exit_e->probability.invert ();
8179 if (prob.initialized_p () && exit_l->probability.initialized_p ())
8180 scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
8181 }
8182
8183 /* Vectorize STMT_INFO if relevant, inserting any new instructions before GSI.
8184 When vectorizing STMT_INFO as a store, set *SEEN_STORE to its
8185 stmt_vec_info. */
8186
8187 static void
8188 vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
8189 gimple_stmt_iterator *gsi, stmt_vec_info *seen_store)
8190 {
8191 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8192 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8193
8194 if (dump_enabled_p ())
8195 {
8196 dump_printf_loc (MSG_NOTE, vect_location,
8197 "------>vectorizing statement: ");
8198 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
8199 }
8200
8201 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8202 vect_loop_kill_debug_uses (loop, stmt_info);
8203
8204 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8205 && !STMT_VINFO_LIVE_P (stmt_info))
8206 return;
8207
8208 if (STMT_VINFO_VECTYPE (stmt_info))
8209 {
8210 poly_uint64 nunits
8211 = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
8212 if (!STMT_SLP_TYPE (stmt_info)
8213 && maybe_ne (nunits, vf)
8214 && dump_enabled_p ())
8215 /* For SLP, VF is set according to the unrolling factor, not to the
8216 vector size, hence this diagnostic is not valid for SLP. */
8217 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8218 }
8219
8220 /* Pure SLP statements have already been vectorized. We still need
8221 to apply loop vectorization to hybrid SLP statements. */
8222 if (PURE_SLP_STMT (stmt_info))
8223 return;
8224
8225 if (dump_enabled_p ())
8226 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
8227
8228 if (vect_transform_stmt (stmt_info, gsi, NULL, NULL))
8229 *seen_store = stmt_info;
8230 }
8231
8232 /* Function vect_transform_loop.
8233
8234 The analysis phase has determined that the loop is vectorizable.
8235 Vectorize the loop - create vectorized stmts to replace the scalar
8236 stmts in the loop, and update the loop exit condition.
8237 Returns the scalar epilogue loop if any.
8238
8239 struct loop *
8240 vect_transform_loop (loop_vec_info loop_vinfo)
8241 {
8242 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8243 struct loop *epilogue = NULL;
8244 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
8245 int nbbs = loop->num_nodes;
8246 int i;
8247 tree niters_vector = NULL_TREE;
8248 tree step_vector = NULL_TREE;
8249 tree niters_vector_mult_vf = NULL_TREE;
8250 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8251 unsigned int lowest_vf = constant_lower_bound (vf);
8252 gimple *stmt;
8253 bool check_profitability = false;
8254 unsigned int th;
8255
8256 DUMP_VECT_SCOPE ("vec_transform_loop");
8257
8258 loop_vinfo->shared->check_datarefs ();
8259
8260 /* Use the more conservative vectorization threshold. If the number
8261 of iterations is constant, assume the cost check has been performed
8262 by our caller. If the threshold makes all loops profitable that
8263 run at least the (estimated) vectorization factor number of times,
8264 checking is pointless, too. */
8265 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
8266 if (th >= vect_vf_for_cost (loop_vinfo)
8267 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8268 {
8269 if (dump_enabled_p ())
8270 dump_printf_loc (MSG_NOTE, vect_location,
8271 "Profitability threshold is %d loop iterations.\n",
8272 th);
8273 check_profitability = true;
8274 }
8275
8276 /* Make sure there exists a single-predecessor exit bb. Do this before
8277 versioning. */
8278 edge e = single_exit (loop);
8279 if (! single_pred_p (e->dest))
8280 {
8281 split_loop_exit_edge (e);
8282 if (dump_enabled_p ())
8283 dump_printf (MSG_NOTE, "split exit edge\n");
8284 }
8285
8286 /* Version the loop first, if required, so the profitability check
8287 comes first. */
8288
8289 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
8290 {
8291 poly_uint64 versioning_threshold
8292 = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
8293 if (check_profitability
8294 && ordered_p (poly_uint64 (th), versioning_threshold))
8295 {
8296 versioning_threshold = ordered_max (poly_uint64 (th),
8297 versioning_threshold);
8298 check_profitability = false;
8299 }
8300 vect_loop_versioning (loop_vinfo, th, check_profitability,
8301 versioning_threshold);
8302 check_profitability = false;
8303 }
8304
8305 /* Make sure there exists a single-predecessor exit bb also on the
8306 scalar loop copy. Do this after versioning but before peeling
8307 so the CFG structure is fine for both the scalar and the if-converted
8308 loop, and so that slpeel_duplicate_current_defs_from_edges sees
8309 matched loop-closed PHI nodes on the exit. */
8310 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8311 {
8312 e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
8313 if (! single_pred_p (e->dest))
8314 {
8315 split_loop_exit_edge (e);
8316 if (dump_enabled_p ())
8317 dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
8318 }
8319 }
8320
8321 tree niters = vect_build_loop_niters (loop_vinfo);
8322 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
8323 tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
8324 bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
8325 epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
8326 &step_vector, &niters_vector_mult_vf, th,
8327 check_profitability, niters_no_overflow);
8328
8329 if (niters_vector == NULL_TREE)
8330 {
8331 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8332 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8333 && known_eq (lowest_vf, vf))
8334 {
8335 niters_vector
8336 = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
8337 LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
8338 step_vector = build_one_cst (TREE_TYPE (niters));
8339 }
8340 else
8341 vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
8342 &step_vector, niters_no_overflow);
8343 }
8344
8345 /* 1) Make sure the loop header has exactly two entries
8346 2) Make sure we have a preheader basic block. */
8347
8348 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
8349
8350 split_edge (loop_preheader_edge (loop));
8351
8352 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8353 && vect_use_loop_mask_for_alignment_p (loop_vinfo))
8354 /* This will deal with any possible peeling. */
8355 vect_prepare_for_masked_peels (loop_vinfo);
8356
8357 /* Schedule the SLP instances first, then handle loop vectorization
8358 below. */
8359 if (!loop_vinfo->slp_instances.is_empty ())
8360 {
8361 DUMP_VECT_SCOPE ("scheduling SLP instances");
8362 vect_schedule_slp (loop_vinfo);
8363 }
8364
8365 /* FORNOW: the vectorizer supports only loops whose body consists
8366 of one basic block (header + empty latch). When the vectorizer
8367 supports more involved loop forms, the order in which the BBs are
8368 traversed will need to be reconsidered. */
8369
8370 for (i = 0; i < nbbs; i++)
8371 {
8372 basic_block bb = bbs[i];
8373 stmt_vec_info stmt_info;
8374
8375 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
8376 gsi_next (&si))
8377 {
8378 gphi *phi = si.phi ();
8379 if (dump_enabled_p ())
8380 {
8381 dump_printf_loc (MSG_NOTE, vect_location,
8382 "------>vectorizing phi: ");
8383 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
8384 }
8385 stmt_info = loop_vinfo->lookup_stmt (phi);
8386 if (!stmt_info)
8387 continue;
8388
8389 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8390 vect_loop_kill_debug_uses (loop, stmt_info);
8391
8392 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8393 && !STMT_VINFO_LIVE_P (stmt_info))
8394 continue;
8395
8396 if (STMT_VINFO_VECTYPE (stmt_info)
8397 && (maybe_ne
8398 (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
8399 && dump_enabled_p ())
8400 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8401
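/* For illustration: an induction PHI such as i_1 = PHI <0, i_7> is
   transformed below (for VF == 4 and step 1) into roughly

     vi_1 = PHI <{ 0, 1, 2, 3 }, vi_7>
     vi_7 = vi_1 + { 4, 4, 4, 4 };

   reduction and nested-cycle PHIs likewise get vectorized PHIs.  */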
8402 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
8403 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
8404 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
8405 && ! PURE_SLP_STMT (stmt_info))
8406 {
8407 if (dump_enabled_p ())
8408 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
8409 vect_transform_stmt (stmt_info, NULL, NULL, NULL);
8410 }
8411 }
8412
8413 for (gimple_stmt_iterator si = gsi_start_bb (bb);
8414 !gsi_end_p (si);)
8415 {
8416 stmt = gsi_stmt (si);
8417 /* During vectorization remove existing clobber stmts. */
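/* A clobber only marks the end of a variable's lifetime, e.g.

     x ={v} {CLOBBER};

   and carries no value the vector code needs, so it is simply dropped.  */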
8418 if (gimple_clobber_p (stmt))
8419 {
8420 unlink_stmt_vdef (stmt);
8421 gsi_remove (&si, true);
8422 release_defs (stmt);
8423 }
8424 else
8425 {
8426 stmt_info = loop_vinfo->lookup_stmt (stmt);
8427
8428 /* vector stmts created in the outer-loop during vectorization of
8429 stmts in an inner-loop may not have a stmt_info, and do not
8430 need to be vectorized. */
8431 stmt_vec_info seen_store = NULL;
8432 if (stmt_info)
8433 {
8434 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
8435 {
8436 gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
8437 for (gimple_stmt_iterator subsi = gsi_start (def_seq);
8438 !gsi_end_p (subsi); gsi_next (&subsi))
8439 {
8440 stmt_vec_info pat_stmt_info
8441 = loop_vinfo->lookup_stmt (gsi_stmt (subsi));
8442 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info,
8443 &si, &seen_store);
8444 }
8445 stmt_vec_info pat_stmt_info
8446 = STMT_VINFO_RELATED_STMT (stmt_info);
8447 vect_transform_loop_stmt (loop_vinfo, pat_stmt_info, &si,
8448 &seen_store);
8449 }
8450 vect_transform_loop_stmt (loop_vinfo, stmt_info, &si,
8451 &seen_store);
8452 }
8453 gsi_next (&si);
8454 if (seen_store)
8455 {
8456 if (STMT_VINFO_GROUPED_ACCESS (seen_store))
8457 /* Interleaving. The vectorization of the
8458 interleaving chain has been completed; free
8459 all the stores in the chain. */
8460 vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store));
8461 else
8462 /* Free the attached stmt_vec_info and remove the stmt. */
8463 loop_vinfo->remove_stmt (stmt_info);
8464 }
8465 }
8466 }
8467
8468 /* Stub out scalar statements that must not survive vectorization.
8469 Doing this here helps with grouped statements, or statements that
8470 are involved in patterns. */
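/* For example, a scalar

     _1 = .MASK_LOAD (ptr, align, mask);

   whose value has already been replaced by its vectorized form is
   rewritten below as

     _1 = 0;

   so that no scalar masked load survives into the output.  */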
8471 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
8472 !gsi_end_p (gsi); gsi_next (&gsi))
8473 {
8474 gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
8475 if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
8476 {
8477 tree lhs = gimple_get_lhs (call);
8478 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8479 {
8480 tree zero = build_zero_cst (TREE_TYPE (lhs));
8481 gimple *new_stmt = gimple_build_assign (lhs, zero);
8482 gsi_replace (&gsi, new_stmt, true);
8483 }
8484 }
8485 }
8486 } /* BBs in loop */
8487
8488 /* The vectorization factor is always > 1, so if we use an IV increment of 1,
8489 a zero NITERS becomes a nonzero NITERS_VECTOR. */
8490 if (integer_onep (step_vector))
8491 niters_no_overflow = true;
8492 vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
8493 niters_vector_mult_vf, !niters_no_overflow);
8494
8495 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
8496 scale_profile_for_vect_loop (loop, assumed_vf);
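/* Rough sketch: if the scalar loop was expected to run about 100 times
   and ASSUMED_VF is 4, the vector loop body is now expected to run
   about 25 times; scale_profile_for_vect_loop does the exact
   adjustment.  */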
8497
8498 /* True if the final iteration might not handle a full vector's
8499 worth of scalar iterations. */
8500 bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
8501 /* The minimum number of iterations performed by the epilogue. This
8502 is 1 when peeling for gaps because we always need a final scalar
8503 iteration. */
8504 int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
8505 /* +1 to convert latch counts to loop iteration counts,
8506 -min_epilogue_iters to remove iterations that cannot be performed
8507 by the vector code. */
8508 int bias_for_lowest = 1 - min_epilogue_iters;
8509 int bias_for_assumed = bias_for_lowest;
8510 int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
8511 if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
8512 {
8513 /* When the amount of peeling is known at compile time, the first
8514 iteration will have exactly alignment_npeels active elements.
8515 In the worst case it will have at least one. */
8516 int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
8517 bias_for_lowest += lowest_vf - min_first_active;
8518 bias_for_assumed += assumed_vf - min_first_active;
8519 }
8520 /* In these calculations the "- 1" converts loop iteration counts
8521 back to latch counts. */
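/* Worked example, assuming no peeling for gaps and no full masking:
   with a latch-count upper bound of 7 (at most 8 iterations) and
   LOWEST_VF == 4, BIAS_FOR_LOWEST is 1 and the new bound is
   (7 + 1) / 4 - 1 = 1, i.e. at most two vector iterations.  */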
8522 if (loop->any_upper_bound)
8523 loop->nb_iterations_upper_bound
8524 = (final_iter_may_be_partial
8525 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
8526 lowest_vf) - 1
8527 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
8528 lowest_vf) - 1);
8529 if (loop->any_likely_upper_bound)
8530 loop->nb_iterations_likely_upper_bound
8531 = (final_iter_may_be_partial
8532 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
8533 + bias_for_lowest, lowest_vf) - 1
8534 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
8535 + bias_for_lowest, lowest_vf) - 1);
8536 if (loop->any_estimate)
8537 loop->nb_iterations_estimate
8538 = (final_iter_may_be_partial
8539 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
8540 assumed_vf) - 1
8541 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
8542 assumed_vf) - 1);
8543
8544 if (dump_enabled_p ())
8545 {
8546 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8547 {
8548 dump_printf_loc (MSG_NOTE, vect_location,
8549 "LOOP VECTORIZED\n");
8550 if (loop->inner)
8551 dump_printf_loc (MSG_NOTE, vect_location,
8552 "OUTER LOOP VECTORIZED\n");
8553 dump_printf (MSG_NOTE, "\n");
8554 }
8555 else
8556 {
8557 dump_printf_loc (MSG_NOTE, vect_location,
8558 "LOOP EPILOGUE VECTORIZED (VS=");
8559 dump_dec (MSG_NOTE, current_vector_size);
8560 dump_printf (MSG_NOTE, ")\n");
8561 }
8562 }
8563
8564 /* Free SLP instances here because otherwise stmt reference counting
8565 won't work. */
8566 slp_instance instance;
8567 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
8568 vect_free_slp_instance (instance, true);
8569 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
8570 /* Clear the safelen field since its value is invalid after vectorization:
8571 the vectorized loop can have loop-carried dependencies. */
8572 loop->safelen = 0;
8573
8574 /* Don't vectorize the epilogue of an epilogue loop. */
8575 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8576 epilogue = NULL;
8577
8578 if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
8579 epilogue = NULL;
8580
8581 if (epilogue)
8582 {
8583 auto_vector_sizes vector_sizes;
8584 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
8585 unsigned int next_size = 0;
8586
8587 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8588 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
8589 && known_eq (vf, lowest_vf))
8590 {
8591 unsigned int eiters
8592 = (LOOP_VINFO_INT_NITERS (loop_vinfo)
8593 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
8594 eiters = eiters % lowest_vf;
8595 epilogue->nb_iterations_upper_bound = eiters - 1;
8596
8597 unsigned int ratio;
8598 while (next_size < vector_sizes.length ()
8599 && !(constant_multiple_p (current_vector_size,
8600 vector_sizes[next_size], &ratio)
8601 && eiters >= lowest_vf / ratio))
8602 next_size += 1;
8603 }
8604 else
8605 while (next_size < vector_sizes.length ()
8606 && maybe_lt (current_vector_size, vector_sizes[next_size]))
8607 next_size += 1;
8608
8609 if (next_size == vector_sizes.length ())
8610 epilogue = NULL;
8611 }
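/* A worked example, assuming a target whose autovectorize_vector_sizes
   hook returns { 64, 32, 16 } and a main loop that used 32-byte vectors
   with LOWEST_VF == 8: with 3 known epilogue iterations the loop above
   skips 64 (32 is not a multiple of it), rejects 32 (3 < 8 / 1) and
   16 (3 < 8 / 2), so NEXT_SIZE reaches the end of the list and the
   epilogue is not vectorized.  */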
8612
8613 if (epilogue)
8614 {
8615 epilogue->force_vectorize = loop->force_vectorize;
8616 epilogue->safelen = loop->safelen;
8617 epilogue->dont_vectorize = false;
8618
8619 /* We may need to if-convert the epilogue to vectorize it. */
8620 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8621 tree_if_conversion (epilogue);
8622 }
8623
8624 return epilogue;
8625 }
8626
8627 /* The code below performs a simple optimization: it reverts
8628 if-conversion for masked stores, i.e. if the mask of a store is all
8629 zeros, the store is skipped and, where possible, so are the stored-value producers.
8630 For example,
8631 for (i=0; i<n; i++)
8632 if (c[i])
8633 {
8634 p1[i] += 1;
8635 p2[i] = p3[i] + 2;
8636 }
8637 this transformation will produce the following semi-hammock:
8638
8639 if (mask__ifc__42.18_165 != { 0, 0, 0, 0, 0, 0, 0, 0 })
8640 {
8641 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
8642 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
8643 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
8644 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
8645 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
8646 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
8647 }
8648 */
8649
8650 void
8651 optimize_mask_stores (struct loop *loop)
8652 {
8653 basic_block *bbs = get_loop_body (loop);
8654 unsigned nbbs = loop->num_nodes;
8655 unsigned i;
8656 basic_block bb;
8657 struct loop *bb_loop;
8658 gimple_stmt_iterator gsi;
8659 gimple *stmt;
8660 auto_vec<gimple *> worklist;
8661
8662 vect_location = find_loop_location (loop);
8663 /* Pick up all masked stores in the loop, if any. */
8664 for (i = 0; i < nbbs; i++)
8665 {
8666 bb = bbs[i];
8667 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
8668 gsi_next (&gsi))
8669 {
8670 stmt = gsi_stmt (gsi);
8671 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8672 worklist.safe_push (stmt);
8673 }
8674 }
8675
8676 free (bbs);
8677 if (worklist.is_empty ())
8678 return;
8679
8680 /* Loop has masked stores. */
8681 while (!worklist.is_empty ())
8682 {
8683 gimple *last, *last_store;
8684 edge e, efalse;
8685 tree mask;
8686 basic_block store_bb, join_bb;
8687 gimple_stmt_iterator gsi_to;
8688 tree vdef, new_vdef;
8689 gphi *phi;
8690 tree vectype;
8691 tree zero;
8692
8693 last = worklist.pop ();
8694 mask = gimple_call_arg (last, 2);
8695 bb = gimple_bb (last);
8696 /* Create then_bb and an if-then structure in the CFG; then_bb belongs
8697 to the same loop as if_bb. That loop can differ from LOOP when a
8698 two-level loop nest is vectorized and the mask_store belongs to the
8699 inner loop. */
8700 e = split_block (bb, last);
8701 bb_loop = bb->loop_father;
8702 gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
8703 join_bb = e->dest;
8704 store_bb = create_empty_bb (bb);
8705 add_bb_to_loop (store_bb, bb_loop);
8706 e->flags = EDGE_TRUE_VALUE;
8707 efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
8708 /* Put STORE_BB to likely part. */
8709 efalse->probability = profile_probability::unlikely ();
8710 store_bb->count = efalse->count ();
8711 make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
8712 if (dom_info_available_p (CDI_DOMINATORS))
8713 set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
8714 if (dump_enabled_p ())
8715 dump_printf_loc (MSG_NOTE, vect_location,
8716 "Create new block %d to sink mask stores.",
8717 store_bb->index);
8718 /* Create vector comparison with boolean result. */
8719 vectype = TREE_TYPE (mask);
8720 zero = build_zero_cst (vectype);
8721 stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
8722 gsi = gsi_last_bb (bb);
8723 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
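/* The resulting CFG is roughly

     bb:       ... if (mask == { 0, ... 0 })  true edge -> join_bb
                                              false edge -> store_bb
     store_bb: the masked stores, sunk here below
     join_bb:  the code following the stores

   so the stores are skipped entirely when the mask is all zeros.  */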
8724 /* Create new PHI node for vdef of the last masked store:
8725 .MEM_2 = VDEF <.MEM_1>
8726 will be converted to
8727 .MEM.3 = VDEF <.MEM_1>
8728 and new PHI node will be created in join bb
8729 .MEM_2 = PHI <.MEM_1, .MEM_3>
8730 */
8731 vdef = gimple_vdef (last);
8732 new_vdef = make_ssa_name (gimple_vop (cfun), last);
8733 gimple_set_vdef (last, new_vdef);
8734 phi = create_phi_node (vdef, join_bb);
8735 add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
8736
8737 /* Put all masked stores with the same mask to STORE_BB if possible. */
8738 while (true)
8739 {
8740 gimple_stmt_iterator gsi_from;
8741 gimple *stmt1 = NULL;
8742
8743 /* Move masked store to STORE_BB. */
8744 last_store = last;
8745 gsi = gsi_for_stmt (last);
8746 gsi_from = gsi;
8747 /* Shift GSI to the previous stmt for further traversal. */
8748 gsi_prev (&gsi);
8749 gsi_to = gsi_start_bb (store_bb);
8750 gsi_move_before (&gsi_from, &gsi_to);
8751 /* Set GSI_TO to the start of the now non-empty STORE_BB. */
8752 gsi_to = gsi_start_bb (store_bb);
8753 if (dump_enabled_p ())
8754 {
8755 dump_printf_loc (MSG_NOTE, vect_location,
8756 "Move stmt to created bb\n");
8757 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
8758 }
8759 /* Move all stored value producers if possible. */
8760 while (!gsi_end_p (gsi))
8761 {
8762 tree lhs;
8763 imm_use_iterator imm_iter;
8764 use_operand_p use_p;
8765 bool res;
8766
8767 /* Skip debug statements. */
8768 if (is_gimple_debug (gsi_stmt (gsi)))
8769 {
8770 gsi_prev (&gsi);
8771 continue;
8772 }
8773 stmt1 = gsi_stmt (gsi);
8774 /* Do not consider statements writing to memory or having
8775 a volatile operand. */
8776 if (gimple_vdef (stmt1)
8777 || gimple_has_volatile_ops (stmt1))
8778 break;
8779 gsi_from = gsi;
8780 gsi_prev (&gsi);
8781 lhs = gimple_get_lhs (stmt1);
8782 if (!lhs)
8783 break;
8784
8785 /* LHS of vectorized stmt must be SSA_NAME. */
8786 if (TREE_CODE (lhs) != SSA_NAME)
8787 break;
8788
8789 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8790 {
8791 /* Remove dead scalar statement. */
8792 if (has_zero_uses (lhs))
8793 {
8794 gsi_remove (&gsi_from, true);
8795 continue;
8796 }
8797 }
8798
8799 /* Check that LHS does not have uses outside of STORE_BB. */
8800 res = true;
8801 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
8802 {
8803 gimple *use_stmt;
8804 use_stmt = USE_STMT (use_p);
8805 if (is_gimple_debug (use_stmt))
8806 continue;
8807 if (gimple_bb (use_stmt) != store_bb)
8808 {
8809 res = false;
8810 break;
8811 }
8812 }
8813 if (!res)
8814 break;
8815
8816 if (gimple_vuse (stmt1)
8817 && gimple_vuse (stmt1) != gimple_vuse (last_store))
8818 break;
8819
8820 /* Can move STMT1 to STORE_BB. */
8821 if (dump_enabled_p ())
8822 {
8823 dump_printf_loc (MSG_NOTE, vect_location,
8824 "Move stmt to created bb\n");
8825 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
8826 }
8827 gsi_move_before (&gsi_from, &gsi_to);
8828 /* Shift GSI_TO for further insertion. */
8829 gsi_prev (&gsi_to);
8830 }
8831 /* Put other masked stores with the same mask to STORE_BB. */
8832 if (worklist.is_empty ()
8833 || gimple_call_arg (worklist.last (), 2) != mask
8834 || worklist.last () != stmt1)
8835 break;
8836 last = worklist.pop ();
8837 }
8838 add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
8839 }
8840 }