[30/46] Use stmt_vec_infos rather than gimple stmts for worklists
[gcc.git] / gcc / tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-ssa-loop-ivopts.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "tree-ssa-loop.h"
45 #include "cfgloop.h"
46 #include "params.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "gimple-fold.h"
50 #include "cgraph.h"
51 #include "tree-cfg.h"
52 #include "tree-if-conv.h"
53 #include "internal-fn.h"
54 #include "tree-vector-builder.h"
55 #include "vec-perm-indices.h"
56 #include "tree-eh.h"
57
58 /* Loop Vectorization Pass.
59
60 This pass tries to vectorize loops.
61
62 For example, the vectorizer transforms the following simple loop:
63
64 short a[N]; short b[N]; short c[N]; int i;
65
66 for (i=0; i<N; i++){
67 a[i] = b[i] + c[i];
68 }
69
70 as if it had been manually vectorized by rewriting the source code into:
71
72 typedef int __attribute__((mode(V8HI))) v8hi;
73 short a[N]; short b[N]; short c[N]; int i;
74 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
75 v8hi va, vb, vc;
76
77 for (i=0; i<N/8; i++){
78 vb = pb[i];
79 vc = pc[i];
80 va = vb + vc;
81 pa[i] = va;
82 }
83
84 The main entry to this pass is vectorize_loops(), in which
85 the vectorizer applies a set of analyses on a given set of loops,
86 followed by the actual vectorization transformation for the loops that
87 had successfully passed the analysis phase.
88 Throughout this pass we make a distinction between two types of
89 data: scalars (which are represented by SSA_NAMES), and memory references
90 ("data-refs"). These two types of data require different handling both
91 during analysis and transformation. The types of data-refs that the
92 vectorizer currently supports are ARRAY_REFS whose base is an array DECL
93 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
94 accesses are required to have a simple (consecutive) access pattern.
95
96 Analysis phase:
97 ===============
98 The driver for the analysis phase is vect_analyze_loop().
99 It applies a set of analyses, some of which rely on the scalar evolution
100 analyzer (scev) developed by Sebastian Pop.
101
102 During the analysis phase the vectorizer records some information
103 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
104 loop, as well as general information about the loop as a whole, which is
105 recorded in a "loop_vec_info" struct attached to each loop.
106
107 Transformation phase:
108 =====================
109 The loop transformation phase scans all the stmts in the loop, and
110 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
111 the loop that needs to be vectorized. It inserts the vector code sequence
112 just before the scalar stmt S, and records a pointer to the vector code
113 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
114 attached to S). This pointer will be used for the vectorization of following
115 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
116 otherwise, we rely on dead code elimination for removing it.
117
118 For example, say stmt S1 was vectorized into stmt VS1:
119
120 VS1: vb = px[i];
121 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
122 S2: a = b;
123
124 To vectorize stmt S2, the vectorizer first finds the stmt that defines
125 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
126 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
127 resulting sequence would be:
128
129 VS1: vb = px[i];
130 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
131 VS2: va = vb;
132 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
133
134 Operands that are not SSA_NAMEs are data-refs that appear in
135 load/store operations (like 'x[i]' in S1), and are handled differently.
136
137 Target modeling:
138 =================
139 Currently the only target-specific information that is used is the
140 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
141 Targets that can support different sizes of vectors will, for now, need
142 to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
143 flexibility will be added in the future.
144
145 Since we only vectorize operations whose vector form can be
146 expressed using existing tree codes, to verify that an operation is
147 supported, the vectorizer checks the relevant optab at the relevant
148 machine_mode (e.g, optab_handler (add_optab, V8HImode)). If
149 the value found is CODE_FOR_nothing, then there's no target support, and
150 we can't vectorize the stmt.
151
152 For additional information on this project see:
153 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
154 */
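/* Illustrative sketch of the target support check described above: the
   query boils down to something like

     if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
       return false;   -- no vector form of the addition on this target

   where a CODE_FOR_nothing result means the operation has no vector
   form on the target and the stmt cannot be vectorized.  */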
155
156 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
157
158 /* Subroutine of vect_determine_vf_for_stmt that handles only one
159 statement. VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
160 may already be set for general statements (not just data refs). */
161
162 static bool
163 vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
164 bool vectype_maybe_set_p,
165 poly_uint64 *vf,
166 vec<stmt_vec_info> *mask_producers)
167 {
168 gimple *stmt = stmt_info->stmt;
169
170 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
171 && !STMT_VINFO_LIVE_P (stmt_info))
172 || gimple_clobber_p (stmt))
173 {
174 if (dump_enabled_p ())
175 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
176 return true;
177 }
178
179 tree stmt_vectype, nunits_vectype;
180 if (!vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
181 &nunits_vectype))
182 return false;
183
184 if (stmt_vectype)
185 {
186 if (STMT_VINFO_VECTYPE (stmt_info))
187 /* The only case when a vectype has already been set is for stmts
188 that contain a data ref, or for "pattern-stmts" (stmts generated
189 by the vectorizer to represent/replace a certain idiom). */
190 gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
191 || vectype_maybe_set_p)
192 && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
193 else if (stmt_vectype == boolean_type_node)
194 mask_producers->safe_push (stmt_info);
195 else
196 STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
197 }
198
199 if (nunits_vectype)
200 vect_update_max_nunits (vf, nunits_vectype);
201
202 return true;
203 }
204
205 /* Subroutine of vect_determine_vectorization_factor. Set the vector
206 types of STMT_INFO and all attached pattern statements and update
207 the vectorization factor VF accordingly. If some of the statements
208 produce a mask result whose vector type can only be calculated later,
209 add them to MASK_PRODUCERS. Return true on success or false if
210 something prevented vectorization. */
211
212 static bool
213 vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
214 vec<stmt_vec_info> *mask_producers)
215 {
216 vec_info *vinfo = stmt_info->vinfo;
217 if (dump_enabled_p ())
218 {
219 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
220 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
221 }
222 if (!vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers))
223 return false;
224
225 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
226 && STMT_VINFO_RELATED_STMT (stmt_info))
227 {
228 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
229 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
230
231 /* If a pattern statement has def stmts, analyze them too. */
232 for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
233 !gsi_end_p (si); gsi_next (&si))
234 {
235 stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
236 if (dump_enabled_p ())
237 {
238 dump_printf_loc (MSG_NOTE, vect_location,
239 "==> examining pattern def stmt: ");
240 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
241 def_stmt_info->stmt, 0);
242 }
243 if (!vect_determine_vf_for_stmt_1 (def_stmt_info, true,
244 vf, mask_producers))
245 return false;
246 }
247
248 if (dump_enabled_p ())
249 {
250 dump_printf_loc (MSG_NOTE, vect_location,
251 "==> examining pattern statement: ");
252 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
253 }
254 if (!vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers))
255 return false;
256 }
257
258 return true;
259 }
260
261 /* Function vect_determine_vectorization_factor
262
263 Determine the vectorization factor (VF). VF is the number of data elements
264 that are operated upon in parallel in a single iteration of the vectorized
265 loop. For example, when vectorizing a loop that operates on 4-byte elements,
266 on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
267 elements can fit in a single vector register.
268
269 We currently support vectorization of loops in which all types operated upon
270 are of the same size. Therefore this function currently sets VF according to
271 the size of the types operated upon, and fails if there are multiple sizes
272 in the loop.
273
274 VF is also the factor by which the loop iterations are strip-mined, e.g.:
275 original loop:
276 for (i=0; i<N; i++){
277 a[i] = b[i] + c[i];
278 }
279
280 vectorized loop:
281 for (i=0; i<N; i+=VF){
282 a[i:VF] = b[i:VF] + c[i:VF];
283 }
284 */
285
286 static bool
287 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
288 {
289 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
290 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
291 unsigned nbbs = loop->num_nodes;
292 poly_uint64 vectorization_factor = 1;
293 tree scalar_type = NULL_TREE;
294 gphi *phi;
295 tree vectype;
296 stmt_vec_info stmt_info;
297 unsigned i;
298 auto_vec<stmt_vec_info> mask_producers;
299
300 DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
301
302 for (i = 0; i < nbbs; i++)
303 {
304 basic_block bb = bbs[i];
305
306 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
307 gsi_next (&si))
308 {
309 phi = si.phi ();
310 stmt_info = loop_vinfo->lookup_stmt (phi);
311 if (dump_enabled_p ())
312 {
313 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
314 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
315 }
316
317 gcc_assert (stmt_info);
318
319 if (STMT_VINFO_RELEVANT_P (stmt_info)
320 || STMT_VINFO_LIVE_P (stmt_info))
321 {
322 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
323 scalar_type = TREE_TYPE (PHI_RESULT (phi));
324
325 if (dump_enabled_p ())
326 {
327 dump_printf_loc (MSG_NOTE, vect_location,
328 "get vectype for scalar type: ");
329 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
330 dump_printf (MSG_NOTE, "\n");
331 }
332
333 vectype = get_vectype_for_scalar_type (scalar_type);
334 if (!vectype)
335 {
336 if (dump_enabled_p ())
337 {
338 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
339 "not vectorized: unsupported "
340 "data-type ");
341 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
342 scalar_type);
343 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
344 }
345 return false;
346 }
347 STMT_VINFO_VECTYPE (stmt_info) = vectype;
348
349 if (dump_enabled_p ())
350 {
351 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
352 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
353 dump_printf (MSG_NOTE, "\n");
354 }
355
356 if (dump_enabled_p ())
357 {
358 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
359 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
360 dump_printf (MSG_NOTE, "\n");
361 }
362
363 vect_update_max_nunits (&vectorization_factor, vectype);
364 }
365 }
366
367 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
368 gsi_next (&si))
369 {
370 stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
371 if (!vect_determine_vf_for_stmt (stmt_info, &vectorization_factor,
372 &mask_producers))
373 return false;
374 }
375 }
376
377 /* TODO: Analyze cost. Decide if it is worthwhile to vectorize. */
378 if (dump_enabled_p ())
379 {
380 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
381 dump_dec (MSG_NOTE, vectorization_factor);
382 dump_printf (MSG_NOTE, "\n");
383 }
384
385 if (known_le (vectorization_factor, 1U))
386 {
387 if (dump_enabled_p ())
388 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
389 "not vectorized: unsupported data-type\n");
390 return false;
391 }
392 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
393
394 for (i = 0; i < mask_producers.length (); i++)
395 {
396 stmt_info = mask_producers[i];
397 tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
398 if (!mask_type)
399 return false;
400 STMT_VINFO_VECTYPE (stmt_info) = mask_type;
401 }
402
403 return true;
404 }
405
406
407 /* Function vect_is_simple_iv_evolution.
408
409 FORNOW: A simple evolution of an induction variable in the loop is
410 considered a polynomial evolution. */
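/* Illustrative example: for the canonical induction variable in

     for (i = 0; i < N; i++)
       ...

   the scalar evolution of 'i' is the chrec {0, +, 1}_loop, so *INIT is
   0 and *STEP is 1.  A step that is itself a chrec (an evolution of
   degree >= 2) is rejected by the checks below.  */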
411
412 static bool
413 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
414 tree * step)
415 {
416 tree init_expr;
417 tree step_expr;
418 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
419 basic_block bb;
420
421 /* When there is no evolution in this loop, the evolution function
422 is not "simple". */
423 if (evolution_part == NULL_TREE)
424 return false;
425
426 /* When the evolution is a polynomial of degree >= 2
427 the evolution function is not "simple". */
428 if (tree_is_chrec (evolution_part))
429 return false;
430
431 step_expr = evolution_part;
432 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
433
434 if (dump_enabled_p ())
435 {
436 dump_printf_loc (MSG_NOTE, vect_location, "step: ");
437 dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
438 dump_printf (MSG_NOTE, ", init: ");
439 dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
440 dump_printf (MSG_NOTE, "\n");
441 }
442
443 *init = init_expr;
444 *step = step_expr;
445
446 if (TREE_CODE (step_expr) != INTEGER_CST
447 && (TREE_CODE (step_expr) != SSA_NAME
448 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
449 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
450 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
451 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
452 || !flag_associative_math)))
453 && (TREE_CODE (step_expr) != REAL_CST
454 || !flag_associative_math))
455 {
456 if (dump_enabled_p ())
457 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
458 "step unknown.\n");
459 return false;
460 }
461
462 return true;
463 }
464
465 /* Function vect_analyze_scalar_cycles_1.
466
467 Examine the cross iteration def-use cycles of scalar variables
468 in LOOP. LOOP_VINFO represents the loop that is now being
469 considered for vectorization (can be LOOP, or an outer-loop
470 enclosing LOOP). */
471
472 static void
473 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
474 {
475 basic_block bb = loop->header;
476 tree init, step;
477 auto_vec<stmt_vec_info, 64> worklist;
478 gphi_iterator gsi;
479 bool double_reduc;
480
481 DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
482
483 /* First - identify all inductions. Reduction detection assumes that all the
484 inductions have been identified; therefore, this order must not be
485 changed. */
486 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
487 {
488 gphi *phi = gsi.phi ();
489 tree access_fn = NULL;
490 tree def = PHI_RESULT (phi);
491 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);
492
493 if (dump_enabled_p ())
494 {
495 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
496 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
497 }
498
499 /* Skip virtual phis. The data dependences that are associated with
500 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
501 if (virtual_operand_p (def))
502 continue;
503
504 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
505
506 /* Analyze the evolution function. */
507 access_fn = analyze_scalar_evolution (loop, def);
508 if (access_fn)
509 {
510 STRIP_NOPS (access_fn);
511 if (dump_enabled_p ())
512 {
513 dump_printf_loc (MSG_NOTE, vect_location,
514 "Access function of PHI: ");
515 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
516 dump_printf (MSG_NOTE, "\n");
517 }
518 STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
519 = initial_condition_in_loop_num (access_fn, loop->num);
520 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
521 = evolution_part_in_loop_num (access_fn, loop->num);
522 }
523
524 if (!access_fn
525 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
526 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
527 && TREE_CODE (step) != INTEGER_CST))
528 {
529 worklist.safe_push (stmt_vinfo);
530 continue;
531 }
532
533 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
534 != NULL_TREE);
535 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
536
537 if (dump_enabled_p ())
538 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
539 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
540 }
541
542
543 /* Second - identify all reductions and nested cycles. */
544 while (worklist.length () > 0)
545 {
546 stmt_vec_info stmt_vinfo = worklist.pop ();
547 gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
548 tree def = PHI_RESULT (phi);
549
550 if (dump_enabled_p ())
551 {
552 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
553 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
554 }
555
556 gcc_assert (!virtual_operand_p (def)
557 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
558
559 stmt_vec_info reduc_stmt_info
560 = vect_force_simple_reduction (loop_vinfo, stmt_vinfo,
561 &double_reduc, false);
562 if (reduc_stmt_info)
563 {
564 if (double_reduc)
565 {
566 if (dump_enabled_p ())
567 dump_printf_loc (MSG_NOTE, vect_location,
568 "Detected double reduction.\n");
569
570 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
571 STMT_VINFO_DEF_TYPE (reduc_stmt_info)
572 = vect_double_reduction_def;
573 }
574 else
575 {
576 if (loop != LOOP_VINFO_LOOP (loop_vinfo))
577 {
578 if (dump_enabled_p ())
579 dump_printf_loc (MSG_NOTE, vect_location,
580 "Detected vectorizable nested cycle.\n");
581
582 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
583 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_nested_cycle;
584 }
585 else
586 {
587 if (dump_enabled_p ())
588 dump_printf_loc (MSG_NOTE, vect_location,
589 "Detected reduction.\n");
590
591 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
592 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def;
593 /* Store the reduction cycles for possible vectorization in
594 loop-aware SLP if it was not detected as a reduction
595 chain. */
596 if (! REDUC_GROUP_FIRST_ELEMENT (reduc_stmt_info))
597 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push
598 (reduc_stmt_info);
599 }
600 }
601 }
602 else
603 if (dump_enabled_p ())
604 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
605 "Unknown def-use cycle pattern.\n");
606 }
607 }
608
609
610 /* Function vect_analyze_scalar_cycles.
611
612 Examine the cross iteration def-use cycles of scalar variables, by
613 analyzing the loop-header PHIs of scalar variables. Classify each
614 cycle as one of the following: invariant, induction, reduction, unknown.
615 We do that for the loop represented by LOOP_VINFO, and also for its
616 inner-loop, if it exists.
617 Examples for scalar cycles:
618
619 Example1: reduction:
620
621 loop1:
622 for (i=0; i<N; i++)
623 sum += a[i];
624
625 Example2: induction:
626
627 loop2:
628 for (i=0; i<N; i++)
629 a[i] = i; */
630
631 static void
632 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
633 {
634 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
635
636 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
637
638 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
639 Reductions in such an inner-loop therefore have different properties than
640 the reductions in the nest that gets vectorized:
641 1. When vectorized, they are executed in the same order as in the original
642 scalar loop, so we can't change the order of computation when
643 vectorizing them.
644 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
645 current checks are too strict. */
646
647 if (loop->inner)
648 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
649 }
650
651 /* Transfer group and reduction information from STMT to its pattern stmt. */
652
653 static void
654 vect_fixup_reduc_chain (gimple *stmt)
655 {
656 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
657 stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
658 stmt_vec_info stmtp;
659 gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
660 && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
661 REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
662 do
663 {
664 stmtp = STMT_VINFO_RELATED_STMT (stmt_info);
665 REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
666 stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info);
667 if (stmt_info)
668 REDUC_GROUP_NEXT_ELEMENT (stmtp)
669 = STMT_VINFO_RELATED_STMT (stmt_info);
670 }
671 while (stmt_info);
672 STMT_VINFO_DEF_TYPE (stmtp) = vect_reduction_def;
673 }
674
675 /* Fixup scalar cycles that now have their stmts detected as patterns. */
676
677 static void
678 vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
679 {
680 stmt_vec_info first;
681 unsigned i;
682
683 FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
684 if (STMT_VINFO_IN_PATTERN_P (first))
685 {
686 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
687 while (next)
688 {
689 if (! STMT_VINFO_IN_PATTERN_P (next))
690 break;
691 next = REDUC_GROUP_NEXT_ELEMENT (next);
692 }
693 /* If not all stmts in the chain are patterns, try to handle
694 the chain without patterns. */
695 if (! next)
696 {
697 vect_fixup_reduc_chain (first);
698 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
699 = STMT_VINFO_RELATED_STMT (first);
700 }
701 }
702 }
703
704 /* Function vect_get_loop_niters.
705
706 Determine the number of iterations the loop executes and place it
707 in NUMBER_OF_ITERATIONS. Place the number of latch iterations
708 in NUMBER_OF_ITERATIONSM1. Place the condition under which the
709 niter information holds in ASSUMPTIONS.
710
711 Return the loop exit condition. */
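/* Illustrative example: for a loop whose header executes N times the
   latch executes N - 1 times, so NUMBER_OF_ITERATIONSM1 holds the niter
   analyzer's latch count and NUMBER_OF_ITERATIONS holds that value plus
   one; with N == 10 they are 9 and 10 respectively.  */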
712
713
714 static gcond *
715 vect_get_loop_niters (struct loop *loop, tree *assumptions,
716 tree *number_of_iterations, tree *number_of_iterationsm1)
717 {
718 edge exit = single_exit (loop);
719 struct tree_niter_desc niter_desc;
720 tree niter_assumptions, niter, may_be_zero;
721 gcond *cond = get_loop_exit_condition (loop);
722
723 *assumptions = boolean_true_node;
724 *number_of_iterationsm1 = chrec_dont_know;
725 *number_of_iterations = chrec_dont_know;
726 DUMP_VECT_SCOPE ("get_loop_niters");
727
728 if (!exit)
729 return cond;
730
731 niter = chrec_dont_know;
732 may_be_zero = NULL_TREE;
733 niter_assumptions = boolean_true_node;
734 if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
735 || chrec_contains_undetermined (niter_desc.niter))
736 return cond;
737
738 niter_assumptions = niter_desc.assumptions;
739 may_be_zero = niter_desc.may_be_zero;
740 niter = niter_desc.niter;
741
742 if (may_be_zero && integer_zerop (may_be_zero))
743 may_be_zero = NULL_TREE;
744
745 if (may_be_zero)
746 {
747 if (COMPARISON_CLASS_P (may_be_zero))
748 {
749 /* Try to combine may_be_zero with assumptions, this can simplify
750 computation of niter expression. */
751 if (niter_assumptions && !integer_nonzerop (niter_assumptions))
752 niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
753 niter_assumptions,
754 fold_build1 (TRUTH_NOT_EXPR,
755 boolean_type_node,
756 may_be_zero));
757 else
758 niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
759 build_int_cst (TREE_TYPE (niter), 0),
760 rewrite_to_non_trapping_overflow (niter));
761
762 may_be_zero = NULL_TREE;
763 }
764 else if (integer_nonzerop (may_be_zero))
765 {
766 *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
767 *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
768 return cond;
769 }
770 else
771 return cond;
772 }
773
774 *assumptions = niter_assumptions;
775 *number_of_iterationsm1 = niter;
776
777 /* We want the number of loop header executions which is the number
778 of latch executions plus one.
779 ??? For UINT_MAX latch executions this number overflows to zero
780 for loops like do { n++; } while (n != 0); */
781 if (niter && !chrec_contains_undetermined (niter))
782 niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
783 build_int_cst (TREE_TYPE (niter), 1));
784 *number_of_iterations = niter;
785
786 return cond;
787 }
788
789 /* Function bb_in_loop_p
790
791 Used as predicate for dfs order traversal of the loop bbs. */
792
793 static bool
794 bb_in_loop_p (const_basic_block bb, const void *data)
795 {
796 const struct loop *const loop = (const struct loop *)data;
797 if (flow_bb_inside_loop_p (loop, bb))
798 return true;
799 return false;
800 }
801
802
803 /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
804 stmt_vec_info structs for all the stmts in LOOP_IN. */
805
806 _loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
807 : vec_info (vec_info::loop, init_cost (loop_in), shared),
808 loop (loop_in),
809 bbs (XCNEWVEC (basic_block, loop->num_nodes)),
810 num_itersm1 (NULL_TREE),
811 num_iters (NULL_TREE),
812 num_iters_unchanged (NULL_TREE),
813 num_iters_assumptions (NULL_TREE),
814 th (0),
815 versioning_threshold (0),
816 vectorization_factor (0),
817 max_vectorization_factor (0),
818 mask_skip_niters (NULL_TREE),
819 mask_compare_type (NULL_TREE),
820 unaligned_dr (NULL),
821 peeling_for_alignment (0),
822 ptr_mask (0),
823 ivexpr_map (NULL),
824 slp_unrolling_factor (1),
825 single_scalar_iteration_cost (0),
826 vectorizable (false),
827 can_fully_mask_p (true),
828 fully_masked_p (false),
829 peeling_for_gaps (false),
830 peeling_for_niter (false),
831 operands_swapped (false),
832 no_data_dependencies (false),
833 has_mask_store (false),
834 scalar_loop (NULL),
835 orig_loop_info (NULL)
836 {
837 /* Create/Update stmt_info for all stmts in the loop. */
838 basic_block *body = get_loop_body (loop);
839 for (unsigned int i = 0; i < loop->num_nodes; i++)
840 {
841 basic_block bb = body[i];
842 gimple_stmt_iterator si;
843
844 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
845 {
846 gimple *phi = gsi_stmt (si);
847 gimple_set_uid (phi, 0);
848 add_stmt (phi);
849 }
850
851 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
852 {
853 gimple *stmt = gsi_stmt (si);
854 gimple_set_uid (stmt, 0);
855 add_stmt (stmt);
856 }
857 }
858 free (body);
859
860 /* CHECKME: We want to visit all BBs before their successors (except for
861 latch blocks, for which this assertion wouldn't hold). In the simple
862 case of the loop forms we allow, a dfs order of the BBs would be the same
863 as reversed postorder traversal, so we are safe. */
864
865 unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
866 bbs, loop->num_nodes, loop);
867 gcc_assert (nbbs == loop->num_nodes);
868 }
869
870 /* Free all levels of MASKS. */
871
872 void
873 release_vec_loop_masks (vec_loop_masks *masks)
874 {
875 rgroup_masks *rgm;
876 unsigned int i;
877 FOR_EACH_VEC_ELT (*masks, i, rgm)
878 rgm->masks.release ();
879 masks->release ();
880 }
881
882 /* Free all memory used by the _loop_vec_info, as well as all the
883 stmt_vec_info structs of all the stmts in the loop. */
884
885 _loop_vec_info::~_loop_vec_info ()
886 {
887 int nbbs;
888 gimple_stmt_iterator si;
889 int j;
890
891 /* ??? We're releasing loop_vinfos en bloc. */
892 set_stmt_vec_info_vec (&stmt_vec_infos);
893 nbbs = loop->num_nodes;
894 for (j = 0; j < nbbs; j++)
895 {
896 basic_block bb = bbs[j];
897 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
898 free_stmt_vec_info (gsi_stmt (si));
899
900 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
901 {
902 gimple *stmt = gsi_stmt (si);
903
904 /* We may have broken canonical form by moving a constant
905 into RHS1 of a commutative op. Fix such occurrences. */
906 if (operands_swapped && is_gimple_assign (stmt))
907 {
908 enum tree_code code = gimple_assign_rhs_code (stmt);
909
910 if ((code == PLUS_EXPR
911 || code == POINTER_PLUS_EXPR
912 || code == MULT_EXPR)
913 && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
914 swap_ssa_operands (stmt,
915 gimple_assign_rhs1_ptr (stmt),
916 gimple_assign_rhs2_ptr (stmt));
917 else if (code == COND_EXPR
918 && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
919 {
920 tree cond_expr = gimple_assign_rhs1 (stmt);
921 enum tree_code cond_code = TREE_CODE (cond_expr);
922
923 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
924 {
925 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
926 0));
927 cond_code = invert_tree_comparison (cond_code,
928 honor_nans);
929 if (cond_code != ERROR_MARK)
930 {
931 TREE_SET_CODE (cond_expr, cond_code);
932 swap_ssa_operands (stmt,
933 gimple_assign_rhs2_ptr (stmt),
934 gimple_assign_rhs3_ptr (stmt));
935 }
936 }
937 }
938 }
939
940 /* Free stmt_vec_info. */
941 free_stmt_vec_info (stmt);
942 gsi_next (&si);
943 }
944 }
945
946 free (bbs);
947
948 release_vec_loop_masks (&masks);
949 delete ivexpr_map;
950
951 loop->aux = NULL;
952 }
953
954 /* Return an invariant or register for EXPR and emit necessary
955 computations in the LOOP_VINFO loop preheader. */
956
957 tree
958 cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
959 {
960 if (is_gimple_reg (expr)
961 || is_gimple_min_invariant (expr))
962 return expr;
963
964 if (! loop_vinfo->ivexpr_map)
965 loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
966 tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
967 if (! cached)
968 {
969 gimple_seq stmts = NULL;
970 cached = force_gimple_operand (unshare_expr (expr),
971 &stmts, true, NULL_TREE);
972 if (stmts)
973 {
974 edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
975 gsi_insert_seq_on_edge_immediate (e, stmts);
976 }
977 }
978 return cached;
979 }
980
981 /* Return true if we can use CMP_TYPE as the comparison type to produce
982 all masks required to mask LOOP_VINFO. */
983
984 static bool
985 can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
986 {
987 rgroup_masks *rgm;
988 unsigned int i;
989 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
990 if (rgm->mask_type != NULL_TREE
991 && !direct_internal_fn_supported_p (IFN_WHILE_ULT,
992 cmp_type, rgm->mask_type,
993 OPTIMIZE_FOR_SPEED))
994 return false;
995 return true;
996 }
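/* Informal note: IFN_WHILE_ULT (START, END, MASK) sets element I of the
   result mask iff START + I < END, so the check above asks whether the
   target can compare a CMP_TYPE counter against the loop bound and
   produce every required rgroup mask type directly.  */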
997
998 /* Calculate the maximum number of scalars per iteration over all the
999 rgroups in LOOP_VINFO. */
1000
1001 static unsigned int
1002 vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
1003 {
1004 unsigned int res = 1;
1005 unsigned int i;
1006 rgroup_masks *rgm;
1007 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
1008 res = MAX (res, rgm->max_nscalars_per_iter);
1009 return res;
1010 }
1011
1012 /* Each statement in LOOP_VINFO can be masked where necessary. Check
1013 whether we can actually generate the masks required. Return true if so,
1014 storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. */
1015
1016 static bool
1017 vect_verify_full_masking (loop_vec_info loop_vinfo)
1018 {
1019 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1020 unsigned int min_ni_width;
1021
1022 /* Use a normal loop if there are no statements that need masking.
1023 This only happens in rare degenerate cases: it means that the loop
1024 has no loads, no stores, and no live-out values. */
1025 if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
1026 return false;
1027
1028 /* Get the maximum number of iterations that is representable
1029 in the counter type. */
1030 tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
1031 widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
1032
1033 /* Get a more refined estimate for the number of iterations. */
1034 widest_int max_back_edges;
1035 if (max_loop_iterations (loop, &max_back_edges))
1036 max_ni = wi::smin (max_ni, max_back_edges + 1);
1037
1038 /* Account for rgroup masks, in which each bit is replicated N times. */
1039 max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);
1040
1041 /* Work out how many bits we need to represent the limit. */
1042 min_ni_width = wi::min_precision (max_ni, UNSIGNED);
1043
1044 /* Find a scalar mode for which WHILE_ULT is supported. */
1045 opt_scalar_int_mode cmp_mode_iter;
1046 tree cmp_type = NULL_TREE;
1047 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
1048 {
1049 unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
1050 if (cmp_bits >= min_ni_width
1051 && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
1052 {
1053 tree this_type = build_nonstandard_integer_type (cmp_bits, true);
1054 if (this_type
1055 && can_produce_all_loop_masks_p (loop_vinfo, this_type))
1056 {
1057 /* Although we could stop as soon as we find a valid mode,
1058 it's often better to continue until we hit Pmode, since the
1059 operands to the WHILE are more likely to be reusable in
1060 address calculations. */
1061 cmp_type = this_type;
1062 if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
1063 break;
1064 }
1065 }
1066 }
1067
1068 if (!cmp_type)
1069 return false;
1070
1071 LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
1072 return true;
1073 }
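/* Worked example with illustrative numbers: if the loop executes at
   most 999 latch iterations, max_ni is 1000; if the largest rgroup
   needs 2 scalars per iteration, the value to represent is 2000, which
   needs 11 bits.  Any integer mode of at least 11 bits that can produce
   all the required masks via WHILE_ULT is then usable, and the search
   continues up to Pmode so that the comparison type is more likely to
   be reusable in address calculations.  */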
1074
1075 /* Calculate the cost of one scalar iteration of the loop. */
1076 static void
1077 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
1078 {
1079 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1080 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1081 int nbbs = loop->num_nodes, factor;
1082 int innerloop_iters, i;
1083
1084 /* Gather costs for statements in the scalar loop. */
1085
1086 /* FORNOW. */
1087 innerloop_iters = 1;
1088 if (loop->inner)
1089 innerloop_iters = 50; /* FIXME */
1090
1091 for (i = 0; i < nbbs; i++)
1092 {
1093 gimple_stmt_iterator si;
1094 basic_block bb = bbs[i];
1095
1096 if (bb->loop_father == loop->inner)
1097 factor = innerloop_iters;
1098 else
1099 factor = 1;
1100
1101 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1102 {
1103 gimple *stmt = gsi_stmt (si);
1104 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
1105
1106 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
1107 continue;
1108
1109 /* Skip stmts that are not vectorized inside the loop. */
1110 if (stmt_info
1111 && !STMT_VINFO_RELEVANT_P (stmt_info)
1112 && (!STMT_VINFO_LIVE_P (stmt_info)
1113 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1114 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
1115 continue;
1116
1117 vect_cost_for_stmt kind;
1118 if (STMT_VINFO_DATA_REF (stmt_info))
1119 {
1120 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
1121 kind = scalar_load;
1122 else
1123 kind = scalar_store;
1124 }
1125 else
1126 kind = scalar_stmt;
1127
1128 record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1129 factor, kind, stmt_info, 0, vect_prologue);
1130 }
1131 }
1132
1133 /* Now accumulate cost. */
1134 void *target_cost_data = init_cost (loop);
1135 stmt_info_for_cost *si;
1136 int j;
1137 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1138 j, si)
1139 (void) add_stmt_cost (target_cost_data, si->count,
1140 si->kind, si->stmt_info, si->misalign,
1141 vect_body);
1142 unsigned dummy, body_cost = 0;
1143 finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
1144 destroy_cost_data (target_cost_data);
1145 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
1146 }
1147
1148
1149 /* Function vect_analyze_loop_form_1.
1150
1151 Verify that certain CFG restrictions hold, including:
1152 - the loop has a pre-header
1153 - the loop has a single entry and exit
1154 - the loop exit condition is simple enough
1155 - the number of iterations can be analyzed, i.e., a countable loop. The
1156 niter could be analyzed under some assumptions. */
1157
1158 bool
1159 vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
1160 tree *assumptions, tree *number_of_iterationsm1,
1161 tree *number_of_iterations, gcond **inner_loop_cond)
1162 {
1163 DUMP_VECT_SCOPE ("vect_analyze_loop_form");
1164
1165 /* Different restrictions apply when we are considering an inner-most loop,
1166 vs. an outer (nested) loop.
1167 (FORNOW. May want to relax some of these restrictions in the future). */
1168
1169 if (!loop->inner)
1170 {
1171 /* Inner-most loop. We currently require that the number of BBs is
1172 exactly 2 (the header and latch). Vectorizable inner-most loops
1173 look like this:
1174
1175 (pre-header)
1176 |
1177 header <--------+
1178 | | |
1179 | +--> latch --+
1180 |
1181 (exit-bb) */
1182
1183 if (loop->num_nodes != 2)
1184 {
1185 if (dump_enabled_p ())
1186 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1187 "not vectorized: control flow in loop.\n");
1188 return false;
1189 }
1190
1191 if (empty_block_p (loop->header))
1192 {
1193 if (dump_enabled_p ())
1194 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1195 "not vectorized: empty loop.\n");
1196 return false;
1197 }
1198 }
1199 else
1200 {
1201 struct loop *innerloop = loop->inner;
1202 edge entryedge;
1203
1204 /* Nested loop. We currently require that the loop is doubly-nested,
1205 contains a single inner loop, and the number of BBs is exactly 5.
1206 Vectorizable outer-loops look like this:
1207
1208 (pre-header)
1209 |
1210 header <---+
1211 | |
1212 inner-loop |
1213 | |
1214 tail ------+
1215 |
1216 (exit-bb)
1217
1218 The inner-loop has the properties expected of inner-most loops
1219 as described above. */
1220
1221 if ((loop->inner)->inner || (loop->inner)->next)
1222 {
1223 if (dump_enabled_p ())
1224 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1225 "not vectorized: multiple nested loops.\n");
1226 return false;
1227 }
1228
1229 if (loop->num_nodes != 5)
1230 {
1231 if (dump_enabled_p ())
1232 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1233 "not vectorized: control flow in loop.\n");
1234 return false;
1235 }
1236
1237 entryedge = loop_preheader_edge (innerloop);
1238 if (entryedge->src != loop->header
1239 || !single_exit (innerloop)
1240 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1241 {
1242 if (dump_enabled_p ())
1243 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1244 "not vectorized: unsupported outerloop form.\n");
1245 return false;
1246 }
1247
1248 /* Analyze the inner-loop. */
1249 tree inner_niterm1, inner_niter, inner_assumptions;
1250 if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
1251 &inner_assumptions, &inner_niterm1,
1252 &inner_niter, NULL)
1253 /* Don't support analyzing niter under assumptions for inner
1254 loop. */
1255 || !integer_onep (inner_assumptions))
1256 {
1257 if (dump_enabled_p ())
1258 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1259 "not vectorized: Bad inner loop.\n");
1260 return false;
1261 }
1262
1263 if (!expr_invariant_in_loop_p (loop, inner_niter))
1264 {
1265 if (dump_enabled_p ())
1266 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1267 "not vectorized: inner-loop count not"
1268 " invariant.\n");
1269 return false;
1270 }
1271
1272 if (dump_enabled_p ())
1273 dump_printf_loc (MSG_NOTE, vect_location,
1274 "Considering outer-loop vectorization.\n");
1275 }
1276
1277 if (!single_exit (loop)
1278 || EDGE_COUNT (loop->header->preds) != 2)
1279 {
1280 if (dump_enabled_p ())
1281 {
1282 if (!single_exit (loop))
1283 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1284 "not vectorized: multiple exits.\n");
1285 else if (EDGE_COUNT (loop->header->preds) != 2)
1286 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1287 "not vectorized: too many incoming edges.\n");
1288 }
1289 return false;
1290 }
1291
1292 /* We assume that the loop exit condition is at the end of the loop, i.e.,
1293 that the loop is represented as a do-while (with a proper if-guard
1294 before the loop if needed), where the loop header contains all the
1295 executable statements, and the latch is empty. */
1296 if (!empty_block_p (loop->latch)
1297 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1298 {
1299 if (dump_enabled_p ())
1300 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1301 "not vectorized: latch block not empty.\n");
1302 return false;
1303 }
1304
1305 /* Make sure the exit is not abnormal. */
1306 edge e = single_exit (loop);
1307 if (e->flags & EDGE_ABNORMAL)
1308 {
1309 if (dump_enabled_p ())
1310 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1311 "not vectorized: abnormal loop exit edge.\n");
1312 return false;
1313 }
1314
1315 *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
1316 number_of_iterationsm1);
1317 if (!*loop_cond)
1318 {
1319 if (dump_enabled_p ())
1320 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1321 "not vectorized: complicated exit condition.\n");
1322 return false;
1323 }
1324
1325 if (integer_zerop (*assumptions)
1326 || !*number_of_iterations
1327 || chrec_contains_undetermined (*number_of_iterations))
1328 {
1329 if (dump_enabled_p ())
1330 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1331 "not vectorized: number of iterations cannot be "
1332 "computed.\n");
1333 return false;
1334 }
1335
1336 if (integer_zerop (*number_of_iterations))
1337 {
1338 if (dump_enabled_p ())
1339 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1340 "not vectorized: number of iterations = 0.\n");
1341 return false;
1342 }
1343
1344 return true;
1345 }
1346
1347 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1348
1349 loop_vec_info
1350 vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
1351 {
1352 tree assumptions, number_of_iterations, number_of_iterationsm1;
1353 gcond *loop_cond, *inner_loop_cond = NULL;
1354
1355 if (! vect_analyze_loop_form_1 (loop, &loop_cond,
1356 &assumptions, &number_of_iterationsm1,
1357 &number_of_iterations, &inner_loop_cond))
1358 return NULL;
1359
1360 loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
1361 LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
1362 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1363 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1364 if (!integer_onep (assumptions))
1365 {
1366 /* We consider vectorizing this loop by versioning it under
1367 some assumptions. In order to do this, we need to clear
1368 existing information computed by scev and niter analyzer. */
1369 scev_reset_htab ();
1370 free_numbers_of_iterations_estimates (loop);
1371 /* Also set flag for this loop so that following scev and niter
1372 analysis are done under the assumptions. */
1373 loop_constraint_set (loop, LOOP_C_FINITE);
1374 /* Also record the assumptions for versioning. */
1375 LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
1376 }
1377
1378 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1379 {
1380 if (dump_enabled_p ())
1381 {
1382 dump_printf_loc (MSG_NOTE, vect_location,
1383 "Symbolic number of iterations is ");
1384 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1385 dump_printf (MSG_NOTE, "\n");
1386 }
1387 }
1388
1389 stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
1390 STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
1391 if (inner_loop_cond)
1392 {
1393 stmt_vec_info inner_loop_cond_info
1394 = loop_vinfo->lookup_stmt (inner_loop_cond);
1395 STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
1396 }
1397
1398 gcc_assert (!loop->aux);
1399 loop->aux = loop_vinfo;
1400 return loop_vinfo;
1401 }
1402
1403
1404
1405 /* Scan the loop stmts and, depending on whether there are any (non-)SLP
1406 statements, update the vectorization factor. */
1407
1408 static void
1409 vect_update_vf_for_slp (loop_vec_info loop_vinfo)
1410 {
1411 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1412 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1413 int nbbs = loop->num_nodes;
1414 poly_uint64 vectorization_factor;
1415 int i;
1416
1417 DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
1418
1419 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1420 gcc_assert (known_ne (vectorization_factor, 0U));
1421
1422 /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
1423 vectorization factor of the loop is the unrolling factor required by
1424 the SLP instances. If that unrolling factor is 1, we say that we
1425 perform pure SLP on the loop - cross-iteration parallelism is not
1426 exploited. */
1427 bool only_slp_in_loop = true;
1428 for (i = 0; i < nbbs; i++)
1429 {
1430 basic_block bb = bbs[i];
1431 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1432 gsi_next (&si))
1433 {
1434 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
1435 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
1436 && STMT_VINFO_RELATED_STMT (stmt_info))
1437 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
1438 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1439 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1440 && !PURE_SLP_STMT (stmt_info))
1441 /* STMT needs both SLP and loop-based vectorization. */
1442 only_slp_in_loop = false;
1443 }
1444 }
1445
1446 if (only_slp_in_loop)
1447 {
1448 dump_printf_loc (MSG_NOTE, vect_location,
1449 "Loop contains only SLP stmts\n");
1450 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1451 }
1452 else
1453 {
1454 dump_printf_loc (MSG_NOTE, vect_location,
1455 "Loop contains SLP and non-SLP stmts\n");
1456 /* Both the vectorization factor and unroll factor have the form
1457 current_vector_size * X for some rational X, so they must have
1458 a common multiple. */
1459 vectorization_factor
1460 = force_common_multiple (vectorization_factor,
1461 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1462 }
1463
1464 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1465 if (dump_enabled_p ())
1466 {
1467 dump_printf_loc (MSG_NOTE, vect_location,
1468 "Updating vectorization factor to ");
1469 dump_dec (MSG_NOTE, vectorization_factor);
1470 dump_printf (MSG_NOTE, ".\n");
1471 }
1472 }
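/* Illustrative example for the force_common_multiple step above: if
   loop-based analysis chose a vectorization factor of 4 and the SLP
   instances require an unrolling factor of 6, the result is 12, the
   smallest factor that satisfies both forms of vectorization.  */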
1473
1474 /* Return true if STMT_INFO describes a double reduction phi and if
1475 the other phi in the reduction is also relevant for vectorization.
1476 This rejects cases such as:
1477
1478 outer1:
1479 x_1 = PHI <x_3(outer2), ...>;
1480 ...
1481
1482 inner:
1483 x_2 = ...;
1484 ...
1485
1486 outer2:
1487 x_3 = PHI <x_2(inner)>;
1488
1489 if nothing in x_2 or elsewhere makes x_1 relevant. */
1490
1491 static bool
1492 vect_active_double_reduction_p (stmt_vec_info stmt_info)
1493 {
1494 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
1495 return false;
1496
1497 return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info));
1498 }
1499
1500 /* Function vect_analyze_loop_operations.
1501
1502 Scan the loop stmts and make sure they are all vectorizable. */
1503
1504 static bool
1505 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1506 {
1507 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1508 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1509 int nbbs = loop->num_nodes;
1510 int i;
1511 stmt_vec_info stmt_info;
1512 bool need_to_vectorize = false;
1513 bool ok;
1514
1515 DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
1516
1517 stmt_vector_for_cost cost_vec;
1518 cost_vec.create (2);
1519
1520 for (i = 0; i < nbbs; i++)
1521 {
1522 basic_block bb = bbs[i];
1523
1524 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1525 gsi_next (&si))
1526 {
1527 gphi *phi = si.phi ();
1528 ok = true;
1529
1530 stmt_info = loop_vinfo->lookup_stmt (phi);
1531 if (dump_enabled_p ())
1532 {
1533 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
1534 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1535 }
1536 if (virtual_operand_p (gimple_phi_result (phi)))
1537 continue;
1538
1539 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1540 (i.e., a phi in the tail of the outer-loop). */
1541 if (! is_loop_header_bb_p (bb))
1542 {
1543 /* FORNOW: we currently don't support the case that these phis
1544 are not used in the outer loop (unless it is a double reduction,
1545 i.e., this phi is vect_reduction_def), because this case
1546 requires us to actually do something here. */
1547 if (STMT_VINFO_LIVE_P (stmt_info)
1548 && !vect_active_double_reduction_p (stmt_info))
1549 {
1550 if (dump_enabled_p ())
1551 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1552 "Unsupported loop-closed phi in "
1553 "outer-loop.\n");
1554 return false;
1555 }
1556
1557 /* If PHI is used in the outer loop, we check that its operand
1558 is defined in the inner loop. */
1559 if (STMT_VINFO_RELEVANT_P (stmt_info))
1560 {
1561 tree phi_op;
1562
1563 if (gimple_phi_num_args (phi) != 1)
1564 return false;
1565
1566 phi_op = PHI_ARG_DEF (phi, 0);
1567 stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
1568 if (!op_def_info)
1569 return false;
1570
1571 if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
1572 && (STMT_VINFO_RELEVANT (op_def_info)
1573 != vect_used_in_outer_by_reduction))
1574 return false;
1575 }
1576
1577 continue;
1578 }
1579
1580 gcc_assert (stmt_info);
1581
1582 if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1583 || STMT_VINFO_LIVE_P (stmt_info))
1584 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1585 {
1586 /* A scalar-dependence cycle that we don't support. */
1587 if (dump_enabled_p ())
1588 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1589 "not vectorized: scalar dependence cycle.\n");
1590 return false;
1591 }
1592
1593 if (STMT_VINFO_RELEVANT_P (stmt_info))
1594 {
1595 need_to_vectorize = true;
1596 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
1597 && ! PURE_SLP_STMT (stmt_info))
1598 ok = vectorizable_induction (stmt_info, NULL, NULL, NULL,
1599 &cost_vec);
1600 else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
1601 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
1602 && ! PURE_SLP_STMT (stmt_info))
1603 ok = vectorizable_reduction (stmt_info, NULL, NULL, NULL, NULL,
1604 &cost_vec);
1605 }
1606
1607 /* SLP PHIs are tested by vect_slp_analyze_node_operations. */
1608 if (ok
1609 && STMT_VINFO_LIVE_P (stmt_info)
1610 && !PURE_SLP_STMT (stmt_info))
1611 ok = vectorizable_live_operation (stmt_info, NULL, NULL, -1, NULL,
1612 &cost_vec);
1613
1614 if (!ok)
1615 {
1616 if (dump_enabled_p ())
1617 {
1618 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1619 "not vectorized: relevant phi not "
1620 "supported: ");
1621 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
1622 }
1623 return false;
1624 }
1625 }
1626
1627 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1628 gsi_next (&si))
1629 {
1630 gimple *stmt = gsi_stmt (si);
1631 if (!gimple_clobber_p (stmt)
1632 && !vect_analyze_stmt (loop_vinfo->lookup_stmt (stmt),
1633 &need_to_vectorize,
1634 NULL, NULL, &cost_vec))
1635 return false;
1636 }
1637 } /* bbs */
1638
1639 add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec);
1640 cost_vec.release ();
1641
1642 /* All operations in the loop are either irrelevant (deal with loop
1643 control, or dead), or only used outside the loop and can be moved
1644 out of the loop (e.g. invariants, inductions). The loop can be
1645 optimized away by scalar optimizations. We're better off not
1646 touching this loop. */
1647 if (!need_to_vectorize)
1648 {
1649 if (dump_enabled_p ())
1650 dump_printf_loc (MSG_NOTE, vect_location,
1651 "All the computation can be taken out of the loop.\n");
1652 if (dump_enabled_p ())
1653 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1654 "not vectorized: redundant loop. no profit to "
1655 "vectorize.\n");
1656 return false;
1657 }
1658
1659 return true;
1660 }
1661
1662 /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
1663 is worthwhile to vectorize. Return 1 if definitely yes, 0 if
1664 definitely no, or -1 if it's worth retrying. */
1665
1666 static int
1667 vect_analyze_loop_costing (loop_vec_info loop_vinfo)
1668 {
1669 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1670 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
1671
1672 /* Only fully-masked loops can have iteration counts less than the
1673 vectorization factor. */
1674 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
1675 {
1676 HOST_WIDE_INT max_niter;
1677
1678 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1679 max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
1680 else
1681 max_niter = max_stmt_executions_int (loop);
1682
1683 if (max_niter != -1
1684 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
1685 {
1686 if (dump_enabled_p ())
1687 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1688 "not vectorized: iteration count smaller than "
1689 "vectorization factor.\n");
1690 return 0;
1691 }
1692 }
1693
1694 int min_profitable_iters, min_profitable_estimate;
1695 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1696 &min_profitable_estimate);
1697
1698 if (min_profitable_iters < 0)
1699 {
1700 if (dump_enabled_p ())
1701 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1702 "not vectorized: vectorization not profitable.\n");
1703 if (dump_enabled_p ())
1704 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1705 "not vectorized: vector version will never be "
1706 "profitable.\n");
1707 return -1;
1708 }
1709
1710 int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1711 * assumed_vf);
1712
1713 /* Use the cost model only if it is more conservative than the user-specified
1714 threshold. */
1715 unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
1716 min_profitable_iters);
1717
1718 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
1719
1720 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1721 && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
1722 {
1723 if (dump_enabled_p ())
1724 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1725 "not vectorized: vectorization not profitable.\n");
1726 if (dump_enabled_p ())
1727 dump_printf_loc (MSG_NOTE, vect_location,
1728 "not vectorized: iteration count smaller than user "
1729 "specified loop bound parameter or minimum profitable "
1730 "iterations (whichever is more conservative).\n");
1731 return 0;
1732 }
1733
1734 HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
1735 if (estimated_niter == -1)
1736 estimated_niter = likely_max_stmt_executions_int (loop);
1737 if (estimated_niter != -1
1738 && ((unsigned HOST_WIDE_INT) estimated_niter
1739 < MAX (th, (unsigned) min_profitable_estimate)))
1740 {
1741 if (dump_enabled_p ())
1742 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1743 "not vectorized: estimated iteration count too "
1744 "small.\n");
1745 if (dump_enabled_p ())
1746 dump_printf_loc (MSG_NOTE, vect_location,
1747 "not vectorized: estimated iteration count smaller "
1748 "than specified loop bound parameter or minimum "
1749 "profitable iterations (whichever is more "
1750 "conservative).\n");
1751 return -1;
1752 }
1753
1754 return 1;
1755 }
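/* Worked example with illustrative numbers for the threshold above:
   with an assumed VF of 4, --param min-vect-loop-bound=2 and a computed
   min_profitable_iters of 7, TH is MAX (2 * 4, 7) = 8, so a loop known
   to execute fewer than 8 iterations is rejected as unprofitable.  */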
1756
1757 static bool
1758 vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
1759 vec<data_reference_p> *datarefs,
1760 unsigned int *n_stmts)
1761 {
1762 *n_stmts = 0;
1763 for (unsigned i = 0; i < loop->num_nodes; i++)
1764 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
1765 !gsi_end_p (gsi); gsi_next (&gsi))
1766 {
1767 gimple *stmt = gsi_stmt (gsi);
1768 if (is_gimple_debug (stmt))
1769 continue;
1770 ++(*n_stmts);
1771 if (!vect_find_stmt_data_reference (loop, stmt, datarefs))
1772 {
1773 if (is_gimple_call (stmt) && loop->safelen)
1774 {
1775 tree fndecl = gimple_call_fndecl (stmt), op;
1776 if (fndecl != NULL_TREE)
1777 {
1778 cgraph_node *node = cgraph_node::get (fndecl);
1779 if (node != NULL && node->simd_clones != NULL)
1780 {
1781 unsigned int j, n = gimple_call_num_args (stmt);
1782 for (j = 0; j < n; j++)
1783 {
1784 op = gimple_call_arg (stmt, j);
1785 if (DECL_P (op)
1786 || (REFERENCE_CLASS_P (op)
1787 && get_base_address (op)))
1788 break;
1789 }
1790 op = gimple_call_lhs (stmt);
1791 /* Ignore #pragma omp declare simd functions
1792 if they don't have data references in the
1793 call stmt itself. */
1794 if (j == n
1795 && !(op
1796 && (DECL_P (op)
1797 || (REFERENCE_CLASS_P (op)
1798 && get_base_address (op)))))
1799 continue;
1800 }
1801 }
1802 }
1803 return false;
1804 }
1805 /* If dependence analysis will give up due to the limit on the
1806 number of datarefs, stop here and fail fatally. */
1807 if (datarefs->length ()
1808 > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
1809 return false;
1810 }
1811 return true;
1812 }
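/* A sketch of the call exception handled above (the function f is purely
   illustrative):

     #pragma omp declare simd
     extern int f (int);

     #pragma omp simd
     for (i = 0; i < n; i++)
       a[i] = f (b[i]);

   The gimple call to f carries no data reference of its own (its argument
   and result are SSA names), so with loop->safelen set and SIMD clones
   available it is skipped here, while the loads from b[i] and stores to
   a[i] are still recorded as datarefs from the surrounding statements.  */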
1813
1814 /* Function vect_analyze_loop_2.
1815
1816 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1817 for it. The different analyses will record information in the
1818 loop_vec_info struct. */
1819 static bool
1820 vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts)
1821 {
1822 bool ok;
1823 int res;
1824 unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
1825 poly_uint64 min_vf = 2;
1826
1827 /* The first group of checks is independent of the vector size. */
1828 fatal = true;
1829
1830 /* Find all data references in the loop (which correspond to vdefs/vuses)
1831 and analyze their evolution in the loop. */
1832
1833 loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
1834
1835 /* Gather the data references and count stmts in the loop. */
1836 if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ())
1837 {
1838 if (!vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo),
1839 &LOOP_VINFO_DATAREFS (loop_vinfo),
1840 n_stmts))
1841 {
1842 if (dump_enabled_p ())
1843 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1844 "not vectorized: loop contains function "
1845 "calls or data references that cannot "
1846 "be analyzed\n");
1847 return false;
1848 }
1849 loop_vinfo->shared->save_datarefs ();
1850 }
1851 else
1852 loop_vinfo->shared->check_datarefs ();
1853
1854 /* Analyze the data references and also adjust the minimal
1855 vectorization factor according to the loads and stores. */
1856
1857 ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
1858 if (!ok)
1859 {
1860 if (dump_enabled_p ())
1861 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1862 "bad data references.\n");
1863 return false;
1864 }
1865
1866 /* Classify all cross-iteration scalar data-flow cycles.
1867 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1868 vect_analyze_scalar_cycles (loop_vinfo);
1869
1870 vect_pattern_recog (loop_vinfo);
1871
1872 vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
1873
1874 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1875 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1876
1877 ok = vect_analyze_data_ref_accesses (loop_vinfo);
1878 if (!ok)
1879 {
1880 if (dump_enabled_p ())
1881 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1882 "bad data access.\n");
1883 return false;
1884 }
1885
1886 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1887
1888 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1889 if (!ok)
1890 {
1891 if (dump_enabled_p ())
1892 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1893 "unexpected pattern.\n");
1894 return false;
1895 }
1896
1897 /* The rest of the analysis below depends on the vector size in some way, so failures are no longer fatal. */
1898 fatal = false;
1899
1900 /* Analyze data dependences between the data-refs in the loop
1901 and adjust the maximum vectorization factor according to
1902 the dependences.
1903 FORNOW: fail at the first data dependence that we encounter. */
1904
1905 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1906 if (!ok
1907 || (max_vf != MAX_VECTORIZATION_FACTOR
1908 && maybe_lt (max_vf, min_vf)))
1909 {
1910 if (dump_enabled_p ())
1911 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1912 "bad data dependence.\n");
1913 return false;
1914 }
1915 LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
1916
1917 ok = vect_determine_vectorization_factor (loop_vinfo);
1918 if (!ok)
1919 {
1920 if (dump_enabled_p ())
1921 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1922 "can't determine vectorization factor.\n");
1923 return false;
1924 }
1925 if (max_vf != MAX_VECTORIZATION_FACTOR
1926 && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
1927 {
1928 if (dump_enabled_p ())
1929 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1930 "bad data dependence.\n");
1931 return false;
1932 }
1933
1934 /* Compute the scalar iteration cost. */
1935 vect_compute_single_scalar_iteration_cost (loop_vinfo);
1936
1937 poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1938 unsigned th;
1939
1940 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1941 ok = vect_analyze_slp (loop_vinfo, *n_stmts);
1942 if (!ok)
1943 return false;
1944
1945 /* If there are any SLP instances mark them as pure_slp. */
1946 bool slp = vect_make_slp_decision (loop_vinfo);
1947 if (slp)
1948 {
1949 /* Find stmts that need to be both vectorized and SLPed. */
1950 vect_detect_hybrid_slp (loop_vinfo);
1951
1952 /* Update the vectorization factor based on the SLP decision. */
1953 vect_update_vf_for_slp (loop_vinfo);
1954 }
1955
1956 bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);
1957
1958 /* We don't expect to have to roll back to anything other than an empty
1959 set of rgroups. */
1960 gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
1961
1962 /* This is the point where we can re-start analysis with SLP forced off. */
1963 start_over:
1964
1965 /* Now the vectorization factor is final. */
1966 poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1967 gcc_assert (known_ne (vectorization_factor, 0U));
1968
1969 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1970 {
1971 dump_printf_loc (MSG_NOTE, vect_location,
1972 "vectorization_factor = ");
1973 dump_dec (MSG_NOTE, vectorization_factor);
1974 dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
1975 LOOP_VINFO_INT_NITERS (loop_vinfo));
1976 }
1977
1978 HOST_WIDE_INT max_niter
1979 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
1980
1981 /* Analyze the alignment of the data-refs in the loop.
1982 Fail if a data reference is found that cannot be vectorized. */
1983
1984 ok = vect_analyze_data_refs_alignment (loop_vinfo);
1985 if (!ok)
1986 {
1987 if (dump_enabled_p ())
1988 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1989 "bad data alignment.\n");
1990 return false;
1991 }
1992
1993 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1994 It is important to call pruning after vect_analyze_data_ref_accesses,
1995 since we use grouping information gathered by interleaving analysis. */
1996 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1997 if (!ok)
1998 return false;
1999
2000 /* Do not invoke vect_enhance_data_refs_alignment for epilogue
2001 vectorization. */
2002 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
2003 {
2004 /* This pass will decide on using loop versioning and/or loop peeling in
2005 order to enhance the alignment of data references in the loop. */
2006 ok = vect_enhance_data_refs_alignment (loop_vinfo);
2007 if (!ok)
2008 {
2009 if (dump_enabled_p ())
2010 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2011 "bad data alignment.\n");
2012 return false;
2013 }
2014 }
2015
2016 if (slp)
2017 {
2018 /* Analyze operations in the SLP instances. Note this may
2019 remove unsupported SLP instances, which makes the above
2020 SLP kind detection invalid. */
2021 unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
2022 vect_slp_analyze_operations (loop_vinfo);
2023 if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
2024 goto again;
2025 }
2026
2027 /* Scan all the remaining operations in the loop that are not subject
2028 to SLP and make sure they are vectorizable. */
2029 ok = vect_analyze_loop_operations (loop_vinfo);
2030 if (!ok)
2031 {
2032 if (dump_enabled_p ())
2033 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2034 "bad operation or unsupported loop bound.\n");
2035 return false;
2036 }
2037
2038 /* Decide whether to use a fully-masked loop for this vectorization
2039 factor. */
2040 LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
2041 = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
2042 && vect_verify_full_masking (loop_vinfo));
2043 if (dump_enabled_p ())
2044 {
2045 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2046 dump_printf_loc (MSG_NOTE, vect_location,
2047 "using a fully-masked loop.\n");
2048 else
2049 dump_printf_loc (MSG_NOTE, vect_location,
2050 "not using a fully-masked loop.\n");
2051 }
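/* As a rough sketch of what a fully-masked loop buys us (the lane counts
   are illustrative only): with VF = 8 and 10 scalar iterations, the vector
   loop runs twice, the first time with all 8 lanes enabled and the second
   time with a mask enabling only the remaining 2 lanes, so no scalar
   epilogue is needed and iteration counts below VF are acceptable.  */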
2052
2053 /* If an epilog loop is required because of data accesses with gaps,
2054 one additional iteration needs to be peeled. Check if there are
2055 enough iterations for vectorization. */
2056 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2057 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2058 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2059 {
2060 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2061 tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
2062
2063 if (known_lt (wi::to_widest (scalar_niters), vf))
2064 {
2065 if (dump_enabled_p ())
2066 dump_printf_loc (MSG_NOTE, vect_location,
2067 "loop does not have enough iterations to support"
2068 " peeling for gaps.\n");
2069 return false;
2070 }
2071 }
2072
2073 /* Check that the cost of the loop makes vectorizing worthwhile. */
2074 res = vect_analyze_loop_costing (loop_vinfo);
2075 if (res < 0)
2076 goto again;
2077 if (!res)
2078 {
2079 if (dump_enabled_p ())
2080 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2081 "Loop costing not worthwhile.\n");
2082 return false;
2083 }
2084
2085 /* Decide whether we need to create an epilogue loop to handle
2086 remaining scalar iterations. */
2087 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
2088
2089 unsigned HOST_WIDE_INT const_vf;
2090 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2091 /* The main loop handles all iterations. */
2092 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2093 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2094 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
2095 {
2096 if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo)
2097 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo),
2098 LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
2099 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2100 }
2101 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2102 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
2103 || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
2104 < (unsigned) exact_log2 (const_vf))
2105 /* In case of versioning, check if the maximum number of
2106 iterations is greater than th. If they are identical,
2107 the epilogue is unnecessary. */
2108 && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
2109 || ((unsigned HOST_WIDE_INT) max_niter
2110 > (th / const_vf) * const_vf))))
2111 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
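/* As a worked illustration of the decision above (numbers hypothetical):
   with a constant VF of 8, no peeling for alignment, no versioning and a
   known iteration count of 100, 100 is not a multiple of 8, so the vector
   loop executes 12 times and PEELING_FOR_NITER requests an epilogue for
   the remaining 4 scalar iterations; with 96 iterations no epilogue is
   needed.  */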
2112
2113 /* If an epilogue loop is required make sure we can create one. */
2114 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2115 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
2116 {
2117 if (dump_enabled_p ())
2118 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
2119 if (!vect_can_advance_ivs_p (loop_vinfo)
2120 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
2121 single_exit (LOOP_VINFO_LOOP
2122 (loop_vinfo))))
2123 {
2124 if (dump_enabled_p ())
2125 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2126 "not vectorized: can't create required "
2127 "epilog loop\n");
2128 goto again;
2129 }
2130 }
2131
2132 /* During peeling, we need to check if the number of loop iterations is
2133 enough for both the peeled prolog loop and the vector loop. This check
2134 can be merged with the threshold check of loop versioning, so increase
2135 the threshold for this case if necessary. */
2136 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
2137 {
2138 poly_uint64 niters_th = 0;
2139
2140 if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
2141 {
2142 /* Niters for peeled prolog loop. */
2143 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2144 {
2145 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2146 tree vectype = STMT_VINFO_VECTYPE (vect_dr_stmt (dr));
2147 niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
2148 }
2149 else
2150 niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2151 }
2152
2153 /* Niters for at least one iteration of vectorized loop. */
2154 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2155 niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2156 /* One additional iteration because of peeling for gaps. */
2157 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
2158 niters_th += 1;
2159 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
2160 }
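/* For example (again with purely illustrative numbers): if the loop is not
   fully masked, peeling for alignment is unknown (contributing nunits - 1
   of the unaligned access's vector type, say 8 - 1 = 7), one vector
   iteration contributes VF = 8 and peeling for gaps adds 1, so the
   versioning threshold becomes 7 + 8 + 1 = 16 iterations.  */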
2161
2162 gcc_assert (known_eq (vectorization_factor,
2163 LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
2164
2165 /* Ok to vectorize! */
2166 return true;
2167
2168 again:
2169 /* Try again with SLP forced off, but if we didn't do any SLP there is
2170 no point in retrying. */
2171 if (!slp)
2172 return false;
2173
2174 /* If there are reduction chains re-trying will fail anyway. */
2175 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
2176 return false;
2177
2178 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2179 via interleaving or lane instructions. */
2180 slp_instance instance;
2181 slp_tree node;
2182 unsigned i, j;
2183 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
2184 {
2185 stmt_vec_info vinfo;
2186 vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2187 if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
2188 continue;
2189 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2190 unsigned int size = DR_GROUP_SIZE (vinfo);
2191 tree vectype = STMT_VINFO_VECTYPE (vinfo);
2192 if (! vect_store_lanes_supported (vectype, size, false)
2193 && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
2194 && ! vect_grouped_store_supported (vectype, size))
2195 return false;
2196 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
2197 {
2198 vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
2199 vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
2200 bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
2201 size = DR_GROUP_SIZE (vinfo);
2202 vectype = STMT_VINFO_VECTYPE (vinfo);
2203 if (! vect_load_lanes_supported (vectype, size, false)
2204 && ! vect_grouped_load_supported (vectype, single_element_p,
2205 size))
2206 return false;
2207 }
2208 }
2209
2210 if (dump_enabled_p ())
2211 dump_printf_loc (MSG_NOTE, vect_location,
2212 "re-trying with SLP disabled\n");
2213
2214 /* Roll back state appropriately. No SLP this time. */
2215 slp = false;
2216 /* Restore vectorization factor as it was without SLP. */
2217 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
2218 /* Free the SLP instances. */
2219 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
2220 vect_free_slp_instance (instance, false);
2221 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
2222 /* Reset SLP type to loop_vect on all stmts. */
2223 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2224 {
2225 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2226 for (gimple_stmt_iterator si = gsi_start_phis (bb);
2227 !gsi_end_p (si); gsi_next (&si))
2228 {
2229 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2230 STMT_SLP_TYPE (stmt_info) = loop_vect;
2231 }
2232 for (gimple_stmt_iterator si = gsi_start_bb (bb);
2233 !gsi_end_p (si); gsi_next (&si))
2234 {
2235 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2236 STMT_SLP_TYPE (stmt_info) = loop_vect;
2237 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2238 {
2239 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
2240 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
2241 STMT_SLP_TYPE (stmt_info) = loop_vect;
2242 for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
2243 !gsi_end_p (pi); gsi_next (&pi))
2244 STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
2245 = loop_vect;
2246 }
2247 }
2248 }
2249 /* Free optimized alias test DDRS. */
2250 LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
2251 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
2252 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
2253 /* Reset target cost data. */
2254 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2255 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
2256 = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
2257 /* Reset accumulated rgroup information. */
2258 release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
2259 /* Reset assorted flags. */
2260 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2261 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
2262 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
2263 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
2264 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;
2265
2266 goto start_over;
2267 }
2268
2269 /* Function vect_analyze_loop.
2270
2271 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2272 for it. The different analyses will record information in the
2273 loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL, the epilogue
2274 must be vectorized. */
2275 loop_vec_info
2276 vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
2277 vec_info_shared *shared)
2278 {
2279 loop_vec_info loop_vinfo;
2280 auto_vector_sizes vector_sizes;
2281
2282 /* Autodetect the first vector size we try. */
2283 current_vector_size = 0;
2284 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
2285 unsigned int next_size = 0;
2286
2287 DUMP_VECT_SCOPE ("analyze_loop_nest");
2288
2289 if (loop_outer (loop)
2290 && loop_vec_info_for_loop (loop_outer (loop))
2291 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
2292 {
2293 if (dump_enabled_p ())
2294 dump_printf_loc (MSG_NOTE, vect_location,
2295 "outer-loop already vectorized.\n");
2296 return NULL;
2297 }
2298
2299 if (!find_loop_nest (loop, &shared->loop_nest))
2300 {
2301 if (dump_enabled_p ())
2302 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2303 "not vectorized: loop nest containing two "
2304 "or more consecutive inner loops cannot be "
2305 "vectorized\n");
2306 return NULL;
2307 }
2308
2309 unsigned n_stmts = 0;
2310 poly_uint64 autodetected_vector_size = 0;
2311 while (1)
2312 {
2313 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2314 loop_vinfo = vect_analyze_loop_form (loop, shared);
2315 if (!loop_vinfo)
2316 {
2317 if (dump_enabled_p ())
2318 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2319 "bad loop form.\n");
2320 return NULL;
2321 }
2322
2323 bool fatal = false;
2324
2325 if (orig_loop_vinfo)
2326 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;
2327
2328 if (vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts))
2329 {
2330 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
2331
2332 return loop_vinfo;
2333 }
2334
2335 delete loop_vinfo;
2336
2337 if (next_size == 0)
2338 autodetected_vector_size = current_vector_size;
2339
2340 if (next_size < vector_sizes.length ()
2341 && known_eq (vector_sizes[next_size], autodetected_vector_size))
2342 next_size += 1;
2343
2344 if (fatal
2345 || next_size == vector_sizes.length ()
2346 || known_eq (current_vector_size, 0U))
2347 return NULL;
2348
2349 /* Try the next biggest vector size. */
2350 current_vector_size = vector_sizes[next_size++];
2351 if (dump_enabled_p ())
2352 {
2353 dump_printf_loc (MSG_NOTE, vect_location,
2354 "***** Re-trying analysis with "
2355 "vector size ");
2356 dump_dec (MSG_NOTE, current_vector_size);
2357 dump_printf (MSG_NOTE, "\n");
2358 }
2359 }
2360 }
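/* As an illustration of the retry loop above (the sizes are hypothetical
   and target-dependent): on a target whose autovectorize_vector_sizes hook
   reports 32-byte and 16-byte vectors, a loop whose analysis fails
   non-fatally with the autodetected 32-byte size is re-analyzed with
   current_vector_size set to 16 bytes before vectorization is finally
   given up on.  */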
2361
2362 /* Return true if there is an in-order reduction function for CODE, storing
2363 it in *REDUC_FN if so. */
2364
2365 static bool
2366 fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
2367 {
2368 switch (code)
2369 {
2370 case PLUS_EXPR:
2371 *reduc_fn = IFN_FOLD_LEFT_PLUS;
2372 return true;
2373
2374 default:
2375 return false;
2376 }
2377 }
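/* A minimal sketch of what IFN_FOLD_LEFT_PLUS computes for an incoming
   scalar accumulator ACC and a vector <e0, e1, e2, e3> (scalar C, purely
   illustrative):

     res = ACC;
     res = res + e0;
     res = res + e1;
     res = res + e2;
     res = res + e3;

   i.e. the elements are accumulated strictly in order, which is what lets
   FP reductions be vectorized without -fassociative-math.  */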
2378
2379 /* Function reduction_fn_for_scalar_code
2380
2381 Input:
2382 CODE - tree_code of a reduction operation.
2383
2384 Output:
2385 REDUC_FN - the corresponding internal function to be used to reduce the
2386 vector of partial results into a single scalar result, or IFN_LAST
2387 if the operation is a supported reduction operation, but does not have
2388 such an internal function.
2389
2390 Return FALSE if CODE currently cannot be vectorized as a reduction. */
2391
2392 static bool
2393 reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
2394 {
2395 switch (code)
2396 {
2397 case MAX_EXPR:
2398 *reduc_fn = IFN_REDUC_MAX;
2399 return true;
2400
2401 case MIN_EXPR:
2402 *reduc_fn = IFN_REDUC_MIN;
2403 return true;
2404
2405 case PLUS_EXPR:
2406 *reduc_fn = IFN_REDUC_PLUS;
2407 return true;
2408
2409 case BIT_AND_EXPR:
2410 *reduc_fn = IFN_REDUC_AND;
2411 return true;
2412
2413 case BIT_IOR_EXPR:
2414 *reduc_fn = IFN_REDUC_IOR;
2415 return true;
2416
2417 case BIT_XOR_EXPR:
2418 *reduc_fn = IFN_REDUC_XOR;
2419 return true;
2420
2421 case MULT_EXPR:
2422 case MINUS_EXPR:
2423 *reduc_fn = IFN_LAST;
2424 return true;
2425
2426 default:
2427 return false;
2428 }
2429 }
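/* For instance (illustrative source, which typically ends up as a
   MAX_EXPR reduction):

     int m = a[0];
     for (i = 1; i < n; i++)
       if (a[i] > m)
         m = a[i];

   keeps a vector of per-lane maxima in the loop body and uses IFN_REDUC_MAX
   once in the epilogue to collapse them into a scalar, whereas a MULT_EXPR
   reduction (IFN_LAST above) has its partial products combined step by
   step, e.g. by halving shuffles, since no single internal function is
   available.  */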
2430
2431 /* If there is a neutral value X such that SLP reduction NODE would not
2432 be affected by the introduction of additional X elements, return that X,
2433 otherwise return null. CODE is the code of the reduction. REDUC_CHAIN
2434 is true if the SLP statements perform a single reduction, false if each
2435 statement performs an independent reduction. */
2436
2437 static tree
2438 neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
2439 bool reduc_chain)
2440 {
2441 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2442 stmt_vec_info stmt_vinfo = stmts[0];
2443 tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
2444 tree scalar_type = TREE_TYPE (vector_type);
2445 struct loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
2446 gcc_assert (loop);
2447
2448 switch (code)
2449 {
2450 case WIDEN_SUM_EXPR:
2451 case DOT_PROD_EXPR:
2452 case SAD_EXPR:
2453 case PLUS_EXPR:
2454 case MINUS_EXPR:
2455 case BIT_IOR_EXPR:
2456 case BIT_XOR_EXPR:
2457 return build_zero_cst (scalar_type);
2458
2459 case MULT_EXPR:
2460 return build_one_cst (scalar_type);
2461
2462 case BIT_AND_EXPR:
2463 return build_all_ones_cst (scalar_type);
2464
2465 case MAX_EXPR:
2466 case MIN_EXPR:
2467 /* For MIN/MAX the initial values are neutral. A reduction chain
2468 has only a single initial value, so that value is neutral for
2469 all statements. */
2470 if (reduc_chain)
2471 return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
2472 loop_preheader_edge (loop));
2473 return NULL_TREE;
2474
2475 default:
2476 return NULL_TREE;
2477 }
2478 }
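/* To illustrate why these values are "neutral" (the lane count is
   hypothetical): if an SLP sum reduction supplies only 3 useful elements
   but the vector has 4 lanes, initializing the spare lane to 0 leaves the
   final sum unchanged, just as 1 does for a product and an all-ones value
   does for a bitwise AND.  */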
2479
2480 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
2481 STMT is printed with a message MSG. */
2482
2483 static void
2484 report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
2485 {
2486 dump_printf_loc (msg_type, vect_location, "%s", msg);
2487 dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
2488 }
2489
2490 /* DEF_STMT_INFO occurs in a loop that contains a potential reduction
2491 operation. Return true if the results of DEF_STMT_INFO are something
2492 that can be accumulated by such a reduction. */
2493
2494 static bool
2495 vect_valid_reduction_input_p (stmt_vec_info def_stmt_info)
2496 {
2497 return (is_gimple_assign (def_stmt_info->stmt)
2498 || is_gimple_call (def_stmt_info->stmt)
2499 || STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_induction_def
2500 || (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
2501 && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def
2502 && !is_loop_header_bb_p (gimple_bb (def_stmt_info->stmt))));
2503 }
2504
2505 /* Detect SLP reduction of the form:
2506
2507 #a1 = phi <a5, a0>
2508 a2 = operation (a1)
2509 a3 = operation (a2)
2510 a4 = operation (a3)
2511 a5 = operation (a4)
2512
2513 #a = phi <a5>
2514
2515 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
2516 FIRST_STMT is the first reduction stmt in the chain
2517 (a2 = operation (a1)).
2518
2519 Return TRUE if a reduction chain was detected. */
2520
2521 static bool
2522 vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
2523 gimple *first_stmt)
2524 {
2525 struct loop *loop = (gimple_bb (phi))->loop_father;
2526 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2527 enum tree_code code;
2528 gimple *loop_use_stmt = NULL;
2529 stmt_vec_info use_stmt_info, current_stmt_info = NULL;
2530 tree lhs;
2531 imm_use_iterator imm_iter;
2532 use_operand_p use_p;
2533 int nloop_uses, size = 0, n_out_of_loop_uses;
2534 bool found = false;
2535
2536 if (loop != vect_loop)
2537 return false;
2538
2539 lhs = PHI_RESULT (phi);
2540 code = gimple_assign_rhs_code (first_stmt);
2541 while (1)
2542 {
2543 nloop_uses = 0;
2544 n_out_of_loop_uses = 0;
2545 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2546 {
2547 gimple *use_stmt = USE_STMT (use_p);
2548 if (is_gimple_debug (use_stmt))
2549 continue;
2550
2551 /* Check if we got back to the reduction phi. */
2552 if (use_stmt == phi)
2553 {
2554 loop_use_stmt = use_stmt;
2555 found = true;
2556 break;
2557 }
2558
2559 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2560 {
2561 loop_use_stmt = use_stmt;
2562 nloop_uses++;
2563 }
2564 else
2565 n_out_of_loop_uses++;
2566
2567 /* There can be either a single use in the loop or two uses in
2568 phi nodes. */
2569 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
2570 return false;
2571 }
2572
2573 if (found)
2574 break;
2575
2576 /* We reached a statement with no loop uses. */
2577 if (nloop_uses == 0)
2578 return false;
2579
2580 /* This is a loop exit phi, and we haven't reached the reduction phi. */
2581 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
2582 return false;
2583
2584 if (!is_gimple_assign (loop_use_stmt)
2585 || code != gimple_assign_rhs_code (loop_use_stmt)
2586 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
2587 return false;
2588
2589 /* Insert USE_STMT into reduction chain. */
2590 use_stmt_info = loop_info->lookup_stmt (loop_use_stmt);
2591 if (current_stmt_info)
2592 {
2593 REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = use_stmt_info;
2594 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
2595 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2596 }
2597 else
2598 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = use_stmt_info;
2599
2600 lhs = gimple_assign_lhs (loop_use_stmt);
2601 current_stmt_info = use_stmt_info;
2602 size++;
2603 }
2604
2605 if (!found || loop_use_stmt != phi || size < 2)
2606 return false;
2607
2608 /* Swap the operands, if needed, so that the reduction operand is the
2609 second operand. */
2610 lhs = PHI_RESULT (phi);
2611 stmt_vec_info next_stmt_info = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2612 while (next_stmt_info)
2613 {
2614 gassign *next_stmt = as_a <gassign *> (next_stmt_info->stmt);
2615 if (gimple_assign_rhs2 (next_stmt) == lhs)
2616 {
2617 tree op = gimple_assign_rhs1 (next_stmt);
2618 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2619
2620 /* Check that the other def is either defined in the loop
2621 ("vect_internal_def"), or it's an induction (defined by a
2622 loop-header phi-node). */
2623 if (def_stmt_info
2624 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2625 && vect_valid_reduction_input_p (def_stmt_info))
2626 {
2627 lhs = gimple_assign_lhs (next_stmt);
2628 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2629 continue;
2630 }
2631
2632 return false;
2633 }
2634 else
2635 {
2636 tree op = gimple_assign_rhs2 (next_stmt);
2637 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2638
2639 /* Check that the other def is either defined in the loop
2640 ("vect_internal_def"), or it's an induction (defined by a
2641 loop-header phi-node). */
2642 if (def_stmt_info
2643 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2644 && vect_valid_reduction_input_p (def_stmt_info))
2645 {
2646 if (dump_enabled_p ())
2647 {
2648 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2649 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2650 }
2651
2652 swap_ssa_operands (next_stmt,
2653 gimple_assign_rhs1_ptr (next_stmt),
2654 gimple_assign_rhs2_ptr (next_stmt));
2655 update_stmt (next_stmt);
2656
2657 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2658 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2659 }
2660 else
2661 return false;
2662 }
2663
2664 lhs = gimple_assign_lhs (next_stmt);
2665 next_stmt_info = REDUC_GROUP_NEXT_ELEMENT (next_stmt_info);
2666 }
2667
2668 /* Save the chain for further analysis in SLP detection. */
2669 stmt_vec_info first_stmt_info
2670 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2671 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first_stmt_info);
2672 REDUC_GROUP_SIZE (first_stmt_info) = size;
2673
2674 return true;
2675 }
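/* A source-level form that typically gives rise to such a chain (purely
   illustrative) is a manually unrolled accumulation:

     for (i = 0; i < n; i += 4)
       {
         s = s + a[i];
         s = s + a[i + 1];
         s = s + a[i + 2];
         s = s + a[i + 3];
       }

   where each statement feeds the next and the last one feeds the reduction
   PHI, producing a reduction chain of size 4.  */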
2676
2677 /* Return true if we need an in-order reduction for operation CODE
2678 on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
2679 overflow must wrap. */
2680
2681 static bool
2682 needs_fold_left_reduction_p (tree type, tree_code code,
2683 bool need_wrapping_integral_overflow)
2684 {
2685 /* CHECKME: check for !flag_finite_math_only too? */
2686 if (SCALAR_FLOAT_TYPE_P (type))
2687 switch (code)
2688 {
2689 case MIN_EXPR:
2690 case MAX_EXPR:
2691 return false;
2692
2693 default:
2694 return !flag_associative_math;
2695 }
2696
2697 if (INTEGRAL_TYPE_P (type))
2698 {
2699 if (!operation_no_trapping_overflow (type, code))
2700 return true;
2701 if (need_wrapping_integral_overflow
2702 && !TYPE_OVERFLOW_WRAPS (type)
2703 && operation_can_overflow (code))
2704 return true;
2705 return false;
2706 }
2707
2708 if (SAT_FIXED_POINT_TYPE_P (type))
2709 return true;
2710
2711 return false;
2712 }
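/* For example (purely illustrative): with -ftrapv the signed sum

     int s = 0;
     for (i = 0; i < n; i++)
       s += a[i];

   can trap on overflow, so operation_no_trapping_overflow returns false
   and the reduction has to be evaluated in the original order, whereas an
   unsigned sum wraps and may be reassociated; a float sum is likewise
   kept in order unless -fassociative-math is in effect (see the FP case
   above).  */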
2713
2714 /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
2715 reduction operation CODE has a handled computation expression. */
2716
2717 bool
2718 check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
2719 tree loop_arg, enum tree_code code)
2720 {
2721 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
2722 auto_bitmap visited;
2723 tree lookfor = PHI_RESULT (phi);
2724 ssa_op_iter curri;
2725 use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
2726 while (USE_FROM_PTR (curr) != loop_arg)
2727 curr = op_iter_next_use (&curri);
2728 curri.i = curri.numops;
2729 do
2730 {
2731 path.safe_push (std::make_pair (curri, curr));
2732 tree use = USE_FROM_PTR (curr);
2733 if (use == lookfor)
2734 break;
2735 gimple *def = SSA_NAME_DEF_STMT (use);
2736 if (gimple_nop_p (def)
2737 || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
2738 {
2739 pop:
2740 do
2741 {
2742 std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
2743 curri = x.first;
2744 curr = x.second;
2745 do
2746 curr = op_iter_next_use (&curri);
2747 /* Skip already visited or non-SSA operands (from iterating
2748 over PHI args). */
2749 while (curr != NULL_USE_OPERAND_P
2750 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2751 || ! bitmap_set_bit (visited,
2752 SSA_NAME_VERSION
2753 (USE_FROM_PTR (curr)))));
2754 }
2755 while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
2756 if (curr == NULL_USE_OPERAND_P)
2757 break;
2758 }
2759 else
2760 {
2761 if (gimple_code (def) == GIMPLE_PHI)
2762 curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
2763 else
2764 curr = op_iter_init_use (&curri, def, SSA_OP_USE);
2765 while (curr != NULL_USE_OPERAND_P
2766 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2767 || ! bitmap_set_bit (visited,
2768 SSA_NAME_VERSION
2769 (USE_FROM_PTR (curr)))))
2770 curr = op_iter_next_use (&curri);
2771 if (curr == NULL_USE_OPERAND_P)
2772 goto pop;
2773 }
2774 }
2775 while (1);
2776 if (dump_file && (dump_flags & TDF_DETAILS))
2777 {
2778 dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
2779 unsigned i;
2780 std::pair<ssa_op_iter, use_operand_p> *x;
2781 FOR_EACH_VEC_ELT (path, i, x)
2782 {
2783 dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
2784 dump_printf (MSG_NOTE, " ");
2785 }
2786 dump_printf (MSG_NOTE, "\n");
2787 }
2788
2789 /* Check whether the reduction path detected is valid. */
2790 bool fail = path.length () == 0;
2791 bool neg = false;
2792 for (unsigned i = 1; i < path.length (); ++i)
2793 {
2794 gimple *use_stmt = USE_STMT (path[i].second);
2795 tree op = USE_FROM_PTR (path[i].second);
2796 if (! has_single_use (op)
2797 || ! is_gimple_assign (use_stmt))
2798 {
2799 fail = true;
2800 break;
2801 }
2802 if (gimple_assign_rhs_code (use_stmt) != code)
2803 {
2804 if (code == PLUS_EXPR
2805 && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2806 {
2807 /* Track whether we negate the reduction value each iteration. */
2808 if (gimple_assign_rhs2 (use_stmt) == op)
2809 neg = ! neg;
2810 }
2811 else
2812 {
2813 fail = true;
2814 break;
2815 }
2816 }
2817 }
2818 return ! fail && ! neg;
2819 }
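/* A sketch of a path this accepts and one it rejects (the SSA names are
   illustrative), for CODE == PLUS_EXPR:

     x_1 = PHI <x_0, x_3>
     x_2 = x_1 + a_5
     x_3 = x_2 + b_6          accepted: every statement uses PLUS_EXPR

   whereas a path in which the running value appears as the second operand
   of a MINUS_EXPR, e.g. x_3 = b_6 - x_2, negates the accumulator on that
   step, so NEG is set and the path is rejected.  */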
2820
2821
2822 /* Function vect_is_simple_reduction
2823
2824 (1) Detect a cross-iteration def-use cycle that represents a simple
2825 reduction computation. We look for the following pattern:
2826
2827 loop_header:
2828 a1 = phi < a0, a2 >
2829 a3 = ...
2830 a2 = operation (a3, a1)
2831
2832 or
2833
2834 a3 = ...
2835 loop_header:
2836 a1 = phi < a0, a2 >
2837 a2 = operation (a3, a1)
2838
2839 such that:
2840 1. operation is commutative and associative and it is safe to
2841 change the order of the computation
2842 2. no uses for a2 in the loop (a2 is used out of the loop)
2843 3. no uses of a1 in the loop besides the reduction operation
2844 4. no uses of a1 outside the loop.
2845
2846 Conditions 1,4 are tested here.
2847 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2848
2849 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2850 nested cycles.
2851
2852 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2853 reductions:
2854
2855 a1 = phi < a0, a2 >
2856 inner loop (def of a3)
2857 a2 = phi < a3 >
2858
2859 (4) Detect condition expressions, i.e.:
2860 for (int i = 0; i < N; i++)
2861 if (a[i] < val)
2862 ret_val = a[i];
2863
2864 */
2865
2866 static stmt_vec_info
2867 vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
2868 bool *double_reduc,
2869 bool need_wrapping_integral_overflow,
2870 enum vect_reduction_type *v_reduc_type)
2871 {
2872 gphi *phi = as_a <gphi *> (phi_info->stmt);
2873 struct loop *loop = (gimple_bb (phi))->loop_father;
2874 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2875 gimple *phi_use_stmt = NULL;
2876 enum tree_code orig_code, code;
2877 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2878 tree type;
2879 int nloop_uses;
2880 tree name;
2881 imm_use_iterator imm_iter;
2882 use_operand_p use_p;
2883 bool phi_def;
2884
2885 *double_reduc = false;
2886 *v_reduc_type = TREE_CODE_REDUCTION;
2887
2888 tree phi_name = PHI_RESULT (phi);
2889 /* ??? If there are no uses of the PHI result the inner loop reduction
2890 won't be detected as possibly double-reduction by vectorizable_reduction
2891 because that tries to walk the PHI arg from the preheader edge which
2892 can be constant. See PR60382. */
2893 if (has_zero_uses (phi_name))
2894 return NULL;
2895 nloop_uses = 0;
2896 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
2897 {
2898 gimple *use_stmt = USE_STMT (use_p);
2899 if (is_gimple_debug (use_stmt))
2900 continue;
2901
2902 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2903 {
2904 if (dump_enabled_p ())
2905 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2906 "intermediate value used outside loop.\n");
2907
2908 return NULL;
2909 }
2910
2911 nloop_uses++;
2912 if (nloop_uses > 1)
2913 {
2914 if (dump_enabled_p ())
2915 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2916 "reduction value used in loop.\n");
2917 return NULL;
2918 }
2919
2920 phi_use_stmt = use_stmt;
2921 }
2922
2923 edge latch_e = loop_latch_edge (loop);
2924 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2925 if (TREE_CODE (loop_arg) != SSA_NAME)
2926 {
2927 if (dump_enabled_p ())
2928 {
2929 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2930 "reduction: not ssa_name: ");
2931 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2932 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2933 }
2934 return NULL;
2935 }
2936
2937 stmt_vec_info def_stmt_info = loop_info->lookup_def (loop_arg);
2938 if (!def_stmt_info)
2939 return NULL;
2940
2941 if (gassign *def_stmt = dyn_cast <gassign *> (def_stmt_info->stmt))
2942 {
2943 name = gimple_assign_lhs (def_stmt);
2944 phi_def = false;
2945 }
2946 else if (gphi *def_stmt = dyn_cast <gphi *> (def_stmt_info->stmt))
2947 {
2948 name = PHI_RESULT (def_stmt);
2949 phi_def = true;
2950 }
2951 else
2952 {
2953 if (dump_enabled_p ())
2954 {
2955 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2956 "reduction: unhandled reduction operation: ");
2957 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2958 def_stmt_info->stmt, 0);
2959 }
2960 return NULL;
2961 }
2962
2963 nloop_uses = 0;
2964 auto_vec<gphi *, 3> lcphis;
2965 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2966 {
2967 gimple *use_stmt = USE_STMT (use_p);
2968 if (is_gimple_debug (use_stmt))
2969 continue;
2970 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2971 nloop_uses++;
2972 else
2973 /* We can have more than one loop-closed PHI. */
2974 lcphis.safe_push (as_a <gphi *> (use_stmt));
2975 if (nloop_uses > 1)
2976 {
2977 if (dump_enabled_p ())
2978 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2979 "reduction used in loop.\n");
2980 return NULL;
2981 }
2982 }
2983
2984 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2985 defined in the inner loop. */
2986 if (phi_def)
2987 {
2988 gphi *def_stmt = as_a <gphi *> (def_stmt_info->stmt);
2989 op1 = PHI_ARG_DEF (def_stmt, 0);
2990
2991 if (gimple_phi_num_args (def_stmt) != 1
2992 || TREE_CODE (op1) != SSA_NAME)
2993 {
2994 if (dump_enabled_p ())
2995 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2996 "unsupported phi node definition.\n");
2997
2998 return NULL;
2999 }
3000
3001 gimple *def1 = SSA_NAME_DEF_STMT (op1);
3002 if (gimple_bb (def1)
3003 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3004 && loop->inner
3005 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
3006 && is_gimple_assign (def1)
3007 && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
3008 {
3009 if (dump_enabled_p ())
3010 report_vect_op (MSG_NOTE, def_stmt,
3011 "detected double reduction: ");
3012
3013 *double_reduc = true;
3014 return def_stmt_info;
3015 }
3016
3017 return NULL;
3018 }
3019
3020 /* If we are vectorizing an inner reduction, we execute it in the
3021 original order only when we are not dealing with a double
3022 reduction. */
3023 bool check_reduction = true;
3024 if (flow_loop_nested_p (vect_loop, loop))
3025 {
3026 gphi *lcphi;
3027 unsigned i;
3028 check_reduction = false;
3029 FOR_EACH_VEC_ELT (lcphis, i, lcphi)
3030 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
3031 {
3032 gimple *use_stmt = USE_STMT (use_p);
3033 if (is_gimple_debug (use_stmt))
3034 continue;
3035 if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
3036 check_reduction = true;
3037 }
3038 }
3039
3040 gassign *def_stmt = as_a <gassign *> (def_stmt_info->stmt);
3041 bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
3042 code = orig_code = gimple_assign_rhs_code (def_stmt);
3043
3044 /* We can handle "res -= x[i]", which is non-associative, by rewriting
3045 it as "res += -x[i]". Avoid changing the gimple statement during the
3046 first simple tests and only do this if we're allowed to change the
3047 code at all. */
3048 if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name)
3049 code = PLUS_EXPR;
3050
3051 if (code == COND_EXPR)
3052 {
3053 if (! nested_in_vect_loop)
3054 *v_reduc_type = COND_REDUCTION;
3055
3056 op3 = gimple_assign_rhs1 (def_stmt);
3057 if (COMPARISON_CLASS_P (op3))
3058 {
3059 op4 = TREE_OPERAND (op3, 1);
3060 op3 = TREE_OPERAND (op3, 0);
3061 }
3062 if (op3 == phi_name || op4 == phi_name)
3063 {
3064 if (dump_enabled_p ())
3065 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3066 "reduction: condition depends on previous"
3067 " iteration: ");
3068 return NULL;
3069 }
3070
3071 op1 = gimple_assign_rhs2 (def_stmt);
3072 op2 = gimple_assign_rhs3 (def_stmt);
3073 }
3074 else if (!commutative_tree_code (code) || !associative_tree_code (code))
3075 {
3076 if (dump_enabled_p ())
3077 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3078 "reduction: not commutative/associative: ");
3079 return NULL;
3080 }
3081 else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
3082 {
3083 op1 = gimple_assign_rhs1 (def_stmt);
3084 op2 = gimple_assign_rhs2 (def_stmt);
3085 }
3086 else
3087 {
3088 if (dump_enabled_p ())
3089 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3090 "reduction: not handled operation: ");
3091 return NULL;
3092 }
3093
3094 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
3095 {
3096 if (dump_enabled_p ())
3097 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3098 "reduction: both uses not ssa_names: ");
3099
3100 return NULL;
3101 }
3102
3103 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
3104 if ((TREE_CODE (op1) == SSA_NAME
3105 && !types_compatible_p (type,TREE_TYPE (op1)))
3106 || (TREE_CODE (op2) == SSA_NAME
3107 && !types_compatible_p (type, TREE_TYPE (op2)))
3108 || (op3 && TREE_CODE (op3) == SSA_NAME
3109 && !types_compatible_p (type, TREE_TYPE (op3)))
3110 || (op4 && TREE_CODE (op4) == SSA_NAME
3111 && !types_compatible_p (type, TREE_TYPE (op4))))
3112 {
3113 if (dump_enabled_p ())
3114 {
3115 dump_printf_loc (MSG_NOTE, vect_location,
3116 "reduction: multiple types: operation type: ");
3117 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
3118 dump_printf (MSG_NOTE, ", operands types: ");
3119 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3120 TREE_TYPE (op1));
3121 dump_printf (MSG_NOTE, ",");
3122 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3123 TREE_TYPE (op2));
3124 if (op3)
3125 {
3126 dump_printf (MSG_NOTE, ",");
3127 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3128 TREE_TYPE (op3));
3129 }
3130
3131 if (op4)
3132 {
3133 dump_printf (MSG_NOTE, ",");
3134 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3135 TREE_TYPE (op4));
3136 }
3137 dump_printf (MSG_NOTE, "\n");
3138 }
3139
3140 return NULL;
3141 }
3142
3143 /* Check whether it's ok to change the order of the computation.
3144 Generally, when vectorizing a reduction we change the order of the
3145 computation. This may change the behavior of the program in some
3146 cases, so we need to check that this is ok. One exception is when
3147 vectorizing an outer-loop: the inner-loop is executed sequentially,
3148 and therefore vectorizing reductions in the inner-loop during
3149 outer-loop vectorization is safe. */
3150 if (check_reduction
3151 && *v_reduc_type == TREE_CODE_REDUCTION
3152 && needs_fold_left_reduction_p (type, code,
3153 need_wrapping_integral_overflow))
3154 *v_reduc_type = FOLD_LEFT_REDUCTION;
3155
3156 /* Reduction is safe. We're dealing with one of the following:
3157 1) integer arithmetic and no trapv
3158 2) floating point arithmetic, and special flags permit this optimization
3159 3) nested cycle (i.e., outer loop vectorization). */
3160 stmt_vec_info def1_info = loop_info->lookup_def (op1);
3161 stmt_vec_info def2_info = loop_info->lookup_def (op2);
3162 if (code != COND_EXPR && !def1_info && !def2_info)
3163 {
3164 if (dump_enabled_p ())
3165 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
3166 return NULL;
3167 }
3168
3169 /* Check that one def is the reduction def, defined by PHI,
3170 the other def is either defined in the loop ("vect_internal_def"),
3171 or it's an induction (defined by a loop-header phi-node). */
3172
3173 if (def2_info
3174 && def2_info->stmt == phi
3175 && (code == COND_EXPR
3176 || !def1_info
3177 || vect_valid_reduction_input_p (def1_info)))
3178 {
3179 if (dump_enabled_p ())
3180 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3181 return def_stmt_info;
3182 }
3183
3184 if (def1_info
3185 && def1_info->stmt == phi
3186 && (code == COND_EXPR
3187 || !def2_info
3188 || vect_valid_reduction_input_p (def2_info)))
3189 {
3190 if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
3191 {
3192 /* Check if we can swap operands (just for simplicity - so that
3193 the rest of the code can assume that the reduction variable
3194 is always the last (second) argument). */
3195 if (code == COND_EXPR)
3196 {
3197 /* Swap cond_expr by inverting the condition. */
3198 tree cond_expr = gimple_assign_rhs1 (def_stmt);
3199 enum tree_code invert_code = ERROR_MARK;
3200 enum tree_code cond_code = TREE_CODE (cond_expr);
3201
3202 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
3203 {
3204 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
3205 invert_code = invert_tree_comparison (cond_code, honor_nans);
3206 }
3207 if (invert_code != ERROR_MARK)
3208 {
3209 TREE_SET_CODE (cond_expr, invert_code);
3210 swap_ssa_operands (def_stmt,
3211 gimple_assign_rhs2_ptr (def_stmt),
3212 gimple_assign_rhs3_ptr (def_stmt));
3213 }
3214 else
3215 {
3216 if (dump_enabled_p ())
3217 report_vect_op (MSG_NOTE, def_stmt,
3218 "detected reduction: cannot swap operands "
3219 "for cond_expr");
3220 return NULL;
3221 }
3222 }
3223 else
3224 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
3225 gimple_assign_rhs2_ptr (def_stmt));
3226
3227 if (dump_enabled_p ())
3228 report_vect_op (MSG_NOTE, def_stmt,
3229 "detected reduction: need to swap operands: ");
3230
3231 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
3232 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
3233 }
3234 else
3235 {
3236 if (dump_enabled_p ())
3237 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3238 }
3239
3240 return def_stmt_info;
3241 }
3242
3243 /* Try to find SLP reduction chain. */
3244 if (! nested_in_vect_loop
3245 && code != COND_EXPR
3246 && orig_code != MINUS_EXPR
3247 && vect_is_slp_reduction (loop_info, phi, def_stmt))
3248 {
3249 if (dump_enabled_p ())
3250 report_vect_op (MSG_NOTE, def_stmt,
3251 "reduction: detected reduction chain: ");
3252
3253 return def_stmt_info;
3254 }
3255
3256 /* Dissolve any group half-built by vect_is_slp_reduction. */
3257 stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (def_stmt_info);
3258 while (first)
3259 {
3260 stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
3261 REDUC_GROUP_FIRST_ELEMENT (first) = NULL;
3262 REDUC_GROUP_NEXT_ELEMENT (first) = NULL;
3263 first = next;
3264 }
3265
3266 /* Look for the expression computing loop_arg from loop PHI result. */
3267 if (check_reduction_path (vect_location, loop, phi, loop_arg, code))
3268 return def_stmt_info;
3269
3270 if (dump_enabled_p ())
3271 {
3272 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3273 "reduction: unknown pattern: ");
3274 }
3275
3276 return NULL;
3277 }
3278
3279 /* Wrapper around vect_is_simple_reduction, which will modify code
3280 in-place if that enables detection of more reductions. The arguments
3281 are as for vect_is_simple_reduction. */
3282
3283 stmt_vec_info
3284 vect_force_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
3285 bool *double_reduc,
3286 bool need_wrapping_integral_overflow)
3287 {
3288 enum vect_reduction_type v_reduc_type;
3289 stmt_vec_info def_info
3290 = vect_is_simple_reduction (loop_info, phi_info, double_reduc,
3291 need_wrapping_integral_overflow,
3292 &v_reduc_type);
3293 if (def_info)
3294 {
3295 STMT_VINFO_REDUC_TYPE (phi_info) = v_reduc_type;
3296 STMT_VINFO_REDUC_DEF (phi_info) = def_info;
3297 STMT_VINFO_REDUC_TYPE (def_info) = v_reduc_type;
3298 STMT_VINFO_REDUC_DEF (def_info) = phi_info;
3299 }
3300 return def_info;
3301 }
3302
3303 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
3304 int
3305 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
3306 int *peel_iters_epilogue,
3307 stmt_vector_for_cost *scalar_cost_vec,
3308 stmt_vector_for_cost *prologue_cost_vec,
3309 stmt_vector_for_cost *epilogue_cost_vec)
3310 {
3311 int retval = 0;
3312 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3313
3314 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
3315 {
3316 *peel_iters_epilogue = assumed_vf / 2;
3317 if (dump_enabled_p ())
3318 dump_printf_loc (MSG_NOTE, vect_location,
3319 "cost model: epilogue peel iters set to vf/2 "
3320 "because loop iterations are unknown.\n");
3321
3322 /* If peeled iterations are known but the number of scalar loop
3323 iterations is unknown, count a taken branch per peeled loop. */
3324 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3325 NULL, 0, vect_prologue);
3326 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3327 NULL, 0, vect_epilogue);
3328 }
3329 else
3330 {
3331 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
3332 peel_iters_prologue = niters < peel_iters_prologue ?
3333 niters : peel_iters_prologue;
3334 *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
3335 /* If we need to peel for gaps but the computed epilogue peel count is
3336 zero, we have to peel VF iterations. */
3337 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
3338 *peel_iters_epilogue = assumed_vf;
3339 }
3340
3341 stmt_info_for_cost *si;
3342 int j;
3343 if (peel_iters_prologue)
3344 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3345 retval += record_stmt_cost (prologue_cost_vec,
3346 si->count * peel_iters_prologue,
3347 si->kind, si->stmt_info, si->misalign,
3348 vect_prologue);
3349 if (*peel_iters_epilogue)
3350 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3351 retval += record_stmt_cost (epilogue_cost_vec,
3352 si->count * *peel_iters_epilogue,
3353 si->kind, si->stmt_info, si->misalign,
3354 vect_epilogue);
3355
3356 return retval;
3357 }
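/* As a worked example of the known-niters case above (numbers are
   hypothetical): with niters = 100, an assumed VF of 8 and
   peel_iters_prologue = 3, the epilogue gets (100 - 3) % 8 = 1 iteration,
   so the scalar iteration costs are charged 3 times to the prologue and
   once to the epilogue; if peeling for gaps were required and the modulo
   came out 0, a full VF (8 iterations) would be charged to the epilogue
   instead.  */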
3358
3359 /* Function vect_estimate_min_profitable_iters
3360
3361 Return the number of iterations required for the vector version of the
3362 loop to be profitable relative to the cost of the scalar version of the
3363 loop.
3364
3365 *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
3366 of iterations for vectorization. A value of -1 means loop
3367 vectorization is not profitable. This returned value may be used
3368 for a dynamic profitability check.
3369
3370 *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
3371 for static check against estimated number of iterations. */
3372
3373 static void
3374 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
3375 int *ret_min_profitable_niters,
3376 int *ret_min_profitable_estimate)
3377 {
3378 int min_profitable_iters;
3379 int min_profitable_estimate;
3380 int peel_iters_prologue;
3381 int peel_iters_epilogue;
3382 unsigned vec_inside_cost = 0;
3383 int vec_outside_cost = 0;
3384 unsigned vec_prologue_cost = 0;
3385 unsigned vec_epilogue_cost = 0;
3386 int scalar_single_iter_cost = 0;
3387 int scalar_outside_cost = 0;
3388 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3389 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
3390 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3391
3392 /* Cost model disabled. */
3393 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
3394 {
3395 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
3396 *ret_min_profitable_niters = 0;
3397 *ret_min_profitable_estimate = 0;
3398 return;
3399 }
3400
3401 /* Requires loop versioning tests to handle misalignment. */
3402 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3403 {
3404 /* FIXME: Make cost depend on complexity of individual check. */
3405 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
3406 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3407 vect_prologue);
3408 dump_printf (MSG_NOTE,
3409 "cost model: Adding cost of checks for loop "
3410 "versioning to treat misalignment.\n");
3411 }
3412
3413 /* Requires loop versioning with alias checks. */
3414 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3415 {
3416 /* FIXME: Make cost depend on complexity of individual check. */
3417 unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
3418 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3419 vect_prologue);
3420 len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
3421 if (len)
3422 /* Count LEN - 1 ANDs and LEN comparisons. */
3423 (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
3424 NULL, 0, vect_prologue);
3425 len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
3426 if (len)
3427 {
3428 /* Count LEN - 1 ANDs and LEN comparisons. */
3429 unsigned int nstmts = len * 2 - 1;
3430 /* +1 for each bias that needs adding. */
3431 for (unsigned int i = 0; i < len; ++i)
3432 if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
3433 nstmts += 1;
3434 (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
3435 NULL, 0, vect_prologue);
3436 }
3437 dump_printf (MSG_NOTE,
3438 "cost model: Adding cost of checks for loop "
3439 "versioning aliasing.\n");
3440 }
3441
3442 /* Requires loop versioning with niter checks. */
3443 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
3444 {
3445 /* FIXME: Make cost depend on complexity of individual check. */
3446 (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
3447 vect_prologue);
3448 dump_printf (MSG_NOTE,
3449 "cost model: Adding cost of checks for loop "
3450 "versioning niters.\n");
3451 }
3452
3453 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3454 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
3455 vect_prologue);
3456
3457 /* Count statements in scalar loop. Using this as scalar cost for a single
3458 iteration for now.
3459
3460 TODO: Add outer loop support.
3461
3462 TODO: Consider assigning different costs to different scalar
3463 statements. */
3464
3465 scalar_single_iter_cost
3466 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
3467
3468 /* Add additional cost for the peeled instructions in prologue and epilogue
3469 loop. (For fully-masked loops there will be no peeling.)
3470
3471 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3472 at compile time, we assume it's vf/2 (the worst case would be vf-1).
3473
3474 TODO: Build an expression that represents peel_iters for prologue and
3475 epilogue to be used in a run-time test. */
3476
3477 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
3478 {
3479 peel_iters_prologue = 0;
3480 peel_iters_epilogue = 0;
3481
3482 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
3483 {
3484 /* We need to peel exactly one iteration. */
3485 peel_iters_epilogue += 1;
3486 stmt_info_for_cost *si;
3487 int j;
3488 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
3489 j, si)
3490 (void) add_stmt_cost (target_cost_data, si->count,
3491 si->kind, si->stmt_info, si->misalign,
3492 vect_epilogue);
3493 }
3494 }
3495 else if (npeel < 0)
3496 {
3497 peel_iters_prologue = assumed_vf / 2;
3498 dump_printf (MSG_NOTE, "cost model: "
3499 "prologue peel iters set to vf/2.\n");
3500
3501 /* If peeling for alignment is unknown, the loop bound of the main loop
3502 becomes unknown. */
3503 peel_iters_epilogue = assumed_vf / 2;
3504 dump_printf (MSG_NOTE, "cost model: "
3505 "epilogue peel iters set to vf/2 because "
3506 "peeling for alignment is unknown.\n");
3507
3508 /* If peeled iterations are unknown, count a taken branch and a not taken
3509 branch per peeled loop. Even if scalar loop iterations are known,
3510 vector iterations are not known since peeled prologue iterations are
3511 not known. Hence guards remain the same. */
3512 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3513 NULL, 0, vect_prologue);
3514 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3515 NULL, 0, vect_prologue);
3516 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3517 NULL, 0, vect_epilogue);
3518 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3519 NULL, 0, vect_epilogue);
3520 stmt_info_for_cost *si;
3521 int j;
3522 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
3523 {
3524 (void) add_stmt_cost (target_cost_data,
3525 si->count * peel_iters_prologue,
3526 si->kind, si->stmt_info, si->misalign,
3527 vect_prologue);
3528 (void) add_stmt_cost (target_cost_data,
3529 si->count * peel_iters_epilogue,
3530 si->kind, si->stmt_info, si->misalign,
3531 vect_epilogue);
3532 }
3533 }
3534 else
3535 {
3536 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
3537 stmt_info_for_cost *si;
3538 int j;
3539 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3540
3541 prologue_cost_vec.create (2);
3542 epilogue_cost_vec.create (2);
3543 peel_iters_prologue = npeel;
3544
3545 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
3546 &peel_iters_epilogue,
3547 &LOOP_VINFO_SCALAR_ITERATION_COST
3548 (loop_vinfo),
3549 &prologue_cost_vec,
3550 &epilogue_cost_vec);
3551
3552 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
3553 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3554 si->misalign, vect_prologue);
3555
3556 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
3557 (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
3558 si->misalign, vect_epilogue);
3559
3560 prologue_cost_vec.release ();
3561 epilogue_cost_vec.release ();
3562 }
3563
3564 /* FORNOW: The scalar outside cost is incremented in one of the
3565 following ways:
3566
3567 1. The vectorizer checks for alignment and aliasing and generates
3568 a condition that allows dynamic vectorization. A cost model
3569 check is ANDED with the versioning condition. Hence scalar code
3570 path now has the added cost of the versioning check.
3571
3572 if (cost > th & versioning_check)
3573 jmp to vector code
3574
3575 Hence run-time scalar is incremented by not-taken branch cost.
3576
3577 2. The vectorizer then checks if a prologue is required. If the
3578 cost model check was not done before during versioning, it has to
3579 be done before the prologue check.
3580
3581 if (cost <= th)
3582 prologue = scalar_iters
3583 if (prologue == 0)
3584 jmp to vector code
3585 else
3586 execute prologue
3587 if (prologue == num_iters)
3588 go to exit
3589
3590 Hence the run-time scalar cost is incremented by a taken branch,
3591 plus a not-taken branch, plus a taken branch cost.
3592
3593 3. The vectorizer then checks if an epilogue is required. If the
3594 cost model check was not done before during prologue check, it
3595 has to be done with the epilogue check.
3596
3597 if (prologue == 0)
3598 jmp to vector code
3599 else
3600 execute prologue
3601 if (prologue == num_iters)
3602 go to exit
3603 vector code:
3604 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
3605 jmp to epilogue
3606
3607 Hence the run-time scalar cost should be incremented by 2 taken
3608 branches.
3609
3610 TODO: The back end may reorder the BBs differently and reverse
3611 conditions/branch directions. Change the estimates below to
3612 something more reasonable. */
3613
3614 /* If the number of iterations is known and we do not do versioning, we can
3615 decide whether to vectorize at compile time. Hence the scalar version
3616 does not carry cost model guard costs. */
3617 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3618 || LOOP_REQUIRES_VERSIONING (loop_vinfo))
3619 {
3620 /* Cost model check occurs at versioning. */
3621 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3622 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
3623 else
3624 {
3625 /* Cost model check occurs at prologue generation. */
3626 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
3627 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
3628 + vect_get_stmt_cost (cond_branch_not_taken);
3629 /* Cost model check occurs at epilogue generation. */
3630 else
3631 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
3632 }
3633 }
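/* Illustration with made-up branch costs (not from any real target):
   if cond_branch_taken costs 3 and cond_branch_not_taken costs 1, the
   versioning case above adds 1 to SCALAR_OUTSIDE_COST, the
   unknown-peeling case adds 2 * 3 + 1 = 7, and the known-peeling case
   adds 2 * 3 = 6.  */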
3634
3635 /* Complete the target-specific cost calculations. */
3636 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
3637 &vec_inside_cost, &vec_epilogue_cost);
3638
3639 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
3640
3641 if (dump_enabled_p ())
3642 {
3643 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
3644 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
3645 vec_inside_cost);
3646 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
3647 vec_prologue_cost);
3648 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
3649 vec_epilogue_cost);
3650 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
3651 scalar_single_iter_cost);
3652 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
3653 scalar_outside_cost);
3654 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
3655 vec_outside_cost);
3656 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
3657 peel_iters_prologue);
3658 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
3659 peel_iters_epilogue);
3660 }
3661
3662 /* Calculate number of iterations required to make the vector version
3663 profitable, relative to the loop bodies only. The following condition
3664 must hold true:
3665 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
3666 where
3667 SIC = scalar iteration cost, VIC = vector iteration cost,
3668 VOC = vector outside cost, VF = vectorization factor,
3669 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
3670 SOC = scalar outside cost for run time cost model check. */
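/* Worked example with made-up costs (no particular target is implied):
   assume SIC = 4, VIC = 6, VOC = 20, SOC = 8, VF = 4 and no peeling.
   The condition 4 * niters + 8 > 6 * (niters / 4) + 20 requires
   niters > 4.8, and the code below computes MIN_PROFITABLE_ITERS as
   ((20 - 8) * 4) / (4 * 4 - 6) = 4, bumped to 5 by the final check.  */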
3671
3672 if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
3673 {
3674 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
3675 * assumed_vf
3676 - vec_inside_cost * peel_iters_prologue
3677 - vec_inside_cost * peel_iters_epilogue);
3678 if (min_profitable_iters <= 0)
3679 min_profitable_iters = 0;
3680 else
3681 {
3682 min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf)
3683 - vec_inside_cost);
3684
3685 if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
3686 <= (((int) vec_inside_cost * min_profitable_iters)
3687 + (((int) vec_outside_cost - scalar_outside_cost)
3688 * assumed_vf)))
3689 min_profitable_iters++;
3690 }
3691 }
3692 /* Vector version will never be profitable. */
3693 else
3694 {
3695 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
3696 warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
3697 "vectorization did not happen for a simd loop");
3698
3699 if (dump_enabled_p ())
3700 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3701 "cost model: the vector iteration cost = %d "
3702 "divided by the scalar iteration cost = %d "
3703 "is greater or equal to the vectorization factor = %d"
3704 ".\n",
3705 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
3706 *ret_min_profitable_niters = -1;
3707 *ret_min_profitable_estimate = -1;
3708 return;
3709 }
3710
3711 dump_printf (MSG_NOTE,
3712 " Calculated minimum iters for profitability: %d\n",
3713 min_profitable_iters);
3714
3715 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
3716 && min_profitable_iters < (assumed_vf + peel_iters_prologue))
3717 /* We want the vectorized loop to execute at least once. */
3718 min_profitable_iters = assumed_vf + peel_iters_prologue;
3719
3720 if (dump_enabled_p ())
3721 dump_printf_loc (MSG_NOTE, vect_location,
3722 " Runtime profitability threshold = %d\n",
3723 min_profitable_iters);
3724
3725 *ret_min_profitable_niters = min_profitable_iters;
3726
3727 /* Calculate number of iterations required to make the vector version
3728 profitable, relative to the loop bodies only.
3729
3730 Non-vectorized variant is SIC * niters and it must win over vector
3731 variant on the expected loop trip count. The following condition must hold true:
3732 SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
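/* Continuing the illustrative numbers used above (SIC = 4, VIC = 6,
   VOC = 20, SOC = 8, VF = 4, no peeling), the computation below gives
   ((20 + 8) * 4) / (4 * 4 - 6) = 112 / 10 = 11, which is then clamped
   to at least MIN_PROFITABLE_ITERS.  */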
3733
3734 if (vec_outside_cost <= 0)
3735 min_profitable_estimate = 0;
3736 else
3737 {
3738 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
3739 * assumed_vf
3740 - vec_inside_cost * peel_iters_prologue
3741 - vec_inside_cost * peel_iters_epilogue)
3742 / ((scalar_single_iter_cost * assumed_vf)
3743 - vec_inside_cost);
3744 }
3745 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3746 if (dump_enabled_p ())
3747 dump_printf_loc (MSG_NOTE, vect_location,
3748 " Static estimate profitability threshold = %d\n",
3749 min_profitable_estimate);
3750
3751 *ret_min_profitable_estimate = min_profitable_estimate;
3752 }
3753
3754 /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
3755 vector elements (not bits) for a vector with NELT elements. */
3756 static void
3757 calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
3758 vec_perm_builder *sel)
3759 {
3760 /* The encoding is a single stepped pattern. Any wrap-around is handled
3761 by vec_perm_indices. */
3762 sel->new_vector (nelt, 1, 3);
3763 for (unsigned int i = 0; i < 3; i++)
3764 sel->quick_push (i + offset);
3765 }
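/* Illustration for calc_vec_perm_mask_for_shift above (hypothetical
   values): for OFFSET == 2 and NELT == 8 the three encoded elements are
   {2, 3, 4}, and vec_perm_indices extends the single stepped pattern to
   {2, 3, 4, 5, 6, 7, 8, 9}, i.e. element I of the result is taken from
   element I + 2 of the concatenated input vectors.  */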
3766
3767 /* Checks whether the target supports whole-vector shifts for vectors of mode
3768 MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
3769 it supports vec_perm_const with masks for all necessary shift amounts. */
3770 static bool
3771 have_whole_vector_shift (machine_mode mode)
3772 {
3773 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3774 return true;
3775
3776 /* Variable-length vectors should be handled via the optab. */
3777 unsigned int nelt;
3778 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
3779 return false;
3780
3781 vec_perm_builder sel;
3782 vec_perm_indices indices;
3783 for (unsigned int i = nelt / 2; i >= 1; i /= 2)
3784 {
3785 calc_vec_perm_mask_for_shift (i, nelt, &sel);
3786 indices.new_vector (sel, 2, nelt);
3787 if (!can_vec_perm_const_p (mode, indices, false))
3788 return false;
3789 }
3790 return true;
3791 }
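/* For example (illustrative only): without vec_shr support, an
   8-element mode is accepted by have_whole_vector_shift above only if
   vec_perm_const can handle the masks for shift amounts 4, 2 and 1,
   i.e. the patterns {4, 5, ..., 11}, {2, 3, ..., 9} and {1, 2, ..., 8}.  */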
3792
3793 /* TODO: The vect_model_*_cost and vectorizable_* functions are closely
3794 coupled. Design better to avoid maintenance issues. */
3795
3796 /* Function vect_model_reduction_cost.
3797
3798 Models cost for a reduction operation, including the vector ops
3799 generated within the strip-mine loop, the initial definition before
3800 the loop, and the epilogue code that must be generated. */
3801
3802 static void
3803 vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
3804 int ncopies, stmt_vector_for_cost *cost_vec)
3805 {
3806 int prologue_cost = 0, epilogue_cost = 0, inside_cost;
3807 enum tree_code code;
3808 optab optab;
3809 tree vectype;
3810 machine_mode mode;
3811 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3812 struct loop *loop = NULL;
3813
3814 if (loop_vinfo)
3815 loop = LOOP_VINFO_LOOP (loop_vinfo);
3816
3817 /* Condition reductions generate two reductions in the loop. */
3818 vect_reduction_type reduction_type
3819 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
3820 if (reduction_type == COND_REDUCTION)
3821 ncopies *= 2;
3822
3823 vectype = STMT_VINFO_VECTYPE (stmt_info);
3824 mode = TYPE_MODE (vectype);
3825 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
3826
3827 if (!orig_stmt_info)
3828 orig_stmt_info = stmt_info;
3829
3830 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
3831
3832 if (reduction_type == EXTRACT_LAST_REDUCTION
3833 || reduction_type == FOLD_LEFT_REDUCTION)
3834 {
3835 /* No extra instructions needed in the prologue. */
3836 prologue_cost = 0;
3837
3838 if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
3839 /* Count one reduction-like operation per vector. */
3840 inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
3841 stmt_info, 0, vect_body);
3842 else
3843 {
3844 /* Use NELEMENTS extracts and NELEMENTS scalar ops. */
3845 unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
3846 inside_cost = record_stmt_cost (cost_vec, nelements,
3847 vec_to_scalar, stmt_info, 0,
3848 vect_body);
3849 inside_cost += record_stmt_cost (cost_vec, nelements,
3850 scalar_stmt, stmt_info, 0,
3851 vect_body);
3852 }
3853 }
3854 else
3855 {
3856 /* Add in cost for initial definition.
3857 For cond reduction we have four vectors: initial index, step,
3858 initial result of the data reduction, initial value of the index
3859 reduction. */
3860 int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
3861 prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
3862 scalar_to_vec, stmt_info, 0,
3863 vect_prologue);
3864
3865 /* Cost of reduction op inside loop. */
3866 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3867 stmt_info, 0, vect_body);
3868 }
3869
3870 /* Determine cost of epilogue code.
3871
3872 We have a reduction operator that will reduce the vector in one statement.
3873 Also requires scalar extract. */
3874
3875 if (!loop || !nested_in_vect_loop_p (loop, orig_stmt_info))
3876 {
3877 if (reduc_fn != IFN_LAST)
3878 {
3879 if (reduction_type == COND_REDUCTION)
3880 {
3881 /* An EQ stmt and a COND_EXPR stmt. */
3882 epilogue_cost += record_stmt_cost (cost_vec, 2,
3883 vector_stmt, stmt_info, 0,
3884 vect_epilogue);
3885 /* Reduction of the max index and a reduction of the found
3886 values. */
3887 epilogue_cost += record_stmt_cost (cost_vec, 2,
3888 vec_to_scalar, stmt_info, 0,
3889 vect_epilogue);
3890 /* A broadcast of the max value. */
3891 epilogue_cost += record_stmt_cost (cost_vec, 1,
3892 scalar_to_vec, stmt_info, 0,
3893 vect_epilogue);
3894 }
3895 else
3896 {
3897 epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
3898 stmt_info, 0, vect_epilogue);
3899 epilogue_cost += record_stmt_cost (cost_vec, 1,
3900 vec_to_scalar, stmt_info, 0,
3901 vect_epilogue);
3902 }
3903 }
3904 else if (reduction_type == COND_REDUCTION)
3905 {
3906 unsigned estimated_nunits = vect_nunits_for_cost (vectype);
3907 /* Extraction of scalar elements. */
3908 epilogue_cost += record_stmt_cost (cost_vec,
3909 2 * estimated_nunits,
3910 vec_to_scalar, stmt_info, 0,
3911 vect_epilogue);
3912 /* Scalar max reductions via COND_EXPR / MAX_EXPR. */
3913 epilogue_cost += record_stmt_cost (cost_vec,
3914 2 * estimated_nunits - 3,
3915 scalar_stmt, stmt_info, 0,
3916 vect_epilogue);
3917 }
3918 else if (reduction_type == EXTRACT_LAST_REDUCTION
3919 || reduction_type == FOLD_LEFT_REDUCTION)
3920 /* No extra instructions needed in the epilogue. */
3921 ;
3922 else
3923 {
3924 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3925 tree bitsize =
3926 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info->stmt)));
3927 int element_bitsize = tree_to_uhwi (bitsize);
3928 int nelements = vec_size_in_bits / element_bitsize;
3929
3930 if (code == COND_EXPR)
3931 code = MAX_EXPR;
3932
3933 optab = optab_for_tree_code (code, vectype, optab_default);
3934
3935 /* We have a whole vector shift available. */
3936 if (optab != unknown_optab
3937 && VECTOR_MODE_P (mode)
3938 && optab_handler (optab, mode) != CODE_FOR_nothing
3939 && have_whole_vector_shift (mode))
3940 {
3941 /* Final reduction via vector shifts and the reduction operator.
3942 Also requires scalar extract. */
3943 epilogue_cost += record_stmt_cost (cost_vec,
3944 exact_log2 (nelements) * 2,
3945 vector_stmt, stmt_info, 0,
3946 vect_epilogue);
3947 epilogue_cost += record_stmt_cost (cost_vec, 1,
3948 vec_to_scalar, stmt_info, 0,
3949 vect_epilogue);
3950 }
3951 else
3952 /* Use extracts and reduction op for final reduction. For N
3953 elements, we have N extracts and N-1 reduction ops. */
3954 epilogue_cost += record_stmt_cost (cost_vec,
3955 nelements + nelements - 1,
3956 vector_stmt, stmt_info, 0,
3957 vect_epilogue);
3958 }
3959 }
3960
3961 if (dump_enabled_p ())
3962 dump_printf (MSG_NOTE,
3963 "vect_model_reduction_cost: inside_cost = %d, "
3964 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3965 prologue_cost, epilogue_cost);
3966 }
3967
3968
3969 /* Function vect_model_induction_cost.
3970
3971 Models cost for induction operations. */
3972
3973 static void
3974 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
3975 stmt_vector_for_cost *cost_vec)
3976 {
3977 unsigned inside_cost, prologue_cost;
3978
3979 if (PURE_SLP_STMT (stmt_info))
3980 return;
3981
3982 /* loop cost for vec_loop. */
3983 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3984 stmt_info, 0, vect_body);
3985
3986 /* prologue cost for vec_init and vec_step. */
3987 prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
3988 stmt_info, 0, vect_prologue);
3989
3990 if (dump_enabled_p ())
3991 dump_printf_loc (MSG_NOTE, vect_location,
3992 "vect_model_induction_cost: inside_cost = %d, "
3993 "prologue_cost = %d .\n", inside_cost, prologue_cost);
3994 }
3995
3996
3997
3998 /* Function get_initial_def_for_reduction
3999
4000 Input:
4001 STMT - a stmt that performs a reduction operation in the loop.
4002 INIT_VAL - the initial value of the reduction variable
4003
4004 Output:
4005 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
4006 of the reduction (used for adjusting the epilog - see below).
4007 Return a vector variable, initialized according to the operation that STMT
4008 performs. This vector will be used as the initial value of the
4009 vector of partial results.
4010
4011 Option1 (adjust in epilog): Initialize the vector as follows:
4012 add/bit or/xor: [0,0,...,0,0]
4013 mult/bit and: [1,1,...,1,1]
4014 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
4015 and when necessary (e.g. add/mult case) let the caller know
4016 that it needs to adjust the result by init_val.
4017
4018 Option2: Initialize the vector as follows:
4019 add/bit or/xor: [init_val,0,0,...,0]
4020 mult/bit and: [init_val,1,1,...,1]
4021 min/max/cond_expr: [init_val,init_val,...,init_val]
4022 and no adjustments are needed.
4023
4024 For example, for the following code:
4025
4026 s = init_val;
4027 for (i=0;i<n;i++)
4028 s = s + a[i];
4029
4030 STMT is 's = s + a[i]', and the reduction variable is 's'.
4031 For a vector of 4 units, we want to return either [0,0,0,init_val],
4032 or [0,0,0,0] and let the caller know that it needs to adjust
4033 the result at the end by 'init_val'.
4034
4035 FORNOW, we use the 'adjust in epilog' scheme (Option1) when ADJUSTMENT_DEF
4036 is not NULL, because its initialization vector is simpler (same element
4037 in all entries), and Option2 otherwise.
4038
4039 A cost model should help decide between these two schemes. */
4040
4041 tree
4042 get_initial_def_for_reduction (gimple *stmt, tree init_val,
4043 tree *adjustment_def)
4044 {
4045 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
4046 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
4047 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4048 tree scalar_type = TREE_TYPE (init_val);
4049 tree vectype = get_vectype_for_scalar_type (scalar_type);
4050 enum tree_code code = gimple_assign_rhs_code (stmt_vinfo->stmt);
4051 tree def_for_init;
4052 tree init_def;
4053 REAL_VALUE_TYPE real_init_val = dconst0;
4054 int int_init_val = 0;
4055 gimple_seq stmts = NULL;
4056
4057 gcc_assert (vectype);
4058
4059 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
4060 || SCALAR_FLOAT_TYPE_P (scalar_type));
4061
4062 gcc_assert (nested_in_vect_loop_p (loop, stmt_vinfo)
4063 || loop == (gimple_bb (stmt_vinfo->stmt))->loop_father);
4064
4065 vect_reduction_type reduction_type
4066 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);
4067
4068 switch (code)
4069 {
4070 case WIDEN_SUM_EXPR:
4071 case DOT_PROD_EXPR:
4072 case SAD_EXPR:
4073 case PLUS_EXPR:
4074 case MINUS_EXPR:
4075 case BIT_IOR_EXPR:
4076 case BIT_XOR_EXPR:
4077 case MULT_EXPR:
4078 case BIT_AND_EXPR:
4079 {
4080 /* ADJUSTMENT_DEF is NULL when called from
4081 vect_create_epilog_for_reduction to vectorize double reduction. */
4082 if (adjustment_def)
4083 *adjustment_def = init_val;
4084
4085 if (code == MULT_EXPR)
4086 {
4087 real_init_val = dconst1;
4088 int_init_val = 1;
4089 }
4090
4091 if (code == BIT_AND_EXPR)
4092 int_init_val = -1;
4093
4094 if (SCALAR_FLOAT_TYPE_P (scalar_type))
4095 def_for_init = build_real (scalar_type, real_init_val);
4096 else
4097 def_for_init = build_int_cst (scalar_type, int_init_val);
4098
4099 if (adjustment_def)
4100 /* Option1: the first element is '0' or '1' as well. */
4101 init_def = gimple_build_vector_from_val (&stmts, vectype,
4102 def_for_init);
4103 else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
4104 {
4105 /* Option2 (variable length): the first element is INIT_VAL. */
4106 init_def = gimple_build_vector_from_val (&stmts, vectype,
4107 def_for_init);
4108 init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
4109 vectype, init_def, init_val);
4110 }
4111 else
4112 {
4113 /* Option2: the first element is INIT_VAL. */
4114 tree_vector_builder elts (vectype, 1, 2);
4115 elts.quick_push (init_val);
4116 elts.quick_push (def_for_init);
4117 init_def = gimple_build_vector (&stmts, &elts);
4118 }
4119 }
4120 break;
4121
4122 case MIN_EXPR:
4123 case MAX_EXPR:
4124 case COND_EXPR:
4125 {
4126 if (adjustment_def)
4127 {
4128 *adjustment_def = NULL_TREE;
4129 if (reduction_type != COND_REDUCTION
4130 && reduction_type != EXTRACT_LAST_REDUCTION)
4131 {
4132 init_def = vect_get_vec_def_for_operand (init_val, stmt_vinfo);
4133 break;
4134 }
4135 }
4136 init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
4137 init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
4138 }
4139 break;
4140
4141 default:
4142 gcc_unreachable ();
4143 }
4144
4145 if (stmts)
4146 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4147 return init_def;
4148 }
4149
4150 /* Get at the initial defs for the reduction PHIs in SLP_NODE.
4151 NUMBER_OF_VECTORS is the number of vector defs to create.
4152 If NEUTRAL_OP is nonnull, introducing extra elements of that
4153 value will not change the result. */
4154
4155 static void
4156 get_initial_defs_for_reduction (slp_tree slp_node,
4157 vec<tree> *vec_oprnds,
4158 unsigned int number_of_vectors,
4159 bool reduc_chain, tree neutral_op)
4160 {
4161 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4162 stmt_vec_info stmt_vinfo = stmts[0];
4163 unsigned HOST_WIDE_INT nunits;
4164 unsigned j, number_of_places_left_in_vector;
4165 tree vector_type;
4166 tree vop;
4167 int group_size = stmts.length ();
4168 unsigned int vec_num, i;
4169 unsigned number_of_copies = 1;
4170 vec<tree> voprnds;
4171 voprnds.create (number_of_vectors);
4172 struct loop *loop;
4173 auto_vec<tree, 16> permute_results;
4174
4175 vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
4176
4177 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
4178
4179 loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
4180 gcc_assert (loop);
4181 edge pe = loop_preheader_edge (loop);
4182
4183 gcc_assert (!reduc_chain || neutral_op);
4184
4185 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
4186 created vectors. It is greater than 1 if unrolling is performed.
4187
4188 For example, we have two scalar operands, s1 and s2 (e.g., group of
4189 strided accesses of size two), while NUNITS is four (i.e., four scalars
4190 of this type can be packed in a vector). The output vector will contain
4191 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
4192 will be 2).
4193
4194 If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
4195 vectors containing the operands.
4196
4197 For example, NUNITS is four as before, and the group size is 8
4198 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
4199 {s5, s6, s7, s8}. */
4200
4201 if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
4202 nunits = group_size;
4203
4204 number_of_copies = nunits * number_of_vectors / group_size;
4205
4206 number_of_places_left_in_vector = nunits;
4207 bool constant_p = true;
4208 tree_vector_builder elts (vector_type, nunits, 1);
4209 elts.quick_grow (nunits);
4210 for (j = 0; j < number_of_copies; j++)
4211 {
4212 for (i = group_size - 1; stmts.iterate (i, &stmt_vinfo); i--)
4213 {
4214 tree op;
4215 /* Get the def before the loop. In reduction chain we have only
4216 one initial value. */
4217 if ((j != (number_of_copies - 1)
4218 || (reduc_chain && i != 0))
4219 && neutral_op)
4220 op = neutral_op;
4221 else
4222 op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
4223
4224 /* Create 'vect_ = {op0,op1,...,opn}'. */
4225 number_of_places_left_in_vector--;
4226 elts[number_of_places_left_in_vector] = op;
4227 if (!CONSTANT_CLASS_P (op))
4228 constant_p = false;
4229
4230 if (number_of_places_left_in_vector == 0)
4231 {
4232 gimple_seq ctor_seq = NULL;
4233 tree init;
4234 if (constant_p && !neutral_op
4235 ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
4236 : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
4237 /* Build the vector directly from ELTS. */
4238 init = gimple_build_vector (&ctor_seq, &elts);
4239 else if (neutral_op)
4240 {
4241 /* Build a vector of the neutral value and shift the
4242 other elements into place. */
4243 init = gimple_build_vector_from_val (&ctor_seq, vector_type,
4244 neutral_op);
4245 int k = nunits;
4246 while (k > 0 && elts[k - 1] == neutral_op)
4247 k -= 1;
4248 while (k > 0)
4249 {
4250 k -= 1;
4251 init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
4252 vector_type, init, elts[k]);
4253 }
4254 }
4255 else
4256 {
4257 /* First time round, duplicate ELTS to fill the
4258 required number of vectors, then cherry pick the
4259 appropriate result for each iteration. */
4260 if (vec_oprnds->is_empty ())
4261 duplicate_and_interleave (&ctor_seq, vector_type, elts,
4262 number_of_vectors,
4263 permute_results);
4264 init = permute_results[number_of_vectors - j - 1];
4265 }
4266 if (ctor_seq != NULL)
4267 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4268 voprnds.quick_push (init);
4269
4270 number_of_places_left_in_vector = nunits;
4271 elts.new_vector (vector_type, nunits, 1);
4272 elts.quick_grow (nunits);
4273 constant_p = true;
4274 }
4275 }
4276 }
4277
4278 /* Since the vectors are created in the reverse order, we should invert
4279 them. */
4280 vec_num = voprnds.length ();
4281 for (j = vec_num; j != 0; j--)
4282 {
4283 vop = voprnds[j - 1];
4284 vec_oprnds->quick_push (vop);
4285 }
4286
4287 voprnds.release ();
4288
4289 /* In case that VF is greater than the unrolling factor needed for the SLP
4290 group of stmts, NUMBER_OF_VECTORS to be created is greater than
4291 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
4292 to replicate the vectors. */
4293 tree neutral_vec = NULL;
4294 while (number_of_vectors > vec_oprnds->length ())
4295 {
4296 if (neutral_op)
4297 {
4298 if (!neutral_vec)
4299 {
4300 gimple_seq ctor_seq = NULL;
4301 neutral_vec = gimple_build_vector_from_val
4302 (&ctor_seq, vector_type, neutral_op);
4303 if (ctor_seq != NULL)
4304 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4305 }
4306 vec_oprnds->quick_push (neutral_vec);
4307 }
4308 else
4309 {
4310 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
4311 vec_oprnds->quick_push (vop);
4312 }
4313 }
4314 }
4315
4316
4317 /* Function vect_create_epilog_for_reduction
4318
4319 Create code at the loop-epilog to finalize the result of a reduction
4320 computation.
4321
4322 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
4323 reduction statements.
4324 STMT is the scalar reduction stmt that is being vectorized.
4325 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
4326 number of elements that we can fit in a vectype (nunits). In this case
4327 we have to generate more than one vector stmt - i.e - we need to "unroll"
4328 the vector stmt by a factor VF/nunits. For more details see documentation
4329 in vectorizable_operation.
4330 REDUC_FN is the internal function for the epilog reduction.
4331 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
4332 computation.
4333 REDUC_INDEX is the index of the operand in the right hand side of the
4334 statement that is defined by REDUCTION_PHI.
4335 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
4336 SLP_NODE is an SLP node containing a group of reduction statements. The
4337 first one in this group is STMT.
4338 INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
4339 when the COND_EXPR is never true in the loop. For MAX_EXPR, it needs to
4340 be smaller than any value of the IV in the loop, for MIN_EXPR larger than
4341 any value of the IV in the loop.
4342 INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
4343 NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
4344 null if this is not an SLP reduction
4345
4346 This function:
4347 1. Creates the reduction def-use cycles: sets the arguments for
4348 REDUCTION_PHIS:
4349 The loop-entry argument is the vectorized initial-value of the reduction.
4350 The loop-latch argument is taken from VECT_DEFS - the vector of partial
4351 sums.
4352 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
4353 by calling the function specified by REDUC_FN if available, or by
4354 other means (whole-vector shifts or a scalar loop).
4355 The function also creates a new phi node at the loop exit to preserve
4356 loop-closed form, as illustrated below.
4357
4358 The flow at the entry to this function:
4359
4360 loop:
4361 vec_def = phi <null, null> # REDUCTION_PHI
4362 VECT_DEF = vector_stmt # vectorized form of STMT
4363 s_loop = scalar_stmt # (scalar) STMT
4364 loop_exit:
4365 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4366 use <s_out0>
4367 use <s_out0>
4368
4369 The above is transformed by this function into:
4370
4371 loop:
4372 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4373 VECT_DEF = vector_stmt # vectorized form of STMT
4374 s_loop = scalar_stmt # (scalar) STMT
4375 loop_exit:
4376 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4377 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4378 v_out2 = reduce <v_out1>
4379 s_out3 = extract_field <v_out2, 0>
4380 s_out4 = adjust_result <s_out3>
4381 use <s_out4>
4382 use <s_out4>
4383 */
4384
4385 static void
4386 vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
4387 gimple *reduc_def_stmt,
4388 int ncopies, internal_fn reduc_fn,
4389 vec<stmt_vec_info> reduction_phis,
4390 bool double_reduc,
4391 slp_tree slp_node,
4392 slp_instance slp_node_instance,
4393 tree induc_val, enum tree_code induc_code,
4394 tree neutral_op)
4395 {
4396 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4397 stmt_vec_info prev_phi_info;
4398 tree vectype;
4399 machine_mode mode;
4400 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4401 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
4402 basic_block exit_bb;
4403 tree scalar_dest;
4404 tree scalar_type;
4405 gimple *new_phi = NULL, *phi;
4406 stmt_vec_info phi_info;
4407 gimple_stmt_iterator exit_gsi;
4408 tree vec_dest;
4409 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
4410 gimple *epilog_stmt = NULL;
4411 enum tree_code code = gimple_assign_rhs_code (stmt_info->stmt);
4412 gimple *exit_phi;
4413 tree bitsize;
4414 tree adjustment_def = NULL;
4415 tree vec_initial_def = NULL;
4416 tree expr, def, initial_def = NULL;
4417 tree orig_name, scalar_result;
4418 imm_use_iterator imm_iter, phi_imm_iter;
4419 use_operand_p use_p, phi_use_p;
4420 gimple *use_stmt;
4421 stmt_vec_info reduction_phi_info = NULL;
4422 bool nested_in_vect_loop = false;
4423 auto_vec<gimple *> new_phis;
4424 auto_vec<stmt_vec_info> inner_phis;
4425 enum vect_def_type dt = vect_unknown_def_type;
4426 int j, i;
4427 auto_vec<tree> scalar_results;
4428 unsigned int group_size = 1, k, ratio;
4429 auto_vec<tree> vec_initial_defs;
4430 auto_vec<gimple *> phis;
4431 bool slp_reduc = false;
4432 bool direct_slp_reduc;
4433 tree new_phi_result;
4434 stmt_vec_info inner_phi = NULL;
4435 tree induction_index = NULL_TREE;
4436
4437 if (slp_node)
4438 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
4439
4440 if (nested_in_vect_loop_p (loop, stmt_info))
4441 {
4442 outer_loop = loop;
4443 loop = loop->inner;
4444 nested_in_vect_loop = true;
4445 gcc_assert (!slp_node);
4446 }
4447
4448 vectype = STMT_VINFO_VECTYPE (stmt_info);
4449 gcc_assert (vectype);
4450 mode = TYPE_MODE (vectype);
4451
4452 /* 1. Create the reduction def-use cycle:
4453 Set the arguments of REDUCTION_PHIS, i.e., transform
4454
4455 loop:
4456 vec_def = phi <null, null> # REDUCTION_PHI
4457 VECT_DEF = vector_stmt # vectorized form of STMT
4458 ...
4459
4460 into:
4461
4462 loop:
4463 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4464 VECT_DEF = vector_stmt # vectorized form of STMT
4465 ...
4466
4467 (in case of SLP, do it for all the phis). */
4468
4469 /* Get the loop-entry arguments. */
4470 enum vect_def_type initial_def_dt = vect_unknown_def_type;
4471 if (slp_node)
4472 {
4473 unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4474 vec_initial_defs.reserve (vec_num);
4475 get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
4476 &vec_initial_defs, vec_num,
4477 REDUC_GROUP_FIRST_ELEMENT (stmt_info),
4478 neutral_op);
4479 }
4480 else
4481 {
4482 /* Get at the scalar def before the loop, that defines the initial value
4483 of the reduction variable. */
4484 initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4485 loop_preheader_edge (loop));
4486 /* Optimize: if initial_def is for REDUC_MAX smaller than the base
4487 and we can't use zero for induc_val, use initial_def. Similarly
4488 for REDUC_MIN and initial_def larger than the base. */
4489 if (TREE_CODE (initial_def) == INTEGER_CST
4490 && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4491 == INTEGER_INDUC_COND_REDUCTION)
4492 && !integer_zerop (induc_val)
4493 && ((induc_code == MAX_EXPR
4494 && tree_int_cst_lt (initial_def, induc_val))
4495 || (induc_code == MIN_EXPR
4496 && tree_int_cst_lt (induc_val, initial_def))))
4497 induc_val = initial_def;
4498
4499 if (double_reduc)
4500 /* In case of double reduction we only create a vector variable
4501 to be put in the reduction phi node. The actual statement
4502 creation is done later in this function. */
4503 vec_initial_def = vect_create_destination_var (initial_def, vectype);
4504 else if (nested_in_vect_loop)
4505 {
4506 /* Do not use an adjustment def as that case is not supported
4507 correctly if ncopies is not one. */
4508 vect_is_simple_use (initial_def, loop_vinfo, &initial_def_dt);
4509 vec_initial_def = vect_get_vec_def_for_operand (initial_def,
4510 stmt_info);
4511 }
4512 else
4513 vec_initial_def
4514 = get_initial_def_for_reduction (stmt_info, initial_def,
4515 &adjustment_def);
4516 vec_initial_defs.create (1);
4517 vec_initial_defs.quick_push (vec_initial_def);
4518 }
4519
4520 /* Set phi nodes arguments. */
4521 FOR_EACH_VEC_ELT (reduction_phis, i, phi_info)
4522 {
4523 tree vec_init_def = vec_initial_defs[i];
4524 tree def = vect_defs[i];
4525 for (j = 0; j < ncopies; j++)
4526 {
4527 if (j != 0)
4528 {
4529 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4530 if (nested_in_vect_loop)
4531 vec_init_def
4532 = vect_get_vec_def_for_stmt_copy (initial_def_dt,
4533 vec_init_def);
4534 }
4535
4536 /* Set the loop-entry arg of the reduction-phi. */
4537
4538 gphi *phi = as_a <gphi *> (phi_info->stmt);
4539 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4540 == INTEGER_INDUC_COND_REDUCTION)
4541 {
4542 /* Initialise the reduction phi to zero. This prevents non-zero initial
4543 values from interfering with the reduction op. */
4544 gcc_assert (ncopies == 1);
4545 gcc_assert (i == 0);
4546
4547 tree vec_init_def_type = TREE_TYPE (vec_init_def);
4548 tree induc_val_vec
4549 = build_vector_from_val (vec_init_def_type, induc_val);
4550
4551 add_phi_arg (phi, induc_val_vec, loop_preheader_edge (loop),
4552 UNKNOWN_LOCATION);
4553 }
4554 else
4555 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
4556 UNKNOWN_LOCATION);
4557
4558 /* Set the loop-latch arg for the reduction-phi. */
4559 if (j > 0)
4560 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
4561
4562 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
4563
4564 if (dump_enabled_p ())
4565 {
4566 dump_printf_loc (MSG_NOTE, vect_location,
4567 "transform reduction: created def-use cycle: ");
4568 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
4569 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
4570 }
4571 }
4572 }
4573
4574 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
4575 which is updated with the current index of the loop for every match of
4576 the original loop's cond_expr (VEC_STMT). This results in a vector
4577 containing the last time the condition passed for that vector lane.
4578 The first match will be a 1 to allow 0 to be used for non-matching
4579 indexes. If there are no matches at all then the vector will be all
4580 zeroes. */
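/* A sketch with made-up values: for VF == 4 the induction variable
   created below takes the values {1, 2, 3, 4}, {5, 6, 7, 8}, ... in
   successive vector iterations.  If the condition only matches in
   lane 2 of the second vector iteration, the final index vector is
   {0, 0, 7, 0}; the epilogue then finds the maximum index (7) and
   uses it to pick the matching lane out of the data vector.  */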
4581 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
4582 {
4583 tree indx_before_incr, indx_after_incr;
4584 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
4585
4586 gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info)->stmt;
4587 gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
4588
4589 int scalar_precision
4590 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
4591 tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
4592 tree cr_index_vector_type = build_vector_type
4593 (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));
4594
4595 /* First we create a simple vector induction variable which starts
4596 with the values {1,2,3,...} (SERIES_VECT) and increments by the
4597 vector size (STEP). */
4598
4599 /* Create a {1,2,3,...} vector. */
4600 tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
4601
4602 /* Create a vector of the step value. */
4603 tree step = build_int_cst (cr_index_scalar_type, nunits_out);
4604 tree vec_step = build_vector_from_val (cr_index_vector_type, step);
4605
4606 /* Create an induction variable. */
4607 gimple_stmt_iterator incr_gsi;
4608 bool insert_after;
4609 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4610 create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
4611 insert_after, &indx_before_incr, &indx_after_incr);
4612
4613 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
4614 filled with zeros (VEC_ZERO). */
4615
4616 /* Create a vector of 0s. */
4617 tree zero = build_zero_cst (cr_index_scalar_type);
4618 tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
4619
4620 /* Create a vector phi node. */
4621 tree new_phi_tree = make_ssa_name (cr_index_vector_type);
4622 new_phi = create_phi_node (new_phi_tree, loop->header);
4623 loop_vinfo->add_stmt (new_phi);
4624 add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
4625 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4626
4627 /* Now take the condition from the loop's original cond_expr
4628 (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
4629 every match uses values from the induction variable
4630 (INDEX_BEFORE_INCR) otherwise uses values from the phi node
4631 (NEW_PHI_TREE).
4632 Finally, we update the phi (NEW_PHI_TREE) to take the value of
4633 the new cond_expr (INDEX_COND_EXPR). */
4634
4635 /* Duplicate the condition from vec_stmt. */
4636 tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));
4637
4638 /* Create a conditional, where the condition is taken from vec_stmt
4639 (CCOMPARE), the 'then' value is the induction index (INDEX_BEFORE_INCR)
4640 and the 'else' value is the phi (NEW_PHI_TREE). */
4641 tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
4642 ccompare, indx_before_incr,
4643 new_phi_tree);
4644 induction_index = make_ssa_name (cr_index_vector_type);
4645 gimple *index_condition = gimple_build_assign (induction_index,
4646 index_cond_expr);
4647 gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
4648 stmt_vec_info index_vec_info = loop_vinfo->add_stmt (index_condition);
4649 STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
4650
4651 /* Update the phi with the vec cond. */
4652 add_phi_arg (as_a <gphi *> (new_phi), induction_index,
4653 loop_latch_edge (loop), UNKNOWN_LOCATION);
4654 }
4655
4656 /* 2. Create epilog code.
4657 The reduction epilog code operates across the elements of the vector
4658 of partial results computed by the vectorized loop.
4659 The reduction epilog code consists of:
4660
4661 step 1: compute the scalar result in a vector (v_out2)
4662 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4663 step 3: adjust the scalar result (s_out3) if needed.
4664
4665 Step 1 can be accomplished using one of the following three schemes:
4666 (scheme 1) using reduc_fn, if available.
4667 (scheme 2) using whole-vector shifts, if available.
4668 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4669 combined.
4670
4671 The overall epilog code looks like this:
4672
4673 s_out0 = phi <s_loop> # original EXIT_PHI
4674 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4675 v_out2 = reduce <v_out1> # step 1
4676 s_out3 = extract_field <v_out2, 0> # step 2
4677 s_out4 = adjust_result <s_out3> # step 3
4678
4679 (step 3 is optional, and steps 1 and 2 may be combined).
4680 Lastly, the uses of s_out0 are replaced by s_out4. */
4681
4682
4683 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4684 v_out1 = phi <VECT_DEF>
4685 Store them in NEW_PHIS. */
4686
4687 exit_bb = single_exit (loop)->dest;
4688 prev_phi_info = NULL;
4689 new_phis.create (vect_defs.length ());
4690 FOR_EACH_VEC_ELT (vect_defs, i, def)
4691 {
4692 for (j = 0; j < ncopies; j++)
4693 {
4694 tree new_def = copy_ssa_name (def);
4695 phi = create_phi_node (new_def, exit_bb);
4696 stmt_vec_info phi_info = loop_vinfo->add_stmt (phi);
4697 if (j == 0)
4698 new_phis.quick_push (phi);
4699 else
4700 {
4701 def = vect_get_vec_def_for_stmt_copy (dt, def);
4702 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi_info;
4703 }
4704
4705 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
4706 prev_phi_info = phi_info;
4707 }
4708 }
4709
4710 /* The epilogue is created for the outer-loop, i.e., for the loop being
4711 vectorized. Create exit phis for the outer loop. */
4712 if (double_reduc)
4713 {
4714 loop = outer_loop;
4715 exit_bb = single_exit (loop)->dest;
4716 inner_phis.create (vect_defs.length ());
4717 FOR_EACH_VEC_ELT (new_phis, i, phi)
4718 {
4719 stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi);
4720 tree new_result = copy_ssa_name (PHI_RESULT (phi));
4721 gphi *outer_phi = create_phi_node (new_result, exit_bb);
4722 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4723 PHI_RESULT (phi));
4724 prev_phi_info = loop_vinfo->add_stmt (outer_phi);
4725 inner_phis.quick_push (phi_info);
4726 new_phis[i] = outer_phi;
4727 while (STMT_VINFO_RELATED_STMT (phi_info))
4728 {
4729 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4730 new_result = copy_ssa_name (PHI_RESULT (phi_info->stmt));
4731 outer_phi = create_phi_node (new_result, exit_bb);
4732 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4733 PHI_RESULT (phi_info->stmt));
4734 stmt_vec_info outer_phi_info = loop_vinfo->add_stmt (outer_phi);
4735 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi_info;
4736 prev_phi_info = outer_phi_info;
4737 }
4738 }
4739 }
4740
4741 exit_gsi = gsi_after_labels (exit_bb);
4742
4743 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4744 (i.e. when reduc_fn is not available) and in the final adjustment
4745 code (if needed). Also get the original scalar reduction variable as
4746 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4747 represents a reduction pattern), the tree-code and scalar-def are
4748 taken from the original stmt that the pattern-stmt (STMT) replaces.
4749 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4750 are taken from STMT. */
4751
4752 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
4753 if (!orig_stmt_info)
4754 {
4755 /* Regular reduction */
4756 orig_stmt_info = stmt_info;
4757 }
4758 else
4759 {
4760 /* Reduction pattern */
4761 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4762 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt_info);
4763 }
4764
4765 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
4766 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4767 partial results are added and not subtracted. */
4768 if (code == MINUS_EXPR)
4769 code = PLUS_EXPR;
4770
4771 scalar_dest = gimple_assign_lhs (orig_stmt_info->stmt);
4772 scalar_type = TREE_TYPE (scalar_dest);
4773 scalar_results.create (group_size);
4774 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4775 bitsize = TYPE_SIZE (scalar_type);
4776
4777 /* In case this is a reduction in an inner-loop while vectorizing an outer
4778 loop - we don't need to extract a single scalar result at the end of the
4779 inner-loop (unless it is double reduction, i.e., the use of reduction is
4780 outside the outer-loop). The final vector of partial results will be used
4781 in the vectorized outer-loop, or reduced to a scalar result at the end of
4782 the outer-loop. */
4783 if (nested_in_vect_loop && !double_reduc)
4784 goto vect_finalize_reduction;
4785
4786 /* SLP reduction without reduction chain, e.g.,
4787 # a1 = phi <a2, a0>
4788 # b1 = phi <b2, b0>
4789 a2 = operation (a1)
4790 b2 = operation (b1) */
4791 slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (stmt_info));
4792
4793 /* True if we should implement SLP_REDUC using native reduction operations
4794 instead of scalar operations. */
4795 direct_slp_reduc = (reduc_fn != IFN_LAST
4796 && slp_reduc
4797 && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
4798
4799 /* In case of reduction chain, e.g.,
4800 # a1 = phi <a3, a0>
4801 a2 = operation (a1)
4802 a3 = operation (a2),
4803
4804 we may end up with more than one vector result. Here we reduce them to
4805 one vector. */
4806 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info) || direct_slp_reduc)
4807 {
4808 tree first_vect = PHI_RESULT (new_phis[0]);
4809 gassign *new_vec_stmt = NULL;
4810 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4811 for (k = 1; k < new_phis.length (); k++)
4812 {
4813 gimple *next_phi = new_phis[k];
4814 tree second_vect = PHI_RESULT (next_phi);
4815 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4816 new_vec_stmt = gimple_build_assign (tem, code,
4817 first_vect, second_vect);
4818 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4819 first_vect = tem;
4820 }
4821
4822 new_phi_result = first_vect;
4823 if (new_vec_stmt)
4824 {
4825 new_phis.truncate (0);
4826 new_phis.safe_push (new_vec_stmt);
4827 }
4828 }
4829 /* Likewise if we couldn't use a single def-use cycle. */
4830 else if (ncopies > 1)
4831 {
4832 gcc_assert (new_phis.length () == 1);
4833 tree first_vect = PHI_RESULT (new_phis[0]);
4834 gassign *new_vec_stmt = NULL;
4835 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4836 stmt_vec_info next_phi_info = loop_vinfo->lookup_stmt (new_phis[0]);
4837 for (int k = 1; k < ncopies; ++k)
4838 {
4839 next_phi_info = STMT_VINFO_RELATED_STMT (next_phi_info);
4840 tree second_vect = PHI_RESULT (next_phi_info->stmt);
4841 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4842 new_vec_stmt = gimple_build_assign (tem, code,
4843 first_vect, second_vect);
4844 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4845 first_vect = tem;
4846 }
4847 new_phi_result = first_vect;
4848 new_phis.truncate (0);
4849 new_phis.safe_push (new_vec_stmt);
4850 }
4851 else
4852 new_phi_result = PHI_RESULT (new_phis[0]);
4853
4854 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4855 && reduc_fn != IFN_LAST)
4856 {
4857 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
4858 various data values where the condition matched and another vector
4859 (INDUCTION_INDEX) containing all the indexes of those matches. We
4860 need to extract the last matching index (which will be the index with
4861 highest value) and use this to index into the data vector.
4862 For the case where there were no matches, the data vector will contain
4863 all default values and the index vector will be all zeros. */
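/* Continuing the sketch above: with INDUCTION_INDEX == {0, 0, 7, 0},
   the REDUC_MAX below gives 7, the EQ comparison produces the mask
   {0, 0, 1, 0}, the VEC_COND keeps only the lane-2 data value (zeros
   elsewhere), and the final unsigned REDUC_MAX extracts that value as
   the scalar result.  */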
4864
4865 /* Get various versions of the type of the vector of indexes. */
4866 tree index_vec_type = TREE_TYPE (induction_index);
4867 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
4868 tree index_scalar_type = TREE_TYPE (index_vec_type);
4869 tree index_vec_cmp_type = build_same_sized_truth_vector_type
4870 (index_vec_type);
4871
4872 /* Get an unsigned integer version of the type of the data vector. */
4873 int scalar_precision
4874 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
4875 tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
4876 tree vectype_unsigned = build_vector_type
4877 (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
4878
4879 /* First we need to create a vector (ZERO_VEC) of zeros and another
4880 vector (MAX_INDEX_VEC) filled with the last matching index, which we
4881 can create using a MAX reduction and then expanding.
4882 In the case where the loop never made any matches, the max index will
4883 be zero. */
4884
4885 /* Vector of {0, 0, 0,...}. */
4886 tree zero_vec = make_ssa_name (vectype);
4887 tree zero_vec_rhs = build_zero_cst (vectype);
4888 gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
4889 gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);
4890
4891 /* Find maximum value from the vector of found indexes. */
4892 tree max_index = make_ssa_name (index_scalar_type);
4893 gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4894 1, induction_index);
4895 gimple_call_set_lhs (max_index_stmt, max_index);
4896 gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
4897
4898 /* Vector of {max_index, max_index, max_index,...}. */
4899 tree max_index_vec = make_ssa_name (index_vec_type);
4900 tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
4901 max_index);
4902 gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
4903 max_index_vec_rhs);
4904 gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
4905
4906 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
4907 with the vector (INDUCTION_INDEX) of found indexes, choosing values
4908 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
4909 otherwise. Only one value should match, resulting in a vector
4910 (VEC_COND) with one data value and the rest zeros.
4911 In the case where the loop never made any matches, every index will
4912 match, resulting in a vector with all data values (which will all be
4913 the default value). */
4914
4915 /* Compare the max index vector to the vector of found indexes to find
4916 the position of the max value. */
4917 tree vec_compare = make_ssa_name (index_vec_cmp_type);
4918 gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
4919 induction_index,
4920 max_index_vec);
4921 gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
4922
4923 /* Use the compare to choose either values from the data vector or
4924 zero. */
4925 tree vec_cond = make_ssa_name (vectype);
4926 gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
4927 vec_compare, new_phi_result,
4928 zero_vec);
4929 gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
4930
4931 /* Finally we need to extract the data value from the vector (VEC_COND)
4932 into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR
4933 reduction, but because this doesn't exist, we can use a MAX reduction
4934 instead. The data value might be signed or a float so we need to cast
4935 it first.
4936 In the case where the loop never made any matches, the data values are
4937 all identical, and so will reduce down correctly. */
4938
4939 /* Make the matched data values unsigned. */
4940 tree vec_cond_cast = make_ssa_name (vectype_unsigned);
4941 tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
4942 vec_cond);
4943 gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
4944 VIEW_CONVERT_EXPR,
4945 vec_cond_cast_rhs);
4946 gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
4947
4948 /* Reduce down to a scalar value. */
4949 tree data_reduc = make_ssa_name (scalar_type_unsigned);
4950 gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4951 1, vec_cond_cast);
4952 gimple_call_set_lhs (data_reduc_stmt, data_reduc);
4953 gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
4954
4955 /* Convert the reduced value back to the result type and set as the
4956 result. */
4957 gimple_seq stmts = NULL;
4958 new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
4959 data_reduc);
4960 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
4961 scalar_results.safe_push (new_temp);
4962 }
4963 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4964 && reduc_fn == IFN_LAST)
4965 {
4966 /* Condition reduction without supported IFN_REDUC_MAX. Generate
4967 idx = 0;
4968 idx_val = induction_index[0];
4969 val = data_reduc[0];
4970 for (idx = 0, val = init, i = 0; i < nelts; ++i)
4971 if (induction_index[i] > idx_val)
4972 val = data_reduc[i], idx_val = induction_index[i];
4973 return val; */
4974
4975 tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
4976 tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
4977 unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
4978 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
4979 /* Enforced by vectorizable_reduction, which ensures we have target
4980 support before allowing a conditional reduction on variable-length
4981 vectors. */
4982 unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
4983 tree idx_val = NULL_TREE, val = NULL_TREE;
4984 for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
4985 {
4986 tree old_idx_val = idx_val;
4987 tree old_val = val;
4988 idx_val = make_ssa_name (idx_eltype);
4989 epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
4990 build3 (BIT_FIELD_REF, idx_eltype,
4991 induction_index,
4992 bitsize_int (el_size),
4993 bitsize_int (off)));
4994 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4995 val = make_ssa_name (data_eltype);
4996 epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
4997 build3 (BIT_FIELD_REF,
4998 data_eltype,
4999 new_phi_result,
5000 bitsize_int (el_size),
5001 bitsize_int (off)));
5002 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5003 if (off != 0)
5004 {
5005 tree new_idx_val = idx_val;
5006 tree new_val = val;
5007 if (off != v_size - el_size)
5008 {
5009 new_idx_val = make_ssa_name (idx_eltype);
5010 epilog_stmt = gimple_build_assign (new_idx_val,
5011 MAX_EXPR, idx_val,
5012 old_idx_val);
5013 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5014 }
5015 new_val = make_ssa_name (data_eltype);
5016 epilog_stmt = gimple_build_assign (new_val,
5017 COND_EXPR,
5018 build2 (GT_EXPR,
5019 boolean_type_node,
5020 idx_val,
5021 old_idx_val),
5022 val, old_val);
5023 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5024 idx_val = new_idx_val;
5025 val = new_val;
5026 }
5027 }
5028 /* Convert the reduced value back to the result type and set as the
5029 result. */
5030 gimple_seq stmts = NULL;
5031 val = gimple_convert (&stmts, scalar_type, val);
5032 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5033 scalar_results.safe_push (val);
5034 }
5035
5036 /* 2.3 Create the reduction code, using one of the three schemes described
5037 above. In SLP we simply need to extract all the elements from the
5038 vector (without reducing them), so we use scalar shifts. */
5039 else if (reduc_fn != IFN_LAST && !slp_reduc)
5040 {
5041 tree tmp;
5042 tree vec_elem_type;
5043
5044 /* Case 1: Create:
5045 v_out2 = reduc_expr <v_out1> */
5046
5047 if (dump_enabled_p ())
5048 dump_printf_loc (MSG_NOTE, vect_location,
5049 "Reduce using direct vector reduction.\n");
5050
5051 vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
5052 if (!useless_type_conversion_p (scalar_type, vec_elem_type))
5053 {
5054 tree tmp_dest
5055 = vect_create_destination_var (scalar_dest, vec_elem_type);
5056 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5057 new_phi_result);
5058 gimple_set_lhs (epilog_stmt, tmp_dest);
5059 new_temp = make_ssa_name (tmp_dest, epilog_stmt);
5060 gimple_set_lhs (epilog_stmt, new_temp);
5061 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5062
5063 epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
5064 new_temp);
5065 }
5066 else
5067 {
5068 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5069 new_phi_result);
5070 gimple_set_lhs (epilog_stmt, new_scalar_dest);
5071 }
5072
5073 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5074 gimple_set_lhs (epilog_stmt, new_temp);
5075 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5076
5077 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5078 == INTEGER_INDUC_COND_REDUCTION)
5079 && !operand_equal_p (initial_def, induc_val, 0))
5080 {
5081 /* Earlier we set the initial value to be a vector of induc_val
5082 values. Check the result and if it is induc_val then replace
5083 it with the original initial value, unless induc_val is
5084 the same as initial_def already. */
5085 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5086 induc_val);
5087
5088 tmp = make_ssa_name (new_scalar_dest);
5089 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5090 initial_def, new_temp);
5091 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5092 new_temp = tmp;
5093 }
5094
5095 scalar_results.safe_push (new_temp);
5096 }
5097 else if (direct_slp_reduc)
5098 {
5099 /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
5100 with the elements for other SLP statements replaced with the
5101 neutral value. We can then do a normal reduction on each vector. */
5102
5103 /* Enforced by vectorizable_reduction. */
5104 gcc_assert (new_phis.length () == 1);
5105 gcc_assert (pow2p_hwi (group_size));
5106
5107 slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
5108 vec<stmt_vec_info> orig_phis
5109 = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
5110 gimple_seq seq = NULL;
5111
5112 /* Build a vector {0, 1, 2, ...}, with the same number of elements
5113 and the same element size as VECTYPE. */
5114 tree index = build_index_vector (vectype, 0, 1);
5115 tree index_type = TREE_TYPE (index);
5116 tree index_elt_type = TREE_TYPE (index_type);
5117 tree mask_type = build_same_sized_truth_vector_type (index_type);
5118
5119 /* Create a vector that, for each element, identifies which of
5120 the REDUC_GROUP_SIZE results should use it. */
5121 tree index_mask = build_int_cst (index_elt_type, group_size - 1);
5122 index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
5123 build_vector_from_val (index_type, index_mask));
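	  /* For example, with REDUC_GROUP_SIZE == 2 the index vector
	     {0, 1, 2, 3, ...} becomes {0, 1, 0, 1, ...}, i.e. element J
	     belongs to SLP statement J % REDUC_GROUP_SIZE (the group size
	     is a power of two, so the BIT_AND_EXPR acts as a modulo).  */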
5124
5125 /* Get a neutral vector value. This is simply a splat of the neutral
5126 scalar value if we have one, otherwise the initial scalar value
5127 is itself a neutral value. */
5128 tree vector_identity = NULL_TREE;
5129 if (neutral_op)
5130 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5131 neutral_op);
5132 for (unsigned int i = 0; i < group_size; ++i)
5133 {
5134 /* If there's no universal neutral value, we can use the
5135 initial scalar value from the original PHI. This is used
5136 for MIN and MAX reduction, for example. */
5137 if (!neutral_op)
5138 {
5139 tree scalar_value
5140 = PHI_ARG_DEF_FROM_EDGE (orig_phis[i]->stmt,
5141 loop_preheader_edge (loop));
5142 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5143 scalar_value);
5144 }
5145
5146 /* Calculate the equivalent of:
5147
5148 sel[j] = (index[j] == i);
5149
5150 which selects the elements of NEW_PHI_RESULT that should
5151 be included in the result. */
5152 tree compare_val = build_int_cst (index_elt_type, i);
5153 compare_val = build_vector_from_val (index_type, compare_val);
5154 tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
5155 index, compare_val);
5156
5157 /* Calculate the equivalent of:
5158
5159 vec = sel ? new_phi_result : vector_identity;
5160
5161 VEC is now suitable for a full vector reduction. */
5162 tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
5163 sel, new_phi_result, vector_identity);
5164
5165 /* Do the reduction and convert it to the appropriate type. */
5166 tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
5167 TREE_TYPE (vectype), vec);
5168 scalar = gimple_convert (&seq, scalar_type, scalar);
5169 scalar_results.safe_push (scalar);
5170 }
5171 gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
5172 }
5173 else
5174 {
5175 bool reduce_with_shift;
5176 tree vec_temp;
5177
5178 /* COND reductions all do the final reduction with MAX_EXPR
5179 or MIN_EXPR. */
5180 if (code == COND_EXPR)
5181 {
5182 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5183 == INTEGER_INDUC_COND_REDUCTION)
5184 code = induc_code;
5185 else
5186 code = MAX_EXPR;
5187 }
5188
5189 /* See if the target wants to do the final (shift) reduction
5190 in a vector mode of smaller size and first reduce upper/lower
5191 halves against each other. */
5192 enum machine_mode mode1 = mode;
5193 tree vectype1 = vectype;
5194 unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
5195 unsigned sz1 = sz;
5196 if (!slp_reduc
5197 && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
5198 sz1 = GET_MODE_SIZE (mode1).to_constant ();
5199
5200 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1);
5201 reduce_with_shift = have_whole_vector_shift (mode1);
5202 if (!VECTOR_MODE_P (mode1))
5203 reduce_with_shift = false;
5204 else
5205 {
5206 optab optab = optab_for_tree_code (code, vectype1, optab_default);
5207 if (optab_handler (optab, mode1) == CODE_FOR_nothing)
5208 reduce_with_shift = false;
5209 }
5210
5211 /* First reduce the vector to the vector size we should do the shift
5212 reduction on, by repeatedly combining its upper and lower halves. */
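	  /* Illustrative example, assuming 32-bit ints and a target whose
	     split_reduction hook returns a 128-bit mode: a V8SI accumulator
	     is first reduced to a V4SI by combining its upper and lower
	     128-bit halves with CODE, and the shift (or scalar) reduction
	     below then operates on that V4SI value.  */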
5213 new_temp = new_phi_result;
5214 while (sz > sz1)
5215 {
5216 gcc_assert (!slp_reduc);
5217 sz /= 2;
5218 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz);
5219
5220 /* The target has to make sure we support lowpart/highpart
5221 extraction, either via a direct vector extract or through
5222 punning to an integer mode. */
5223 tree dst1, dst2;
5224 if (convert_optab_handler (vec_extract_optab,
5225 TYPE_MODE (TREE_TYPE (new_temp)),
5226 TYPE_MODE (vectype1))
5227 != CODE_FOR_nothing)
5228 {
5229 /* Extract sub-vectors directly once vec_extract becomes
5230 a conversion optab. */
5231 dst1 = make_ssa_name (vectype1);
5232 epilog_stmt
5233 = gimple_build_assign (dst1, BIT_FIELD_REF,
5234 build3 (BIT_FIELD_REF, vectype1,
5235 new_temp, TYPE_SIZE (vectype1),
5236 bitsize_int (0)));
5237 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5238 dst2 = make_ssa_name (vectype1);
5239 epilog_stmt
5240 = gimple_build_assign (dst2, BIT_FIELD_REF,
5241 build3 (BIT_FIELD_REF, vectype1,
5242 new_temp, TYPE_SIZE (vectype1),
5243 bitsize_int (sz * BITS_PER_UNIT)));
5244 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5245 }
5246 else
5247 {
5248 /* Extract via punning to appropriately sized integer mode
5249 vector. */
5250 tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT,
5251 1);
5252 tree etype = build_vector_type (eltype, 2);
5253 gcc_assert (convert_optab_handler (vec_extract_optab,
5254 TYPE_MODE (etype),
5255 TYPE_MODE (eltype))
5256 != CODE_FOR_nothing);
5257 tree tem = make_ssa_name (etype);
5258 epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
5259 build1 (VIEW_CONVERT_EXPR,
5260 etype, new_temp));
5261 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5262 new_temp = tem;
5263 tem = make_ssa_name (eltype);
5264 epilog_stmt
5265 = gimple_build_assign (tem, BIT_FIELD_REF,
5266 build3 (BIT_FIELD_REF, eltype,
5267 new_temp, TYPE_SIZE (eltype),
5268 bitsize_int (0)));
5269 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5270 dst1 = make_ssa_name (vectype1);
5271 epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
5272 build1 (VIEW_CONVERT_EXPR,
5273 vectype1, tem));
5274 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5275 tem = make_ssa_name (eltype);
5276 epilog_stmt
5277 = gimple_build_assign (tem, BIT_FIELD_REF,
5278 build3 (BIT_FIELD_REF, eltype,
5279 new_temp, TYPE_SIZE (eltype),
5280 bitsize_int (sz * BITS_PER_UNIT)));
5281 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5282 dst2 = make_ssa_name (vectype1);
5283 epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
5284 build1 (VIEW_CONVERT_EXPR,
5285 vectype1, tem));
5286 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5287 }
5288
5289 new_temp = make_ssa_name (vectype1);
5290 epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
5291 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5292 }
5293
5294 if (reduce_with_shift && !slp_reduc)
5295 {
5296 int element_bitsize = tree_to_uhwi (bitsize);
5297 /* Enforced by vectorizable_reduction, which disallows SLP reductions
5298 for variable-length vectors and also requires direct target support
5299 for loop reductions. */
5300 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5301 int nelements = vec_size_in_bits / element_bitsize;
5302 vec_perm_builder sel;
5303 vec_perm_indices indices;
5304
5305 int elt_offset;
5306
5307 tree zero_vec = build_zero_cst (vectype1);
5308 /* Case 2: Create:
5309 for (offset = nelements/2; offset >= 1; offset/=2)
5310 {
5311 Create: va' = vec_shift <va, offset>
5312 Create: va = vop <va, va'>
5313 } */
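	    /* For example, with nelements == 8 the loop below runs with
	       elt_offset == 4, 2 and 1, after which the reduced value sits
	       in element 0 of the vector, ready for the extraction in
	       step 2.4.  */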
5314
5315 tree rhs;
5316
5317 if (dump_enabled_p ())
5318 dump_printf_loc (MSG_NOTE, vect_location,
5319 "Reduce using vector shifts\n");
5320
5321 mode1 = TYPE_MODE (vectype1);
5322 vec_dest = vect_create_destination_var (scalar_dest, vectype1);
5323 for (elt_offset = nelements / 2;
5324 elt_offset >= 1;
5325 elt_offset /= 2)
5326 {
5327 calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
5328 indices.new_vector (sel, 2, nelements);
5329 tree mask = vect_gen_perm_mask_any (vectype1, indices);
5330 epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
5331 new_temp, zero_vec, mask);
5332 new_name = make_ssa_name (vec_dest, epilog_stmt);
5333 gimple_assign_set_lhs (epilog_stmt, new_name);
5334 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5335
5336 epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
5337 new_temp);
5338 new_temp = make_ssa_name (vec_dest, epilog_stmt);
5339 gimple_assign_set_lhs (epilog_stmt, new_temp);
5340 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5341 }
5342
5343 /* 2.4 Extract the final scalar result. Create:
5344 s_out3 = extract_field <v_out2, bitpos> */
5345
5346 if (dump_enabled_p ())
5347 dump_printf_loc (MSG_NOTE, vect_location,
5348 "extract scalar result\n");
5349
5350 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
5351 bitsize, bitsize_zero_node);
5352 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5353 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5354 gimple_assign_set_lhs (epilog_stmt, new_temp);
5355 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5356 scalar_results.safe_push (new_temp);
5357 }
5358 else
5359 {
5360 /* Case 3: Create:
5361 s = extract_field <v_out2, 0>
5362 for (offset = element_size;
5363 offset < vector_size;
5364 offset += element_size;)
5365 {
5366 Create: s' = extract_field <v_out2, offset>
5367 Create: s = op <s, s'> // For non SLP cases
5368 } */
5369
5370 if (dump_enabled_p ())
5371 dump_printf_loc (MSG_NOTE, vect_location,
5372 "Reduce using scalar code.\n");
5373
5374 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5375 int element_bitsize = tree_to_uhwi (bitsize);
5376 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
5377 {
5378 int bit_offset;
5379 if (gimple_code (new_phi) == GIMPLE_PHI)
5380 vec_temp = PHI_RESULT (new_phi);
5381 else
5382 vec_temp = gimple_assign_lhs (new_phi);
5383 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
5384 bitsize_zero_node);
5385 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5386 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5387 gimple_assign_set_lhs (epilog_stmt, new_temp);
5388 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5389
5390 /* In SLP we don't need to apply the reduction operation, so we just
5391 collect s' values in SCALAR_RESULTS. */
5392 if (slp_reduc)
5393 scalar_results.safe_push (new_temp);
5394
5395 for (bit_offset = element_bitsize;
5396 bit_offset < vec_size_in_bits;
5397 bit_offset += element_bitsize)
5398 {
5399 tree bitpos = bitsize_int (bit_offset);
5400 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
5401 bitsize, bitpos);
5402
5403 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5404 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
5405 gimple_assign_set_lhs (epilog_stmt, new_name);
5406 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5407
5408 if (slp_reduc)
5409 {
5410 /* In SLP we don't need to apply the reduction operation, so
5411 we just collect s' values in SCALAR_RESULTS. */
5412 new_temp = new_name;
5413 scalar_results.safe_push (new_name);
5414 }
5415 else
5416 {
5417 epilog_stmt = gimple_build_assign (new_scalar_dest, code,
5418 new_name, new_temp);
5419 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5420 gimple_assign_set_lhs (epilog_stmt, new_temp);
5421 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5422 }
5423 }
5424 }
5425
5426 /* The only case where we need to reduce scalar results in SLP is
5427 unrolling. If the size of SCALAR_RESULTS is greater than
5428 REDUC_GROUP_SIZE, we reduce them by combining elements modulo
5429 REDUC_GROUP_SIZE. */
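	  /* For example, with REDUC_GROUP_SIZE == 2 and four scalar results
	     s0..s3, the loop below computes s0 = s0 CODE s2 and
	     s1 = s1 CODE s3.  */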
5430 if (slp_reduc)
5431 {
5432 tree res, first_res, new_res;
5433 gimple *new_stmt;
5434
5435 /* Reduce multiple scalar results in case of SLP unrolling. */
5436 for (j = group_size; scalar_results.iterate (j, &res);
5437 j++)
5438 {
5439 first_res = scalar_results[j % group_size];
5440 new_stmt = gimple_build_assign (new_scalar_dest, code,
5441 first_res, res);
5442 new_res = make_ssa_name (new_scalar_dest, new_stmt);
5443 gimple_assign_set_lhs (new_stmt, new_res);
5444 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
5445 scalar_results[j % group_size] = new_res;
5446 }
5447 }
5448 else
5449 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
5450 scalar_results.safe_push (new_temp);
5451 }
5452
5453 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5454 == INTEGER_INDUC_COND_REDUCTION)
5455 && !operand_equal_p (initial_def, induc_val, 0))
5456 {
5457 /* Earlier we set the initial value to be a vector of induc_val
5458 values. Check the result and if it is induc_val then replace
5459 it with the original initial value, unless induc_val is
5460 the same as initial_def already. */
5461 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5462 induc_val);
5463
5464 tree tmp = make_ssa_name (new_scalar_dest);
5465 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5466 initial_def, new_temp);
5467 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5468 scalar_results[0] = tmp;
5469 }
5470 }
5471
5472 vect_finalize_reduction:
5473
5474 if (double_reduc)
5475 loop = loop->inner;
5476
5477 /* 2.5 Adjust the final result by the initial value of the reduction
5478 variable. (When such adjustment is not needed, then
5479 'adjustment_def' is zero). For example, if code is PLUS we create:
5480 new_temp = loop_exit_def + adjustment_def */
5481
5482 if (adjustment_def)
5483 {
5484 gcc_assert (!slp_reduc);
5485 if (nested_in_vect_loop)
5486 {
5487 new_phi = new_phis[0];
5488 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
5489 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
5490 new_dest = vect_create_destination_var (scalar_dest, vectype);
5491 }
5492 else
5493 {
5494 new_temp = scalar_results[0];
5495 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
5496 expr = build2 (code, scalar_type, new_temp, adjustment_def);
5497 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
5498 }
5499
5500 epilog_stmt = gimple_build_assign (new_dest, expr);
5501 new_temp = make_ssa_name (new_dest, epilog_stmt);
5502 gimple_assign_set_lhs (epilog_stmt, new_temp);
5503 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5504 if (nested_in_vect_loop)
5505 {
5506 stmt_vec_info epilog_stmt_info = loop_vinfo->add_stmt (epilog_stmt);
5507 STMT_VINFO_RELATED_STMT (epilog_stmt_info)
5508 = STMT_VINFO_RELATED_STMT (loop_vinfo->lookup_stmt (new_phi));
5509
5510 if (!double_reduc)
5511 scalar_results.quick_push (new_temp);
5512 else
5513 scalar_results[0] = new_temp;
5514 }
5515 else
5516 scalar_results[0] = new_temp;
5517
5518 new_phis[0] = epilog_stmt;
5519 }
5520
5521 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
5522 phis with new adjusted scalar results, i.e., replace use <s_out0>
5523 with use <s_out4>.
5524
5525 Transform:
5526 loop_exit:
5527 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5528 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5529 v_out2 = reduce <v_out1>
5530 s_out3 = extract_field <v_out2, 0>
5531 s_out4 = adjust_result <s_out3>
5532 use <s_out0>
5533 use <s_out0>
5534
5535 into:
5536
5537 loop_exit:
5538 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5539 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5540 v_out2 = reduce <v_out1>
5541 s_out3 = extract_field <v_out2, 0>
5542 s_out4 = adjust_result <s_out3>
5543 use <s_out4>
5544 use <s_out4> */
5545
5546
5547 /* In an SLP reduction chain we reduce the vector results into one vector
5548 if necessary, hence we set REDUC_GROUP_SIZE to 1 here. SCALAR_DEST is the
5549 LHS of the last stmt in the reduction chain, since we are looking for
5550 the loop exit phi node. */
5551 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
5552 {
5553 stmt_vec_info dest_stmt_info
5554 = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5555 /* Handle reduction patterns. */
5556 if (STMT_VINFO_RELATED_STMT (dest_stmt_info))
5557 dest_stmt_info = STMT_VINFO_RELATED_STMT (dest_stmt_info);
5558
5559 scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
5560 group_size = 1;
5561 }
5562
5563 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
5564 case that REDUC_GROUP_SIZE is greater than vectorization factor).
5565 Therefore, we need to match SCALAR_RESULTS with corresponding statements.
5566 The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
5567 correspond to the first vector stmt, etc.
5568 (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)). */
5569 if (group_size > new_phis.length ())
5570 {
5571 ratio = group_size / new_phis.length ();
5572 gcc_assert (!(group_size % new_phis.length ()));
5573 }
5574 else
5575 ratio = 1;
5576
5577 stmt_vec_info epilog_stmt_info = NULL;
5578 for (k = 0; k < group_size; k++)
5579 {
5580 if (k % ratio == 0)
5581 {
5582 epilog_stmt_info = loop_vinfo->lookup_stmt (new_phis[k / ratio]);
5583 reduction_phi_info = reduction_phis[k / ratio];
5584 if (double_reduc)
5585 inner_phi = inner_phis[k / ratio];
5586 }
5587
5588 if (slp_reduc)
5589 {
5590 stmt_vec_info scalar_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[k];
5591
5592 orig_stmt_info = STMT_VINFO_RELATED_STMT (scalar_stmt_info);
5593 /* SLP statements can't participate in patterns. */
5594 gcc_assert (!orig_stmt_info);
5595 scalar_dest = gimple_assign_lhs (scalar_stmt_info->stmt);
5596 }
5597
5598 phis.create (3);
5599 /* Find the loop-closed-use at the loop exit of the original scalar
5600 result. (The reduction result is expected to have two immediate uses -
5601 one at the latch block, and one at the loop exit). */
5602 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5603 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
5604 && !is_gimple_debug (USE_STMT (use_p)))
5605 phis.safe_push (USE_STMT (use_p));
5606
5607 /* While we expect to have found an exit_phi because of loop-closed-ssa
5608 form we can end up without one if the scalar cycle is dead. */
5609
5610 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5611 {
5612 if (outer_loop)
5613 {
5614 stmt_vec_info exit_phi_vinfo
5615 = loop_vinfo->lookup_stmt (exit_phi);
5616 gphi *vect_phi;
5617
5618 /* FORNOW. Currently not supporting the case that an inner-loop
5619 reduction is not used in the outer-loop (but only outside the
5620 outer-loop), unless it is a double reduction. */
5621 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5622 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
5623 || double_reduc);
5624
5625 if (double_reduc)
5626 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
5627 else
5628 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt_info;
5629 if (!double_reduc
5630 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
5631 != vect_double_reduction_def)
5632 continue;
5633
5634 /* Handle double reduction:
5635
5636 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
5637 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
5638 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
5639 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
5640
5641 At that point the regular reduction (stmt2 and stmt3) is
5642 already vectorized, as well as the exit phi node, stmt4.
5643 Here we vectorize the phi node of double reduction, stmt1, and
5644 update all relevant statements. */
5645
5646 /* Go through all the uses of s2 to find double reduction phi
5647 node, i.e., stmt1 above. */
5648 orig_name = PHI_RESULT (exit_phi);
5649 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5650 {
5651 stmt_vec_info use_stmt_vinfo;
5652 tree vect_phi_init, preheader_arg, vect_phi_res;
5653 basic_block bb = gimple_bb (use_stmt);
5654
5655 /* Check that USE_STMT is really a double reduction phi
5656 node. */
5657 if (gimple_code (use_stmt) != GIMPLE_PHI
5658 || gimple_phi_num_args (use_stmt) != 2
5659 || bb->loop_father != outer_loop)
5660 continue;
5661 use_stmt_vinfo = loop_vinfo->lookup_stmt (use_stmt);
5662 if (!use_stmt_vinfo
5663 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
5664 != vect_double_reduction_def)
5665 continue;
5666
5667 /* Create vector phi node for double reduction:
5668 vs1 = phi <vs0, vs2>
5669 vs1 was created previously in this function by a call to
5670 vect_get_vec_def_for_operand and is stored in
5671 vec_initial_def;
5672 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
5673 vs0 is created here. */
5674
5675 /* Create vector phi node. */
5676 vect_phi = create_phi_node (vec_initial_def, bb);
5677 loop_vec_info_for_loop (outer_loop)->add_stmt (vect_phi);
5678
5679 /* Create vs0 - initial def of the double reduction phi. */
5680 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
5681 loop_preheader_edge (outer_loop));
5682 vect_phi_init = get_initial_def_for_reduction
5683 (stmt_info, preheader_arg, NULL);
5684
5685 /* Update phi node arguments with vs0 and vs2. */
5686 add_phi_arg (vect_phi, vect_phi_init,
5687 loop_preheader_edge (outer_loop),
5688 UNKNOWN_LOCATION);
5689 add_phi_arg (vect_phi, PHI_RESULT (inner_phi->stmt),
5690 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
5691 if (dump_enabled_p ())
5692 {
5693 dump_printf_loc (MSG_NOTE, vect_location,
5694 "created double reduction phi node: ");
5695 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
5696 }
5697
5698 vect_phi_res = PHI_RESULT (vect_phi);
5699
5700 /* Replace the use, i.e., set the correct vs1 in the regular
5701 reduction phi node. FORNOW, NCOPIES is always 1, so the
5702 loop is redundant. */
5703 stmt_vec_info use_info = reduction_phi_info;
5704 for (j = 0; j < ncopies; j++)
5705 {
5706 edge pr_edge = loop_preheader_edge (loop);
5707 SET_PHI_ARG_DEF (as_a <gphi *> (use_info->stmt),
5708 pr_edge->dest_idx, vect_phi_res);
5709 use_info = STMT_VINFO_RELATED_STMT (use_info);
5710 }
5711 }
5712 }
5713 }
5714
5715 phis.release ();
5716 if (nested_in_vect_loop)
5717 {
5718 if (double_reduc)
5719 loop = outer_loop;
5720 else
5721 continue;
5722 }
5723
5724 phis.create (3);
5725 /* Find the loop-closed-use at the loop exit of the original scalar
5726 result. (The reduction result is expected to have two immediate uses,
5727 one at the latch block, and one at the loop exit). For double
5728 reductions we are looking for exit phis of the outer loop. */
5729 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5730 {
5731 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
5732 {
5733 if (!is_gimple_debug (USE_STMT (use_p)))
5734 phis.safe_push (USE_STMT (use_p));
5735 }
5736 else
5737 {
5738 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
5739 {
5740 tree phi_res = PHI_RESULT (USE_STMT (use_p));
5741
5742 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
5743 {
5744 if (!flow_bb_inside_loop_p (loop,
5745 gimple_bb (USE_STMT (phi_use_p)))
5746 && !is_gimple_debug (USE_STMT (phi_use_p)))
5747 phis.safe_push (USE_STMT (phi_use_p));
5748 }
5749 }
5750 }
5751 }
5752
5753 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5754 {
5755 /* Replace the uses: */
5756 orig_name = PHI_RESULT (exit_phi);
5757 scalar_result = scalar_results[k];
5758 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5759 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5760 SET_USE (use_p, scalar_result);
5761 }
5762
5763 phis.release ();
5764 }
5765 }
5766
5767 /* Return a vector of type VECTYPE that is equal to the vector select
5768 operation "MASK ? VEC : IDENTITY". Insert the select statements
5769 before GSI. */
5770
5771 static tree
5772 merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
5773 tree vec, tree identity)
5774 {
5775 tree cond = make_temp_ssa_name (vectype, NULL, "cond");
5776 gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
5777 mask, vec, identity);
5778 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5779 return cond;
5780 }
5781
5782 /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
5783 order, starting with LHS. Insert the extraction statements before GSI and
5784 associate the new scalar SSA names with variable SCALAR_DEST.
5785 Return the SSA name for the result. */
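/* For example, for a four-element VECTOR_RHS the loop below emits the
   left-to-right chain (((LHS CODE V[0]) CODE V[1]) CODE V[2]) CODE V[3]
   and returns the SSA name of the final result.  */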
5786
5787 static tree
5788 vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
5789 tree_code code, tree lhs, tree vector_rhs)
5790 {
5791 tree vectype = TREE_TYPE (vector_rhs);
5792 tree scalar_type = TREE_TYPE (vectype);
5793 tree bitsize = TYPE_SIZE (scalar_type);
5794 unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
5795 unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
5796
5797 for (unsigned HOST_WIDE_INT bit_offset = 0;
5798 bit_offset < vec_size_in_bits;
5799 bit_offset += element_bitsize)
5800 {
5801 tree bitpos = bitsize_int (bit_offset);
5802 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
5803 bitsize, bitpos);
5804
5805 gassign *stmt = gimple_build_assign (scalar_dest, rhs);
5806 rhs = make_ssa_name (scalar_dest, stmt);
5807 gimple_assign_set_lhs (stmt, rhs);
5808 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5809
5810 stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
5811 tree new_name = make_ssa_name (scalar_dest, stmt);
5812 gimple_assign_set_lhs (stmt, new_name);
5813 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5814 lhs = new_name;
5815 }
5816 return lhs;
5817 }
5818
5819 /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT is the
5820 statement that sets the live-out value. REDUC_DEF_STMT is the phi
5821 statement. CODE is the operation performed by STMT and OPS are
5822 its scalar operands. REDUC_INDEX is the index of the operand in
5823 OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
5824 implements in-order reduction, or IFN_LAST if we should open-code it.
5825 VECTYPE_IN is the type of the vector input. MASKS specifies the masks
5826 that should be used to control the operation in a fully-masked loop. */
5827
5828 static bool
5829 vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
5830 stmt_vec_info *vec_stmt, slp_tree slp_node,
5831 gimple *reduc_def_stmt,
5832 tree_code code, internal_fn reduc_fn,
5833 tree ops[3], tree vectype_in,
5834 int reduc_index, vec_loop_masks *masks)
5835 {
5836 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5837 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5838 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5839 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5840 stmt_vec_info new_stmt_info = NULL;
5841
5842 int ncopies;
5843 if (slp_node)
5844 ncopies = 1;
5845 else
5846 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
5847
5848 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
5849 gcc_assert (ncopies == 1);
5850 gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
5851 gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
5852 gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5853 == FOLD_LEFT_REDUCTION);
5854
5855 if (slp_node)
5856 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
5857 TYPE_VECTOR_SUBPARTS (vectype_in)));
5858
5859 tree op0 = ops[1 - reduc_index];
5860
5861 int group_size = 1;
5862 stmt_vec_info scalar_dest_def_info;
5863 auto_vec<tree> vec_oprnds0;
5864 if (slp_node)
5865 {
5866 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5867 slp_node);
5868 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
5869 scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5870 }
5871 else
5872 {
5873 tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt_info);
5874 vec_oprnds0.create (1);
5875 vec_oprnds0.quick_push (loop_vec_def0);
5876 scalar_dest_def_info = stmt_info;
5877 }
5878
5879 tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);
5880 tree scalar_type = TREE_TYPE (scalar_dest);
5881 tree reduc_var = gimple_phi_result (reduc_def_stmt);
5882
5883 int vec_num = vec_oprnds0.length ();
5884 gcc_assert (vec_num == 1 || slp_node);
5885 tree vec_elem_type = TREE_TYPE (vectype_out);
5886 gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
5887
5888 tree vector_identity = NULL_TREE;
5889 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5890 vector_identity = build_zero_cst (vectype_out);
5891
5892 tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
5893 int i;
5894 tree def0;
5895 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5896 {
5897 gimple *new_stmt;
5898 tree mask = NULL_TREE;
5899 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5900 mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
5901
5902 /* Handle MINUS by adding the negative. */
5903 if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
5904 {
5905 tree negated = make_ssa_name (vectype_out);
5906 new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
5907 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5908 def0 = negated;
5909 }
5910
5911 if (mask)
5912 def0 = merge_with_identity (gsi, mask, vectype_out, def0,
5913 vector_identity);
5914
5915 /* On the first iteration the input is simply the scalar phi
5916 result, and for subsequent iterations it is the output of
5917 the preceding operation. */
5918 if (reduc_fn != IFN_LAST)
5919 {
5920 new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0);
5921 /* For chained SLP reductions the output of the previous reduction
5922 operation serves as the input of the next. For the final statement
5923 the output cannot be a temporary - we reuse the original
5924 scalar destination of the last statement. */
5925 if (i != vec_num - 1)
5926 {
5927 gimple_set_lhs (new_stmt, scalar_dest_var);
5928 reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
5929 gimple_set_lhs (new_stmt, reduc_var);
5930 }
5931 }
5932 else
5933 {
5934 reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
5935 reduc_var, def0);
5936 new_stmt = SSA_NAME_DEF_STMT (reduc_var);
5937 /* Remove the statement, so that we can use the same code paths
5938 as for statements that we've just created. */
5939 gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
5940 gsi_remove (&tmp_gsi, false);
5941 }
5942
5943 if (i == vec_num - 1)
5944 {
5945 gimple_set_lhs (new_stmt, scalar_dest);
5946 new_stmt_info = vect_finish_replace_stmt (scalar_dest_def_info,
5947 new_stmt);
5948 }
5949 else
5950 new_stmt_info = vect_finish_stmt_generation (scalar_dest_def_info,
5951 new_stmt, gsi);
5952
5953 if (slp_node)
5954 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5955 }
5956
5957 if (!slp_node)
5958 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5959
5960 return true;
5961 }
5962
5963 /* Function is_nonwrapping_integer_induction.
5964
5965 Check if STMT (which is part of loop LOOP) is an integer induction
5966 whose value does not wrap (overflow). */
5967
5968 static bool
5969 is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
5970 {
5971 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
5972 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
5973 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
5974 tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
5975 widest_int ni, max_loop_value, lhs_max;
5976 wi::overflow_type overflow = wi::OVF_NONE;
5977
5978 /* Make sure the loop is integer based. */
5979 if (TREE_CODE (base) != INTEGER_CST
5980 || TREE_CODE (step) != INTEGER_CST)
5981 return false;
5982
5983 /* Check that the max size of the loop will not wrap. */
5984
5985 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
5986 return true;
5987
5988 if (! max_stmt_executions (loop, &ni))
5989 return false;
5990
5991 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
5992 &overflow);
5993 if (overflow)
5994 return false;
5995
5996 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
5997 TYPE_SIGN (lhs_type), &overflow);
5998 if (overflow)
5999 return false;
6000
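  /* MAX_LOOP_VALUE is now BASE + STEP * NI, i.e. the value of the
     induction after NI latch iterations; the induction cannot wrap if
     that value is representable in LHS_TYPE.  */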
6001 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
6002 <= TYPE_PRECISION (lhs_type));
6003 }
6004
6005 /* Function vectorizable_reduction.
6006
6007 Check if STMT performs a reduction operation that can be vectorized.
6008 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6009 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6010 Return FALSE if not a vectorizable STMT, TRUE otherwise.
6011
6012 This function also handles reduction idioms (patterns) that have been
6013 recognized in advance during vect_pattern_recog. In this case, STMT may be
6014 of this form:
6015 X = pattern_expr (arg0, arg1, ..., X)
6016 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
6017 sequence that had been detected and replaced by the pattern-stmt (STMT).
6018
6019 This function also handles reduction of condition expressions, for example:
6020 for (int i = 0; i < N; i++)
6021 if (a[i] < value)
6022 last = a[i];
6023 This is handled by vectorizing the loop and creating an additional vector
6024 containing the loop indexes for which "a[i] < value" was true. In the
6025 function epilogue this is reduced to a single max value and then used to
6026 index into the vector of results.
6027
6028 In some cases of reduction patterns, the type of the reduction variable X is
6029 different than the type of the other arguments of STMT.
6030 In such cases, the vectype that is used when transforming STMT into a vector
6031 stmt is different than the vectype that is used to determine the
6032 vectorization factor, because it consists of a different number of elements
6033 than the actual number of elements that are being operated upon in parallel.
6034
6035 For example, consider an accumulation of shorts into an int accumulator.
6036 On some targets it's possible to vectorize this pattern operating on 8
6037 shorts at a time (hence, the vectype for purposes of determining the
6038 vectorization factor should be V8HI); on the other hand, the vectype that
6039 is used to create the vector form is actually V4SI (the type of the result).
6040
6041 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
6042 indicates what is the actual level of parallelism (V8HI in the example), so
6043 that the right vectorization factor would be derived. This vectype
6044 corresponds to the type of arguments to the reduction stmt, and should *NOT*
6045 be used to create the vectorized stmt. The right vectype for the vectorized
6046 stmt is obtained from the type of the result X:
6047 get_vectype_for_scalar_type (TREE_TYPE (X))
6048
6049 This means that, contrary to "regular" reductions (or "regular" stmts in
6050 general), the following equation:
6051 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
6052 does *NOT* necessarily hold for reduction patterns. */
6053
6054 bool
6055 vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
6056 stmt_vec_info *vec_stmt, slp_tree slp_node,
6057 slp_instance slp_node_instance,
6058 stmt_vector_for_cost *cost_vec)
6059 {
6060 tree vec_dest;
6061 tree scalar_dest;
6062 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6063 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
6064 tree vectype_in = NULL_TREE;
6065 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6066 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6067 enum tree_code code, orig_code;
6068 internal_fn reduc_fn;
6069 machine_mode vec_mode;
6070 int op_type;
6071 optab optab;
6072 tree new_temp = NULL_TREE;
6073 enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
6074 stmt_vec_info cond_stmt_vinfo = NULL;
6075 enum tree_code cond_reduc_op_code = ERROR_MARK;
6076 tree scalar_type;
6077 bool is_simple_use;
6078 int i;
6079 int ncopies;
6080 int epilog_copies;
6081 stmt_vec_info prev_stmt_info, prev_phi_info;
6082 bool single_defuse_cycle = false;
6083 stmt_vec_info new_stmt_info = NULL;
6084 int j;
6085 tree ops[3];
6086 enum vect_def_type dts[3];
6087 bool nested_cycle = false, found_nested_cycle_def = false;
6088 bool double_reduc = false;
6089 basic_block def_bb;
6090 struct loop * def_stmt_loop;
6091 tree def_arg;
6092 auto_vec<tree> vec_oprnds0;
6093 auto_vec<tree> vec_oprnds1;
6094 auto_vec<tree> vec_oprnds2;
6095 auto_vec<tree> vect_defs;
6096 auto_vec<stmt_vec_info> phis;
6097 int vec_num;
6098 tree def0, tem;
6099 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
6100 tree cond_reduc_val = NULL_TREE;
6101
6102 /* Make sure it was already recognized as a reduction computation. */
6103 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
6104 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
6105 return false;
6106
6107 if (nested_in_vect_loop_p (loop, stmt_info))
6108 {
6109 loop = loop->inner;
6110 nested_cycle = true;
6111 }
6112
6113 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6114 gcc_assert (slp_node
6115 && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info);
6116
6117 if (gphi *phi = dyn_cast <gphi *> (stmt_info->stmt))
6118 {
6119 tree phi_result = gimple_phi_result (phi);
6120 /* Analysis is fully done on the reduction stmt invocation. */
6121 if (! vec_stmt)
6122 {
6123 if (slp_node)
6124 slp_node_instance->reduc_phis = slp_node;
6125
6126 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6127 return true;
6128 }
6129
6130 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6131 /* Leave the scalar phi in place. Note that checking
6132 STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
6133 for reductions involving a single statement. */
6134 return true;
6135
6136 stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
6137 if (STMT_VINFO_IN_PATTERN_P (reduc_stmt_info))
6138 reduc_stmt_info = STMT_VINFO_RELATED_STMT (reduc_stmt_info);
6139
6140 if (STMT_VINFO_VEC_REDUCTION_TYPE (reduc_stmt_info)
6141 == EXTRACT_LAST_REDUCTION)
6142 /* Leave the scalar phi in place. */
6143 return true;
6144
6145 gassign *reduc_stmt = as_a <gassign *> (reduc_stmt_info->stmt);
6146 for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
6147 {
6148 tree op = gimple_op (reduc_stmt, k);
6149 if (op == phi_result)
6150 continue;
6151 if (k == 1
6152 && gimple_assign_rhs_code (reduc_stmt) == COND_EXPR)
6153 continue;
6154 if (!vectype_in
6155 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6156 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op)))))
6157 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op));
6158 break;
6159 }
6160 gcc_assert (vectype_in);
6161
6162 if (slp_node)
6163 ncopies = 1;
6164 else
6165 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6166
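      /* Check whether this is a single def-use cycle: NCOPIES > 1 but the
	 PHI result is used only by the reduction stmt itself (possibly via
	 its pattern stmt).  In that case only the first copy of the
	 reduction PHIs is created in the loop below.  */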
6167 stmt_vec_info use_stmt_info;
6168 if (ncopies > 1
6169 && STMT_VINFO_RELEVANT (reduc_stmt_info) <= vect_used_only_live
6170 && (use_stmt_info = loop_vinfo->lookup_single_use (phi_result))
6171 && (use_stmt_info == reduc_stmt_info
6172 || STMT_VINFO_RELATED_STMT (use_stmt_info) == reduc_stmt_info))
6173 single_defuse_cycle = true;
6174
6175 /* Create the destination vector */
6176 scalar_dest = gimple_assign_lhs (reduc_stmt);
6177 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6178
6179 if (slp_node)
6180 /* The size vect_schedule_slp_instance computes is off for us. */
6181 vec_num = vect_get_num_vectors
6182 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6183 * SLP_TREE_SCALAR_STMTS (slp_node).length (),
6184 vectype_in);
6185 else
6186 vec_num = 1;
6187
6188 /* Generate the reduction PHIs upfront. */
6189 prev_phi_info = NULL;
6190 for (j = 0; j < ncopies; j++)
6191 {
6192 if (j == 0 || !single_defuse_cycle)
6193 {
6194 for (i = 0; i < vec_num; i++)
6195 {
6196 /* Create the reduction-phi that defines the reduction
6197 operand. */
6198 gimple *new_phi = create_phi_node (vec_dest, loop->header);
6199 stmt_vec_info new_phi_info = loop_vinfo->add_stmt (new_phi);
6200
6201 if (slp_node)
6202 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi_info);
6203 else
6204 {
6205 if (j == 0)
6206 STMT_VINFO_VEC_STMT (stmt_info)
6207 = *vec_stmt = new_phi_info;
6208 else
6209 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi_info;
6210 prev_phi_info = new_phi_info;
6211 }
6212 }
6213 }
6214 }
6215
6216 return true;
6217 }
6218
6219 /* 1. Is vectorizable reduction? */
6220 /* Not supportable if the reduction variable is used in the loop, unless
6221 it's a reduction chain. */
6222 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
6223 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6224 return false;
6225
6226 /* Reductions that are not used even in an enclosing outer-loop
6227 are expected to be "live" (used out of the loop). */
6228 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
6229 && !STMT_VINFO_LIVE_P (stmt_info))
6230 return false;
6231
6232 /* 2. Has this been recognized as a reduction pattern?
6233
6234 Check if STMT represents a pattern that has been recognized
6235 in earlier analysis stages. For stmts that represent a pattern,
6236 the STMT_VINFO_RELATED_STMT field records the last stmt in
6237 the original sequence that constitutes the pattern. */
6238
6239 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
6240 if (orig_stmt_info)
6241 {
6242 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
6243 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
6244 }
6245
6246 /* 3. Check the operands of the operation. The first operands are defined
6247 inside the loop body. The last operand is the reduction variable,
6248 which is defined by the loop-header-phi. */
6249
6250 gcc_assert (is_gimple_assign (stmt));
6251
6252 /* Flatten RHS. */
6253 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
6254 {
6255 case GIMPLE_BINARY_RHS:
6256 code = gimple_assign_rhs_code (stmt);
6257 op_type = TREE_CODE_LENGTH (code);
6258 gcc_assert (op_type == binary_op);
6259 ops[0] = gimple_assign_rhs1 (stmt);
6260 ops[1] = gimple_assign_rhs2 (stmt);
6261 break;
6262
6263 case GIMPLE_TERNARY_RHS:
6264 code = gimple_assign_rhs_code (stmt);
6265 op_type = TREE_CODE_LENGTH (code);
6266 gcc_assert (op_type == ternary_op);
6267 ops[0] = gimple_assign_rhs1 (stmt);
6268 ops[1] = gimple_assign_rhs2 (stmt);
6269 ops[2] = gimple_assign_rhs3 (stmt);
6270 break;
6271
6272 case GIMPLE_UNARY_RHS:
6273 return false;
6274
6275 default:
6276 gcc_unreachable ();
6277 }
6278
6279 if (code == COND_EXPR && slp_node)
6280 return false;
6281
6282 scalar_dest = gimple_assign_lhs (stmt);
6283 scalar_type = TREE_TYPE (scalar_dest);
6284 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
6285 && !SCALAR_FLOAT_TYPE_P (scalar_type))
6286 return false;
6287
6288 /* Do not try to vectorize bit-precision reductions. */
6289 if (!type_has_mode_precision_p (scalar_type))
6290 return false;
6291
6292 /* All operands but the last are expected to be defined in the loop.
6293 The last operand is the reduction variable. In case of a nested cycle
6294 this assumption is not true: we use reduc_index to record the index of
6295 the reduction variable. */
6296 stmt_vec_info reduc_def_info = NULL;
6297 int reduc_index = -1;
6298 for (i = 0; i < op_type; i++)
6299 {
6300 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
6301 if (i == 0 && code == COND_EXPR)
6302 continue;
6303
6304 stmt_vec_info def_stmt_info;
6305 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &dts[i], &tem,
6306 &def_stmt_info);
6307 dt = dts[i];
6308 gcc_assert (is_simple_use);
6309 if (dt == vect_reduction_def)
6310 {
6311 reduc_def_info = def_stmt_info;
6312 reduc_index = i;
6313 continue;
6314 }
6315 else if (tem)
6316 {
6317 /* To properly compute ncopies we are interested in the widest
6318 input type in case we're looking at a widening accumulation. */
6319 if (!vectype_in
6320 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6321 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))
6322 vectype_in = tem;
6323 }
6324
6325 if (dt != vect_internal_def
6326 && dt != vect_external_def
6327 && dt != vect_constant_def
6328 && dt != vect_induction_def
6329 && !(dt == vect_nested_cycle && nested_cycle))
6330 return false;
6331
6332 if (dt == vect_nested_cycle)
6333 {
6334 found_nested_cycle_def = true;
6335 reduc_def_info = def_stmt_info;
6336 reduc_index = i;
6337 }
6338
6339 if (i == 1 && code == COND_EXPR)
6340 {
6341 /* Record how value of COND_EXPR is defined. */
6342 if (dt == vect_constant_def)
6343 {
6344 cond_reduc_dt = dt;
6345 cond_reduc_val = ops[i];
6346 }
6347 if (dt == vect_induction_def
6348 && def_stmt_info
6349 && is_nonwrapping_integer_induction (def_stmt_info, loop))
6350 {
6351 cond_reduc_dt = dt;
6352 cond_stmt_vinfo = def_stmt_info;
6353 }
6354 }
6355 }
6356
6357 if (!vectype_in)
6358 vectype_in = vectype_out;
6359
6360 /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
6361 directly used in stmt. */
6362 if (reduc_index == -1)
6363 {
6364 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6365 {
6366 if (dump_enabled_p ())
6367 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6368 "in-order reduction chain without SLP.\n");
6369 return false;
6370 }
6371
6372 if (orig_stmt_info)
6373 reduc_def_info = STMT_VINFO_REDUC_DEF (orig_stmt_info);
6374 else
6375 reduc_def_info = STMT_VINFO_REDUC_DEF (stmt_info);
6376 }
6377
6378 if (! reduc_def_info)
6379 return false;
6380
6381 gphi *reduc_def_phi = dyn_cast <gphi *> (reduc_def_info->stmt);
6382 if (!reduc_def_phi)
6383 return false;
6384
6385 if (!(reduc_index == -1
6386 || dts[reduc_index] == vect_reduction_def
6387 || dts[reduc_index] == vect_nested_cycle
6388 || ((dts[reduc_index] == vect_internal_def
6389 || dts[reduc_index] == vect_external_def
6390 || dts[reduc_index] == vect_constant_def
6391 || dts[reduc_index] == vect_induction_def)
6392 && nested_cycle && found_nested_cycle_def)))
6393 {
6394 /* For pattern recognized stmts, orig_stmt might be a reduction,
6395 but some helper statements for the pattern might not, or
6396 might be COND_EXPRs with reduction uses in the condition. */
6397 gcc_assert (orig_stmt_info);
6398 return false;
6399 }
6400
6401 /* PHIs should not participate in patterns. */
6402 gcc_assert (!STMT_VINFO_RELATED_STMT (reduc_def_info));
6403 enum vect_reduction_type v_reduc_type
6404 = STMT_VINFO_REDUC_TYPE (reduc_def_info);
6405 stmt_vec_info tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);
6406
6407 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
6408 /* If we have a condition reduction, see if we can simplify it further. */
6409 if (v_reduc_type == COND_REDUCTION)
6410 {
6411 /* TODO: We can't yet handle reduction chains, since we need to treat
6412 each COND_EXPR in the chain specially, not just the last one.
6413 E.g. for:
6414
6415 x_1 = PHI <x_3, ...>
6416 x_2 = a_2 ? ... : x_1;
6417 x_3 = a_3 ? ... : x_2;
6418
6419 we're interested in the last element in x_3 for which a_2 || a_3
6420 is true, whereas the current reduction chain handling would
6421 vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
6422 as a reduction operation. */
6423 if (reduc_index == -1)
6424 {
6425 if (dump_enabled_p ())
6426 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6427 "conditional reduction chains not supported\n");
6428 return false;
6429 }
6430
6431 /* vect_is_simple_reduction ensured that operand 2 is the
6432 loop-carried operand. */
6433 gcc_assert (reduc_index == 2);
6434
6435 /* Loop peeling modifies the initial value of the reduction PHI, which
6436 makes the reduction stmt to be transformed differ from the original
6437 stmt analyzed. We need to record the reduction code for a
6438 CONST_COND_REDUCTION type reduction at the analysis stage, so that
6439 it can be used directly at the transform stage. */
6440 if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
6441 || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
6442 {
6443 /* Also set the reduction type to CONST_COND_REDUCTION. */
6444 gcc_assert (cond_reduc_dt == vect_constant_def);
6445 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
6446 }
6447 else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
6448 vectype_in, OPTIMIZE_FOR_SPEED))
6449 {
6450 if (dump_enabled_p ())
6451 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6452 "optimizing condition reduction with"
6453 " FOLD_EXTRACT_LAST.\n");
6454 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
6455 }
6456 else if (cond_reduc_dt == vect_induction_def)
6457 {
6458 tree base
6459 = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
6460 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
6461
6462 gcc_assert (TREE_CODE (base) == INTEGER_CST
6463 && TREE_CODE (step) == INTEGER_CST);
6464 cond_reduc_val = NULL_TREE;
6465 /* Find a suitable value, for MAX_EXPR below base, for MIN_EXPR
6466 above base; punt if base is the minimum value of the type for
6467 MAX_EXPR or maximum value of the type for MIN_EXPR for now. */
6468 if (tree_int_cst_sgn (step) == -1)
6469 {
6470 cond_reduc_op_code = MIN_EXPR;
6471 if (tree_int_cst_sgn (base) == -1)
6472 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6473 else if (tree_int_cst_lt (base,
6474 TYPE_MAX_VALUE (TREE_TYPE (base))))
6475 cond_reduc_val
6476 = int_const_binop (PLUS_EXPR, base, integer_one_node);
6477 }
6478 else
6479 {
6480 cond_reduc_op_code = MAX_EXPR;
6481 if (tree_int_cst_sgn (base) == 1)
6482 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6483 else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
6484 base))
6485 cond_reduc_val
6486 = int_const_binop (MINUS_EXPR, base, integer_one_node);
6487 }
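	  /* For example, for a decreasing induction with BASE == 10 this
	     selects MIN_EXPR and COND_REDUC_VAL == 11 (one above BASE).  */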
6488 if (cond_reduc_val)
6489 {
6490 if (dump_enabled_p ())
6491 dump_printf_loc (MSG_NOTE, vect_location,
6492 "condition expression based on "
6493 "integer induction.\n");
6494 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6495 = INTEGER_INDUC_COND_REDUCTION;
6496 }
6497 }
6498 else if (cond_reduc_dt == vect_constant_def)
6499 {
6500 enum vect_def_type cond_initial_dt;
6501 gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
6502 tree cond_initial_val
6503 = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
6504
6505 gcc_assert (cond_reduc_val != NULL_TREE);
6506 vect_is_simple_use (cond_initial_val, loop_vinfo, &cond_initial_dt);
6507 if (cond_initial_dt == vect_constant_def
6508 && types_compatible_p (TREE_TYPE (cond_initial_val),
6509 TREE_TYPE (cond_reduc_val)))
6510 {
6511 tree e = fold_binary (LE_EXPR, boolean_type_node,
6512 cond_initial_val, cond_reduc_val);
6513 if (e && (integer_onep (e) || integer_zerop (e)))
6514 {
6515 if (dump_enabled_p ())
6516 dump_printf_loc (MSG_NOTE, vect_location,
6517 "condition expression based on "
6518 "compile time constant.\n");
6519 /* Record reduction code at analysis stage. */
6520 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
6521 = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
6522 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6523 = CONST_COND_REDUCTION;
6524 }
6525 }
6526 }
6527 }
6528
6529 if (orig_stmt_info)
6530 gcc_assert (tmp == orig_stmt_info
6531 || REDUC_GROUP_FIRST_ELEMENT (tmp) == orig_stmt_info);
6532 else
6533 /* We changed STMT to be the first stmt in reduction chain, hence we
6534 check that in this case the first element in the chain is STMT. */
6535 gcc_assert (tmp == stmt_info
6536 || REDUC_GROUP_FIRST_ELEMENT (tmp) == stmt_info);
6537
6538 if (STMT_VINFO_LIVE_P (reduc_def_info))
6539 return false;
6540
6541 if (slp_node)
6542 ncopies = 1;
6543 else
6544 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6545
6546 gcc_assert (ncopies >= 1);
6547
6548 vec_mode = TYPE_MODE (vectype_in);
6549 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
6550
6551 if (code == COND_EXPR)
6552 {
6553 /* Only call during the analysis stage, otherwise we'll lose
6554 STMT_VINFO_TYPE. */
6555 if (!vec_stmt && !vectorizable_condition (stmt_info, gsi, NULL,
6556 ops[reduc_index], 0, NULL,
6557 cost_vec))
6558 {
6559 if (dump_enabled_p ())
6560 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6561 "unsupported condition in reduction\n");
6562 return false;
6563 }
6564 }
6565 else
6566 {
6567 /* 4. Supportable by target? */
6568
6569 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
6570 || code == LROTATE_EXPR || code == RROTATE_EXPR)
6571 {
6572 /* Shifts and rotates are only supported by vectorizable_shift,
6573 not vectorizable_reduction. */
6574 if (dump_enabled_p ())
6575 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6576 "unsupported shift or rotation.\n");
6577 return false;
6578 }
6579
6580 /* 4.1. check support for the operation in the loop */
6581 optab = optab_for_tree_code (code, vectype_in, optab_default);
6582 if (!optab)
6583 {
6584 if (dump_enabled_p ())
6585 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6586 "no optab.\n");
6587
6588 return false;
6589 }
6590
6591 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
6592 {
6593 if (dump_enabled_p ())
6594 dump_printf (MSG_NOTE, "op not supported by target.\n");
6595
6596 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
6597 || !vect_worthwhile_without_simd_p (loop_vinfo, code))
6598 return false;
6599
6600 if (dump_enabled_p ())
6601 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
6602 }
6603
6604 /* Worthwhile without SIMD support? */
6605 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
6606 && !vect_worthwhile_without_simd_p (loop_vinfo, code))
6607 {
6608 if (dump_enabled_p ())
6609 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6610 "not worthwhile without SIMD support.\n");
6611
6612 return false;
6613 }
6614 }
6615
6616 /* 4.2. Check support for the epilog operation.
6617
6618 If STMT represents a reduction pattern, then the type of the
6619 reduction variable may be different than the type of the rest
6620 of the arguments. For example, consider the case of accumulation
6621 of shorts into an int accumulator. The original code:
6622 S1: int_a = (int) short_a;
6623 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
6624
6625 was replaced with:
6626 STMT: int_acc = widen_sum <short_a, int_acc>
6627
6628 This means that:
6629 1. The tree-code that is used to create the vector operation in the
6630 epilog code (that reduces the partial results) is not the
6631 tree-code of STMT, but is rather the tree-code of the original
6632 stmt from the pattern that STMT is replacing. I.e, in the example
6633 above we want to use 'widen_sum' in the loop, but 'plus' in the
6634 epilog.
6635 2. The type (mode) we use to check available target support
6636 for the vector operation to be created in the *epilog*, is
6637 determined by the type of the reduction variable (in the example
6638 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
6639 However the type (mode) we use to check available target support
6640 for the vector operation to be created *inside the loop*, is
6641 determined by the type of the other arguments to STMT (in the
6642 example we'd check this: optab_handler (widen_sum_optab,
6643 vect_short_mode)).
6644
6645 This is contrary to "regular" reductions, in which the types of all
6646 the arguments are the same as the type of the reduction variable.
6647 For "regular" reductions we can therefore use the same vector type
6648 (and also the same tree-code) when generating the epilog code and
6649 when generating the code inside the loop. */
6650
6651 vect_reduction_type reduction_type
6652 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
6653 if (orig_stmt_info
6654 && (reduction_type == TREE_CODE_REDUCTION
6655 || reduction_type == FOLD_LEFT_REDUCTION))
6656 {
6657 /* This is a reduction pattern: get the vectype from the type of the
6658 reduction variable, and get the tree-code from orig_stmt. */
6659 orig_code = gimple_assign_rhs_code (orig_stmt_info->stmt);
6660 gcc_assert (vectype_out);
6661 vec_mode = TYPE_MODE (vectype_out);
6662 }
6663 else
6664 {
6665 /* Regular reduction: the same vectype and tree-code that are used for
6666 the vector code inside the loop can also be used for the epilog code. */
6667 orig_code = code;
6668
6669 if (code == MINUS_EXPR)
6670 orig_code = PLUS_EXPR;
6671
6672 /* For simple condition reductions, replace with the actual expression
6673 we want to base our reduction around. */
6674 if (reduction_type == CONST_COND_REDUCTION)
6675 {
6676 orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
6677 gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
6678 }
6679 else if (reduction_type == INTEGER_INDUC_COND_REDUCTION)
6680 orig_code = cond_reduc_op_code;
6681 }
6682
6683 if (nested_cycle)
6684 {
6685 def_bb = gimple_bb (reduc_def_phi);
6686 def_stmt_loop = def_bb->loop_father;
6687 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi,
6688 loop_preheader_edge (def_stmt_loop));
6689 stmt_vec_info def_arg_stmt_info = loop_vinfo->lookup_def (def_arg);
6690 if (def_arg_stmt_info
6691 && (STMT_VINFO_DEF_TYPE (def_arg_stmt_info)
6692 == vect_double_reduction_def))
6693 double_reduc = true;
6694 }
6695
6696 reduc_fn = IFN_LAST;
6697
6698 if (reduction_type == TREE_CODE_REDUCTION
6699 || reduction_type == FOLD_LEFT_REDUCTION
6700 || reduction_type == INTEGER_INDUC_COND_REDUCTION
6701 || reduction_type == CONST_COND_REDUCTION)
6702 {
6703 if (reduction_type == FOLD_LEFT_REDUCTION
6704 ? fold_left_reduction_fn (orig_code, &reduc_fn)
6705 : reduction_fn_for_scalar_code (orig_code, &reduc_fn))
6706 {
6707 if (reduc_fn != IFN_LAST
6708 && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
6709 OPTIMIZE_FOR_SPEED))
6710 {
6711 if (dump_enabled_p ())
6712 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6713 "reduc op not supported by target.\n");
6714
6715 reduc_fn = IFN_LAST;
6716 }
6717 }
6718 else
6719 {
6720 if (!nested_cycle || double_reduc)
6721 {
6722 if (dump_enabled_p ())
6723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6724 "no reduc code for scalar code.\n");
6725
6726 return false;
6727 }
6728 }
6729 }
6730 else if (reduction_type == COND_REDUCTION)
6731 {
6732 int scalar_precision
6733 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
6734 cr_index_scalar_type = make_unsigned_type (scalar_precision);
6735 cr_index_vector_type = build_vector_type (cr_index_scalar_type,
6736 nunits_out);
6737
6738 if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
6739 OPTIMIZE_FOR_SPEED))
6740 reduc_fn = IFN_REDUC_MAX;
6741 }
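/* For illustration (mappings assumed from the helpers used above): a
   TREE_CODE_REDUCTION with orig_code PLUS_EXPR would typically select
   IFN_REDUC_PLUS and MAX_EXPR would select IFN_REDUC_MAX, while a
   FOLD_LEFT_REDUCTION of PLUS_EXPR maps to IFN_FOLD_LEFT_PLUS; in each
   case reduc_fn stays IFN_LAST if the target has no direct support.  */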
6742
6743 if (reduction_type != EXTRACT_LAST_REDUCTION
6744 && reduc_fn == IFN_LAST
6745 && !nunits_out.is_constant ())
6746 {
6747 if (dump_enabled_p ())
6748 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6749 "missing target support for reduction on"
6750 " variable-length vectors.\n");
6751 return false;
6752 }
6753
6754 if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
6755 && ncopies > 1)
6756 {
6757 if (dump_enabled_p ())
6758 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6759 "multiple types in double reduction or condition "
6760 "reduction.\n");
6761 return false;
6762 }
6763
6764 /* For SLP reductions, see if there is a neutral value we can use. */
6765 tree neutral_op = NULL_TREE;
6766 if (slp_node)
6767 neutral_op = neutral_op_for_slp_reduction
6768 (slp_node_instance->reduc_phis, code,
6769 REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL_STMT_VEC_INFO);
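/* Illustrative neutral values (assumed behaviour of
   neutral_op_for_slp_reduction): 0 for PLUS_EXPR/MINUS_EXPR and the
   bitwise IOR/XOR codes, 1 for MULT_EXPR, an all-ones constant for
   BIT_AND_EXPR, and for MIN_EXPR/MAX_EXPR chained reductions the
   initial scalar value itself.  */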
6770
6771 if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
6772 {
6773 /* We can't support in-order reductions of code such as this:
6774
6775 for (int i = 0; i < n1; ++i)
6776 for (int j = 0; j < n2; ++j)
6777 l += a[j];
6778
6779 since GCC effectively transforms the loop when vectorizing:
6780
6781 for (int i = 0; i < n1 / VF; ++i)
6782 for (int j = 0; j < n2; ++j)
6783 for (int k = 0; k < VF; ++k)
6784 l += a[j];
6785
6786 which is a reassociation of the original operation. */
6787 if (dump_enabled_p ())
6788 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6789 "in-order double reduction not supported.\n");
6790
6791 return false;
6792 }
6793
6794 if (reduction_type == FOLD_LEFT_REDUCTION
6795 && slp_node
6796 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6797 {
6798 /* We cannot use in-order reductions in this case because there is
6799 an implicit reassociation of the operations involved. */
6800 if (dump_enabled_p ())
6801 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6802 "in-order unchained SLP reductions not supported.\n");
6803 return false;
6804 }
6805
6806 /* For double reductions, and for SLP reductions with a neutral value,
6807 we construct a variable-length initial vector by loading a vector
6808 full of the neutral value and then shift-and-inserting the start
6809 values into the low-numbered elements. */
6810 if ((double_reduc || neutral_op)
6811 && !nunits_out.is_constant ()
6812 && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
6813 vectype_out, OPTIMIZE_FOR_SPEED))
6814 {
6815 if (dump_enabled_p ())
6816 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6817 "reduction on variable-length vectors requires"
6818 " target support for a vector-shift-and-insert"
6819 " operation.\n");
6820 return false;
6821 }
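/* A sketch of the construction described above (semantics of
   IFN_VEC_SHL_INSERT assumed): starting from { n, n, ..., n } with n the
   neutral value, inserting a start value s at the low end gives
   { s, n, ..., n }, and for an SLP group with start values s0 and s1 two
   inserts give { s0, s1, n, ..., n }.  */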
6822
6823 /* Check extra constraints for variable-length unchained SLP reductions. */
6824 if (STMT_SLP_TYPE (stmt_info)
6825 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info)
6826 && !nunits_out.is_constant ())
6827 {
6828 /* We checked above that we could build the initial vector when
6829 there's a neutral element value. Check here for the case in
6830 which each SLP statement has its own initial value and in which
6831 that value needs to be repeated for every instance of the
6832 statement within the initial vector. */
6833 unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
6834 scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out));
6835 if (!neutral_op
6836 && !can_duplicate_and_interleave_p (group_size, elt_mode))
6837 {
6838 if (dump_enabled_p ())
6839 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6840 "unsupported form of SLP reduction for"
6841 " variable-length vectors: cannot build"
6842 " initial vector.\n");
6843 return false;
6844 }
6845 /* The epilogue code relies on the number of elements being a multiple
6846 of the group size. The duplicate-and-interleave approach to setting
6847 up the initial vector does too. */
6848 if (!multiple_p (nunits_out, group_size))
6849 {
6850 if (dump_enabled_p ())
6851 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6852 "unsupported form of SLP reduction for"
6853 " variable-length vectors: the vector size"
6854 " is not a multiple of the number of results.\n");
6855 return false;
6856 }
6857 }
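/* Example of the constraints above (values assumed): with group_size == 2
   and per-statement start values s0 and s1 but no neutral value, the
   initial vector would need to be { s0, s1, s0, s1, ... }, which requires
   can_duplicate_and_interleave_p as well as a number of lanes that is a
   multiple of 2.  */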
6858
6859 /* In case of widening multiplication by a constant, we update the type
6860 of the constant to be the type of the other operand. We check that the
6861 constant fits the type in the pattern recognition pass. */
6862 if (code == DOT_PROD_EXPR
6863 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
6864 {
6865 if (TREE_CODE (ops[0]) == INTEGER_CST)
6866 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
6867 else if (TREE_CODE (ops[1]) == INTEGER_CST)
6868 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
6869 else
6870 {
6871 if (dump_enabled_p ())
6872 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6873 "invalid types in dot-prod\n");
6874
6875 return false;
6876 }
6877 }
6878
6879 if (reduction_type == COND_REDUCTION)
6880 {
6881 widest_int ni;
6882
6883 if (! max_loop_iterations (loop, &ni))
6884 {
6885 if (dump_enabled_p ())
6886 dump_printf_loc (MSG_NOTE, vect_location,
6887 "loop count not known, cannot create cond "
6888 "reduction.\n");
6889 return false;
6890 }
6891 /* Convert backedges to iterations. */
6892 ni += 1;
6893
6894 /* The additional index will be the same type as the condition. Check
6895 that the loop iteration count fits into this type less one (because
6896 we'll use up the zero slot for when there are no matches). */
6897 tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
6898 if (wi::geu_p (ni, wi::to_widest (max_index)))
6899 {
6900 if (dump_enabled_p ())
6901 dump_printf_loc (MSG_NOTE, vect_location,
6902 "loop size is greater than data size.\n");
6903 return false;
6904 }
6905 }
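/* Illustrative numbers (assumed): if cr_index_scalar_type were an unsigned
   8-bit type, max_index would be 255, so only loops with at most 254
   iterations pass the check above, keeping index 0 free for the "no match"
   case.  */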
6906
6907 /* In case the vectorization factor (VF) is bigger than the number
6908 of elements that we can fit in a vectype (nunits), we have to generate
6909 more than one vector stmt - i.e - we need to "unroll" the
6910 vector stmt by a factor VF/nunits. For more details see documentation
6911 in vectorizable_operation. */
6912
6913 /* If the reduction is used in an outer loop we need to generate
6914 VF intermediate results, like so (e.g. for ncopies=2):
6915 r0 = phi (init, r0)
6916 r1 = phi (init, r1)
6917 r0 = x0 + r0;
6918 r1 = x1 + r1;
6919 (i.e. we generate VF results in 2 registers).
6920 In this case we have a separate def-use cycle for each copy, and therefore
6921 for each copy we get the vector def for the reduction variable from the
6922 respective phi node created for this copy.
6923
6924 Otherwise (the reduction is unused in the loop nest), we can combine
6925 together intermediate results, like so (e.g. for ncopies=2):
6926 r = phi (init, r)
6927 r = x0 + r;
6928 r = x1 + r;
6929 (i.e. we generate VF/2 results in a single register).
6930 In this case for each copy we get the vector def for the reduction variable
6931 from the vectorized reduction operation generated in the previous iteration.
6932
6933 This only works when we see both the reduction PHI and its only consumer
6934 in vectorizable_reduction and there are no intermediate stmts
6935 participating. */
6936 stmt_vec_info use_stmt_info;
6937 tree reduc_phi_result = gimple_phi_result (reduc_def_phi);
6938 if (ncopies > 1
6939 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
6940 && (use_stmt_info = loop_vinfo->lookup_single_use (reduc_phi_result))
6941 && (use_stmt_info == stmt_info
6942 || STMT_VINFO_RELATED_STMT (use_stmt_info) == stmt_info))
6943 {
6944 single_defuse_cycle = true;
6945 epilog_copies = 1;
6946 }
6947 else
6948 epilog_copies = ncopies;
6949
6950 /* If the reduction stmt is one of the patterns that have an embedded
6951 lane reduction, we cannot handle the case of ! single_defuse_cycle. */
6952 if ((ncopies > 1
6953 && ! single_defuse_cycle)
6954 && (code == DOT_PROD_EXPR
6955 || code == WIDEN_SUM_EXPR
6956 || code == SAD_EXPR))
6957 {
6958 if (dump_enabled_p ())
6959 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6960 "multi def-use cycle not possible for lane-reducing "
6961 "reduction operation\n");
6962 return false;
6963 }
6964
6965 if (slp_node)
6966 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6967 else
6968 vec_num = 1;
6969
6970 internal_fn cond_fn = get_conditional_internal_fn (code);
6971 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
6972
6973 if (!vec_stmt) /* transformation not required. */
6974 {
6975 vect_model_reduction_cost (stmt_info, reduc_fn, ncopies, cost_vec);
6976 if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6977 {
6978 if (reduction_type != FOLD_LEFT_REDUCTION
6979 && (cond_fn == IFN_LAST
6980 || !direct_internal_fn_supported_p (cond_fn, vectype_in,
6981 OPTIMIZE_FOR_SPEED)))
6982 {
6983 if (dump_enabled_p ())
6984 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6985 "can't use a fully-masked loop because no"
6986 " conditional operation is available.\n");
6987 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
6988 }
6989 else if (reduc_index == -1)
6990 {
6991 if (dump_enabled_p ())
6992 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6993 "can't use a fully-masked loop for chained"
6994 " reductions.\n");
6995 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
6996 }
6997 else
6998 vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
6999 vectype_in);
7000 }
7001 if (dump_enabled_p ()
7002 && reduction_type == FOLD_LEFT_REDUCTION)
7003 dump_printf_loc (MSG_NOTE, vect_location,
7004 "using an in-order (fold-left) reduction.\n");
7005 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
7006 return true;
7007 }
7008
7009 /* Transform. */
7010
7011 if (dump_enabled_p ())
7012 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
7013
7014 /* FORNOW: Multiple types are not supported for condition. */
7015 if (code == COND_EXPR)
7016 gcc_assert (ncopies == 1);
7017
7018 bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
7019
7020 if (reduction_type == FOLD_LEFT_REDUCTION)
7021 return vectorize_fold_left_reduction
7022 (stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
7023 reduc_fn, ops, vectype_in, reduc_index, masks);
7024
7025 if (reduction_type == EXTRACT_LAST_REDUCTION)
7026 {
7027 gcc_assert (!slp_node);
7028 return vectorizable_condition (stmt_info, gsi, vec_stmt,
7029 NULL, reduc_index, NULL, NULL);
7030 }
7031
7032 /* Create the destination vector */
7033 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
7034
7035 prev_stmt_info = NULL;
7036 prev_phi_info = NULL;
7037 if (!slp_node)
7038 {
7039 vec_oprnds0.create (1);
7040 vec_oprnds1.create (1);
7041 if (op_type == ternary_op)
7042 vec_oprnds2.create (1);
7043 }
7044
7045 phis.create (vec_num);
7046 vect_defs.create (vec_num);
7047 if (!slp_node)
7048 vect_defs.quick_push (NULL_TREE);
7049
7050 if (slp_node)
7051 phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis));
7052 else
7053 phis.quick_push (STMT_VINFO_VEC_STMT (reduc_def_info));
7054
7055 for (j = 0; j < ncopies; j++)
7056 {
7057 if (code == COND_EXPR)
7058 {
7059 gcc_assert (!slp_node);
7060 vectorizable_condition (stmt_info, gsi, vec_stmt,
7061 PHI_RESULT (phis[0]->stmt),
7062 reduc_index, NULL, NULL);
7063 /* Multiple types are not supported for condition. */
7064 break;
7065 }
7066
7067 /* Handle uses. */
7068 if (j == 0)
7069 {
7070 if (slp_node)
7071 {
7072 /* Get vec defs for all the operands except the reduction index,
7073 ensuring the ordering of the ops in the vector is kept. */
7074 auto_vec<tree, 3> slp_ops;
7075 auto_vec<vec<tree>, 3> vec_defs;
7076
7077 slp_ops.quick_push (ops[0]);
7078 slp_ops.quick_push (ops[1]);
7079 if (op_type == ternary_op)
7080 slp_ops.quick_push (ops[2]);
7081
7082 vect_get_slp_defs (slp_ops, slp_node, &vec_defs);
7083
7084 vec_oprnds0.safe_splice (vec_defs[0]);
7085 vec_defs[0].release ();
7086 vec_oprnds1.safe_splice (vec_defs[1]);
7087 vec_defs[1].release ();
7088 if (op_type == ternary_op)
7089 {
7090 vec_oprnds2.safe_splice (vec_defs[2]);
7091 vec_defs[2].release ();
7092 }
7093 }
7094 else
7095 {
7096 vec_oprnds0.quick_push
7097 (vect_get_vec_def_for_operand (ops[0], stmt_info));
7098 vec_oprnds1.quick_push
7099 (vect_get_vec_def_for_operand (ops[1], stmt_info));
7100 if (op_type == ternary_op)
7101 vec_oprnds2.quick_push
7102 (vect_get_vec_def_for_operand (ops[2], stmt_info));
7103 }
7104 }
7105 else
7106 {
7107 if (!slp_node)
7108 {
7109 gcc_assert (reduc_index != -1 || ! single_defuse_cycle);
7110
7111 if (single_defuse_cycle && reduc_index == 0)
7112 vec_oprnds0[0] = gimple_get_lhs (new_stmt_info->stmt);
7113 else
7114 vec_oprnds0[0]
7115 = vect_get_vec_def_for_stmt_copy (dts[0], vec_oprnds0[0]);
7116 if (single_defuse_cycle && reduc_index == 1)
7117 vec_oprnds1[0] = gimple_get_lhs (new_stmt_info->stmt);
7118 else
7119 vec_oprnds1[0]
7120 = vect_get_vec_def_for_stmt_copy (dts[1], vec_oprnds1[0]);
7121 if (op_type == ternary_op)
7122 {
7123 if (single_defuse_cycle && reduc_index == 2)
7124 vec_oprnds2[0] = gimple_get_lhs (new_stmt_info->stmt);
7125 else
7126 vec_oprnds2[0]
7127 = vect_get_vec_def_for_stmt_copy (dts[2], vec_oprnds2[0]);
7128 }
7129 }
7130 }
7131
7132 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
7133 {
7134 tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
7135 if (masked_loop_p)
7136 {
7137 /* Make sure that the reduction accumulator is vop[0]. */
7138 if (reduc_index == 1)
7139 {
7140 gcc_assert (commutative_tree_code (code));
7141 std::swap (vop[0], vop[1]);
7142 }
7143 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
7144 vectype_in, i * ncopies + j);
7145 gcall *call = gimple_build_call_internal (cond_fn, 4, mask,
7146 vop[0], vop[1],
7147 vop[0]);
7148 new_temp = make_ssa_name (vec_dest, call);
7149 gimple_call_set_lhs (call, new_temp);
7150 gimple_call_set_nothrow (call, true);
7151 new_stmt_info
7152 = vect_finish_stmt_generation (stmt_info, call, gsi);
7153 }
7154 else
7155 {
7156 if (op_type == ternary_op)
7157 vop[2] = vec_oprnds2[i];
7158
7159 gassign *new_stmt = gimple_build_assign (vec_dest, code,
7160 vop[0], vop[1], vop[2]);
7161 new_temp = make_ssa_name (vec_dest, new_stmt);
7162 gimple_assign_set_lhs (new_stmt, new_temp);
7163 new_stmt_info
7164 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7165 }
7166
7167 if (slp_node)
7168 {
7169 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7170 vect_defs.quick_push (new_temp);
7171 }
7172 else
7173 vect_defs[0] = new_temp;
7174 }
7175
7176 if (slp_node)
7177 continue;
7178
7179 if (j == 0)
7180 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7181 else
7182 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7183
7184 prev_stmt_info = new_stmt_info;
7185 }
7186
7187 /* Finalize the reduction-phi (set its arguments) and create the
7188 epilog reduction code. */
7189 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
7190 vect_defs[0] = gimple_get_lhs ((*vec_stmt)->stmt);
7191
7192 vect_create_epilog_for_reduction (vect_defs, stmt_info, reduc_def_phi,
7193 epilog_copies, reduc_fn, phis,
7194 double_reduc, slp_node, slp_node_instance,
7195 cond_reduc_val, cond_reduc_op_code,
7196 neutral_op);
7197
7198 return true;
7199 }
7200
7201 /* Function vect_min_worthwhile_factor.
7202
7203 For a loop where we could vectorize the operation indicated by CODE,
7204 return the minimum vectorization factor that makes it worthwhile
7205 to use generic vectors. */
7206 static unsigned int
7207 vect_min_worthwhile_factor (enum tree_code code)
7208 {
7209 switch (code)
7210 {
7211 case PLUS_EXPR:
7212 case MINUS_EXPR:
7213 case NEGATE_EXPR:
7214 return 4;
7215
7216 case BIT_AND_EXPR:
7217 case BIT_IOR_EXPR:
7218 case BIT_XOR_EXPR:
7219 case BIT_NOT_EXPR:
7220 return 2;
7221
7222 default:
7223 return INT_MAX;
7224 }
7225 }
7226
7227 /* Return true if VINFO indicates we are doing loop vectorization and if
7228 it is worth decomposing CODE operations into scalar operations for
7229 that loop's vectorization factor. */
7230
7231 bool
7232 vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
7233 {
7234 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7235 unsigned HOST_WIDE_INT value;
7236 return (loop_vinfo
7237 && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
7238 && value >= vect_min_worthwhile_factor (code));
7239 }
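/* Worked example (factors taken from the table above): with a constant
   vectorization factor of 4, both PLUS_EXPR (minimum factor 4) and
   BIT_AND_EXPR (minimum factor 2) are considered worthwhile without SIMD,
   whereas with a factor of 2 only the bitwise codes qualify and with an
   unknown (variable-length) factor nothing does.  */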
7240
7241 /* Function vectorizable_induction
7242
7243 Check if PHI performs an induction computation that can be vectorized.
7244 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
7245 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
7246 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7247
7248 bool
7249 vectorizable_induction (gimple *phi,
7250 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7251 stmt_vec_info *vec_stmt, slp_tree slp_node,
7252 stmt_vector_for_cost *cost_vec)
7253 {
7254 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
7255 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7256 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7257 unsigned ncopies;
7258 bool nested_in_vect_loop = false;
7259 struct loop *iv_loop;
7260 tree vec_def;
7261 edge pe = loop_preheader_edge (loop);
7262 basic_block new_bb;
7263 tree new_vec, vec_init, vec_step, t;
7264 tree new_name;
7265 gimple *new_stmt;
7266 gphi *induction_phi;
7267 tree induc_def, vec_dest;
7268 tree init_expr, step_expr;
7269 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7270 unsigned i;
7271 tree expr;
7272 gimple_seq stmts;
7273 imm_use_iterator imm_iter;
7274 use_operand_p use_p;
7275 gimple *exit_phi;
7276 edge latch_e;
7277 tree loop_arg;
7278 gimple_stmt_iterator si;
7279 basic_block bb = gimple_bb (phi);
7280
7281 if (gimple_code (phi) != GIMPLE_PHI)
7282 return false;
7283
7284 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7285 return false;
7286
7287 /* Make sure it was recognized as induction computation. */
7288 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
7289 return false;
7290
7291 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7292 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7293
7294 if (slp_node)
7295 ncopies = 1;
7296 else
7297 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7298 gcc_assert (ncopies >= 1);
7299
7300 /* FORNOW. These restrictions should be relaxed. */
7301 if (nested_in_vect_loop_p (loop, stmt_info))
7302 {
7303 imm_use_iterator imm_iter;
7304 use_operand_p use_p;
7305 gimple *exit_phi;
7306 edge latch_e;
7307 tree loop_arg;
7308
7309 if (ncopies > 1)
7310 {
7311 if (dump_enabled_p ())
7312 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7313 "multiple types in nested loop.\n");
7314 return false;
7315 }
7316
7317 /* FORNOW: outer loop induction with SLP not supported. */
7318 if (STMT_SLP_TYPE (stmt_info))
7319 return false;
7320
7321 exit_phi = NULL;
7322 latch_e = loop_latch_edge (loop->inner);
7323 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7324 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7325 {
7326 gimple *use_stmt = USE_STMT (use_p);
7327 if (is_gimple_debug (use_stmt))
7328 continue;
7329
7330 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
7331 {
7332 exit_phi = use_stmt;
7333 break;
7334 }
7335 }
7336 if (exit_phi)
7337 {
7338 stmt_vec_info exit_phi_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7339 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
7340 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
7341 {
7342 if (dump_enabled_p ())
7343 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7344 "inner-loop induction only used outside "
7345 "of the outer vectorized loop.\n");
7346 return false;
7347 }
7348 }
7349
7350 nested_in_vect_loop = true;
7351 iv_loop = loop->inner;
7352 }
7353 else
7354 iv_loop = loop;
7355 gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
7356
7357 if (slp_node && !nunits.is_constant ())
7358 {
7359 /* The current SLP code creates the initial value element-by-element. */
7360 if (dump_enabled_p ())
7361 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7362 "SLP induction not supported for variable-length"
7363 " vectors.\n");
7364 return false;
7365 }
7366
7367 if (!vec_stmt) /* transformation not required. */
7368 {
7369 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
7370 DUMP_VECT_SCOPE ("vectorizable_induction");
7371 vect_model_induction_cost (stmt_info, ncopies, cost_vec);
7372 return true;
7373 }
7374
7375 /* Transform. */
7376
7377 /* Compute a vector variable, initialized with the first VF values of
7378 the induction variable. E.g., for an iv with IV_PHI='X' and
7379 evolution S, for a vector of 4 units, we want to compute:
7380 [X, X + S, X + 2*S, X + 3*S]. */
7381
7382 if (dump_enabled_p ())
7383 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
7384
7385 latch_e = loop_latch_edge (iv_loop);
7386 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7387
7388 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
7389 gcc_assert (step_expr != NULL_TREE);
7390
7391 pe = loop_preheader_edge (iv_loop);
7392 init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
7393 loop_preheader_edge (iv_loop));
7394
7395 stmts = NULL;
7396 if (!nested_in_vect_loop)
7397 {
7398 /* Convert the initial value to the desired type. */
7399 tree new_type = TREE_TYPE (vectype);
7400 init_expr = gimple_convert (&stmts, new_type, init_expr);
7401
7402 /* If we are using the loop mask to "peel" for alignment then we need
7403 to adjust the start value here. */
7404 tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
7405 if (skip_niters != NULL_TREE)
7406 {
7407 if (FLOAT_TYPE_P (vectype))
7408 skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
7409 skip_niters);
7410 else
7411 skip_niters = gimple_convert (&stmts, new_type, skip_niters);
7412 tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
7413 skip_niters, step_expr);
7414 init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
7415 init_expr, skip_step);
7416 }
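/* Illustrative values (assumed): with skip_niters == 2 and step S, the
   adjusted start is X - 2*S, so the IV vector becomes
   [X - 2*S, X - S, X, X + S, ...] and the first unmasked lane (lane 2)
   still carries the original start value X.  */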
7417 }
7418
7419 /* Convert the step to the desired type. */
7420 step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
7421
7422 if (stmts)
7423 {
7424 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7425 gcc_assert (!new_bb);
7426 }
7427
7428 /* Find the first insertion point in the BB. */
7429 si = gsi_after_labels (bb);
7430
7431 /* For SLP induction we have to generate several IVs as for example
7432 with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
7433 [i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform
7434 [VF*S, VF*S, VF*S, VF*S] for all. */
7435 if (slp_node)
7436 {
7437 /* Enforced above. */
7438 unsigned int const_nunits = nunits.to_constant ();
7439
7440 /* Generate [VF*S, VF*S, ... ]. */
7441 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7442 {
7443 expr = build_int_cst (integer_type_node, vf);
7444 expr = fold_convert (TREE_TYPE (step_expr), expr);
7445 }
7446 else
7447 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7448 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7449 expr, step_expr);
7450 if (! CONSTANT_CLASS_P (new_name))
7451 new_name = vect_init_vector (stmt_info, new_name,
7452 TREE_TYPE (step_expr), NULL);
7453 new_vec = build_vector_from_val (vectype, new_name);
7454 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7455
7456 /* Now generate the IVs. */
7457 unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7458 unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7459 unsigned elts = const_nunits * nvects;
7460 unsigned nivs = least_common_multiple (group_size,
7461 const_nunits) / const_nunits;
7462 gcc_assert (elts % group_size == 0);
7463 tree elt = init_expr;
7464 unsigned ivn;
7465 for (ivn = 0; ivn < nivs; ++ivn)
7466 {
7467 tree_vector_builder elts (vectype, const_nunits, 1);
7468 stmts = NULL;
7469 for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
7470 {
7471 if (ivn*const_nunits + eltn >= group_size
7472 && (ivn * const_nunits + eltn) % group_size == 0)
7473 elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
7474 elt, step_expr);
7475 elts.quick_push (elt);
7476 }
7477 vec_init = gimple_build_vector (&stmts, &elts);
7478 if (stmts)
7479 {
7480 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7481 gcc_assert (!new_bb);
7482 }
7483
7484 /* Create the induction-phi that defines the induction-operand. */
7485 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7486 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7487 stmt_vec_info induction_phi_info
7488 = loop_vinfo->add_stmt (induction_phi);
7489 induc_def = PHI_RESULT (induction_phi);
7490
7491 /* Create the iv update inside the loop */
7492 vec_def = make_ssa_name (vec_dest);
7493 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7494 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7495 loop_vinfo->add_stmt (new_stmt);
7496
7497 /* Set the arguments of the phi node: */
7498 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7499 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7500 UNKNOWN_LOCATION);
7501
7502 SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi_info);
7503 }
7504
7505 /* Re-use IVs when we can. */
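/* A worked example for the re-use below (sizes assumed): with
   group_size == 2 and const_nunits == 4 we build nivs == 1 vector
   [i, i, i + S, i + S]; if nvects == 2, the second vector is obtained by
   adding [2*S, 2*S, 2*S, 2*S] (VF' == 2), giving
   [i + 2*S, i + 2*S, i + 3*S, i + 3*S].  */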
7506 if (ivn < nvects)
7507 {
7508 unsigned vfp
7509 = least_common_multiple (group_size, const_nunits) / group_size;
7510 /* Generate [VF'*S, VF'*S, ... ]. */
7511 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7512 {
7513 expr = build_int_cst (integer_type_node, vfp);
7514 expr = fold_convert (TREE_TYPE (step_expr), expr);
7515 }
7516 else
7517 expr = build_int_cst (TREE_TYPE (step_expr), vfp);
7518 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7519 expr, step_expr);
7520 if (! CONSTANT_CLASS_P (new_name))
7521 new_name = vect_init_vector (stmt_info, new_name,
7522 TREE_TYPE (step_expr), NULL);
7523 new_vec = build_vector_from_val (vectype, new_name);
7524 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7525 for (; ivn < nvects; ++ivn)
7526 {
7527 gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]->stmt;
7528 tree def;
7529 if (gimple_code (iv) == GIMPLE_PHI)
7530 def = gimple_phi_result (iv);
7531 else
7532 def = gimple_assign_lhs (iv);
7533 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7534 PLUS_EXPR,
7535 def, vec_step);
7536 if (gimple_code (iv) == GIMPLE_PHI)
7537 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7538 else
7539 {
7540 gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
7541 gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
7542 }
7543 SLP_TREE_VEC_STMTS (slp_node).quick_push
7544 (loop_vinfo->add_stmt (new_stmt));
7545 }
7546 }
7547
7548 return true;
7549 }
7550
7551 /* Create the vector that holds the initial_value of the induction. */
7552 if (nested_in_vect_loop)
7553 {
7554 /* iv_loop is nested in the loop to be vectorized. init_expr had already
7555 been created during vectorization of previous stmts. We obtain it
7556 from the STMT_VINFO_VEC_STMT of the defining stmt. */
7557 vec_init = vect_get_vec_def_for_operand (init_expr, stmt_info);
7558 /* If the initial value is not of proper type, convert it. */
7559 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
7560 {
7561 new_stmt
7562 = gimple_build_assign (vect_get_new_ssa_name (vectype,
7563 vect_simple_var,
7564 "vec_iv_"),
7565 VIEW_CONVERT_EXPR,
7566 build1 (VIEW_CONVERT_EXPR, vectype,
7567 vec_init));
7568 vec_init = gimple_assign_lhs (new_stmt);
7569 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
7570 new_stmt);
7571 gcc_assert (!new_bb);
7572 loop_vinfo->add_stmt (new_stmt);
7573 }
7574 }
7575 else
7576 {
7577 /* iv_loop is the loop to be vectorized. Create:
7578 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
7579 stmts = NULL;
7580 new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
7581
7582 unsigned HOST_WIDE_INT const_nunits;
7583 if (nunits.is_constant (&const_nunits))
7584 {
7585 tree_vector_builder elts (vectype, const_nunits, 1);
7586 elts.quick_push (new_name);
7587 for (i = 1; i < const_nunits; i++)
7588 {
7589 /* Create: new_name_i = new_name + step_expr */
7590 new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
7591 new_name, step_expr);
7592 elts.quick_push (new_name);
7593 }
7594 /* Create a vector from [new_name_0, new_name_1, ...,
7595 new_name_nunits-1] */
7596 vec_init = gimple_build_vector (&stmts, &elts);
7597 }
7598 else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
7599 /* Build the initial value directly from a VEC_SERIES_EXPR. */
7600 vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
7601 new_name, step_expr);
7602 else
7603 {
7604 /* Build:
7605 [base, base, base, ...]
7606 + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
7607 gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
7608 gcc_assert (flag_associative_math);
7609 tree index = build_index_vector (vectype, 0, 1);
7610 tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
7611 new_name);
7612 tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
7613 step_expr);
7614 vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
7615 vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
7616 vec_init, step_vec);
7617 vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
7618 vec_init, base_vec);
7619 }
7620
7621 if (stmts)
7622 {
7623 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7624 gcc_assert (!new_bb);
7625 }
7626 }
7627
7628
7629 /* Create the vector that holds the step of the induction. */
7630 if (nested_in_vect_loop)
7631 /* iv_loop is nested in the loop to be vectorized. Generate:
7632 vec_step = [S, S, S, S] */
7633 new_name = step_expr;
7634 else
7635 {
7636 /* iv_loop is the loop to be vectorized. Generate:
7637 vec_step = [VF*S, VF*S, VF*S, VF*S] */
7638 gimple_seq seq = NULL;
7639 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7640 {
7641 expr = build_int_cst (integer_type_node, vf);
7642 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7643 }
7644 else
7645 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7646 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7647 expr, step_expr);
7648 if (seq)
7649 {
7650 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7651 gcc_assert (!new_bb);
7652 }
7653 }
7654
7655 t = unshare_expr (new_name);
7656 gcc_assert (CONSTANT_CLASS_P (new_name)
7657 || TREE_CODE (new_name) == SSA_NAME);
7658 new_vec = build_vector_from_val (vectype, t);
7659 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7660
7661
7662 /* Create the following def-use cycle:
7663 loop prolog:
7664 vec_init = ...
7665 vec_step = ...
7666 loop:
7667 vec_iv = PHI <vec_init, vec_loop>
7668 ...
7669 STMT
7670 ...
7671 vec_loop = vec_iv + vec_step; */
7672
7673 /* Create the induction-phi that defines the induction-operand. */
7674 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7675 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7676 stmt_vec_info induction_phi_info = loop_vinfo->add_stmt (induction_phi);
7677 induc_def = PHI_RESULT (induction_phi);
7678
7679 /* Create the iv update inside the loop */
7680 vec_def = make_ssa_name (vec_dest);
7681 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7682 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7683 stmt_vec_info new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7684
7685 /* Set the arguments of the phi node: */
7686 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7687 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7688 UNKNOWN_LOCATION);
7689
7690 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi_info;
7691
7692 /* In case the vectorization factor (VF) is bigger than the number
7693 of elements that we can fit in a vectype (nunits), we have to generate
7694 more than one vector stmt - i.e - we need to "unroll" the
7695 vector stmt by a factor VF/nunits. For more details see documentation
7696 in vectorizable_operation. */
7697
7698 if (ncopies > 1)
7699 {
7700 gimple_seq seq = NULL;
7701 stmt_vec_info prev_stmt_vinfo;
7702 /* FORNOW. This restriction should be relaxed. */
7703 gcc_assert (!nested_in_vect_loop);
7704
7705 /* Create the vector that holds the step of the induction. */
7706 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7707 {
7708 expr = build_int_cst (integer_type_node, nunits);
7709 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7710 }
7711 else
7712 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
7713 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7714 expr, step_expr);
7715 if (seq)
7716 {
7717 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7718 gcc_assert (!new_bb);
7719 }
7720
7721 t = unshare_expr (new_name);
7722 gcc_assert (CONSTANT_CLASS_P (new_name)
7723 || TREE_CODE (new_name) == SSA_NAME);
7724 new_vec = build_vector_from_val (vectype, t);
7725 vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
7726
7727 vec_def = induc_def;
7728 prev_stmt_vinfo = induction_phi_info;
7729 for (i = 1; i < ncopies; i++)
7730 {
7731 /* vec_i = vec_prev + vec_step */
7732 new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
7733 vec_def, vec_step);
7734 vec_def = make_ssa_name (vec_dest, new_stmt);
7735 gimple_assign_set_lhs (new_stmt, vec_def);
7736
7737 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7738 new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7739 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt_info;
7740 prev_stmt_vinfo = new_stmt_info;
7741 }
7742 }
7743
7744 if (nested_in_vect_loop)
7745 {
7746 /* Find the loop-closed exit-phi of the induction, and record
7747 the final vector of induction results: */
7748 exit_phi = NULL;
7749 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7750 {
7751 gimple *use_stmt = USE_STMT (use_p);
7752 if (is_gimple_debug (use_stmt))
7753 continue;
7754
7755 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
7756 {
7757 exit_phi = use_stmt;
7758 break;
7759 }
7760 }
7761 if (exit_phi)
7762 {
7763 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7764 /* FORNOW. Currently not supporting the case that an inner-loop induction
7765 is not used in the outer-loop (i.e. only outside the outer-loop). */
7766 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
7767 && !STMT_VINFO_LIVE_P (stmt_vinfo));
7768
7769 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt_info;
7770 if (dump_enabled_p ())
7771 {
7772 dump_printf_loc (MSG_NOTE, vect_location,
7773 "vector of inductions after inner-loop:");
7774 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
7775 }
7776 }
7777 }
7778
7779
7780 if (dump_enabled_p ())
7781 {
7782 dump_printf_loc (MSG_NOTE, vect_location,
7783 "transform induction: created def-use cycle: ");
7784 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
7785 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
7786 SSA_NAME_DEF_STMT (vec_def), 0);
7787 }
7788
7789 return true;
7790 }
7791
7792 /* Function vectorizable_live_operation.
7793
7794 STMT computes a value that is used outside the loop. Check if
7795 it can be supported. */
7796
7797 bool
7798 vectorizable_live_operation (gimple *stmt,
7799 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7800 slp_tree slp_node, int slp_index,
7801 stmt_vec_info *vec_stmt,
7802 stmt_vector_for_cost *)
7803 {
7804 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7805 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7806 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7807 imm_use_iterator imm_iter;
7808 tree lhs, lhs_type, bitsize, vec_bitsize;
7809 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7810 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7811 int ncopies;
7812 gimple *use_stmt;
7813 auto_vec<tree> vec_oprnds;
7814 int vec_entry = 0;
7815 poly_uint64 vec_index = 0;
7816
7817 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
7818
7819 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
7820 return false;
7821
7822 /* FORNOW. CHECKME. */
7823 if (nested_in_vect_loop_p (loop, stmt_info))
7824 return false;
7825
7826 /* If STMT is not relevant and it is a simple assignment and its inputs are
7827 invariant then it can remain in place, unvectorized. The original last
7828 scalar value that it computes will be used. */
7829 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7830 {
7831 gcc_assert (is_simple_and_all_uses_invariant (stmt_info, loop_vinfo));
7832 if (dump_enabled_p ())
7833 dump_printf_loc (MSG_NOTE, vect_location,
7834 "statement is simple and uses invariant. Leaving in "
7835 "place.\n");
7836 return true;
7837 }
7838
7839 if (slp_node)
7840 ncopies = 1;
7841 else
7842 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7843
7844 if (slp_node)
7845 {
7846 gcc_assert (slp_index >= 0);
7847
7848 int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7849 int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7850
7851 /* Get the last occurrence of the scalar index from the concatenation of
7852 all the slp vectors. Calculate which slp vector it is and the index
7853 within. */
7854 poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;
7855
7856 /* Calculate which vector contains the result, and which lane of
7857 that vector we need. */
7858 if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
7859 {
7860 if (dump_enabled_p ())
7861 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7862 "Cannot determine which vector holds the"
7863 " final result.\n");
7864 return false;
7865 }
7866 }
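/* Worked example (numbers assumed): with num_scalar == 4, num_vec == 2 and
   nunits == 4, the concatenation has 8 lanes and the last group occupies
   lanes 4..7, so slp_index == 1 gives pos == 5, i.e. vec_entry == 1 and
   vec_index == 1.  */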
7867
7868 if (!vec_stmt)
7869 {
7870 /* No transformation required. */
7871 if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7872 {
7873 if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
7874 OPTIMIZE_FOR_SPEED))
7875 {
7876 if (dump_enabled_p ())
7877 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7878 "can't use a fully-masked loop because "
7879 "the target doesn't support extract last "
7880 "reduction.\n");
7881 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7882 }
7883 else if (slp_node)
7884 {
7885 if (dump_enabled_p ())
7886 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7887 "can't use a fully-masked loop because an "
7888 "SLP statement is live after the loop.\n");
7889 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7890 }
7891 else if (ncopies > 1)
7892 {
7893 if (dump_enabled_p ())
7894 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7895 "can't use a fully-masked loop because"
7896 " ncopies is greater than 1.\n");
7897 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7898 }
7899 else
7900 {
7901 gcc_assert (ncopies == 1 && !slp_node);
7902 vect_record_loop_mask (loop_vinfo,
7903 &LOOP_VINFO_MASKS (loop_vinfo),
7904 1, vectype);
7905 }
7906 }
7907 return true;
7908 }
7909
7910 /* If stmt has a related stmt, then use that for getting the lhs. */
7911 if (is_pattern_stmt_p (stmt_info))
7912 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7913
7914 lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
7915 : gimple_get_lhs (stmt);
7916 lhs_type = TREE_TYPE (lhs);
7917
7918 bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
7919 ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
7920 : TYPE_SIZE (TREE_TYPE (vectype)));
7921 vec_bitsize = TYPE_SIZE (vectype);
7922
7923 /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
7924 tree vec_lhs, bitstart;
7925 if (slp_node)
7926 {
7927 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7928
7929 /* Get the correct slp vectorized stmt. */
7930 gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry]->stmt;
7931 if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
7932 vec_lhs = gimple_phi_result (phi);
7933 else
7934 vec_lhs = gimple_get_lhs (vec_stmt);
7935
7936 /* Get entry to use. */
7937 bitstart = bitsize_int (vec_index);
7938 bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
7939 }
7940 else
7941 {
7942 enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
7943 vec_lhs = vect_get_vec_def_for_operand_1 (stmt_info, dt);
7944 gcc_checking_assert (ncopies == 1
7945 || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7946
7947 /* For multiple copies, get the last copy. */
7948 for (int i = 1; i < ncopies; ++i)
7949 vec_lhs = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type,
7950 vec_lhs);
7951
7952 /* Get the last lane in the vector. */
7953 bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
7954 }
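/* For example (a 128-bit V4SI vector assumed): vec_bitsize is 128 and
   bitsize is 32, so bitstart is 96 and the BIT_FIELD_REF built below
   extracts bits [96, 128), i.e. the last lane.  */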
7955
7956 gimple_seq stmts = NULL;
7957 tree new_tree;
7958 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
7959 {
7960 /* Emit:
7961
7962 SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
7963
7964 where VEC_LHS is the vectorized live-out result and MASK is
7965 the loop mask for the final iteration. */
7966 gcc_assert (ncopies == 1 && !slp_node);
7967 tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
7968 tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
7969 1, vectype, 0);
7970 tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST,
7971 scalar_type, mask, vec_lhs);
7972
7973 /* Convert the extracted vector element to the required scalar type. */
7974 new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
7975 }
7976 else
7977 {
7978 tree bftype = TREE_TYPE (vectype);
7979 if (VECTOR_BOOLEAN_TYPE_P (vectype))
7980 bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
7981 new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
7982 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
7983 &stmts, true, NULL_TREE);
7984 }
7985
7986 if (stmts)
7987 gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);
7988
7989 /* Replace uses of lhs with the newly computed result. If the use stmt is a
7990 single-arg PHI, just replace all uses of the PHI result. This is necessary
7991 because the lcssa PHI defining lhs may come before the newly inserted stmt. */
7992 use_operand_p use_p;
7993 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
7994 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
7995 && !is_gimple_debug (use_stmt))
7996 {
7997 if (gimple_code (use_stmt) == GIMPLE_PHI
7998 && gimple_phi_num_args (use_stmt) == 1)
7999 {
8000 replace_uses_by (gimple_phi_result (use_stmt), new_tree);
8001 }
8002 else
8003 {
8004 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
8005 SET_USE (use_p, new_tree);
8006 }
8007 update_stmt (use_stmt);
8008 }
8009
8010 return true;
8011 }
8012
8013 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
8014
8015 static void
8016 vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
8017 {
8018 ssa_op_iter op_iter;
8019 imm_use_iterator imm_iter;
8020 def_operand_p def_p;
8021 gimple *ustmt;
8022
8023 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
8024 {
8025 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
8026 {
8027 basic_block bb;
8028
8029 if (!is_gimple_debug (ustmt))
8030 continue;
8031
8032 bb = gimple_bb (ustmt);
8033
8034 if (!flow_bb_inside_loop_p (loop, bb))
8035 {
8036 if (gimple_debug_bind_p (ustmt))
8037 {
8038 if (dump_enabled_p ())
8039 dump_printf_loc (MSG_NOTE, vect_location,
8040 "killing debug use\n");
8041
8042 gimple_debug_bind_reset_value (ustmt);
8043 update_stmt (ustmt);
8044 }
8045 else
8046 gcc_unreachable ();
8047 }
8048 }
8049 }
8050 }
8051
8052 /* Given loop represented by LOOP_VINFO, return true if computation of
8053 LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
8054 otherwise. */
8055
8056 static bool
8057 loop_niters_no_overflow (loop_vec_info loop_vinfo)
8058 {
8059 /* Constant case. */
8060 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8061 {
8062 tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
8063 tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
8064
8065 gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
8066 gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
8067 if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
8068 return true;
8069 }
8070
8071 widest_int max;
8072 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8073 /* Check the upper bound of loop niters. */
8074 if (get_max_loop_iterations (loop, &max))
8075 {
8076 tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
8077 signop sgn = TYPE_SIGN (type);
8078 widest_int type_max = widest_int::from (wi::max_value (type), sgn);
8079 if (max < type_max)
8080 return true;
8081 }
8082 return false;
8083 }
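/* Worked example (a 32-bit unsigned niters type assumed): if nitersm1 is
   0xffffffff then niters wraps to 0, the constant check fails because
   nitersm1 < niters does not hold, and the iteration bound equals the type
   maximum, so the function correctly returns false.  */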
8084
8085 /* Return a mask type with half the number of elements as TYPE. */
8086
8087 tree
8088 vect_halve_mask_nunits (tree type)
8089 {
8090 poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
8091 return build_truth_vector_type (nunits, current_vector_size);
8092 }
8093
8094 /* Return a mask type with twice as many elements as TYPE. */
8095
8096 tree
8097 vect_double_mask_nunits (tree type)
8098 {
8099 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
8100 return build_truth_vector_type (nunits, current_vector_size);
8101 }
8102
8103 /* Record that a fully-masked version of LOOP_VINFO would need MASKS to
8104 contain a sequence of NVECTORS masks that each control a vector of type
8105 VECTYPE. */
8106
8107 void
8108 vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
8109 unsigned int nvectors, tree vectype)
8110 {
8111 gcc_assert (nvectors != 0);
8112 if (masks->length () < nvectors)
8113 masks->safe_grow_cleared (nvectors);
8114 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8115 /* The number of scalars per iteration and the number of vectors are
8116 both compile-time constants. */
8117 unsigned int nscalars_per_iter
8118 = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
8119 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
8120 if (rgm->max_nscalars_per_iter < nscalars_per_iter)
8121 {
8122 rgm->max_nscalars_per_iter = nscalars_per_iter;
8123 rgm->mask_type = build_same_sized_truth_vector_type (vectype);
8124 }
8125 }
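/* Illustrative numbers (assumed): with a vectorization factor of 8,
   recording nvectors == 2 masks for an 8-lane VECTYPE gives
   nscalars_per_iter == 2, while recording a single mask for an 8-lane
   VECTYPE gives 1; the rgroup keeps the maximum seen so far and a mask
   type with the same number of lanes as VECTYPE.  */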
8126
8127 /* Given a complete set of masks MASKS, extract mask number INDEX
8128 for an rgroup that operates on NVECTORS vectors of type VECTYPE,
8129 where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI.
8130
8131 See the comment above vec_loop_masks for more details about the mask
8132 arrangement. */
8133
8134 tree
8135 vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
8136 unsigned int nvectors, tree vectype, unsigned int index)
8137 {
8138 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8139 tree mask_type = rgm->mask_type;
8140
8141 /* Populate the rgroup's mask array, if this is the first time we've
8142 used it. */
8143 if (rgm->masks.is_empty ())
8144 {
8145 rgm->masks.safe_grow_cleared (nvectors);
8146 for (unsigned int i = 0; i < nvectors; ++i)
8147 {
8148 tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
8149 /* Provide a dummy definition until the real one is available. */
8150 SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
8151 rgm->masks[i] = mask;
8152 }
8153 }
8154
8155 tree mask = rgm->masks[index];
8156 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
8157 TYPE_VECTOR_SUBPARTS (vectype)))
8158 {
8159 /* A loop mask for data type X can be reused for data type Y
8160 if X has N times more elements than Y and if Y's elements
8161 are N times bigger than X's. In this case each sequence
8162 of N elements in the loop mask will be all-zero or all-one.
8163 We can then view-convert the mask so that each sequence of
8164 N elements is replaced by a single element. */
8165 gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
8166 TYPE_VECTOR_SUBPARTS (vectype)));
8167 gimple_seq seq = NULL;
8168 mask_type = build_same_sized_truth_vector_type (vectype);
8169 mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
8170 if (seq)
8171 gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
8172 }
8173 return mask;
8174 }
8175
8176 /* Scale profiling counters by estimation for LOOP which is vectorized
8177 by factor VF. */
8178
8179 static void
8180 scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
8181 {
8182 edge preheader = loop_preheader_edge (loop);
8183 /* Reduce loop iterations by the vectorization factor. */
8184 gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
8185 profile_count freq_h = loop->header->count, freq_e = preheader->count ();
8186
8187 if (freq_h.nonzero_p ())
8188 {
8189 profile_probability p;
8190
8191 /* Avoid dropping loop body profile counter to 0 because of zero count
8192 in loop's preheader. */
8193 if (!(freq_e == profile_count::zero ()))
8194 freq_e = freq_e.force_nonzero ();
8195 p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
8196 scale_loop_frequencies (loop, p);
8197 }
8198
8199 edge exit_e = single_exit (loop);
8200 exit_e->probability = profile_probability::always ()
8201 .apply_scale (1, new_est_niter + 1);
8202
8203 edge exit_l = single_pred_edge (loop->latch);
8204 profile_probability prob = exit_l->probability;
8205 exit_l->probability = exit_e->probability.invert ();
8206 if (prob.initialized_p () && exit_l->probability.initialized_p ())
8207 scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
8208 }
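/* Illustrative numbers (assumed): vectorizing by VF == 4 a loop whose
   estimated trip count was about 100 gives new_est_niter of roughly 25,
   so the exit edge probability above becomes about 1/26 and the body
   frequencies are scaled down accordingly.  */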
8209
8210 /* Vectorize STMT if relevant, inserting any new instructions before GSI.
8211 When vectorizing STMT as a store, set *SEEN_STORE to its stmt_vec_info.
8212 *SLP_SCHEDULED is a running record of whether we have called
8213 vect_schedule_slp. */
8214
8215 static void
8216 vect_transform_loop_stmt (loop_vec_info loop_vinfo, gimple *stmt,
8217 gimple_stmt_iterator *gsi,
8218 stmt_vec_info *seen_store, bool *slp_scheduled)
8219 {
8220 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8221 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8222 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
8223 if (!stmt_info)
8224 return;
8225
8226 if (dump_enabled_p ())
8227 {
8228 dump_printf_loc (MSG_NOTE, vect_location,
8229 "------>vectorizing statement: ");
8230 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
8231 }
8232
8233 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8234 vect_loop_kill_debug_uses (loop, stmt_info);
8235
8236 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8237 && !STMT_VINFO_LIVE_P (stmt_info))
8238 return;
8239
8240 if (STMT_VINFO_VECTYPE (stmt_info))
8241 {
8242 poly_uint64 nunits
8243 = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
8244 if (!STMT_SLP_TYPE (stmt_info)
8245 && maybe_ne (nunits, vf)
8246 && dump_enabled_p ())
8247 /* For SLP, VF is set according to the unrolling factor and not
8248 to the vector size, hence this print is not valid for SLP. */
8249 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8250 }
8251
8252 /* SLP. Schedule all the SLP instances when the first SLP stmt is
8253 reached. */
8254 if (slp_vect_type slptype = STMT_SLP_TYPE (stmt_info))
8255 {
8256
8257 if (!*slp_scheduled)
8258 {
8259 *slp_scheduled = true;
8260
8261 DUMP_VECT_SCOPE ("scheduling SLP instances");
8262
8263 vect_schedule_slp (loop_vinfo);
8264 }
8265
8266 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
8267 if (slptype == pure_slp)
8268 return;
8269 }
8270
8271 if (dump_enabled_p ())
8272 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
8273
8274 bool grouped_store = false;
8275 if (vect_transform_stmt (stmt_info, gsi, &grouped_store, NULL, NULL))
8276 *seen_store = stmt_info;
8277 }
8278
8279 /* Function vect_transform_loop.
8280
8281 The analysis phase has determined that the loop is vectorizable.
8282 Vectorize the loop - create vectorized stmts to replace the scalar
8283 stmts in the loop, and update the loop exit condition.
8284 Returns scalar epilogue loop if any. */
8285
8286 struct loop *
8287 vect_transform_loop (loop_vec_info loop_vinfo)
8288 {
8289 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8290 struct loop *epilogue = NULL;
8291 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
8292 int nbbs = loop->num_nodes;
8293 int i;
8294 tree niters_vector = NULL_TREE;
8295 tree step_vector = NULL_TREE;
8296 tree niters_vector_mult_vf = NULL_TREE;
8297 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8298 unsigned int lowest_vf = constant_lower_bound (vf);
8299 bool slp_scheduled = false;
8300 gimple *stmt;
8301 bool check_profitability = false;
8302 unsigned int th;
8303
8304 DUMP_VECT_SCOPE ("vec_transform_loop");
8305
8306 loop_vinfo->shared->check_datarefs ();
8307
8308 /* Use the more conservative vectorization threshold. If the number
8309 of iterations is constant, assume the cost check has been performed
8310 by our caller. If the threshold makes all loops profitable that
8311 run at least the (estimated) vectorization factor number of times,
8312 checking is pointless, too. */
8313 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
8314 if (th >= vect_vf_for_cost (loop_vinfo)
8315 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8316 {
8317 if (dump_enabled_p ())
8318 dump_printf_loc (MSG_NOTE, vect_location,
8319 "Profitability threshold is %d loop iterations.\n",
8320 th);
8321 check_profitability = true;
8322 }
8323
8324 /* Make sure there exists a single-predecessor exit bb. Do this before
8325 versioning. */
8326 edge e = single_exit (loop);
8327 if (! single_pred_p (e->dest))
8328 {
8329 split_loop_exit_edge (e);
8330 if (dump_enabled_p ())
8331 dump_printf (MSG_NOTE, "split exit edge\n");
8332 }
8333
8334 /* Version the loop first, if required, so the profitability check
8335 comes first. */
8336
8337 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
8338 {
8339 poly_uint64 versioning_threshold
8340 = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
8341 if (check_profitability
8342 && ordered_p (poly_uint64 (th), versioning_threshold))
8343 {
8344 versioning_threshold = ordered_max (poly_uint64 (th),
8345 versioning_threshold);
8346 check_profitability = false;
8347 }
8348 vect_loop_versioning (loop_vinfo, th, check_profitability,
8349 versioning_threshold);
8350 check_profitability = false;
8351 }
8352
8353 /* Make sure there exists a single-predecessor exit bb also on the
8354 scalar loop copy. Do this after versioning but before peeling
8355 so that the CFG structure is fine for both the scalar and the
8356 if-converted loop, which lets slpeel_duplicate_current_defs_from_edges
8357 see matched loop-closed PHI nodes on the exit. */
8358 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8359 {
8360 e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
8361 if (! single_pred_p (e->dest))
8362 {
8363 split_loop_exit_edge (e);
8364 if (dump_enabled_p ())
8365 dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
8366 }
8367 }
8368
8369 tree niters = vect_build_loop_niters (loop_vinfo);
8370 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
8371 tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
8372 bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
8373 epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
8374 &step_vector, &niters_vector_mult_vf, th,
8375 check_profitability, niters_no_overflow);
8376
8377 if (niters_vector == NULL_TREE)
8378 {
8379 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8380 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8381 && known_eq (lowest_vf, vf))
8382 {
8383 niters_vector
8384 = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
8385 LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
8386 step_vector = build_one_cst (TREE_TYPE (niters));
8387 }
8388 else
8389 vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
8390 &step_vector, niters_no_overflow);
8391 }
8392
8393 /* 1) Make sure the loop header has exactly two entries
8394 2) Make sure we have a preheader basic block. */
8395
8396 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
8397
8398 split_edge (loop_preheader_edge (loop));
8399
8400 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8401 && vect_use_loop_mask_for_alignment_p (loop_vinfo))
8402 /* This will deal with any possible peeling. */
8403 vect_prepare_for_masked_peels (loop_vinfo);
8404
8405 /* FORNOW: the vectorizer supports only loops whose body consists
8406 of one basic block (header + empty latch). When the vectorizer
8407 supports more involved loop forms, the order in which the BBs are
8408 traversed will need to be reconsidered. */
8409
8410 for (i = 0; i < nbbs; i++)
8411 {
8412 basic_block bb = bbs[i];
8413 stmt_vec_info stmt_info;
8414
8415 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
8416 gsi_next (&si))
8417 {
8418 gphi *phi = si.phi ();
8419 if (dump_enabled_p ())
8420 {
8421 dump_printf_loc (MSG_NOTE, vect_location,
8422 "------>vectorizing phi: ");
8423 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
8424 }
8425 stmt_info = loop_vinfo->lookup_stmt (phi);
8426 if (!stmt_info)
8427 continue;
8428
8429 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8430 vect_loop_kill_debug_uses (loop, stmt_info);
8431
8432 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8433 && !STMT_VINFO_LIVE_P (stmt_info))
8434 continue;
8435
8436 if (STMT_VINFO_VECTYPE (stmt_info)
8437 && (maybe_ne
8438 (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
8439 && dump_enabled_p ())
8440 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8441
8442 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
8443 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
8444 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
8445 && ! PURE_SLP_STMT (stmt_info))
8446 {
8447 if (dump_enabled_p ())
8448 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
8449 vect_transform_stmt (stmt_info, NULL, NULL, NULL, NULL);
8450 }
8451 }
8452
8453 for (gimple_stmt_iterator si = gsi_start_bb (bb);
8454 !gsi_end_p (si);)
8455 {
8456 stmt = gsi_stmt (si);
8457 /* During vectorization remove existing clobber stmts. */
8458 if (gimple_clobber_p (stmt))
8459 {
8460 unlink_stmt_vdef (stmt);
8461 gsi_remove (&si, true);
8462 release_defs (stmt);
8463 }
8464 else
8465 {
8466 stmt_info = loop_vinfo->lookup_stmt (stmt);
8467
8468 /* Vector stmts created in the outer loop during vectorization of
8469 stmts in an inner loop may not have a stmt_info and do not
8470 need to be vectorized. */
8471 stmt_vec_info seen_store = NULL;
8472 if (stmt_info)
8473 {
8474 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
8475 {
8476 gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
8477 for (gimple_stmt_iterator subsi = gsi_start (def_seq);
8478 !gsi_end_p (subsi); gsi_next (&subsi))
8479 vect_transform_loop_stmt (loop_vinfo,
8480 gsi_stmt (subsi), &si,
8481 &seen_store,
8482 &slp_scheduled);
8483 gimple *pat_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
8484 vect_transform_loop_stmt (loop_vinfo, pat_stmt, &si,
8485 &seen_store, &slp_scheduled);
8486 }
8487 vect_transform_loop_stmt (loop_vinfo, stmt, &si,
8488 &seen_store, &slp_scheduled);
8489 }
8490 if (seen_store)
8491 {
8492 if (STMT_VINFO_GROUPED_ACCESS (seen_store))
8493 {
8494 /* Interleaving. The vectorization of the
8495 interleaving chain was completed - free
8496 all the stores in the chain. */
8497 gsi_next (&si);
8498 vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store));
8499 }
8500 else
8501 {
8502 /* Free the attached stmt_vec_info and remove the
8503 stmt. */
8504 free_stmt_vec_info (stmt);
8505 unlink_stmt_vdef (stmt);
8506 gsi_remove (&si, true);
8507 release_defs (stmt);
8508 }
8509 }
8510 else
8511 gsi_next (&si);
8512 }
8513 }
8514
8515 /* Stub out scalar statements that must not survive vectorization.
8516 Doing this here helps with grouped statements, or statements that
8517 are involved in patterns. */
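/* As an illustrative example (the SSA names are made up): a scalar
   masked load left over from pattern recognition or group analysis,
       _1 = MASK_LOAD (ptr_2, 0B, mask_3);
   is not vectorized on its own; the loop below replaces it with
       _1 = 0;
   so that no scalar masked load survives into the vectorized loop.  */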
8518 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
8519 !gsi_end_p (gsi); gsi_next (&gsi))
8520 {
8521 gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
8522 if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
8523 {
8524 tree lhs = gimple_get_lhs (call);
8525 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8526 {
8527 tree zero = build_zero_cst (TREE_TYPE (lhs));
8528 gimple *new_stmt = gimple_build_assign (lhs, zero);
8529 gsi_replace (&gsi, new_stmt, true);
8530 }
8531 }
8532 }
8533 } /* BBs in loop */
8534
8535 /* The vectorization factor is always > 1, so if we use an IV increment
8536 of 1, a zero NITERS becomes a nonzero NITERS_VECTOR. */
8537 if (integer_onep (step_vector))
8538 niters_no_overflow = true;
8539 vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
8540 niters_vector_mult_vf, !niters_no_overflow);
8541
8542 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
8543 scale_profile_for_vect_loop (loop, assumed_vf);
8544
8545 /* True if the final iteration might not handle a full vector's
8546 worth of scalar iterations. */
8547 bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
8548 /* The minimum number of iterations performed by the epilogue. This
8549 is 1 when peeling for gaps because we always need a final scalar
8550 iteration. */
8551 int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
8552 /* +1 to convert latch counts to loop iteration counts,
8553 -min_epilogue_iters to remove iterations that cannot be performed
8554 by the vector code. */
8555 int bias_for_lowest = 1 - min_epilogue_iters;
8556 int bias_for_assumed = bias_for_lowest;
8557 int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
8558 if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
8559 {
8560 /* When the amount of peeling is known at compile time, the first
8561 iteration will have exactly alignment_npeels active elements.
8562 In the worst case it will have at least one. */
8563 int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
8564 bias_for_lowest += lowest_vf - min_first_active;
8565 bias_for_assumed += assumed_vf - min_first_active;
8566 }
8567 /* In these calculations the "- 1" converts loop iteration counts
8568 back to latch counts. */
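/* As a worked example with illustrative numbers: for lowest_vf == 4, a
   not-fully-masked loop with no epilogue iterations (so bias_for_lowest
   == 1) and a scalar latch bound of 7 (at most 8 iterations), the
   computation below gives (7 + 1) / 4 - 1 == 1, i.e. a vector latch
   bound of 1 and thus at most 2 vector iterations.  */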
8569 if (loop->any_upper_bound)
8570 loop->nb_iterations_upper_bound
8571 = (final_iter_may_be_partial
8572 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
8573 lowest_vf) - 1
8574 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
8575 lowest_vf) - 1);
8576 if (loop->any_likely_upper_bound)
8577 loop->nb_iterations_likely_upper_bound
8578 = (final_iter_may_be_partial
8579 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
8580 + bias_for_lowest, lowest_vf) - 1
8581 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
8582 + bias_for_lowest, lowest_vf) - 1);
8583 if (loop->any_estimate)
8584 loop->nb_iterations_estimate
8585 = (final_iter_may_be_partial
8586 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
8587 assumed_vf) - 1
8588 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
8589 assumed_vf) - 1);
8590
8591 if (dump_enabled_p ())
8592 {
8593 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8594 {
8595 dump_printf_loc (MSG_NOTE, vect_location,
8596 "LOOP VECTORIZED\n");
8597 if (loop->inner)
8598 dump_printf_loc (MSG_NOTE, vect_location,
8599 "OUTER LOOP VECTORIZED\n");
8600 dump_printf (MSG_NOTE, "\n");
8601 }
8602 else
8603 {
8604 dump_printf_loc (MSG_NOTE, vect_location,
8605 "LOOP EPILOGUE VECTORIZED (VS=");
8606 dump_dec (MSG_NOTE, current_vector_size);
8607 dump_printf (MSG_NOTE, ")\n");
8608 }
8609 }
8610
8611 /* Free SLP instances here because otherwise stmt reference counting
8612 won't work. */
8613 slp_instance instance;
8614 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
8615 vect_free_slp_instance (instance, true);
8616 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
8617 /* Clear the safelen field since its value is invalid after vectorization:
8618 the vectorized loop can have loop-carried dependencies. */
8619 loop->safelen = 0;
8620
8621 /* Don't vectorize an epilogue of an epilogue loop. */
8622 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8623 epilogue = NULL;
8624
8625 if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
8626 epilogue = NULL;
8627
8628 if (epilogue)
8629 {
8630 auto_vector_sizes vector_sizes;
8631 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
8632 unsigned int next_size = 0;
8633
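/* Illustrative example (made-up numbers) for the constant-niters case
   below: with a current_vector_size of 32 bytes, candidate sizes of 32
   and 16 bytes and lowest_vf == 8, an epilogue of eiters == 4 skips the
   32-byte candidate (4 < 8 / 1) and settles on 16 bytes, since
   32 / 16 == 2 and 4 >= 8 / 2.  */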
8634 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8635 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
8636 && known_eq (vf, lowest_vf))
8637 {
8638 unsigned int eiters
8639 = (LOOP_VINFO_INT_NITERS (loop_vinfo)
8640 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
8641 eiters = eiters % lowest_vf;
8642 epilogue->nb_iterations_upper_bound = eiters - 1;
8643
8644 unsigned int ratio;
8645 while (next_size < vector_sizes.length ()
8646 && !(constant_multiple_p (current_vector_size,
8647 vector_sizes[next_size], &ratio)
8648 && eiters >= lowest_vf / ratio))
8649 next_size += 1;
8650 }
8651 else
8652 while (next_size < vector_sizes.length ()
8653 && maybe_lt (current_vector_size, vector_sizes[next_size]))
8654 next_size += 1;
8655
8656 if (next_size == vector_sizes.length ())
8657 epilogue = NULL;
8658 }
8659
8660 if (epilogue)
8661 {
8662 epilogue->force_vectorize = loop->force_vectorize;
8663 epilogue->safelen = loop->safelen;
8664 epilogue->dont_vectorize = false;
8665
8666 /* We may need to if-convert the epilogue to vectorize it. */
8667 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8668 tree_if_conversion (epilogue);
8669 }
8670
8671 return epilogue;
8672 }
8673
8674 /* The code below performs a simple optimization - it reverts
8675 if-conversion for masked stores: if the mask of a store is zero, the
8676 store is not executed and, where possible, neither are the producers
8677 of the stored values. For example,
8678 for (i=0; i<n; i++)
8679 if (c[i])
8680 {
8681 p1[i] += 1;
8682 p2[i] = p3[i] +2;
8683 }
8684 this transformation will produce the following semi-hammock:
8685
8686 if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
8687 {
8688 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
8689 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
8690 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
8691 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
8692 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
8693 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
8694 }
8695 */
8696
8697 void
8698 optimize_mask_stores (struct loop *loop)
8699 {
8700 basic_block *bbs = get_loop_body (loop);
8701 unsigned nbbs = loop->num_nodes;
8702 unsigned i;
8703 basic_block bb;
8704 struct loop *bb_loop;
8705 gimple_stmt_iterator gsi;
8706 gimple *stmt;
8707 auto_vec<gimple *> worklist;
8708
8709 vect_location = find_loop_location (loop);
8710 /* Pick up all masked stores in the loop, if any. */
8711 for (i = 0; i < nbbs; i++)
8712 {
8713 bb = bbs[i];
8714 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
8715 gsi_next (&gsi))
8716 {
8717 stmt = gsi_stmt (gsi);
8718 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8719 worklist.safe_push (stmt);
8720 }
8721 }
8722
8723 free (bbs);
8724 if (worklist.is_empty ())
8725 return;
8726
8727 /* Loop has masked stores. */
8728 while (!worklist.is_empty ())
8729 {
8730 gimple *last, *last_store;
8731 edge e, efalse;
8732 tree mask;
8733 basic_block store_bb, join_bb;
8734 gimple_stmt_iterator gsi_to;
8735 tree vdef, new_vdef;
8736 gphi *phi;
8737 tree vectype;
8738 tree zero;
8739
8740 last = worklist.pop ();
8741 mask = gimple_call_arg (last, 2);
8742 bb = gimple_bb (last);
8743 /* Create then_bb and the if-then structure in the CFG; then_bb
8744 belongs to the same loop as if_bb. That loop can be different
8745 from LOOP when a two-level loop nest is vectorized and the
8746 mask_store belongs to the inner loop. */
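/* Sketch of the CFG built below: BB ends in "if (mask == {0,...,0})";
   its true edge goes straight to JOIN_BB (all masked stores skipped),
   its false edge goes to the new STORE_BB, which falls through to
   JOIN_BB and receives the masked stores sunk from BB.  */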
8747 e = split_block (bb, last);
8748 bb_loop = bb->loop_father;
8749 gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
8750 join_bb = e->dest;
8751 store_bb = create_empty_bb (bb);
8752 add_bb_to_loop (store_bb, bb_loop);
8753 e->flags = EDGE_TRUE_VALUE;
8754 efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
8755 /* Put STORE_BB on the false edge and mark it unlikely. */
8756 efalse->probability = profile_probability::unlikely ();
8757 store_bb->count = efalse->count ();
8758 make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
8759 if (dom_info_available_p (CDI_DOMINATORS))
8760 set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
8761 if (dump_enabled_p ())
8762 dump_printf_loc (MSG_NOTE, vect_location,
8763 "Create new block %d to sink mask stores.",
8764 store_bb->index);
8765 /* Create vector comparison with boolean result. */
8766 vectype = TREE_TYPE (mask);
8767 zero = build_zero_cst (vectype);
8768 stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
8769 gsi = gsi_last_bb (bb);
8770 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
8771 /* Create new PHI node for vdef of the last masked store:
8772 .MEM_2 = VDEF <.MEM_1>
8773 will be converted to
8774 .MEM.3 = VDEF <.MEM_1>
8775 and new PHI node will be created in join bb
8776 .MEM_2 = PHI <.MEM_1, .MEM_3>
8777 */
8778 vdef = gimple_vdef (last);
8779 new_vdef = make_ssa_name (gimple_vop (cfun), last);
8780 gimple_set_vdef (last, new_vdef);
8781 phi = create_phi_node (vdef, join_bb);
8782 add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
8783
8784 /* Put all masked stores with the same mask to STORE_BB if possible. */
8785 while (true)
8786 {
8787 gimple_stmt_iterator gsi_from;
8788 gimple *stmt1 = NULL;
8789
8790 /* Move masked store to STORE_BB. */
8791 last_store = last;
8792 gsi = gsi_for_stmt (last);
8793 gsi_from = gsi;
8794 /* Shift GSI to the previous stmt for further traversal. */
8795 gsi_prev (&gsi);
8796 gsi_to = gsi_start_bb (store_bb);
8797 gsi_move_before (&gsi_from, &gsi_to);
8798 /* Set up GSI_TO at the start of the now non-empty block. */
8799 gsi_to = gsi_start_bb (store_bb);
8800 if (dump_enabled_p ())
8801 {
8802 dump_printf_loc (MSG_NOTE, vect_location,
8803 "Move stmt to created bb\n");
8804 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
8805 }
8806 /* Move all stored value producers if possible. */
8807 while (!gsi_end_p (gsi))
8808 {
8809 tree lhs;
8810 imm_use_iterator imm_iter;
8811 use_operand_p use_p;
8812 bool res;
8813
8814 /* Skip debug statements. */
8815 if (is_gimple_debug (gsi_stmt (gsi)))
8816 {
8817 gsi_prev (&gsi);
8818 continue;
8819 }
8820 stmt1 = gsi_stmt (gsi);
8821 /* Do not consider statements writing to memory or having
8822 a volatile operand. */
8823 if (gimple_vdef (stmt1)
8824 || gimple_has_volatile_ops (stmt1))
8825 break;
8826 gsi_from = gsi;
8827 gsi_prev (&gsi);
8828 lhs = gimple_get_lhs (stmt1);
8829 if (!lhs)
8830 break;
8831
8832 /* LHS of vectorized stmt must be SSA_NAME. */
8833 if (TREE_CODE (lhs) != SSA_NAME)
8834 break;
8835
8836 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8837 {
8838 /* Remove dead scalar statement. */
8839 if (has_zero_uses (lhs))
8840 {
8841 gsi_remove (&gsi_from, true);
8842 continue;
8843 }
8844 }
8845
8846 /* Check that LHS does not have uses outside of STORE_BB. */
8847 res = true;
8848 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
8849 {
8850 gimple *use_stmt;
8851 use_stmt = USE_STMT (use_p);
8852 if (is_gimple_debug (use_stmt))
8853 continue;
8854 if (gimple_bb (use_stmt) != store_bb)
8855 {
8856 res = false;
8857 break;
8858 }
8859 }
8860 if (!res)
8861 break;
8862
8863 if (gimple_vuse (stmt1)
8864 && gimple_vuse (stmt1) != gimple_vuse (last_store))
8865 break;
8866
8867 /* Can move STMT1 to STORE_BB. */
8868 if (dump_enabled_p ())
8869 {
8870 dump_printf_loc (MSG_NOTE, vect_location,
8871 "Move stmt to created bb\n");
8872 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
8873 }
8874 gsi_move_before (&gsi_from, &gsi_to);
8875 /* Shift GSI_TO for further insertion. */
8876 gsi_prev (&gsi_to);
8877 }
8878 /* Put other masked stores with the same mask to STORE_BB. */
8879 if (worklist.is_empty ()
8880 || gimple_call_arg (worklist.last (), 2) != mask
8881 || worklist.last () != stmt1)
8882 break;
8883 last = worklist.pop ();
8884 }
8885 add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
8886 }
8887 }