/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"
#include "internal-fn.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};
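
/* For example (an illustrative sketch, not taken from the sources above):

     a[i] = b[i] + 1;	<-- VLS_STORE: stores a vectorized statement result
     a[i] = 7;		<-- VLS_STORE_INVARIANT: stores an invariant value
     x = a[i];		<-- VLS_LOAD  */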

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */
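/* E.g. when vectorizing the outer loop of

     for (i = 0; i < n; i++)	<-- loop being vectorized
       for (j = 0; j < m; j++)
	 S;

   this returns true for S (an illustrative sketch).  */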
bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop* loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */
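/* A typical call, as used later in this file, records NCOPIES copies of a
   vector statement against the loop body:

     inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				     stmt_info, 0, vect_body);  */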

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}

/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
	 pattern that can potentially be vectorized.  Don't mark the stmt
	 as relevant/live because it's not going to be vectorized.
	 Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "last stmt in pattern. don't mark"
			 " relevant/live.\n");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}


/* Function is_simple_and_all_uses_invariant

   Return true if STMT is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
{
  tree op;
  gimple *def_stmt;
  ssa_op_iter iter;

  if (!is_gimple_assign (stmt))
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (dt != vect_external_def && dt != vect_constant_def)
	return false;
    }
  return true;
}

/* Function vect_stmt_relevant_p.

   Return true if STMT, in the loop that is represented by LOOP_VINFO, is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (other than the loop exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */
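/* For instance (an illustrative sketch):

     for (i = 0; i < n; i++)
       s = a[i];
     ... = s;	<-- s is used outside the loop, so *LIVE_P is set for
		    the stmt defining s.  */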

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop-closed SSA form).  */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}


/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}


/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
     which does not need to be directly vectorized, then the liveness/relevance
     of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)  */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	case vect_used_only_live:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt) == GIMPLE_PHI
	   && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
	   && ! STMT_VINFO_LIVE_P (stmt_vinfo)
	   && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
	       == use))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, def_stmt, relevant, false);
  return true;
}


/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant according to the relevance property
	 of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
	 propagated as is to the DEF_STMTs of its USEs.

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the relevance to vect_used_by_reduction.
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
	{
	case vect_reduction_def:
	  gcc_assert (relevant != vect_unused_in_scope);
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }
	  break;

	case vect_nested_cycle:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_outer_by_reduction
	      && relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }
	  break;

	case vect_double_reduction_def:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  gather_scatter_info gs_info;
	  if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
	    gcc_unreachable ();
	  if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}


/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			int ndts,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Cost the "broadcast" of a scalar operand into a vector operand.
     Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
     cost model.  */
  for (i = 0; i < ndts; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */
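/* A worked example of the loop below, assuming vect_pow2 (x) computes 2^x:
   a two-step promotion (PWR == 1) costs vect_pow2 (1) + vect_pow2 (2) == 6
   vec_promote_demote stmts, while a two-step demotion costs
   vect_pow2 (0) + vect_pow2 (1) == 3.  */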

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       vect_memory_access_type memory_access_type,
		       enum vect_def_type dt, slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses high and low interleave or shuffle operations for each
	 needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
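      /* E.g. group_size == 4 gives ceil_log2 (4) == 2, so a single copy
	 (ncopies == 1) costs 2 * 4 == 8 vec_perm stmts (illustrative
	 arithmetic for the formula above).  */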
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    /* N scalar stores plus extracting the elements.  */
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     scalar_store, stmt_info, 0, vect_body);
  else
    vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}


/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      vect_memory_access_type memory_access_type,
		      slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (dr, ncopies, first_stmt_p,
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}


/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type, a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */
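/* E.g. for VAL == 5 and TYPE a four-element integer vector type, the
   loop preheader gets "cst_1 = { 5, 5, 5, 5 };" and cst_1 is returned
   (an illustrative sketch; the actual SSA name is whatever
   vect_get_new_ssa_name produces).  */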

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We abuse this function to push something to an SSA name with
     initial 'val'.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  /* A scalar boolean value should be transformed into an all-zeros
	     or all-ones value before building a vector.  */
	  if (VECTOR_BOOLEAN_TYPE_P (type))
	    {
	      tree true_val = build_all_ones_cst (TREE_TYPE (type));
	      tree false_val = build_zero_cst (TREE_TYPE (type));

	      if (CONSTANT_CLASS_P (val))
		val = integer_zerop (val) ? false_val : true_val;
	      else
		{
		  new_temp = make_ssa_name (TREE_TYPE (type));
		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
						   val, true_val, false_val);
		  vect_init_vector_1 (stmt, init_stmt, gsi);
		  val = new_temp;
		}
	    }
	  else if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
		init_stmt = gimple_build_assign (new_temp,
						 fold_build1 (VIEW_CONVERT_EXPR,
							      TREE_TYPE (type),
							      val));
	      else
		init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}

/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
   DT that will be used in the vectorized stmt.  */

tree
vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  stmt_vec_info def_stmt_info = NULL;

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */
      gcc_unreachable ();

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}


/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  bool is_simple_use;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (def_stmt && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt = ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    {
      tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      tree vector_type;

      if (vectype)
	vector_type = vectype;
      else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
	       && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	vector_type = build_same_sized_truth_vector_type (stmt_vectype);
      else
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vector_type);
      return vect_init_vector (stmt, op, vector_type, NULL);
    }
  else
    return vect_get_vec_def_for_operand_1 (def_stmt, dt);
}


/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0: vx.0 = memref0      VS1.1
                        VS1.1: vx.1 = memref1      VS1.2
                        VS1.2: vx.2 = memref2      VS1.3
                        VS1.3: vx.3 = memref3

   S2: z = x + ...      VSnew.0: vz0 = vx.0 + ...  VSnew.1
                        VSnew.1: vz1 = vx.1 + ...  VSnew.2
                        VSnew.2: vz2 = vx.2 + ...  VSnew.3
                        VSnew.3: vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}


/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}


/* Get vectorized definitions for OP0 and OP1.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}


/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}

/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
				tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
	{
	  tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
	  tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
	  if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
					      OPTIMIZE_FOR_SPEED))
	    return ifn;
	}
    }
  return IFN_LAST;
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);

/* STMT is a non-strided load or store, meaning that it accesses
   elements with a known constant step.  Return -1 if that step
   is negative, 0 if it is zero, and 1 if it is greater than zero.  */

static int
compare_step_with_zero (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  return tree_int_cst_compare (vect_dr_behavior (dr)->step,
			       size_zero_node);
}

/* If the target supports a permute mask that reverses the elements in
   a vector of type VECTYPE, return that mask, otherwise return null.  */
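/* E.g. for a four-element vector the selector built below is
   { 3, 2, 1, 0 } (an illustrative sketch).  */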

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}

/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is part of a grouped load
   or store.

   For stores, the statements in the group are all consecutive
   and there is no gap at the end.  For loads, the statements in the
   group might not be consecutive; there can be gaps between statements
   as well as at the end.  */
1730
1731 static bool
1732 get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
1733 vec_load_store_type vls_type,
1734 vect_memory_access_type *memory_access_type)
1735 {
1736 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1737 vec_info *vinfo = stmt_info->vinfo;
1738 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1739 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
1740 gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1741 unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
1742 bool single_element_p = (stmt == first_stmt
1743 && !GROUP_NEXT_ELEMENT (stmt_info));
1744 unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
1745 unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);
1746
1747 /* True if the vectorized statements would access beyond the last
1748 statement in the group. */
1749 bool overrun_p = false;
1750
1751 /* True if we can cope with such overrun by peeling for gaps, so that
1752 there is at least one final scalar iteration after the vector loop. */
1753 bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner);
1754
1755 /* There can only be a gap at the end of the group if the stride is
1756 known at compile time. */
1757 gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);
1758
1759 /* Stores can't yet have gaps. */
1760 gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
1761
1762 if (slp)
1763 {
1764 if (STMT_VINFO_STRIDED_P (stmt_info))
1765 {
1766 /* Try to use consecutive accesses of GROUP_SIZE elements,
1767 separated by the stride, until we have a complete vector.
1768 Fall back to scalar accesses if that isn't possible. */
1769 if (nunits % group_size == 0)
1770 *memory_access_type = VMAT_STRIDED_SLP;
1771 else
1772 *memory_access_type = VMAT_ELEMENTWISE;
1773 }
1774 else
1775 {
1776 overrun_p = loop_vinfo && gap != 0;
1777 if (overrun_p && vls_type != VLS_LOAD)
1778 {
1779 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1780 "Grouped store with gaps requires"
1781 " non-consecutive accesses\n");
1782 return false;
1783 }
1784 /* If the access is aligned, an overrun is fine. */
1785 if (overrun_p
1786 && aligned_access_p
1787 (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt))))
1788 overrun_p = false;
1789 if (overrun_p && !can_overrun_p)
1790 {
1791 if (dump_enabled_p ())
1792 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1793 "Peeling for outer loop is not supported\n");
1794 return false;
1795 }
1796 *memory_access_type = VMAT_CONTIGUOUS;
1797 }
1798 }
1799 else
1800 {
1801 /* We can always handle this case using elementwise accesses,
1802 but see if something more efficient is available. */
1803 *memory_access_type = VMAT_ELEMENTWISE;
1804
1805 /* If there is a gap at the end of the group then these optimizations
1806 would access excess elements in the last iteration. */
1807 bool would_overrun_p = (gap != 0);
1808 /* If the access is aligned, an overrun is fine, but only if the
1809 overrun is not inside an unused vector (i.e. if the gap is as
1810 large as or larger than a vector). */
1811 if (would_overrun_p
1812 && gap < nunits
1813 && aligned_access_p
1814 (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt))))
1815 would_overrun_p = false;
1816 if (!STMT_VINFO_STRIDED_P (stmt_info)
1817 && (can_overrun_p || !would_overrun_p)
1818 && compare_step_with_zero (stmt) > 0)
1819 {
1820 /* First try using LOAD/STORE_LANES. */
1821 if (vls_type == VLS_LOAD
1822 ? vect_load_lanes_supported (vectype, group_size)
1823 : vect_store_lanes_supported (vectype, group_size))
1824 {
1825 *memory_access_type = VMAT_LOAD_STORE_LANES;
1826 overrun_p = would_overrun_p;
1827 }
1828
1829 /* If that fails, try using permuting loads. */
1830 if (*memory_access_type == VMAT_ELEMENTWISE
1831 && (vls_type == VLS_LOAD
1832 ? vect_grouped_load_supported (vectype, single_element_p,
1833 group_size)
1834 : vect_grouped_store_supported (vectype, group_size)))
1835 {
1836 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
1837 overrun_p = would_overrun_p;
1838 }
1839 }
1840 }
1841
1842 if (vls_type != VLS_LOAD && first_stmt == stmt)
1843 {
1844 /* STMT is the leader of the group. Check the operands of all the
1845 stmts of the group. */
1846 gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
1847 while (next_stmt)
1848 {
1849 gcc_assert (gimple_assign_single_p (next_stmt));
1850 tree op = gimple_assign_rhs1 (next_stmt);
1851 gimple *def_stmt;
1852 enum vect_def_type dt;
1853 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
1854 {
1855 if (dump_enabled_p ())
1856 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1857 "use not simple.\n");
1858 return false;
1859 }
1860 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
1861 }
1862 }
1863
1864 if (overrun_p)
1865 {
1866 gcc_assert (can_overrun_p);
1867 if (dump_enabled_p ())
1868 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1869 "Data access with gaps requires scalar "
1870 "epilogue loop\n");
1871 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
1872 }
1873
1874 return true;
1875 }
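/* Illustrative examples of the grouped cases handled above, assuming
   four elements per vector: the accesses a[4*i], a[4*i+1], a[4*i+2],
   a[4*i+3] form a group of four with no gap, so contiguous vector
   accesses work; the accesses a[3*i], a[3*i+1] form a group of two
   with a gap of one, so a vector load may read the gap element and
   the overrun/peeling-for-gaps handling above applies. */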
1876
1877 /* A subroutine of get_load_store_type, with a subset of the same
1878 arguments. Handle the case where STMT is a load or store that
1879 accesses consecutive elements with a negative step. */
1880
1881 static vect_memory_access_type
1882 get_negative_load_store_type (gimple *stmt, tree vectype,
1883 vec_load_store_type vls_type,
1884 unsigned int ncopies)
1885 {
1886 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1887 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1888 dr_alignment_support alignment_support_scheme;
1889
1890 if (ncopies > 1)
1891 {
1892 if (dump_enabled_p ())
1893 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1894 "multiple types with negative step.\n");
1895 return VMAT_ELEMENTWISE;
1896 }
1897
1898 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1899 if (alignment_support_scheme != dr_aligned
1900 && alignment_support_scheme != dr_unaligned_supported)
1901 {
1902 if (dump_enabled_p ())
1903 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1904 "negative step but alignment required.\n");
1905 return VMAT_ELEMENTWISE;
1906 }
1907
1908 if (vls_type == VLS_STORE_INVARIANT)
1909 {
1910 if (dump_enabled_p ())
1911 dump_printf_loc (MSG_NOTE, vect_location,
1912 "negative step with invariant source;"
1913 " no permute needed.\n");
1914 return VMAT_CONTIGUOUS_DOWN;
1915 }
1916
1917 if (!perm_mask_for_reverse (vectype))
1918 {
1919 if (dump_enabled_p ())
1920 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1921 "negative step and reversing not supported.\n");
1922 return VMAT_ELEMENTWISE;
1923 }
1924
1925 return VMAT_CONTIGUOUS_REVERSE;
1926 }
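/* For instance, a loop "for (i = n - 1; i >= 0; --i) ... = a[i];"
   accesses consecutive elements with a negative step; when a single
   vector copy suffices, the alignment is supportable and a reversing
   permute exists, the access becomes VMAT_CONTIGUOUS_REVERSE: a
   contiguous load followed by the mask from perm_mask_for_reverse. */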
1927
1928 /* Analyze load or store statement STMT of type VLS_TYPE. Return true
1929 if there is a memory access type that the vectorized form can use,
1930 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
1931 or scatters, fill in GS_INFO accordingly.
1932
1933 SLP says whether we're performing SLP rather than loop vectorization.
1934 VECTYPE is the vector type that the vectorized statements will use.
1935 NCOPIES is the number of vector statements that will be needed. */
1936
1937 static bool
1938 get_load_store_type (gimple *stmt, tree vectype, bool slp,
1939 vec_load_store_type vls_type, unsigned int ncopies,
1940 vect_memory_access_type *memory_access_type,
1941 gather_scatter_info *gs_info)
1942 {
1943 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1944 vec_info *vinfo = stmt_info->vinfo;
1945 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1946 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1947 {
1948 *memory_access_type = VMAT_GATHER_SCATTER;
1949 gimple *def_stmt;
1950 if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
1951 gcc_unreachable ();
1952 else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
1953 &gs_info->offset_dt,
1954 &gs_info->offset_vectype))
1955 {
1956 if (dump_enabled_p ())
1957 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1958 "%s index use not simple.\n",
1959 vls_type == VLS_LOAD ? "gather" : "scatter");
1960 return false;
1961 }
1962 }
1963 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1964 {
1965 if (!get_group_load_store_type (stmt, vectype, slp, vls_type,
1966 memory_access_type))
1967 return false;
1968 }
1969 else if (STMT_VINFO_STRIDED_P (stmt_info))
1970 {
1971 gcc_assert (!slp);
1972 *memory_access_type = VMAT_ELEMENTWISE;
1973 }
1974 else
1975 {
1976 int cmp = compare_step_with_zero (stmt);
1977 if (cmp < 0)
1978 *memory_access_type = get_negative_load_store_type
1979 (stmt, vectype, vls_type, ncopies);
1980 else if (cmp == 0)
1981 {
1982 gcc_assert (vls_type == VLS_LOAD);
1983 *memory_access_type = VMAT_INVARIANT;
1984 }
1985 else
1986 *memory_access_type = VMAT_CONTIGUOUS;
1987 }
1988
1989 /* FIXME: At the moment the cost model seems to underestimate the
1990 cost of using elementwise accesses. This check preserves the
1991 traditional behavior until that can be fixed. */
1992 if (*memory_access_type == VMAT_ELEMENTWISE
1993 && !STMT_VINFO_STRIDED_P (stmt_info))
1994 {
1995 if (dump_enabled_p ())
1996 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1997 "not falling back to elementwise accesses\n");
1998 return false;
1999 }
2000 return true;
2001 }
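/* A rough map from access patterns to the memory access types chosen
   above (illustrative, I being the loop counter):

     a[i]      - constant positive step: VMAT_CONTIGUOUS
     a[n - i]  - constant negative step: VMAT_CONTIGUOUS_REVERSE
                 (or a fallback from get_negative_load_store_type)
     *p        - step zero, load only:   VMAT_INVARIANT
     a[b[i]]   - gather/scatter:         VMAT_GATHER_SCATTER

   Grouped and strided accesses are classified by the subroutines
   above. */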
2002
2003 /* Function vectorizable_mask_load_store.
2004
2005 Check if STMT performs a conditional load or store that can be vectorized.
2006 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2007 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2008 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
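/* For reference, a sketch of the if-converted GIMPLE this function
   expects (names illustrative): a conditional load
   "if (cond) x = *p;" reaches the vectorizer as

     x = .MASK_LOAD (p, align, mask);

   and a conditional store "if (cond) *p = x;" as

     .MASK_STORE (p, align, mask, x);

   where MASK is derived from COND and the second argument encodes the
   access alignment, matching the argument layout rebuilt below. */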
2009
2010 static bool
2011 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
2012 gimple **vec_stmt, slp_tree slp_node)
2013 {
2014 tree vec_dest = NULL;
2015 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2016 stmt_vec_info prev_stmt_info;
2017 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2018 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2019 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
2020 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2021 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2022 tree rhs_vectype = NULL_TREE;
2023 tree mask_vectype;
2024 tree elem_type;
2025 gimple *new_stmt;
2026 tree dummy;
2027 tree dataref_ptr = NULL_TREE;
2028 gimple *ptr_incr;
2029 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2030 int ncopies;
2031 int i, j;
2032 bool inv_p;
2033 gather_scatter_info gs_info;
2034 vec_load_store_type vls_type;
2035 tree mask;
2036 gimple *def_stmt;
2037 enum vect_def_type dt;
2038
2039 if (slp_node != NULL)
2040 return false;
2041
2042 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2043 gcc_assert (ncopies >= 1);
2044
2045 mask = gimple_call_arg (stmt, 2);
2046
2047 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2048 return false;
2049
2050 /* FORNOW. This restriction should be relaxed. */
2051 if (nested_in_vect_loop && ncopies > 1)
2052 {
2053 if (dump_enabled_p ())
2054 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2055 "multiple types in nested loop.");
2056 return false;
2057 }
2058
2059 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2060 return false;
2061
2062 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2063 && ! vec_stmt)
2064 return false;
2065
2066 if (!STMT_VINFO_DATA_REF (stmt_info))
2067 return false;
2068
2069 elem_type = TREE_TYPE (vectype);
2070
2071 if (TREE_CODE (mask) != SSA_NAME)
2072 return false;
2073
2074 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
2075 return false;
2076
2077 if (!mask_vectype)
2078 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2079
2080 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
2081 || TYPE_VECTOR_SUBPARTS (mask_vectype) != TYPE_VECTOR_SUBPARTS (vectype))
2082 return false;
2083
2084 if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
2085 {
2086 tree rhs = gimple_call_arg (stmt, 3);
2087 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
2088 return false;
2089 if (dt == vect_constant_def || dt == vect_external_def)
2090 vls_type = VLS_STORE_INVARIANT;
2091 else
2092 vls_type = VLS_STORE;
2093 }
2094 else
2095 vls_type = VLS_LOAD;
2096
2097 vect_memory_access_type memory_access_type;
2098 if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies,
2099 &memory_access_type, &gs_info))
2100 return false;
2101
2102 if (memory_access_type == VMAT_GATHER_SCATTER)
2103 {
2104 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2105 tree masktype
2106 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
2107 if (TREE_CODE (masktype) == INTEGER_TYPE)
2108 {
2109 if (dump_enabled_p ())
2110 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2111 "masked gather with integer mask not supported.");
2112 return false;
2113 }
2114 }
2115 else if (memory_access_type != VMAT_CONTIGUOUS)
2116 {
2117 if (dump_enabled_p ())
2118 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2119 "unsupported access type for masked %s.\n",
2120 vls_type == VLS_LOAD ? "load" : "store");
2121 return false;
2122 }
2123 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2124 || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
2125 TYPE_MODE (mask_vectype),
2126 vls_type == VLS_LOAD)
2127 || (rhs_vectype
2128 && !useless_type_conversion_p (vectype, rhs_vectype)))
2129 return false;
2130
2131 if (!vec_stmt) /* transformation not required. */
2132 {
2133 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
2134 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2135 if (vls_type == VLS_LOAD)
2136 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
2137 NULL, NULL, NULL);
2138 else
2139 vect_model_store_cost (stmt_info, ncopies, memory_access_type,
2140 dt, NULL, NULL, NULL);
2141 return true;
2142 }
2143 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
2144
2145 /* Transform. */
2146
2147 if (memory_access_type == VMAT_GATHER_SCATTER)
2148 {
2149 tree vec_oprnd0 = NULL_TREE, op;
2150 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2151 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
2152 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
2153 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
2154 tree mask_perm_mask = NULL_TREE;
2155 edge pe = loop_preheader_edge (loop);
2156 gimple_seq seq;
2157 basic_block new_bb;
2158 enum { NARROW, NONE, WIDEN } modifier;
2159 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
2160
2161 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
2162 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2163 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2164 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2165 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2166 scaletype = TREE_VALUE (arglist);
2167 gcc_checking_assert (types_compatible_p (srctype, rettype)
2168 && types_compatible_p (srctype, masktype));
2169
2170 if (nunits == gather_off_nunits)
2171 modifier = NONE;
2172 else if (nunits == gather_off_nunits / 2)
2173 {
2174 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
2175 modifier = WIDEN;
2176
2177 for (i = 0; i < gather_off_nunits; ++i)
2178 sel[i] = i | nunits;
2179
2180 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel);
2181 }
2182 else if (nunits == gather_off_nunits * 2)
2183 {
2184 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
2185 modifier = NARROW;
2186
2187 for (i = 0; i < nunits; ++i)
2188 sel[i] = i < gather_off_nunits
2189 ? i : i + nunits - gather_off_nunits;
2190
2191 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
2192 ncopies *= 2;
2193 for (i = 0; i < nunits; ++i)
2194 sel[i] = i | gather_off_nunits;
2195 mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
2196 }
2197 else
2198 gcc_unreachable ();
2199
2200 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2201
2202 ptr = fold_convert (ptrtype, gs_info.base);
2203 if (!is_gimple_min_invariant (ptr))
2204 {
2205 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2206 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2207 gcc_assert (!new_bb);
2208 }
2209
2210 scale = build_int_cst (scaletype, gs_info.scale);
2211
2212 prev_stmt_info = NULL;
2213 for (j = 0; j < ncopies; ++j)
2214 {
2215 if (modifier == WIDEN && (j & 1))
2216 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2217 perm_mask, stmt, gsi);
2218 else if (j == 0)
2219 op = vec_oprnd0
2220 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
2221 else
2222 op = vec_oprnd0
2223 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
2224
2225 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2226 {
2227 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
2228 == TYPE_VECTOR_SUBPARTS (idxtype));
2229 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2230 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2231 new_stmt
2232 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2233 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2234 op = var;
2235 }
2236
2237 if (mask_perm_mask && (j & 1))
2238 mask_op = permute_vec_elements (mask_op, mask_op,
2239 mask_perm_mask, stmt, gsi);
2240 else
2241 {
2242 if (j == 0)
2243 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2244 else
2245 {
2246 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2247 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2248 }
2249
2250 mask_op = vec_mask;
2251 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2252 {
2253 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
2254 == TYPE_VECTOR_SUBPARTS (masktype));
2255 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2256 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2257 new_stmt
2258 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2259 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2260 mask_op = var;
2261 }
2262 }
2263
2264 new_stmt
2265 = gimple_build_call (gs_info.decl, 5, mask_op, ptr, op, mask_op,
2266 scale);
2267
2268 if (!useless_type_conversion_p (vectype, rettype))
2269 {
2270 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
2271 == TYPE_VECTOR_SUBPARTS (rettype));
2272 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2273 gimple_call_set_lhs (new_stmt, op);
2274 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2275 var = make_ssa_name (vec_dest);
2276 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2277 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2278 }
2279 else
2280 {
2281 var = make_ssa_name (vec_dest, new_stmt);
2282 gimple_call_set_lhs (new_stmt, var);
2283 }
2284
2285 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2286
2287 if (modifier == NARROW)
2288 {
2289 if ((j & 1) == 0)
2290 {
2291 prev_res = var;
2292 continue;
2293 }
2294 var = permute_vec_elements (prev_res, var,
2295 perm_mask, stmt, gsi);
2296 new_stmt = SSA_NAME_DEF_STMT (var);
2297 }
2298
2299 if (prev_stmt_info == NULL)
2300 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2301 else
2302 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2303 prev_stmt_info = vinfo_for_stmt (new_stmt);
2304 }
2305
2306 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2307 from the IL. */
2308 if (STMT_VINFO_RELATED_STMT (stmt_info))
2309 {
2310 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2311 stmt_info = vinfo_for_stmt (stmt);
2312 }
2313 tree lhs = gimple_call_lhs (stmt);
2314 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2315 set_vinfo_for_stmt (new_stmt, stmt_info);
2316 set_vinfo_for_stmt (stmt, NULL);
2317 STMT_VINFO_STMT (stmt_info) = new_stmt;
2318 gsi_replace (gsi, new_stmt, true);
2319 return true;
2320 }
2321 else if (vls_type != VLS_LOAD)
2322 {
2323 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2324 prev_stmt_info = NULL;
2325 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
2326 for (i = 0; i < ncopies; i++)
2327 {
2328 unsigned align, misalign;
2329
2330 if (i == 0)
2331 {
2332 tree rhs = gimple_call_arg (stmt, 3);
2333 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
2334 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2335 /* We should have caught mismatched types earlier. */
2336 gcc_assert (useless_type_conversion_p (vectype,
2337 TREE_TYPE (vec_rhs)));
2338 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2339 NULL_TREE, &dummy, gsi,
2340 &ptr_incr, false, &inv_p);
2341 gcc_assert (!inv_p);
2342 }
2343 else
2344 {
2345 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
2346 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2347 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2348 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2349 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2350 TYPE_SIZE_UNIT (vectype));
2351 }
2352
2353 align = TYPE_ALIGN_UNIT (vectype);
2354 if (aligned_access_p (dr))
2355 misalign = 0;
2356 else if (DR_MISALIGNMENT (dr) == -1)
2357 {
2358 align = TYPE_ALIGN_UNIT (elem_type);
2359 misalign = 0;
2360 }
2361 else
2362 misalign = DR_MISALIGNMENT (dr);
2363 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2364 misalign);
2365 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2366 misalign ? least_bit_hwi (misalign) : align);
2367 gcall *call
2368 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2369 ptr, vec_mask, vec_rhs);
2370 gimple_call_set_nothrow (call, true);
2371 new_stmt = call;
2372 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2373 if (i == 0)
2374 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2375 else
2376 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2377 prev_stmt_info = vinfo_for_stmt (new_stmt);
2378 }
2379 }
2380 else
2381 {
2382 tree vec_mask = NULL_TREE;
2383 prev_stmt_info = NULL;
2384 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2385 for (i = 0; i < ncopies; i++)
2386 {
2387 unsigned align, misalign;
2388
2389 if (i == 0)
2390 {
2391 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2392 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2393 NULL_TREE, &dummy, gsi,
2394 &ptr_incr, false, &inv_p);
2395 gcc_assert (!inv_p);
2396 }
2397 else
2398 {
2399 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2400 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2401 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2402 TYPE_SIZE_UNIT (vectype));
2403 }
2404
2405 align = TYPE_ALIGN_UNIT (vectype);
2406 if (aligned_access_p (dr))
2407 misalign = 0;
2408 else if (DR_MISALIGNMENT (dr) == -1)
2409 {
2410 align = TYPE_ALIGN_UNIT (elem_type);
2411 misalign = 0;
2412 }
2413 else
2414 misalign = DR_MISALIGNMENT (dr);
2415 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2416 misalign);
2417 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2418 misalign ? least_bit_hwi (misalign) : align);
2419 gcall *call
2420 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2421 ptr, vec_mask);
2422 gimple_call_set_lhs (call, make_ssa_name (vec_dest));
2423 gimple_call_set_nothrow (call, true);
2424 vect_finish_stmt_generation (stmt, call, gsi);
2425 if (i == 0)
2426 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = call;
2427 else
2428 STMT_VINFO_RELATED_STMT (prev_stmt_info) = call;
2429 prev_stmt_info = vinfo_for_stmt (call);
2430 }
2431 }
2432
2433 if (vls_type == VLS_LOAD)
2434 {
2435 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2436 from the IL. */
2437 if (STMT_VINFO_RELATED_STMT (stmt_info))
2438 {
2439 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2440 stmt_info = vinfo_for_stmt (stmt);
2441 }
2442 tree lhs = gimple_call_lhs (stmt);
2443 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2444 set_vinfo_for_stmt (new_stmt, stmt_info);
2445 set_vinfo_for_stmt (stmt, NULL);
2446 STMT_VINFO_STMT (stmt_info) = new_stmt;
2447 gsi_replace (gsi, new_stmt, true);
2448 }
2449
2450 return true;
2451 }
2452
2453 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2454
2455 static bool
2456 vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
2457 gimple **vec_stmt, slp_tree slp_node,
2458 tree vectype_in, enum vect_def_type *dt)
2459 {
2460 tree op, vectype;
2461 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2462 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2463 unsigned ncopies, nunits;
2464
2465 op = gimple_call_arg (stmt, 0);
2466 vectype = STMT_VINFO_VECTYPE (stmt_info);
2467 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2468
2469 /* Multiple types in SLP are handled by creating the appropriate number of
2470 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2471 case of SLP. */
2472 if (slp_node)
2473 ncopies = 1;
2474 else
2475 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2476
2477 gcc_assert (ncopies >= 1);
2478
2479 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2480 if (! char_vectype)
2481 return false;
2482
2483 unsigned char *elts
2484 = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (char_vectype));
2485 unsigned char *elt = elts;
2486 unsigned word_bytes = TYPE_VECTOR_SUBPARTS (char_vectype) / nunits;
2487 for (unsigned i = 0; i < nunits; ++i)
2488 for (unsigned j = 0; j < word_bytes; ++j)
2489 *elt++ = (i + 1) * word_bytes - j - 1;
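  /* E.g. for __builtin_bswap32 with four 32-bit elements per vector
     (sixteen bytes in CHAR_VECTYPE, WORD_BYTES == 4) the selector built
     above is { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 },
     i.e. a byte reversal within each word. */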
2490
2491 if (! can_vec_perm_p (TYPE_MODE (char_vectype), false, elts))
2492 return false;
2493
2494 if (! vec_stmt)
2495 {
2496 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2497 if (dump_enabled_p ())
2498 dump_printf_loc (MSG_NOTE, vect_location,
2499 "=== vectorizable_bswap ===\n");
2500 if (! PURE_SLP_STMT (stmt_info))
2501 {
2502 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2503 1, vector_stmt, stmt_info, 0, vect_prologue);
2504 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2505 ncopies, vec_perm, stmt_info, 0, vect_body);
2506 }
2507 return true;
2508 }
2509
2510 tree *telts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (char_vectype));
2511 for (unsigned i = 0; i < TYPE_VECTOR_SUBPARTS (char_vectype); ++i)
2512 telts[i] = build_int_cst (char_type_node, elts[i]);
2513 tree bswap_vconst = build_vector (char_vectype, telts);
2514
2515 /* Transform. */
2516 vec<tree> vec_oprnds = vNULL;
2517 gimple *new_stmt = NULL;
2518 stmt_vec_info prev_stmt_info = NULL;
2519 for (unsigned j = 0; j < ncopies; j++)
2520 {
2521 /* Handle uses. */
2522 if (j == 0)
2523 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2524 else
2525 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2526
2527 /* Arguments are ready. Create the new vector stmt. */
2528 unsigned i;
2529 tree vop;
2530 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
2531 {
2532 tree tem = make_ssa_name (char_vectype);
2533 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2534 char_vectype, vop));
2535 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2536 tree tem2 = make_ssa_name (char_vectype);
2537 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
2538 tem, tem, bswap_vconst);
2539 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2540 tem = make_ssa_name (vectype);
2541 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2542 vectype, tem2));
2543 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2544 if (slp_node)
2545 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2546 }
2547
2548 if (slp_node)
2549 continue;
2550
2551 if (j == 0)
2552 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2553 else
2554 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2555
2556 prev_stmt_info = vinfo_for_stmt (new_stmt);
2557 }
2558
2559 vec_oprnds.release ();
2560 return true;
2561 }
2562
2563 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
2564 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
2565 in a single step. On success, store the binary pack code in
2566 *CONVERT_CODE. */
2567
2568 static bool
2569 simple_integer_narrowing (tree vectype_out, tree vectype_in,
2570 tree_code *convert_code)
2571 {
2572 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
2573 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
2574 return false;
2575
2576 tree_code code;
2577 int multi_step_cvt = 0;
2578 auto_vec <tree, 8> interm_types;
2579 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
2580 &code, &multi_step_cvt,
2581 &interm_types)
2582 || multi_step_cvt)
2583 return false;
2584
2585 *convert_code = code;
2586 return true;
2587 }
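/* For instance, narrowing from a vector of four 32-bit integers to a
   vector of eight 16-bit integers can be done in one step by packing
   two input vectors with VEC_PACK_TRUNC_EXPR where the target supports
   it; that code is what ends up in *CONVERT_CODE.  Narrowings that
   need an intermediate type (MULTI_STEP_CVT != 0) are rejected. */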
2588
2589 /* Function vectorizable_call.
2590
2591 Check if GS performs a function call that can be vectorized.
2592 If VEC_STMT is also passed, vectorize GS: create a vectorized
2593 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2594 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2595
2596 static bool
2597 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2598 slp_tree slp_node)
2599 {
2600 gcall *stmt;
2601 tree vec_dest;
2602 tree scalar_dest;
2603 tree op, type;
2604 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2605 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2606 tree vectype_out, vectype_in;
2607 int nunits_in;
2608 int nunits_out;
2609 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2610 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2611 vec_info *vinfo = stmt_info->vinfo;
2612 tree fndecl, new_temp, rhs_type;
2613 gimple *def_stmt;
2614 enum vect_def_type dt[3]
2615 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2616 int ndts = 3;
2617 gimple *new_stmt = NULL;
2618 int ncopies, j;
2619 vec<tree> vargs = vNULL;
2620 enum { NARROW, NONE, WIDEN } modifier;
2621 size_t i, nargs;
2622 tree lhs;
2623
2624 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2625 return false;
2626
2627 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2628 && ! vec_stmt)
2629 return false;
2630
2631 /* Is GS a vectorizable call? */
2632 stmt = dyn_cast <gcall *> (gs);
2633 if (!stmt)
2634 return false;
2635
2636 if (gimple_call_internal_p (stmt)
2637 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2638 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2639 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2640 slp_node);
2641
2642 if (gimple_call_lhs (stmt) == NULL_TREE
2643 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2644 return false;
2645
2646 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2647
2648 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2649
2650 /* Process function arguments. */
2651 rhs_type = NULL_TREE;
2652 vectype_in = NULL_TREE;
2653 nargs = gimple_call_num_args (stmt);
2654
2655 /* Bail out if the function has more than three arguments; we do not have
2656 interesting builtin functions to vectorize with more than two arguments
2657 except for fma. Zero arguments is not supported either. */
2658 if (nargs == 0 || nargs > 3)
2659 return false;
2660
2661 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2662 if (gimple_call_internal_p (stmt)
2663 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2664 {
2665 nargs = 0;
2666 rhs_type = unsigned_type_node;
2667 }
2668
2669 for (i = 0; i < nargs; i++)
2670 {
2671 tree opvectype;
2672
2673 op = gimple_call_arg (stmt, i);
2674
2675 /* We can only handle calls with arguments of the same type. */
2676 if (rhs_type
2677 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2678 {
2679 if (dump_enabled_p ())
2680 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2681 "argument types differ.\n");
2682 return false;
2683 }
2684 if (!rhs_type)
2685 rhs_type = TREE_TYPE (op);
2686
2687 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2688 {
2689 if (dump_enabled_p ())
2690 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2691 "use not simple.\n");
2692 return false;
2693 }
2694
2695 if (!vectype_in)
2696 vectype_in = opvectype;
2697 else if (opvectype
2698 && opvectype != vectype_in)
2699 {
2700 if (dump_enabled_p ())
2701 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2702 "argument vector types differ.\n");
2703 return false;
2704 }
2705 }
2706 /* If all arguments are external or constant defs, use a vector type with
2707 the same size as the output vector type. */
2708 if (!vectype_in)
2709 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2710 if (vec_stmt)
2711 gcc_assert (vectype_in);
2712 if (!vectype_in)
2713 {
2714 if (dump_enabled_p ())
2715 {
2716 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2717 "no vectype for scalar type ");
2718 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2719 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2720 }
2721
2722 return false;
2723 }
2724
2725 /* FORNOW */
2726 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2727 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2728 if (nunits_in == nunits_out / 2)
2729 modifier = NARROW;
2730 else if (nunits_out == nunits_in)
2731 modifier = NONE;
2732 else if (nunits_out == nunits_in / 2)
2733 modifier = WIDEN;
2734 else
2735 return false;
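  /* (Illustrative: with a four-element input vector type and an
     eight-element output vector type, nunits_in == nunits_out / 2, so
     the call narrows and two input vectors feed one output vector;
     the opposite ratio widens.) */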
2736
2737 /* We only handle functions that do not read or clobber memory. */
2738 if (gimple_vuse (stmt))
2739 {
2740 if (dump_enabled_p ())
2741 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2742 "function reads from or writes to memory.\n");
2743 return false;
2744 }
2745
2746 /* For now, we only vectorize functions if a target-specific builtin
2747 is available. TODO -- in some cases, it might be profitable to
2748 insert the calls for pieces of the vector, in order to be able
2749 to vectorize other operations in the loop. */
2750 fndecl = NULL_TREE;
2751 internal_fn ifn = IFN_LAST;
2752 combined_fn cfn = gimple_call_combined_fn (stmt);
2753 tree callee = gimple_call_fndecl (stmt);
2754
2755 /* First try using an internal function. */
2756 tree_code convert_code = ERROR_MARK;
2757 if (cfn != CFN_LAST
2758 && (modifier == NONE
2759 || (modifier == NARROW
2760 && simple_integer_narrowing (vectype_out, vectype_in,
2761 &convert_code))))
2762 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
2763 vectype_in);
2764
2765 /* If that fails, try asking for a target-specific built-in function. */
2766 if (ifn == IFN_LAST)
2767 {
2768 if (cfn != CFN_LAST)
2769 fndecl = targetm.vectorize.builtin_vectorized_function
2770 (cfn, vectype_out, vectype_in);
2771 else
2772 fndecl = targetm.vectorize.builtin_md_vectorized_function
2773 (callee, vectype_out, vectype_in);
2774 }
2775
2776 if (ifn == IFN_LAST && !fndecl)
2777 {
2778 if (cfn == CFN_GOMP_SIMD_LANE
2779 && !slp_node
2780 && loop_vinfo
2781 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2782 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2783 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2784 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2785 {
2786 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2787 { 0, 1, 2, ... vf - 1 } vector. */
2788 gcc_assert (nargs == 0);
2789 }
2790 else if (modifier == NONE
2791 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
2792 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
2793 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
2794 return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
2795 vectype_in, dt);
2796 else
2797 {
2798 if (dump_enabled_p ())
2799 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2800 "function is not vectorizable.\n");
2801 return false;
2802 }
2803 }
2804
2805 if (slp_node)
2806 ncopies = 1;
2807 else if (modifier == NARROW && ifn == IFN_LAST)
2808 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2809 else
2810 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2811
2812 /* Sanity check: make sure that at least one copy of the vectorized stmt
2813 needs to be generated. */
2814 gcc_assert (ncopies >= 1);
2815
2816 if (!vec_stmt) /* transformation not required. */
2817 {
2818 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2819 if (dump_enabled_p ())
2820 dump_printf_loc (MSG_NOTE, vect_location,
2821 "=== vectorizable_call ===\n");
2822 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
2823 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
2824 add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
2825 vec_promote_demote, stmt_info, 0, vect_body);
2826
2827 return true;
2828 }
2829
2830 /* Transform. */
2831
2832 if (dump_enabled_p ())
2833 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2834
2835 /* Handle def. */
2836 scalar_dest = gimple_call_lhs (stmt);
2837 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2838
2839 prev_stmt_info = NULL;
2840 if (modifier == NONE || ifn != IFN_LAST)
2841 {
2842 tree prev_res = NULL_TREE;
2843 for (j = 0; j < ncopies; ++j)
2844 {
2845 /* Build argument list for the vectorized call. */
2846 if (j == 0)
2847 vargs.create (nargs);
2848 else
2849 vargs.truncate (0);
2850
2851 if (slp_node)
2852 {
2853 auto_vec<vec<tree> > vec_defs (nargs);
2854 vec<tree> vec_oprnds0;
2855
2856 for (i = 0; i < nargs; i++)
2857 vargs.quick_push (gimple_call_arg (stmt, i));
2858 vect_get_slp_defs (vargs, slp_node, &vec_defs);
2859 vec_oprnds0 = vec_defs[0];
2860
2861 /* Arguments are ready. Create the new vector stmt. */
2862 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2863 {
2864 size_t k;
2865 for (k = 0; k < nargs; k++)
2866 {
2867 vec<tree> vec_oprndsk = vec_defs[k];
2868 vargs[k] = vec_oprndsk[i];
2869 }
2870 if (modifier == NARROW)
2871 {
2872 tree half_res = make_ssa_name (vectype_in);
2873 gcall *call
2874 = gimple_build_call_internal_vec (ifn, vargs);
2875 gimple_call_set_lhs (call, half_res);
2876 gimple_call_set_nothrow (call, true);
2877 new_stmt = call;
2878 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2879 if ((i & 1) == 0)
2880 {
2881 prev_res = half_res;
2882 continue;
2883 }
2884 new_temp = make_ssa_name (vec_dest);
2885 new_stmt = gimple_build_assign (new_temp, convert_code,
2886 prev_res, half_res);
2887 }
2888 else
2889 {
2890 gcall *call;
2891 if (ifn != IFN_LAST)
2892 call = gimple_build_call_internal_vec (ifn, vargs);
2893 else
2894 call = gimple_build_call_vec (fndecl, vargs);
2895 new_temp = make_ssa_name (vec_dest, call);
2896 gimple_call_set_lhs (call, new_temp);
2897 gimple_call_set_nothrow (call, true);
2898 new_stmt = call;
2899 }
2900 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2901 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2902 }
2903
2904 for (i = 0; i < nargs; i++)
2905 {
2906 vec<tree> vec_oprndsi = vec_defs[i];
2907 vec_oprndsi.release ();
2908 }
2909 continue;
2910 }
2911
2912 for (i = 0; i < nargs; i++)
2913 {
2914 op = gimple_call_arg (stmt, i);
2915 if (j == 0)
2916 vec_oprnd0
2917 = vect_get_vec_def_for_operand (op, stmt);
2918 else
2919 {
2920 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2921 vec_oprnd0
2922 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2923 }
2924
2925 vargs.quick_push (vec_oprnd0);
2926 }
2927
2928 if (gimple_call_internal_p (stmt)
2929 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2930 {
2931 tree *v = XALLOCAVEC (tree, nunits_out);
2932 int k;
2933 for (k = 0; k < nunits_out; ++k)
2934 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2935 tree cst = build_vector (vectype_out, v);
2936 tree new_var
2937 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
2938 gimple *init_stmt = gimple_build_assign (new_var, cst);
2939 vect_init_vector_1 (stmt, init_stmt, NULL);
2940 new_temp = make_ssa_name (vec_dest);
2941 new_stmt = gimple_build_assign (new_temp, new_var);
2942 }
2943 else if (modifier == NARROW)
2944 {
2945 tree half_res = make_ssa_name (vectype_in);
2946 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
2947 gimple_call_set_lhs (call, half_res);
2948 gimple_call_set_nothrow (call, true);
2949 new_stmt = call;
2950 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2951 if ((j & 1) == 0)
2952 {
2953 prev_res = half_res;
2954 continue;
2955 }
2956 new_temp = make_ssa_name (vec_dest);
2957 new_stmt = gimple_build_assign (new_temp, convert_code,
2958 prev_res, half_res);
2959 }
2960 else
2961 {
2962 gcall *call;
2963 if (ifn != IFN_LAST)
2964 call = gimple_build_call_internal_vec (ifn, vargs);
2965 else
2966 call = gimple_build_call_vec (fndecl, vargs);
2967 new_temp = make_ssa_name (vec_dest, call);
2968 gimple_call_set_lhs (call, new_temp);
2969 gimple_call_set_nothrow (call, true);
2970 new_stmt = call;
2971 }
2972 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2973
2974 if (j == (modifier == NARROW ? 1 : 0))
2975 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2976 else
2977 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2978
2979 prev_stmt_info = vinfo_for_stmt (new_stmt);
2980 }
2981 }
2982 else if (modifier == NARROW)
2983 {
2984 for (j = 0; j < ncopies; ++j)
2985 {
2986 /* Build argument list for the vectorized call. */
2987 if (j == 0)
2988 vargs.create (nargs * 2);
2989 else
2990 vargs.truncate (0);
2991
2992 if (slp_node)
2993 {
2994 auto_vec<vec<tree> > vec_defs (nargs);
2995 vec<tree> vec_oprnds0;
2996
2997 for (i = 0; i < nargs; i++)
2998 vargs.quick_push (gimple_call_arg (stmt, i));
2999 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3000 vec_oprnds0 = vec_defs[0];
3001
3002 /* Arguments are ready. Create the new vector stmt. */
3003 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3004 {
3005 size_t k;
3006 vargs.truncate (0);
3007 for (k = 0; k < nargs; k++)
3008 {
3009 vec<tree> vec_oprndsk = vec_defs[k];
3010 vargs.quick_push (vec_oprndsk[i]);
3011 vargs.quick_push (vec_oprndsk[i + 1]);
3012 }
3013 gcall *call;
3014 if (ifn != IFN_LAST)
3015 call = gimple_build_call_internal_vec (ifn, vargs);
3016 else
3017 call = gimple_build_call_vec (fndecl, vargs);
3018 new_temp = make_ssa_name (vec_dest, call);
3019 gimple_call_set_lhs (call, new_temp);
3020 gimple_call_set_nothrow (call, true);
3021 new_stmt = call;
3022 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3023 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3024 }
3025
3026 for (i = 0; i < nargs; i++)
3027 {
3028 vec<tree> vec_oprndsi = vec_defs[i];
3029 vec_oprndsi.release ();
3030 }
3031 continue;
3032 }
3033
3034 for (i = 0; i < nargs; i++)
3035 {
3036 op = gimple_call_arg (stmt, i);
3037 if (j == 0)
3038 {
3039 vec_oprnd0
3040 = vect_get_vec_def_for_operand (op, stmt);
3041 vec_oprnd1
3042 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3043 }
3044 else
3045 {
3046 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
3047 vec_oprnd0
3048 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
3049 vec_oprnd1
3050 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3051 }
3052
3053 vargs.quick_push (vec_oprnd0);
3054 vargs.quick_push (vec_oprnd1);
3055 }
3056
3057 new_stmt = gimple_build_call_vec (fndecl, vargs);
3058 new_temp = make_ssa_name (vec_dest, new_stmt);
3059 gimple_call_set_lhs (new_stmt, new_temp);
3060 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3061
3062 if (j == 0)
3063 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3064 else
3065 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3066
3067 prev_stmt_info = vinfo_for_stmt (new_stmt);
3068 }
3069
3070 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3071 }
3072 else
3073 /* No current target implements this case. */
3074 return false;
3075
3076 vargs.release ();
3077
3078 /* The call in STMT might prevent it from being removed in DCE.
3079 We however cannot remove it here, because the SSA name
3080 it defines is mapped to the new definition. So just replace
3081 the rhs of the statement with something harmless. */
3082
3083 if (slp_node)
3084 return true;
3085
3086 type = TREE_TYPE (scalar_dest);
3087 if (is_pattern_stmt_p (stmt_info))
3088 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3089 else
3090 lhs = gimple_call_lhs (stmt);
3091
3092 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3093 set_vinfo_for_stmt (new_stmt, stmt_info);
3094 set_vinfo_for_stmt (stmt, NULL);
3095 STMT_VINFO_STMT (stmt_info) = new_stmt;
3096 gsi_replace (gsi, new_stmt, false);
3097
3098 return true;
3099 }
3100
3101
3102 struct simd_call_arg_info
3103 {
3104 tree vectype; /* Vector type of the argument, or NULL. */
3105 tree op; /* Base of a linear argument, if known. */
3106 HOST_WIDE_INT linear_step; /* Step of a linear argument, 0 otherwise. */
3107 enum vect_def_type dt; /* Def type of the argument. */
3108 unsigned int align; /* Known alignment of a pointer argument. */
3109 bool simd_lane_linear; /* True if linear only within a simd lane. */
3110 };
3111
3112 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3113 is linear within a simd lane (but not within the whole loop), note it
3114 in *ARGINFO. */
3115
3116 static void
3117 vect_simd_lane_linear (tree op, struct loop *loop,
3118 struct simd_call_arg_info *arginfo)
3119 {
3120 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3121
3122 if (!is_gimple_assign (def_stmt)
3123 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3124 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3125 return;
3126
3127 tree base = gimple_assign_rhs1 (def_stmt);
3128 HOST_WIDE_INT linear_step = 0;
3129 tree v = gimple_assign_rhs2 (def_stmt);
3130 while (TREE_CODE (v) == SSA_NAME)
3131 {
3132 tree t;
3133 def_stmt = SSA_NAME_DEF_STMT (v);
3134 if (is_gimple_assign (def_stmt))
3135 switch (gimple_assign_rhs_code (def_stmt))
3136 {
3137 case PLUS_EXPR:
3138 t = gimple_assign_rhs2 (def_stmt);
3139 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3140 return;
3141 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3142 v = gimple_assign_rhs1 (def_stmt);
3143 continue;
3144 case MULT_EXPR:
3145 t = gimple_assign_rhs2 (def_stmt);
3146 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3147 return;
3148 linear_step = tree_to_shwi (t);
3149 v = gimple_assign_rhs1 (def_stmt);
3150 continue;
3151 CASE_CONVERT:
3152 t = gimple_assign_rhs1 (def_stmt);
3153 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3154 || (TYPE_PRECISION (TREE_TYPE (v))
3155 < TYPE_PRECISION (TREE_TYPE (t))))
3156 return;
3157 if (!linear_step)
3158 linear_step = 1;
3159 v = t;
3160 continue;
3161 default:
3162 return;
3163 }
3164 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3165 && loop->simduid
3166 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3167 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3168 == loop->simduid))
3169 {
3170 if (!linear_step)
3171 linear_step = 1;
3172 arginfo->linear_step = linear_step;
3173 arginfo->op = base;
3174 arginfo->simd_lane_linear = true;
3175 return;
3176 }
3177 }
3178 }
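/* A sketch of the pattern recognized above (SSA names illustrative):

     _1 = GOMP_SIMD_LANE (simduid.0);
     _2 = _1 * 4;
     op_3 = &arr + _2;

   makes OP linear within a simd lane; &arr is recorded as the base
   and 4 as the linear step in *ARGINFO. */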
3179
3180 /* Function vectorizable_simd_clone_call.
3181
3182 Check if STMT performs a function call that can be vectorized
3183 by calling a simd clone of the function.
3184 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3185 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3186 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
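/* (Background, sketched: simd clones typically come from
   "#pragma omp declare simd" declarations.  For a scalar function
   taking and returning an int, the compiler may emit clones that take
   and return vectors of, say, four ints; this routine picks the most
   suitable clone and rewrites the call to use it.) */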
3187
3188 static bool
3189 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
3190 gimple **vec_stmt, slp_tree slp_node)
3191 {
3192 tree vec_dest;
3193 tree scalar_dest;
3194 tree op, type;
3195 tree vec_oprnd0 = NULL_TREE;
3196 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
3197 tree vectype;
3198 unsigned int nunits;
3199 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3200 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3201 vec_info *vinfo = stmt_info->vinfo;
3202 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3203 tree fndecl, new_temp;
3204 gimple *def_stmt;
3205 gimple *new_stmt = NULL;
3206 int ncopies, j;
3207 auto_vec<simd_call_arg_info> arginfo;
3208 vec<tree> vargs = vNULL;
3209 size_t i, nargs;
3210 tree lhs, rtype, ratype;
3211 vec<constructor_elt, va_gc> *ret_ctor_elts;
3212
3213 /* Is STMT a vectorizable call? */
3214 if (!is_gimple_call (stmt))
3215 return false;
3216
3217 fndecl = gimple_call_fndecl (stmt);
3218 if (fndecl == NULL_TREE)
3219 return false;
3220
3221 struct cgraph_node *node = cgraph_node::get (fndecl);
3222 if (node == NULL || node->simd_clones == NULL)
3223 return false;
3224
3225 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3226 return false;
3227
3228 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3229 && ! vec_stmt)
3230 return false;
3231
3232 if (gimple_call_lhs (stmt)
3233 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3234 return false;
3235
3236 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3237
3238 vectype = STMT_VINFO_VECTYPE (stmt_info);
3239
3240 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
3241 return false;
3242
3243 /* FORNOW */
3244 if (slp_node)
3245 return false;
3246
3247 /* Process function arguments. */
3248 nargs = gimple_call_num_args (stmt);
3249
3250 /* Bail out if the function has zero arguments. */
3251 if (nargs == 0)
3252 return false;
3253
3254 arginfo.reserve (nargs, true);
3255
3256 for (i = 0; i < nargs; i++)
3257 {
3258 simd_call_arg_info thisarginfo;
3259 affine_iv iv;
3260
3261 thisarginfo.linear_step = 0;
3262 thisarginfo.align = 0;
3263 thisarginfo.op = NULL_TREE;
3264 thisarginfo.simd_lane_linear = false;
3265
3266 op = gimple_call_arg (stmt, i);
3267 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
3268 &thisarginfo.vectype)
3269 || thisarginfo.dt == vect_uninitialized_def)
3270 {
3271 if (dump_enabled_p ())
3272 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3273 "use not simple.\n");
3274 return false;
3275 }
3276
3277 if (thisarginfo.dt == vect_constant_def
3278 || thisarginfo.dt == vect_external_def)
3279 gcc_assert (thisarginfo.vectype == NULL_TREE);
3280 else
3281 gcc_assert (thisarginfo.vectype != NULL_TREE);
3282
3283 /* For linear arguments, the analyze phase should have saved
3284 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3285 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3286 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3287 {
3288 gcc_assert (vec_stmt);
3289 thisarginfo.linear_step
3290 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3291 thisarginfo.op
3292 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3293 thisarginfo.simd_lane_linear
3294 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3295 == boolean_true_node);
3296 /* If the loop has been peeled for alignment, we need to adjust the recorded base accordingly. */
3297 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3298 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3299 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3300 {
3301 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3302 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3303 tree opt = TREE_TYPE (thisarginfo.op);
3304 bias = fold_convert (TREE_TYPE (step), bias);
3305 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3306 thisarginfo.op
3307 = fold_build2 (POINTER_TYPE_P (opt)
3308 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3309 thisarginfo.op, bias);
3310 }
3311 }
3312 else if (!vec_stmt
3313 && thisarginfo.dt != vect_constant_def
3314 && thisarginfo.dt != vect_external_def
3315 && loop_vinfo
3316 && TREE_CODE (op) == SSA_NAME
3317 && simple_iv (loop, loop_containing_stmt (stmt), op,
3318 &iv, false)
3319 && tree_fits_shwi_p (iv.step))
3320 {
3321 thisarginfo.linear_step = tree_to_shwi (iv.step);
3322 thisarginfo.op = iv.base;
3323 }
3324 else if ((thisarginfo.dt == vect_constant_def
3325 || thisarginfo.dt == vect_external_def)
3326 && POINTER_TYPE_P (TREE_TYPE (op)))
3327 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3328 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3329 linear too. */
3330 if (POINTER_TYPE_P (TREE_TYPE (op))
3331 && !thisarginfo.linear_step
3332 && !vec_stmt
3333 && thisarginfo.dt != vect_constant_def
3334 && thisarginfo.dt != vect_external_def
3335 && loop_vinfo
3336 && !slp_node
3337 && TREE_CODE (op) == SSA_NAME)
3338 vect_simd_lane_linear (op, loop, &thisarginfo);
3339
3340 arginfo.quick_push (thisarginfo);
3341 }
3342
3343 unsigned int badness = 0;
3344 struct cgraph_node *bestn = NULL;
3345 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3346 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3347 else
3348 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3349 n = n->simdclone->next_clone)
3350 {
3351 unsigned int this_badness = 0;
3352 if (n->simdclone->simdlen
3353 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3354 || n->simdclone->nargs != nargs)
3355 continue;
3356 if (n->simdclone->simdlen
3357 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
3358 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
3359 - exact_log2 (n->simdclone->simdlen)) * 1024;
3360 if (n->simdclone->inbranch)
3361 this_badness += 2048;
3362 int target_badness = targetm.simd_clone.usable (n);
3363 if (target_badness < 0)
3364 continue;
3365 this_badness += target_badness * 512;
3366 /* FORNOW: Have to add code to add the mask argument. */
3367 if (n->simdclone->inbranch)
3368 continue;
3369 for (i = 0; i < nargs; i++)
3370 {
3371 switch (n->simdclone->args[i].arg_type)
3372 {
3373 case SIMD_CLONE_ARG_TYPE_VECTOR:
3374 if (!useless_type_conversion_p
3375 (n->simdclone->args[i].orig_type,
3376 TREE_TYPE (gimple_call_arg (stmt, i))))
3377 i = -1;
3378 else if (arginfo[i].dt == vect_constant_def
3379 || arginfo[i].dt == vect_external_def
3380 || arginfo[i].linear_step)
3381 this_badness += 64;
3382 break;
3383 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3384 if (arginfo[i].dt != vect_constant_def
3385 && arginfo[i].dt != vect_external_def)
3386 i = -1;
3387 break;
3388 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3389 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3390 if (arginfo[i].dt == vect_constant_def
3391 || arginfo[i].dt == vect_external_def
3392 || (arginfo[i].linear_step
3393 != n->simdclone->args[i].linear_step))
3394 i = -1;
3395 break;
3396 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3397 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3398 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3399 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3400 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3401 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3402 /* FORNOW */
3403 i = -1;
3404 break;
3405 case SIMD_CLONE_ARG_TYPE_MASK:
3406 gcc_unreachable ();
3407 }
3408 if (i == (size_t) -1)
3409 break;
3410 if (n->simdclone->args[i].alignment > arginfo[i].align)
3411 {
3412 i = -1;
3413 break;
3414 }
3415 if (arginfo[i].align)
3416 this_badness += (exact_log2 (arginfo[i].align)
3417 - exact_log2 (n->simdclone->args[i].alignment));
3418 }
3419 if (i == (size_t) -1)
3420 continue;
3421 if (bestn == NULL || this_badness < badness)
3422 {
3423 bestn = n;
3424 badness = this_badness;
3425 }
3426 }
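  /* (Illustrative scoring: with a vectorization factor of 8, a clone of
     simdlen 4 starts at (log2 (8) - log2 (4)) * 1024 == 1024; vector
     arguments that are actually invariant or linear add 64 each.  The
     candidate with the lowest total badness wins.) */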
3427
3428 if (bestn == NULL)
3429 return false;
3430
3431 for (i = 0; i < nargs; i++)
3432 if ((arginfo[i].dt == vect_constant_def
3433 || arginfo[i].dt == vect_external_def)
3434 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3435 {
3436 arginfo[i].vectype
3437 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3438 i)));
3439 if (arginfo[i].vectype == NULL
3440 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3441 > bestn->simdclone->simdlen))
3442 return false;
3443 }
3444
3445 fndecl = bestn->decl;
3446 nunits = bestn->simdclone->simdlen;
3447 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3448
3449 /* If the function isn't const, only allow it in simd loops where the
3450 user has asserted that at least nunits consecutive iterations can be
3451 performed using SIMD instructions. */
3452 if ((loop == NULL || (unsigned) loop->safelen < nunits)
3453 && gimple_vuse (stmt))
3454 return false;
3455
3456 /* Sanity check: make sure that at least one copy of the vectorized stmt
3457 needs to be generated. */
3458 gcc_assert (ncopies >= 1);
3459
3460 if (!vec_stmt) /* transformation not required. */
3461 {
3462 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
3463 for (i = 0; i < nargs; i++)
3464 if ((bestn->simdclone->args[i].arg_type
3465 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
3466 || (bestn->simdclone->args[i].arg_type
3467 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
3468 {
3469 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
3470 + 1);
3471 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
3472 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
3473 ? size_type_node : TREE_TYPE (arginfo[i].op);
3474 tree ls = build_int_cst (lst, arginfo[i].linear_step);
3475 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
3476 tree sll = arginfo[i].simd_lane_linear
3477 ? boolean_true_node : boolean_false_node;
3478 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
3479 }
3480 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
3481 if (dump_enabled_p ())
3482 dump_printf_loc (MSG_NOTE, vect_location,
3483 "=== vectorizable_simd_clone_call ===\n");
3484 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3485 return true;
3486 }
3487
3488 /* Transform. */
3489
3490 if (dump_enabled_p ())
3491 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3492
3493 /* Handle def. */
3494 scalar_dest = gimple_call_lhs (stmt);
3495 vec_dest = NULL_TREE;
3496 rtype = NULL_TREE;
3497 ratype = NULL_TREE;
3498 if (scalar_dest)
3499 {
3500 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3501 rtype = TREE_TYPE (TREE_TYPE (fndecl));
3502 if (TREE_CODE (rtype) == ARRAY_TYPE)
3503 {
3504 ratype = rtype;
3505 rtype = TREE_TYPE (ratype);
3506 }
3507 }
3508
3509 prev_stmt_info = NULL;
3510 for (j = 0; j < ncopies; ++j)
3511 {
3512 /* Build argument list for the vectorized call. */
3513 if (j == 0)
3514 vargs.create (nargs);
3515 else
3516 vargs.truncate (0);
3517
3518 for (i = 0; i < nargs; i++)
3519 {
3520 unsigned int k, l, m, o;
3521 tree atype;
3522 op = gimple_call_arg (stmt, i);
3523 switch (bestn->simdclone->args[i].arg_type)
3524 {
3525 case SIMD_CLONE_ARG_TYPE_VECTOR:
3526 atype = bestn->simdclone->args[i].vector_type;
3527 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
3528 for (m = j * o; m < (j + 1) * o; m++)
3529 {
3530 if (TYPE_VECTOR_SUBPARTS (atype)
3531 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
3532 {
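		  /* The clone takes vectors narrower than the operand's
		     vectype: slice each vector definition into K pieces
		     with BIT_FIELD_REFs, fetching a fresh definition every
		     K-th piece (K is a power of two, so M & (K - 1) is
		     M % K).  */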
3533 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3534 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3535 / TYPE_VECTOR_SUBPARTS (atype));
3536 gcc_assert ((k & (k - 1)) == 0);
3537 if (m == 0)
3538 vec_oprnd0
3539 = vect_get_vec_def_for_operand (op, stmt);
3540 else
3541 {
3542 vec_oprnd0 = arginfo[i].op;
3543 if ((m & (k - 1)) == 0)
3544 vec_oprnd0
3545 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3546 vec_oprnd0);
3547 }
3548 arginfo[i].op = vec_oprnd0;
3549 vec_oprnd0
3550 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3551 bitsize_int (prec),
3552 bitsize_int ((m & (k - 1)) * prec));
3553 new_stmt
3554 = gimple_build_assign (make_ssa_name (atype),
3555 vec_oprnd0);
3556 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3557 vargs.safe_push (gimple_assign_lhs (new_stmt));
3558 }
3559 else
3560 {
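		  /* The clone takes vectors at least as wide as the
		     operand's vectype: concatenate K consecutive vector
		     definitions into one argument with a CONSTRUCTOR, or
		     pass the definition through unchanged when K == 1.  */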
3561 k = (TYPE_VECTOR_SUBPARTS (atype)
3562 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3563 gcc_assert ((k & (k - 1)) == 0);
3564 vec<constructor_elt, va_gc> *ctor_elts;
3565 if (k != 1)
3566 vec_alloc (ctor_elts, k);
3567 else
3568 ctor_elts = NULL;
3569 for (l = 0; l < k; l++)
3570 {
3571 if (m == 0 && l == 0)
3572 vec_oprnd0
3573 = vect_get_vec_def_for_operand (op, stmt);
3574 else
3575 vec_oprnd0
3576 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3577 arginfo[i].op);
3578 arginfo[i].op = vec_oprnd0;
3579 if (k == 1)
3580 break;
3581 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3582 vec_oprnd0);
3583 }
3584 if (k == 1)
3585 vargs.safe_push (vec_oprnd0);
3586 else
3587 {
3588 vec_oprnd0 = build_constructor (atype, ctor_elts);
3589 new_stmt
3590 = gimple_build_assign (make_ssa_name (atype),
3591 vec_oprnd0);
3592 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3593 vargs.safe_push (gimple_assign_lhs (new_stmt));
3594 }
3595 }
3596 }
3597 break;
3598 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3599 vargs.safe_push (op);
3600 break;
3601 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3602 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3603 if (j == 0)
3604 {
3605 gimple_seq stmts;
3606 arginfo[i].op
3607 = force_gimple_operand (arginfo[i].op, &stmts, true,
3608 NULL_TREE);
3609 if (stmts != NULL)
3610 {
3611 basic_block new_bb;
3612 edge pe = loop_preheader_edge (loop);
3613 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3614 gcc_assert (!new_bb);
3615 }
3616 if (arginfo[i].simd_lane_linear)
3617 {
3618 vargs.safe_push (arginfo[i].op);
3619 break;
3620 }
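	      /* Otherwise materialize the linear argument as an induction
		 variable: a PHI in the loop header that starts at the
		 preheader value and is bumped by LINEAR_STEP * NCOPIES
		 * NUNITS (one vectorized iteration's worth of scalar
		 steps) each time around the loop.  */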
3621 tree phi_res = copy_ssa_name (op);
3622 gphi *new_phi = create_phi_node (phi_res, loop->header);
3623 set_vinfo_for_stmt (new_phi,
3624 new_stmt_vec_info (new_phi, loop_vinfo));
3625 add_phi_arg (new_phi, arginfo[i].op,
3626 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3627 enum tree_code code
3628 = POINTER_TYPE_P (TREE_TYPE (op))
3629 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3630 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3631 ? sizetype : TREE_TYPE (op);
3632 widest_int cst
3633 = wi::mul (bestn->simdclone->args[i].linear_step,
3634 ncopies * nunits);
3635 tree tcst = wide_int_to_tree (type, cst);
3636 tree phi_arg = copy_ssa_name (op);
3637 new_stmt
3638 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3639 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3640 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3641 set_vinfo_for_stmt (new_stmt,
3642 new_stmt_vec_info (new_stmt, loop_vinfo));
3643 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3644 UNKNOWN_LOCATION);
3645 arginfo[i].op = phi_res;
3646 vargs.safe_push (phi_res);
3647 }
3648 else
3649 {
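	      /* Copies after the first pass the first copy's value
		 advanced by a further J * NUNITS linear steps.  */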
3650 enum tree_code code
3651 = POINTER_TYPE_P (TREE_TYPE (op))
3652 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3653 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3654 ? sizetype : TREE_TYPE (op);
3655 widest_int cst
3656 = wi::mul (bestn->simdclone->args[i].linear_step,
3657 j * nunits);
3658 tree tcst = wide_int_to_tree (type, cst);
3659 new_temp = make_ssa_name (TREE_TYPE (op));
3660 new_stmt = gimple_build_assign (new_temp, code,
3661 arginfo[i].op, tcst);
3662 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3663 vargs.safe_push (new_temp);
3664 }
3665 break;
3666 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3667 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3668 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3669 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3670 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3671 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3672 default:
3673 gcc_unreachable ();
3674 }
3675 }
3676
3677 new_stmt = gimple_build_call_vec (fndecl, vargs);
3678 if (vec_dest)
3679 {
3680 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3681 if (ratype)
3682 new_temp = create_tmp_var (ratype);
3683 else if (TYPE_VECTOR_SUBPARTS (vectype)
3684 == TYPE_VECTOR_SUBPARTS (rtype))
3685 new_temp = make_ssa_name (vec_dest, new_stmt);
3686 else
3687 new_temp = make_ssa_name (rtype, new_stmt);
3688 gimple_call_set_lhs (new_stmt, new_temp);
3689 }
3690 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3691
3692 if (vec_dest)
3693 {
3694 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3695 {
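	      /* The call produced more elements than fit in VECTYPE:
		 split the result into K subvectors, reading them from the
		 array return slot via MEM_REFs if the clone returns an
		 array (RATYPE), or with BIT_FIELD_REFs otherwise.  */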
3696 unsigned int k, l;
3697 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3698 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3699 gcc_assert ((k & (k - 1)) == 0);
3700 for (l = 0; l < k; l++)
3701 {
3702 tree t;
3703 if (ratype)
3704 {
3705 t = build_fold_addr_expr (new_temp);
3706 t = build2 (MEM_REF, vectype, t,
3707 build_int_cst (TREE_TYPE (t),
3708 l * prec / BITS_PER_UNIT));
3709 }
3710 else
3711 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3712 bitsize_int (prec), bitsize_int (l * prec));
3713 new_stmt
3714 = gimple_build_assign (make_ssa_name (vectype), t);
3715 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3716 if (j == 0 && l == 0)
3717 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3718 else
3719 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3720
3721 prev_stmt_info = vinfo_for_stmt (new_stmt);
3722 }
3723
3724 if (ratype)
3725 {
3726 tree clobber = build_constructor (ratype, NULL);
3727 TREE_THIS_VOLATILE (clobber) = 1;
3728 new_stmt = gimple_build_assign (new_temp, clobber);
3729 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3730 }
3731 continue;
3732 }
3733 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3734 {
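	      /* The call produced fewer elements than VECTYPE holds:
		 collect the results of K consecutive calls in
		 RET_CTOR_ELTS and build a single vector from them once
		 the last call of the group has been emitted.  */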
3735 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3736 / TYPE_VECTOR_SUBPARTS (rtype));
3737 gcc_assert ((k & (k - 1)) == 0);
3738 if ((j & (k - 1)) == 0)
3739 vec_alloc (ret_ctor_elts, k);
3740 if (ratype)
3741 {
3742 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3743 for (m = 0; m < o; m++)
3744 {
3745 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3746 size_int (m), NULL_TREE, NULL_TREE);
3747 new_stmt
3748 = gimple_build_assign (make_ssa_name (rtype), tem);
3749 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3750 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3751 gimple_assign_lhs (new_stmt));
3752 }
3753 tree clobber = build_constructor (ratype, NULL);
3754 TREE_THIS_VOLATILE (clobber) = 1;
3755 new_stmt = gimple_build_assign (new_temp, clobber);
3756 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3757 }
3758 else
3759 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3760 if ((j & (k - 1)) != k - 1)
3761 continue;
3762 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3763 new_stmt
3764 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3765 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3766
3767 if ((unsigned) j == k - 1)
3768 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3769 else
3770 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3771
3772 prev_stmt_info = vinfo_for_stmt (new_stmt);
3773 continue;
3774 }
3775 else if (ratype)
3776 {
3777 tree t = build_fold_addr_expr (new_temp);
3778 t = build2 (MEM_REF, vectype, t,
3779 build_int_cst (TREE_TYPE (t), 0));
3780 new_stmt
3781 = gimple_build_assign (make_ssa_name (vec_dest), t);
3782 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3783 tree clobber = build_constructor (ratype, NULL);
3784 TREE_THIS_VOLATILE (clobber) = 1;
3785 vect_finish_stmt_generation (stmt,
3786 gimple_build_assign (new_temp,
3787 clobber), gsi);
3788 }
3789 }
3790
3791 if (j == 0)
3792 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3793 else
3794 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3795
3796 prev_stmt_info = vinfo_for_stmt (new_stmt);
3797 }
3798
3799 vargs.release ();
3800
  /* The call in STMT might prevent it from being removed in dce.
     However, we cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace the
     rhs of the statement with something harmless.  */
3805
3806 if (slp_node)
3807 return true;
3808
3809 if (scalar_dest)
3810 {
3811 type = TREE_TYPE (scalar_dest);
3812 if (is_pattern_stmt_p (stmt_info))
3813 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3814 else
3815 lhs = gimple_call_lhs (stmt);
3816 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3817 }
3818 else
3819 new_stmt = gimple_build_nop ();
3820 set_vinfo_for_stmt (new_stmt, stmt_info);
3821 set_vinfo_for_stmt (stmt, NULL);
3822 STMT_VINFO_STMT (stmt_info) = new_stmt;
3823 gsi_replace (gsi, new_stmt, true);
3824 unlink_stmt_vdef (stmt);
3825
3826 return true;
3827 }
3828
3829
3830 /* Function vect_gen_widened_results_half
3831
   Create a vector stmt whose code, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at GSI.
3835 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3836 needs to be created (DECL is a function-decl of a target-builtin).
3837 STMT is the original scalar stmt that we are vectorizing. */
3838
3839 static gimple *
3840 vect_gen_widened_results_half (enum tree_code code,
3841 tree decl,
3842 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3843 tree vec_dest, gimple_stmt_iterator *gsi,
3844 gimple *stmt)
3845 {
3846 gimple *new_stmt;
3847 tree new_temp;
3848
3849 /* Generate half of the widened result: */
3850 if (code == CALL_EXPR)
3851 {
3852 /* Target specific support */
3853 if (op_type == binary_op)
3854 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3855 else
3856 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3857 new_temp = make_ssa_name (vec_dest, new_stmt);
3858 gimple_call_set_lhs (new_stmt, new_temp);
3859 }
3860 else
3861 {
3862 /* Generic support */
3863 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3864 if (op_type != binary_op)
3865 vec_oprnd1 = NULL;
3866 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3867 new_temp = make_ssa_name (vec_dest, new_stmt);
3868 gimple_assign_set_lhs (new_stmt, new_temp);
3869 }
3870 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3871
3872 return new_stmt;
3873 }
3874
3875
3876 /* Get vectorized definitions for loop-based vectorization. For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   the scalar operand), and for the rest we get a copy with
3879 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3880 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3881 The vectors are collected into VEC_OPRNDS. */
3882
3883 static void
3884 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3885 vec<tree> *vec_oprnds, int multi_step_cvt)
3886 {
3887 tree vec_oprnd;
3888
3889 /* Get first vector operand. */
  /* All the vector operands except the very first one (that is, the
     scalar operand) are stmt copies.  */
3892 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3893 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3894 else
3895 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3896
3897 vec_oprnds->quick_push (vec_oprnd);
3898
3899 /* Get second vector operand. */
3900 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3901 vec_oprnds->quick_push (vec_oprnd);
3902
3903 *oprnd = vec_oprnd;
3904
  /* For conversions in multiple steps, continue to get operands
     recursively.  */
3907 if (multi_step_cvt)
3908 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3909 }
3910
3911
3912 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3913 For multi-step conversions store the resulting vectors and call the function
3914 recursively. */
3915
3916 static void
3917 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3918 int multi_step_cvt, gimple *stmt,
3919 vec<tree> vec_dsts,
3920 gimple_stmt_iterator *gsi,
3921 slp_tree slp_node, enum tree_code code,
3922 stmt_vec_info *prev_stmt_info)
3923 {
3924 unsigned int i;
3925 tree vop0, vop1, new_tmp, vec_dest;
3926 gimple *new_stmt;
3927 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3928
3929 vec_dest = vec_dsts.pop ();
3930
3931 for (i = 0; i < vec_oprnds->length (); i += 2)
3932 {
3933 /* Create demotion operation. */
3934 vop0 = (*vec_oprnds)[i];
3935 vop1 = (*vec_oprnds)[i + 1];
3936 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3937 new_tmp = make_ssa_name (vec_dest, new_stmt);
3938 gimple_assign_set_lhs (new_stmt, new_tmp);
3939 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3940
3941 if (multi_step_cvt)
	/* Store the resulting vector for the next recursive call.  */
3943 (*vec_oprnds)[i/2] = new_tmp;
3944 else
3945 {
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in the vector info of the scalar
	     statement (or in the STMT_VINFO_RELATED_STMT chain).  */
3949 if (slp_node)
3950 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3951 else
3952 {
3953 if (!*prev_stmt_info)
3954 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3955 else
3956 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3957
3958 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3959 }
3960 }
3961 }
3962
  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in a demotion operation to the
     destination type.  */
3967 if (multi_step_cvt)
3968 {
3969 /* At each level of recursion we have half of the operands we had at the
3970 previous level. */
3971 vec_oprnds->truncate ((i+1)/2);
3972 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3973 stmt, vec_dsts, gsi, slp_node,
3974 VEC_PACK_TRUNC_EXPR,
3975 prev_stmt_info);
3976 }
3977
3978 vec_dsts.quick_push (vec_dest);
3979 }
3980
3981
3982 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3983 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3984 the resulting vectors and call the function recursively. */
3985
3986 static void
3987 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3988 vec<tree> *vec_oprnds1,
3989 gimple *stmt, tree vec_dest,
3990 gimple_stmt_iterator *gsi,
3991 enum tree_code code1,
3992 enum tree_code code2, tree decl1,
3993 tree decl2, int op_type)
3994 {
3995 int i;
3996 tree vop0, vop1, new_tmp1, new_tmp2;
3997 gimple *new_stmt1, *new_stmt2;
3998 vec<tree> vec_tmp = vNULL;
3999
4000 vec_tmp.create (vec_oprnds0->length () * 2);
4001 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4002 {
4003 if (op_type == binary_op)
4004 vop1 = (*vec_oprnds1)[i];
4005 else
4006 vop1 = NULL_TREE;
4007
      /* Generate the two halves of the promotion operation.  */
4009 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4010 op_type, vec_dest, gsi, stmt);
4011 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4012 op_type, vec_dest, gsi, stmt);
4013 if (is_gimple_call (new_stmt1))
4014 {
4015 new_tmp1 = gimple_call_lhs (new_stmt1);
4016 new_tmp2 = gimple_call_lhs (new_stmt2);
4017 }
4018 else
4019 {
4020 new_tmp1 = gimple_assign_lhs (new_stmt1);
4021 new_tmp2 = gimple_assign_lhs (new_stmt2);
4022 }
4023
4024 /* Store the results for the next step. */
4025 vec_tmp.quick_push (new_tmp1);
4026 vec_tmp.quick_push (new_tmp2);
4027 }
4028
4029 vec_oprnds0->release ();
4030 *vec_oprnds0 = vec_tmp;
4031 }
4032
4033
/* Check if STMT performs a conversion operation that can be vectorized.
4035 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4036 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4037 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4038
4039 static bool
4040 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
4041 gimple **vec_stmt, slp_tree slp_node)
4042 {
4043 tree vec_dest;
4044 tree scalar_dest;
4045 tree op0, op1 = NULL_TREE;
4046 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4047 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4048 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4049 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4050 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4051 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4052 tree new_temp;
4053 gimple *def_stmt;
4054 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4055 int ndts = 2;
4056 gimple *new_stmt = NULL;
4057 stmt_vec_info prev_stmt_info;
4058 int nunits_in;
4059 int nunits_out;
4060 tree vectype_out, vectype_in;
4061 int ncopies, i, j;
4062 tree lhs_type, rhs_type;
4063 enum { NARROW, NONE, WIDEN } modifier;
4064 vec<tree> vec_oprnds0 = vNULL;
4065 vec<tree> vec_oprnds1 = vNULL;
4066 tree vop0;
4067 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4068 vec_info *vinfo = stmt_info->vinfo;
4069 int multi_step_cvt = 0;
4070 vec<tree> interm_types = vNULL;
4071 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4072 int op_type;
4073 unsigned short fltsz;
4074
4075 /* Is STMT a vectorizable conversion? */
4076
4077 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4078 return false;
4079
4080 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4081 && ! vec_stmt)
4082 return false;
4083
4084 if (!is_gimple_assign (stmt))
4085 return false;
4086
4087 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4088 return false;
4089
4090 code = gimple_assign_rhs_code (stmt);
4091 if (!CONVERT_EXPR_CODE_P (code)
4092 && code != FIX_TRUNC_EXPR
4093 && code != FLOAT_EXPR
4094 && code != WIDEN_MULT_EXPR
4095 && code != WIDEN_LSHIFT_EXPR)
4096 return false;
4097
4098 op_type = TREE_CODE_LENGTH (code);
4099
4100 /* Check types of lhs and rhs. */
4101 scalar_dest = gimple_assign_lhs (stmt);
4102 lhs_type = TREE_TYPE (scalar_dest);
4103 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4104
4105 op0 = gimple_assign_rhs1 (stmt);
4106 rhs_type = TREE_TYPE (op0);
4107
4108 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4109 && !((INTEGRAL_TYPE_P (lhs_type)
4110 && INTEGRAL_TYPE_P (rhs_type))
4111 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4112 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4113 return false;
4114
4115 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4116 && ((INTEGRAL_TYPE_P (lhs_type)
4117 && !type_has_mode_precision_p (lhs_type))
4118 || (INTEGRAL_TYPE_P (rhs_type)
4119 && !type_has_mode_precision_p (rhs_type))))
4120 {
4121 if (dump_enabled_p ())
4122 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4123 "type conversion to/from bit-precision unsupported."
4124 "\n");
4125 return false;
4126 }
4127
4128 /* Check the operands of the operation. */
4129 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
4130 {
4131 if (dump_enabled_p ())
4132 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4133 "use not simple.\n");
4134 return false;
4135 }
4136 if (op_type == binary_op)
4137 {
4138 bool ok;
4139
4140 op1 = gimple_assign_rhs2 (stmt);
4141 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
4142 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4143 OP1. */
4144 if (CONSTANT_CLASS_P (op0))
4145 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
4146 else
4147 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
4148
4149 if (!ok)
4150 {
4151 if (dump_enabled_p ())
4152 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4153 "use not simple.\n");
4154 return false;
4155 }
4156 }
4157
  /* If op0 is an external or constant def, use a vector type of
     the same size as the output vector type.  */
4160 if (!vectype_in)
4161 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4162 if (vec_stmt)
4163 gcc_assert (vectype_in);
4164 if (!vectype_in)
4165 {
4166 if (dump_enabled_p ())
4167 {
4168 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4169 "no vectype for scalar type ");
4170 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4171 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4172 }
4173
4174 return false;
4175 }
4176
4177 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4178 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4179 {
4180 if (dump_enabled_p ())
4181 {
4182 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4183 "can't convert between boolean and non "
4184 "boolean vectors");
4185 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4186 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4187 }
4188
4189 return false;
4190 }
4191
4192 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4193 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
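  /* Fewer elements in the input vector than in the output vector means
     the elements are being narrowed (several input vectors pack into one
     output vector); more input elements means they are being widened (one
     input vector unpacks into several output vectors).  */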
4194 if (nunits_in < nunits_out)
4195 modifier = NARROW;
4196 else if (nunits_out == nunits_in)
4197 modifier = NONE;
4198 else
4199 modifier = WIDEN;
4200
4201 /* Multiple types in SLP are handled by creating the appropriate number of
4202 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4203 case of SLP. */
4204 if (slp_node)
4205 ncopies = 1;
4206 else if (modifier == NARROW)
4207 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
4208 else
4209 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4210
4211 /* Sanity check: make sure that at least one copy of the vectorized stmt
4212 needs to be generated. */
4213 gcc_assert (ncopies >= 1);
4214
4215 machine_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4216 machine_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4217
4218 /* Supportable by target? */
4219 switch (modifier)
4220 {
4221 case NONE:
4222 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4223 return false;
4224 if (supportable_convert_operation (code, vectype_out, vectype_in,
4225 &decl1, &code1))
4226 break;
4227 /* FALLTHRU */
4228 unsupported:
4229 if (dump_enabled_p ())
4230 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4231 "conversion not supported by target.\n");
4232 return false;
4233
4234 case WIDEN:
4235 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
4236 &code1, &code2, &multi_step_cvt,
4237 &interm_types))
4238 {
	  /* A binary widening operation can only be supported directly by
	     the architecture.  */
4241 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4242 break;
4243 }
4244
4245 if (code != FLOAT_EXPR
4246 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4247 goto unsupported;
4248
4249 fltsz = GET_MODE_SIZE (lhs_mode);
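      /* We are widening an integer to a wider float without direct target
	 support: look for an intermediate integer mode, no wider than the
	 float result, that the input can be widened to and that the
	 target can then convert to VECTYPE_OUT.  */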
4250 FOR_EACH_2XWIDER_MODE (rhs_mode, rhs_mode)
4251 {
4252 if (GET_MODE_SIZE (rhs_mode) > fltsz)
4253 break;
4254
4255 cvt_type
4256 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4257 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4258 if (cvt_type == NULL_TREE)
4259 goto unsupported;
4260
4261 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4262 {
4263 if (!supportable_convert_operation (code, vectype_out,
4264 cvt_type, &decl1, &codecvt1))
4265 goto unsupported;
4266 }
4267 else if (!supportable_widening_operation (code, stmt, vectype_out,
4268 cvt_type, &codecvt1,
4269 &codecvt2, &multi_step_cvt,
4270 &interm_types))
4271 continue;
4272 else
4273 gcc_assert (multi_step_cvt == 0);
4274
4275 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
4276 vectype_in, &code1, &code2,
4277 &multi_step_cvt, &interm_types))
4278 break;
4279 }
4280
4281 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
4282 goto unsupported;
4283
4284 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4285 codecvt2 = ERROR_MARK;
4286 else
4287 {
4288 multi_step_cvt++;
4289 interm_types.safe_push (cvt_type);
4290 cvt_type = NULL_TREE;
4291 }
4292 break;
4293
4294 case NARROW:
4295 gcc_assert (op_type == unary_op);
4296 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4297 &code1, &multi_step_cvt,
4298 &interm_types))
4299 break;
4300
4301 if (code != FIX_TRUNC_EXPR
4302 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
4303 goto unsupported;
4304
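      /* We are truncating a float to a narrower integer without direct
	 target support: first convert the float input to an integer
	 vector of the same width (CVT_TYPE), then narrow that integer
	 vector to VECTYPE_OUT.  */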
4305 cvt_type
4306 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4307 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4308 if (cvt_type == NULL_TREE)
4309 goto unsupported;
4310 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4311 &decl1, &codecvt1))
4312 goto unsupported;
4313 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4314 &code1, &multi_step_cvt,
4315 &interm_types))
4316 break;
4317 goto unsupported;
4318
4319 default:
4320 gcc_unreachable ();
4321 }
4322
4323 if (!vec_stmt) /* transformation not required. */
4324 {
4325 if (dump_enabled_p ())
4326 dump_printf_loc (MSG_NOTE, vect_location,
4327 "=== vectorizable_conversion ===\n");
4328 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4329 {
4330 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4331 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4332 }
4333 else if (modifier == NARROW)
4334 {
4335 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4336 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4337 }
4338 else
4339 {
4340 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4341 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4342 }
4343 interm_types.release ();
4344 return true;
4345 }
4346
4347 /* Transform. */
4348 if (dump_enabled_p ())
4349 dump_printf_loc (MSG_NOTE, vect_location,
4350 "transform conversion. ncopies = %d.\n", ncopies);
4351
4352 if (op_type == binary_op)
4353 {
4354 if (CONSTANT_CLASS_P (op0))
4355 op0 = fold_convert (TREE_TYPE (op1), op0);
4356 else if (CONSTANT_CLASS_P (op1))
4357 op1 = fold_convert (TREE_TYPE (op0), op1);
4358 }
4359
  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate types (INTERM_TYPES)
     received from supportable_*_operation, and store them in the correct
     order for future use in vect_create_vectorized_*_stmts ().  */
4365 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4366 vec_dest = vect_create_destination_var (scalar_dest,
4367 (cvt_type && modifier == WIDEN)
4368 ? cvt_type : vectype_out);
4369 vec_dsts.quick_push (vec_dest);
4370
4371 if (multi_step_cvt)
4372 {
4373 for (i = interm_types.length () - 1;
4374 interm_types.iterate (i, &intermediate_type); i--)
4375 {
4376 vec_dest = vect_create_destination_var (scalar_dest,
4377 intermediate_type);
4378 vec_dsts.quick_push (vec_dest);
4379 }
4380 }
4381
4382 if (cvt_type)
4383 vec_dest = vect_create_destination_var (scalar_dest,
4384 modifier == WIDEN
4385 ? vectype_out : cvt_type);
4386
4387 if (!slp_node)
4388 {
4389 if (modifier == WIDEN)
4390 {
4391 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4392 if (op_type == binary_op)
4393 vec_oprnds1.create (1);
4394 }
4395 else if (modifier == NARROW)
4396 vec_oprnds0.create (
4397 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4398 }
4399 else if (code == WIDEN_LSHIFT_EXPR)
4400 vec_oprnds1.create (slp_node->vec_stmts_size);
4401
4402 last_oprnd = op0;
4403 prev_stmt_info = NULL;
4404 switch (modifier)
4405 {
4406 case NONE:
4407 for (j = 0; j < ncopies; j++)
4408 {
4409 if (j == 0)
4410 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
4411 else
4412 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
4413
4414 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4415 {
	      /* Arguments are ready.  Create the new vector stmt.  */
4417 if (code1 == CALL_EXPR)
4418 {
4419 new_stmt = gimple_build_call (decl1, 1, vop0);
4420 new_temp = make_ssa_name (vec_dest, new_stmt);
4421 gimple_call_set_lhs (new_stmt, new_temp);
4422 }
4423 else
4424 {
4425 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4426 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
4427 new_temp = make_ssa_name (vec_dest, new_stmt);
4428 gimple_assign_set_lhs (new_stmt, new_temp);
4429 }
4430
4431 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4432 if (slp_node)
4433 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4434 else
4435 {
4436 if (!prev_stmt_info)
4437 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4438 else
4439 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4440 prev_stmt_info = vinfo_for_stmt (new_stmt);
4441 }
4442 }
4443 }
4444 break;
4445
4446 case WIDEN:
    /* In case the vectorization factor (VF) is bigger than the number
       of elements that we can fit in a vectype (nunits), we have to
       generate more than one vector stmt, i.e., we need to "unroll"
       the vector stmt by a factor of VF/nunits.  */
4451 for (j = 0; j < ncopies; j++)
4452 {
4453 /* Handle uses. */
4454 if (j == 0)
4455 {
4456 if (slp_node)
4457 {
4458 if (code == WIDEN_LSHIFT_EXPR)
4459 {
4460 unsigned int k;
4461
4462 vec_oprnd1 = op1;
4463 /* Store vec_oprnd1 for every vector stmt to be created
4464 for SLP_NODE. We check during the analysis that all
4465 the shift arguments are the same. */
4466 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4467 vec_oprnds1.quick_push (vec_oprnd1);
4468
4469 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4470 slp_node);
4471 }
4472 else
4473 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
4474 &vec_oprnds1, slp_node);
4475 }
4476 else
4477 {
4478 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
4479 vec_oprnds0.quick_push (vec_oprnd0);
4480 if (op_type == binary_op)
4481 {
4482 if (code == WIDEN_LSHIFT_EXPR)
4483 vec_oprnd1 = op1;
4484 else
4485 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
4486 vec_oprnds1.quick_push (vec_oprnd1);
4487 }
4488 }
4489 }
4490 else
4491 {
4492 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4493 vec_oprnds0.truncate (0);
4494 vec_oprnds0.quick_push (vec_oprnd0);
4495 if (op_type == binary_op)
4496 {
4497 if (code == WIDEN_LSHIFT_EXPR)
4498 vec_oprnd1 = op1;
4499 else
4500 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
4501 vec_oprnd1);
4502 vec_oprnds1.truncate (0);
4503 vec_oprnds1.quick_push (vec_oprnd1);
4504 }
4505 }
4506
4507 /* Arguments are ready. Create the new vector stmts. */
4508 for (i = multi_step_cvt; i >= 0; i--)
4509 {
4510 tree this_dest = vec_dsts[i];
4511 enum tree_code c1 = code1, c2 = code2;
4512 if (i == 0 && codecvt2 != ERROR_MARK)
4513 {
4514 c1 = codecvt1;
4515 c2 = codecvt2;
4516 }
4517 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
4518 &vec_oprnds1,
4519 stmt, this_dest, gsi,
4520 c1, c2, decl1, decl2,
4521 op_type);
4522 }
4523
4524 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4525 {
4526 if (cvt_type)
4527 {
4528 if (codecvt1 == CALL_EXPR)
4529 {
4530 new_stmt = gimple_build_call (decl1, 1, vop0);
4531 new_temp = make_ssa_name (vec_dest, new_stmt);
4532 gimple_call_set_lhs (new_stmt, new_temp);
4533 }
4534 else
4535 {
4536 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4537 new_temp = make_ssa_name (vec_dest);
4538 new_stmt = gimple_build_assign (new_temp, codecvt1,
4539 vop0);
4540 }
4541
4542 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4543 }
4544 else
4545 new_stmt = SSA_NAME_DEF_STMT (vop0);
4546
4547 if (slp_node)
4548 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4549 else
4550 {
4551 if (!prev_stmt_info)
4552 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4553 else
4554 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4555 prev_stmt_info = vinfo_for_stmt (new_stmt);
4556 }
4557 }
4558 }
4559
4560 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4561 break;
4562
4563 case NARROW:
    /* In case the vectorization factor (VF) is bigger than the number
       of elements that we can fit in a vectype (nunits), we have to
       generate more than one vector stmt, i.e., we need to "unroll"
       the vector stmt by a factor of VF/nunits.  */
4568 for (j = 0; j < ncopies; j++)
4569 {
4570 /* Handle uses. */
4571 if (slp_node)
4572 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4573 slp_node);
4574 else
4575 {
4576 vec_oprnds0.truncate (0);
4577 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4578 vect_pow2 (multi_step_cvt) - 1);
4579 }
4580
4581 /* Arguments are ready. Create the new vector stmts. */
4582 if (cvt_type)
4583 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4584 {
4585 if (codecvt1 == CALL_EXPR)
4586 {
4587 new_stmt = gimple_build_call (decl1, 1, vop0);
4588 new_temp = make_ssa_name (vec_dest, new_stmt);
4589 gimple_call_set_lhs (new_stmt, new_temp);
4590 }
4591 else
4592 {
4593 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4594 new_temp = make_ssa_name (vec_dest);
4595 new_stmt = gimple_build_assign (new_temp, codecvt1,
4596 vop0);
4597 }
4598
4599 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4600 vec_oprnds0[i] = new_temp;
4601 }
4602
4603 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4604 stmt, vec_dsts, gsi,
4605 slp_node, code1,
4606 &prev_stmt_info);
4607 }
4608
4609 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4610 break;
4611 }
4612
4613 vec_oprnds0.release ();
4614 vec_oprnds1.release ();
4615 interm_types.release ();
4616
4617 return true;
4618 }
4619
4620
4621 /* Function vectorizable_assignment.
4622
4623 Check if STMT performs an assignment (copy) that can be vectorized.
4624 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4626 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4627
4628 static bool
4629 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4630 gimple **vec_stmt, slp_tree slp_node)
4631 {
4632 tree vec_dest;
4633 tree scalar_dest;
4634 tree op;
4635 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4636 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4637 tree new_temp;
4638 gimple *def_stmt;
4639 enum vect_def_type dt[1] = {vect_unknown_def_type};
4640 int ndts = 1;
4641 int ncopies;
4642 int i, j;
4643 vec<tree> vec_oprnds = vNULL;
4644 tree vop;
4645 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4646 vec_info *vinfo = stmt_info->vinfo;
4647 gimple *new_stmt = NULL;
4648 stmt_vec_info prev_stmt_info = NULL;
4649 enum tree_code code;
4650 tree vectype_in;
4651
4652 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4653 return false;
4654
4655 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4656 && ! vec_stmt)
4657 return false;
4658
4659 /* Is vectorizable assignment? */
4660 if (!is_gimple_assign (stmt))
4661 return false;
4662
4663 scalar_dest = gimple_assign_lhs (stmt);
4664 if (TREE_CODE (scalar_dest) != SSA_NAME)
4665 return false;
4666
4667 code = gimple_assign_rhs_code (stmt);
4668 if (gimple_assign_single_p (stmt)
4669 || code == PAREN_EXPR
4670 || CONVERT_EXPR_CODE_P (code))
4671 op = gimple_assign_rhs1 (stmt);
4672 else
4673 return false;
4674
4675 if (code == VIEW_CONVERT_EXPR)
4676 op = TREE_OPERAND (op, 0);
4677
4678 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4679 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4680
4681 /* Multiple types in SLP are handled by creating the appropriate number of
4682 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4683 case of SLP. */
4684 if (slp_node)
4685 ncopies = 1;
4686 else
4687 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4688
4689 gcc_assert (ncopies >= 1);
4690
4691 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4692 {
4693 if (dump_enabled_p ())
4694 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4695 "use not simple.\n");
4696 return false;
4697 }
4698
4699 /* We can handle NOP_EXPR conversions that do not change the number
4700 of elements or the vector size. */
4701 if ((CONVERT_EXPR_CODE_P (code)
4702 || code == VIEW_CONVERT_EXPR)
4703 && (!vectype_in
4704 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4705 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4706 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4707 return false;
4708
4709 /* We do not handle bit-precision changes. */
4710 if ((CONVERT_EXPR_CODE_P (code)
4711 || code == VIEW_CONVERT_EXPR)
4712 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4713 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
4714 || !type_has_mode_precision_p (TREE_TYPE (op)))
4715 /* But a conversion that does not change the bit-pattern is ok. */
4716 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4717 > TYPE_PRECISION (TREE_TYPE (op)))
4718 && TYPE_UNSIGNED (TREE_TYPE (op)))
	 /* Conversion between boolean types of different sizes is
	    a simple assignment in case their vectypes are the same
	    boolean vectors.  */
4722 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
4723 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
4724 {
4725 if (dump_enabled_p ())
4726 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4727 "type conversion to/from bit-precision "
4728 "unsupported.\n");
4729 return false;
4730 }
4731
4732 if (!vec_stmt) /* transformation not required. */
4733 {
4734 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4735 if (dump_enabled_p ())
4736 dump_printf_loc (MSG_NOTE, vect_location,
4737 "=== vectorizable_assignment ===\n");
4738 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4739 return true;
4740 }
4741
4742 /* Transform. */
4743 if (dump_enabled_p ())
4744 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4745
4746 /* Handle def. */
4747 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4748
4749 /* Handle use. */
4750 for (j = 0; j < ncopies; j++)
4751 {
4752 /* Handle uses. */
4753 if (j == 0)
4754 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
4755 else
4756 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4757
      /* Arguments are ready.  Create the new vector stmt.  */
4759 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4760 {
4761 if (CONVERT_EXPR_CODE_P (code)
4762 || code == VIEW_CONVERT_EXPR)
4763 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4764 new_stmt = gimple_build_assign (vec_dest, vop);
4765 new_temp = make_ssa_name (vec_dest, new_stmt);
4766 gimple_assign_set_lhs (new_stmt, new_temp);
4767 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4768 if (slp_node)
4769 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4770 }
4771
4772 if (slp_node)
4773 continue;
4774
4775 if (j == 0)
4776 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4777 else
4778 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4779
4780 prev_stmt_info = vinfo_for_stmt (new_stmt);
4781 }
4782
4783 vec_oprnds.release ();
4784 return true;
4785 }
4786
4787
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as a shift by a scalar or as a shift by a vector.  */
4790
4791 bool
4792 vect_supportable_shift (enum tree_code code, tree scalar_type)
4793 {
4795 machine_mode vec_mode;
4796 optab optab;
4797 int icode;
4798 tree vectype;
4799
4800 vectype = get_vectype_for_scalar_type (scalar_type);
4801 if (!vectype)
4802 return false;
4803
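  /* Try a shift-by-scalar pattern first; fall back to shift-by-vector
     if the target only provides that form.  */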
4804 optab = optab_for_tree_code (code, vectype, optab_scalar);
4805 if (!optab
4806 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4807 {
4808 optab = optab_for_tree_code (code, vectype, optab_vector);
4809 if (!optab
4810 || (optab_handler (optab, TYPE_MODE (vectype))
4811 == CODE_FOR_nothing))
4812 return false;
4813 }
4814
4815 vec_mode = TYPE_MODE (vectype);
4816 icode = (int) optab_handler (optab, vec_mode);
4817 if (icode == CODE_FOR_nothing)
4818 return false;
4819
4820 return true;
4821 }
4822
4823
4824 /* Function vectorizable_shift.
4825
4826 Check if STMT performs a shift operation that can be vectorized.
4827 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4829 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4830
4831 static bool
4832 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4833 gimple **vec_stmt, slp_tree slp_node)
4834 {
4835 tree vec_dest;
4836 tree scalar_dest;
4837 tree op0, op1 = NULL;
4838 tree vec_oprnd1 = NULL_TREE;
4839 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4840 tree vectype;
4841 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4842 enum tree_code code;
4843 machine_mode vec_mode;
4844 tree new_temp;
4845 optab optab;
4846 int icode;
4847 machine_mode optab_op2_mode;
4848 gimple *def_stmt;
4849 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4850 int ndts = 2;
4851 gimple *new_stmt = NULL;
4852 stmt_vec_info prev_stmt_info;
4853 int nunits_in;
4854 int nunits_out;
4855 tree vectype_out;
4856 tree op1_vectype;
4857 int ncopies;
4858 int j, i;
4859 vec<tree> vec_oprnds0 = vNULL;
4860 vec<tree> vec_oprnds1 = vNULL;
4861 tree vop0, vop1;
4862 unsigned int k;
4863 bool scalar_shift_arg = true;
4864 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4865 vec_info *vinfo = stmt_info->vinfo;
4866 int vf;
4867
4868 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4869 return false;
4870
4871 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4872 && ! vec_stmt)
4873 return false;
4874
4875 /* Is STMT a vectorizable binary/unary operation? */
4876 if (!is_gimple_assign (stmt))
4877 return false;
4878
4879 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4880 return false;
4881
4882 code = gimple_assign_rhs_code (stmt);
4883
4884 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4885 || code == RROTATE_EXPR))
4886 return false;
4887
4888 scalar_dest = gimple_assign_lhs (stmt);
4889 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4890 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
4891 {
4892 if (dump_enabled_p ())
4893 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4894 "bit-precision shifts not supported.\n");
4895 return false;
4896 }
4897
4898 op0 = gimple_assign_rhs1 (stmt);
4899 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4900 {
4901 if (dump_enabled_p ())
4902 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4903 "use not simple.\n");
4904 return false;
4905 }
  /* If op0 is an external or constant def, use a vector type with
     the same size as the output vector type.  */
4908 if (!vectype)
4909 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4910 if (vec_stmt)
4911 gcc_assert (vectype);
4912 if (!vectype)
4913 {
4914 if (dump_enabled_p ())
4915 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4916 "no vectype for scalar type\n");
4917 return false;
4918 }
4919
4920 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4921 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4922 if (nunits_out != nunits_in)
4923 return false;
4924
4925 op1 = gimple_assign_rhs2 (stmt);
4926 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4927 {
4928 if (dump_enabled_p ())
4929 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4930 "use not simple.\n");
4931 return false;
4932 }
4933
4934 if (loop_vinfo)
4935 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4936 else
4937 vf = 1;
4938
4939 /* Multiple types in SLP are handled by creating the appropriate number of
4940 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4941 case of SLP. */
4942 if (slp_node)
4943 ncopies = 1;
4944 else
4945 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4946
4947 gcc_assert (ncopies >= 1);
4948
  /* Determine whether the shift amount is a vector or a scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */
4951
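  /* A shift amount defined inside the loop (or by an induction) generally
     differs between lanes, so outside of SLP it forces the vector/vector
     form; constants and invariants can stay scalar.  */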
4952 if ((dt[1] == vect_internal_def
4953 || dt[1] == vect_induction_def)
4954 && !slp_node)
4955 scalar_shift_arg = false;
4956 else if (dt[1] == vect_constant_def
4957 || dt[1] == vect_external_def
4958 || dt[1] == vect_internal_def)
4959 {
	  /* In SLP, we need to check whether the shift count is the same
	     in all the stmts; in loops, if it is a constant or invariant,
	     it is always a scalar shift.  */
4963 if (slp_node)
4964 {
4965 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4966 gimple *slpstmt;
4967
4968 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4969 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4970 scalar_shift_arg = false;
4971 }
4972
	  /* If the shift amount is computed by a pattern stmt we cannot
	     use the scalar amount directly, thus give up and use a vector
	     shift.  */
4976 if (dt[1] == vect_internal_def)
4977 {
4978 gimple *def = SSA_NAME_DEF_STMT (op1);
4979 if (is_pattern_stmt_p (vinfo_for_stmt (def)))
4980 scalar_shift_arg = false;
4981 }
4982 }
4983 else
4984 {
4985 if (dump_enabled_p ())
4986 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4987 "operand mode requires invariant argument.\n");
4988 return false;
4989 }
4990
4991 /* Vector shifted by vector. */
4992 if (!scalar_shift_arg)
4993 {
4994 optab = optab_for_tree_code (code, vectype, optab_vector);
4995 if (dump_enabled_p ())
4996 dump_printf_loc (MSG_NOTE, vect_location,
4997 "vector/vector shift/rotate found.\n");
4998
4999 if (!op1_vectype)
5000 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5001 if (op1_vectype == NULL_TREE
5002 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5003 {
5004 if (dump_enabled_p ())
5005 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5006 "unusable type for last operand in"
5007 " vector/vector shift/rotate.\n");
5008 return false;
5009 }
5010 }
  /* See if the machine has a vector shifted by scalar insn, and if not,
     then see if it has a vector shifted by vector insn.  */
5013 else
5014 {
5015 optab = optab_for_tree_code (code, vectype, optab_scalar);
5016 if (optab
5017 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5018 {
5019 if (dump_enabled_p ())
5020 dump_printf_loc (MSG_NOTE, vect_location,
5021 "vector/scalar shift/rotate found.\n");
5022 }
5023 else
5024 {
5025 optab = optab_for_tree_code (code, vectype, optab_vector);
5026 if (optab
5027 && (optab_handler (optab, TYPE_MODE (vectype))
5028 != CODE_FOR_nothing))
5029 {
5030 scalar_shift_arg = false;
5031
5032 if (dump_enabled_p ())
5033 dump_printf_loc (MSG_NOTE, vect_location,
5034 "vector/vector shift/rotate found.\n");
5035
		  /* Unlike the other binary operators, shifts/rotates have
		     an int rhs instead of one of the same type as the lhs,
		     so make sure the scalar is of the right type if we are
		     dealing with vectors of long long/long/short/char.  */
5040 if (dt[1] == vect_constant_def)
5041 op1 = fold_convert (TREE_TYPE (vectype), op1);
5042 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5043 TREE_TYPE (op1)))
5044 {
5045 if (slp_node
5046 && TYPE_MODE (TREE_TYPE (vectype))
5047 != TYPE_MODE (TREE_TYPE (op1)))
5048 {
5049 if (dump_enabled_p ())
5050 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5051 "unusable type for last operand in"
5052 " vector/vector shift/rotate.\n");
5053 return false;
5054 }
5055 if (vec_stmt && !slp_node)
5056 {
5057 op1 = fold_convert (TREE_TYPE (vectype), op1);
5058 op1 = vect_init_vector (stmt, op1,
5059 TREE_TYPE (vectype), NULL);
5060 }
5061 }
5062 }
5063 }
5064 }
5065
5066 /* Supportable by target? */
5067 if (!optab)
5068 {
5069 if (dump_enabled_p ())
5070 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5071 "no optab.\n");
5072 return false;
5073 }
5074 vec_mode = TYPE_MODE (vectype);
5075 icode = (int) optab_handler (optab, vec_mode);
5076 if (icode == CODE_FOR_nothing)
5077 {
5078 if (dump_enabled_p ())
5079 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5080 "op not supported by target.\n");
5081 /* Check only during analysis. */
5082 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
5083 || (vf < vect_min_worthwhile_factor (code)
5084 && !vec_stmt))
5085 return false;
5086 if (dump_enabled_p ())
5087 dump_printf_loc (MSG_NOTE, vect_location,
5088 "proceeding using word mode.\n");
5089 }
5090
5091 /* Worthwhile without SIMD support? Check only during analysis. */
5092 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
5093 && vf < vect_min_worthwhile_factor (code)
5094 && !vec_stmt)
5095 {
5096 if (dump_enabled_p ())
5097 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5098 "not worthwhile without SIMD support.\n");
5099 return false;
5100 }
5101
5102 if (!vec_stmt) /* transformation not required. */
5103 {
5104 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5105 if (dump_enabled_p ())
5106 dump_printf_loc (MSG_NOTE, vect_location,
5107 "=== vectorizable_shift ===\n");
5108 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5109 return true;
5110 }
5111
5112 /* Transform. */
5113
5114 if (dump_enabled_p ())
5115 dump_printf_loc (MSG_NOTE, vect_location,
5116 "transform binary/unary operation.\n");
5117
5118 /* Handle def. */
5119 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5120
5121 prev_stmt_info = NULL;
5122 for (j = 0; j < ncopies; j++)
5123 {
5124 /* Handle uses. */
5125 if (j == 0)
5126 {
5127 if (scalar_shift_arg)
5128 {
	      /* Vector shl and shr insn patterns can be defined with a
		 scalar operand 2 (the shift operand).  In this case, use
		 the constant or loop-invariant op1 directly, without
		 extending it to vector mode first.  */
5133 optab_op2_mode = insn_data[icode].operand[2].mode;
5134 if (!VECTOR_MODE_P (optab_op2_mode))
5135 {
5136 if (dump_enabled_p ())
5137 dump_printf_loc (MSG_NOTE, vect_location,
5138 "operand 1 using scalar mode.\n");
5139 vec_oprnd1 = op1;
5140 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5141 vec_oprnds1.quick_push (vec_oprnd1);
5142 if (slp_node)
5143 {
5144 /* Store vec_oprnd1 for every vector stmt to be created
5145 for SLP_NODE. We check during the analysis that all
5146 the shift arguments are the same.
5147 TODO: Allow different constants for different vector
5148 stmts generated for an SLP instance. */
5149 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5150 vec_oprnds1.quick_push (vec_oprnd1);
5151 }
5152 }
5153 }
5154
	  /* vec_oprnd1 is available if operand 1 should be of a scalar type
	     (a special case for certain kinds of vector shifts); otherwise,
	     operand 1 should be of a vector type (the usual case).  */
5158 if (vec_oprnd1)
5159 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5160 slp_node);
5161 else
5162 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5163 slp_node);
5164 }
5165 else
5166 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5167
5168 /* Arguments are ready. Create the new vector stmt. */
5169 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5170 {
5171 vop1 = vec_oprnds1[i];
5172 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5173 new_temp = make_ssa_name (vec_dest, new_stmt);
5174 gimple_assign_set_lhs (new_stmt, new_temp);
5175 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5176 if (slp_node)
5177 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5178 }
5179
5180 if (slp_node)
5181 continue;
5182
5183 if (j == 0)
5184 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5185 else
5186 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5187 prev_stmt_info = vinfo_for_stmt (new_stmt);
5188 }
5189
5190 vec_oprnds0.release ();
5191 vec_oprnds1.release ();
5192
5193 return true;
5194 }
5195
5196
5197 /* Function vectorizable_operation.
5198
5199 Check if STMT performs a binary, unary or ternary operation that can
5200 be vectorized.
5201 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5203 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5204
5205 static bool
5206 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
5207 gimple **vec_stmt, slp_tree slp_node)
5208 {
5209 tree vec_dest;
5210 tree scalar_dest;
5211 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5212 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5213 tree vectype;
5214 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5215 enum tree_code code;
5216 machine_mode vec_mode;
5217 tree new_temp;
5218 int op_type;
5219 optab optab;
5220 bool target_support_p;
5221 gimple *def_stmt;
5222 enum vect_def_type dt[3]
5223 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5224 int ndts = 3;
5225 gimple *new_stmt = NULL;
5226 stmt_vec_info prev_stmt_info;
5227 int nunits_in;
5228 int nunits_out;
5229 tree vectype_out;
5230 int ncopies;
5231 int j, i;
5232 vec<tree> vec_oprnds0 = vNULL;
5233 vec<tree> vec_oprnds1 = vNULL;
5234 vec<tree> vec_oprnds2 = vNULL;
5235 tree vop0, vop1, vop2;
5236 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5237 vec_info *vinfo = stmt_info->vinfo;
5238 int vf;
5239
5240 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5241 return false;
5242
5243 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5244 && ! vec_stmt)
5245 return false;
5246
5247 /* Is STMT a vectorizable binary/unary operation? */
5248 if (!is_gimple_assign (stmt))
5249 return false;
5250
5251 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5252 return false;
5253
5254 code = gimple_assign_rhs_code (stmt);
5255
5256 /* For pointer addition, we should use the normal plus for
5257 the vector addition. */
5258 if (code == POINTER_PLUS_EXPR)
5259 code = PLUS_EXPR;
5260
  /* Support only unary, binary and ternary operations.  */
5262 op_type = TREE_CODE_LENGTH (code);
5263 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5264 {
5265 if (dump_enabled_p ())
5266 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5267 "num. args = %d (not unary/binary/ternary op).\n",
5268 op_type);
5269 return false;
5270 }
5271
5272 scalar_dest = gimple_assign_lhs (stmt);
5273 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5274
5275 /* Most operations cannot handle bit-precision types without extra
5276 truncations. */
5277 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5278 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
      /* Exceptions are bitwise binary operations.  */
5280 && code != BIT_IOR_EXPR
5281 && code != BIT_XOR_EXPR
5282 && code != BIT_AND_EXPR)
5283 {
5284 if (dump_enabled_p ())
5285 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5286 "bit-precision arithmetic not supported.\n");
5287 return false;
5288 }
5289
5290 op0 = gimple_assign_rhs1 (stmt);
5291 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5292 {
5293 if (dump_enabled_p ())
5294 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5295 "use not simple.\n");
5296 return false;
5297 }
  /* If op0 is an external or constant def, use a vector type with
     the same size as the output vector type.  */
5300 if (!vectype)
5301 {
      /* For a boolean type we cannot determine the vectype from an
	 invariant value (we don't know whether it is a vector of
	 booleans or a vector of integers).  We use the output vectype
	 because operations on booleans don't change the type.  */
5307 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5308 {
5309 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5310 {
5311 if (dump_enabled_p ())
5312 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5313 "not supported operation on bool value.\n");
5314 return false;
5315 }
5316 vectype = vectype_out;
5317 }
5318 else
5319 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5320 }
5321 if (vec_stmt)
5322 gcc_assert (vectype);
5323 if (!vectype)
5324 {
5325 if (dump_enabled_p ())
5326 {
5327 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5328 "no vectype for scalar type ");
5329 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5330 TREE_TYPE (op0));
5331 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5332 }
5333
5334 return false;
5335 }
5336
5337 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5338 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5339 if (nunits_out != nunits_in)
5340 return false;
5341
5342 if (op_type == binary_op || op_type == ternary_op)
5343 {
5344 op1 = gimple_assign_rhs2 (stmt);
5345 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
5346 {
5347 if (dump_enabled_p ())
5348 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5349 "use not simple.\n");
5350 return false;
5351 }
5352 }
5353 if (op_type == ternary_op)
5354 {
5355 op2 = gimple_assign_rhs3 (stmt);
5356 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
5357 {
5358 if (dump_enabled_p ())
5359 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5360 "use not simple.\n");
5361 return false;
5362 }
5363 }
5364
5365 if (loop_vinfo)
5366 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5367 else
5368 vf = 1;
5369
5370 /* Multiple types in SLP are handled by creating the appropriate number of
5371 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5372 case of SLP. */
5373 if (slp_node)
5374 ncopies = 1;
5375 else
5376 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
5377
5378 gcc_assert (ncopies >= 1);
5379
5380 /* Shifts are handled in vectorizable_shift (). */
5381 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5382 || code == RROTATE_EXPR)
5383 return false;
5384
5385 /* Supportable by target? */
5386
5387 vec_mode = TYPE_MODE (vectype);
5388 if (code == MULT_HIGHPART_EXPR)
5389 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5390 else
5391 {
5392 optab = optab_for_tree_code (code, vectype, optab_default);
5393 if (!optab)
5394 {
5395 if (dump_enabled_p ())
5396 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5397 "no optab.\n");
5398 return false;
5399 }
5400 target_support_p = (optab_handler (optab, vec_mode)
5401 != CODE_FOR_nothing);
5402 }
5403
5404 if (!target_support_p)
5405 {
5406 if (dump_enabled_p ())
5407 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5408 "op not supported by target.\n");
5409 /* Word-mode fallback needs a word-sized mode; the VF check applies only during analysis. */
5410 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
5411 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
5412 return false;
5413 if (dump_enabled_p ())
5414 dump_printf_loc (MSG_NOTE, vect_location,
5415 "proceeding using word mode.\n");
5416 }
5417
5418 /* Worthwhile without SIMD support? Check only during analysis. */
5419 if (!VECTOR_MODE_P (vec_mode)
5420 && !vec_stmt
5421 && vf < vect_min_worthwhile_factor (code))
5422 {
5423 if (dump_enabled_p ())
5424 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5425 "not worthwhile without SIMD support.\n");
5426 return false;
5427 }
5428
5429 if (!vec_stmt) /* transformation not required. */
5430 {
5431 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5432 if (dump_enabled_p ())
5433 dump_printf_loc (MSG_NOTE, vect_location,
5434 "=== vectorizable_operation ===\n");
5435 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5436 return true;
5437 }
5438
5439 /* Transform. */
5440
5441 if (dump_enabled_p ())
5442 dump_printf_loc (MSG_NOTE, vect_location,
5443 "transform binary/unary operation.\n");
5444
5445 /* Handle def. */
5446 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5447
5448 /* In case the vectorization factor (VF) is bigger than the number
5449 of elements that we can fit in a vectype (nunits), we have to generate
5450 more than one vector stmt - i.e., we need to "unroll" the
5451 vector stmt by a factor VF/nunits. In doing so, we record a pointer
5452 from one copy of the vector stmt to the next, in the field
5453 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
5454 stages to find the correct vector defs to be used when vectorizing
5455 stmts that use the defs of the current stmt. The example below
5456 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
5457 we need to create 4 vectorized stmts):
5458
5459 before vectorization:
5460 RELATED_STMT VEC_STMT
5461 S1: x = memref - -
5462 S2: z = x + 1 - -
5463
5464 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
5465 there):
5466 RELATED_STMT VEC_STMT
5467 VS1_0: vx0 = memref0 VS1_1 -
5468 VS1_1: vx1 = memref1 VS1_2 -
5469 VS1_2: vx2 = memref2 VS1_3 -
5470 VS1_3: vx3 = memref3 - -
5471 S1: x = load - VS1_0
5472 S2: z = x + 1 - -
5473
5474 step2: vectorize stmt S2 (done here):
5475 To vectorize stmt S2 we first need to find the relevant vector
5476 def for the first operand 'x'. This is, as usual, obtained from
5477 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
5478 that defines 'x' (S1). This way we find the stmt VS1_0, and the
5479 relevant vector def 'vx0'. Having found 'vx0' we can generate
5480 the vector stmt VS2_0, and as usual, record it in the
5481 STMT_VINFO_VEC_STMT of stmt S2.
5482 When creating the second copy (VS2_1), we obtain the relevant vector
5483 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
5484 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
5485 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
5486 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
5487 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
5488 chain of stmts and pointers:
5489 RELATED_STMT VEC_STMT
5490 VS1_0: vx0 = memref0 VS1_1 -
5491 VS1_1: vx1 = memref1 VS1_2 -
5492 VS1_2: vx2 = memref2 VS1_3 -
5493 VS1_3: vx3 = memref3 - -
5494 S1: x = load - VS1_0
5495 VS2_0: vz0 = vx0 + v1 VS2_1 -
5496 VS2_1: vz1 = vx1 + v1 VS2_2 -
5497 VS2_2: vz2 = vx2 + v1 VS2_3 -
5498 VS2_3: vz3 = vx3 + v1 - -
5499 S2: z = x + 1 - VS2_0 */
5500
5501 prev_stmt_info = NULL;
5502 for (j = 0; j < ncopies; j++)
5503 {
5504 /* Handle uses. */
5505 if (j == 0)
5506 {
5507 if (op_type == binary_op || op_type == ternary_op)
5508 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5509 slp_node);
5510 else
5511 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5512 slp_node);
5513 if (op_type == ternary_op)
5514 vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
5515 slp_node);
5516 }
5517 else
5518 {
5519 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5520 if (op_type == ternary_op)
5521 {
5522 tree vec_oprnd = vec_oprnds2.pop ();
5523 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
5524 vec_oprnd));
5525 }
5526 }
5527
5528 /* Arguments are ready. Create the new vector stmt. */
5529 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5530 {
5531 vop1 = ((op_type == binary_op || op_type == ternary_op)
5532 ? vec_oprnds1[i] : NULL_TREE);
5533 vop2 = ((op_type == ternary_op)
5534 ? vec_oprnds2[i] : NULL_TREE);
5535 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
5536 new_temp = make_ssa_name (vec_dest, new_stmt);
5537 gimple_assign_set_lhs (new_stmt, new_temp);
5538 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5539 if (slp_node)
5540 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5541 }
5542
5543 if (slp_node)
5544 continue;
5545
5546 if (j == 0)
5547 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5548 else
5549 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5550 prev_stmt_info = vinfo_for_stmt (new_stmt);
5551 }
5552
5553 vec_oprnds0.release ();
5554 vec_oprnds1.release ();
5555 vec_oprnds2.release ();
5556
5557 return true;
5558 }
5559
5560 /* A helper function to ensure data reference DR's base alignment
5561 for STMT_INFO. */
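/* For example (a sketch): if the vectype is a 16-byte vector and the
   DR is based on a file-scope array with only 4-byte alignment, the
   base decl's alignment is raised (via the symbol table for decls
   visible there) so that aligned vector accesses become possible.  */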
5562
5563 static void
5564 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
5565 {
5566 if (!dr->aux)
5567 return;
5568
5569 if (DR_VECT_AUX (dr)->base_misaligned)
5570 {
5571 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5572 tree base_decl = DR_VECT_AUX (dr)->base_decl;
5573
5574 if (decl_in_symtab_p (base_decl))
5575 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
5576 else
5577 {
5578 SET_DECL_ALIGN (base_decl, TYPE_ALIGN (vectype));
5579 DECL_USER_ALIGN (base_decl) = 1;
5580 }
5581 DR_VECT_AUX (dr)->base_misaligned = false;
5582 }
5583 }
5584
5585
5586 /* Function get_group_alias_ptr_type.
5587
5588 Return the alias type for the group starting at FIRST_STMT. */
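/* If the group members disagree - e.g. (hypothetically) an interleaved
   group whose stores go through differently-typed references - we
   conservatively fall back to ptr_type_node, which conflicts with
   every alias set.  */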
5589
5590 static tree
5591 get_group_alias_ptr_type (gimple *first_stmt)
5592 {
5593 struct data_reference *first_dr, *next_dr;
5594 gimple *next_stmt;
5595
5596 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5597 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
5598 while (next_stmt)
5599 {
5600 next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
5601 if (get_alias_set (DR_REF (first_dr))
5602 != get_alias_set (DR_REF (next_dr)))
5603 {
5604 if (dump_enabled_p ())
5605 dump_printf_loc (MSG_NOTE, vect_location,
5606 "conflicting alias set types.\n");
5607 return ptr_type_node;
5608 }
5609 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5610 }
5611 return reference_alias_ptr_type (DR_REF (first_dr));
5612 }
5613
5614
5615 /* Function vectorizable_store.
5616
5617 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5618 can be vectorized.
5619 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5620 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5621 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
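/* For instance (an illustrative sketch), a scalar store "a[i] = x_1"
   vectorized with VF == 4 becomes, per copy, a single

     MEM[(int *)vectp_a.0_2] = vect_x.1_3;

   with the data-ref pointer bumped by the vector size between copies.  */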
5622
5623 static bool
5624 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5625 slp_tree slp_node)
5626 {
5627 tree scalar_dest;
5628 tree data_ref;
5629 tree op;
5630 tree vec_oprnd = NULL_TREE;
5631 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5632 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5633 tree elem_type;
5634 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5635 struct loop *loop = NULL;
5636 machine_mode vec_mode;
5637 tree dummy;
5638 enum dr_alignment_support alignment_support_scheme;
5639 gimple *def_stmt;
5640 enum vect_def_type dt;
5641 stmt_vec_info prev_stmt_info = NULL;
5642 tree dataref_ptr = NULL_TREE;
5643 tree dataref_offset = NULL_TREE;
5644 gimple *ptr_incr = NULL;
5645 int ncopies;
5646 int j;
5647 gimple *next_stmt, *first_stmt;
5648 bool grouped_store;
5649 unsigned int group_size, i;
5650 vec<tree> oprnds = vNULL;
5651 vec<tree> result_chain = vNULL;
5652 bool inv_p;
5653 tree offset = NULL_TREE;
5654 vec<tree> vec_oprnds = vNULL;
5655 bool slp = (slp_node != NULL);
5656 unsigned int vec_num;
5657 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5658 vec_info *vinfo = stmt_info->vinfo;
5659 tree aggr_type;
5660 gather_scatter_info gs_info;
5661 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5662 gimple *new_stmt;
5663 int vf;
5664 vec_load_store_type vls_type;
5665 tree ref_type;
5666
5667 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5668 return false;
5669
5670 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5671 && ! vec_stmt)
5672 return false;
5673
5674 /* Is vectorizable store? */
5675
5676 if (!is_gimple_assign (stmt))
5677 return false;
5678
5679 scalar_dest = gimple_assign_lhs (stmt);
5680 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5681 && is_pattern_stmt_p (stmt_info))
5682 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5683 if (TREE_CODE (scalar_dest) != ARRAY_REF
5684 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5685 && TREE_CODE (scalar_dest) != INDIRECT_REF
5686 && TREE_CODE (scalar_dest) != COMPONENT_REF
5687 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5688 && TREE_CODE (scalar_dest) != REALPART_EXPR
5689 && TREE_CODE (scalar_dest) != MEM_REF)
5690 return false;
5691
5692 /* Cannot have hybrid store SLP -- that would mean storing to the
5693 same location twice. */
5694 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
5695
5696 gcc_assert (gimple_assign_single_p (stmt));
5697
5698 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
5699 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5700
5701 if (loop_vinfo)
5702 {
5703 loop = LOOP_VINFO_LOOP (loop_vinfo);
5704 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5705 }
5706 else
5707 vf = 1;
5708
5709 /* Multiple types in SLP are handled by creating the appropriate number of
5710 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5711 case of SLP. */
5712 if (slp)
5713 ncopies = 1;
5714 else
5715 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5716
5717 gcc_assert (ncopies >= 1);
5718
5719 /* FORNOW. This restriction should be relaxed. */
5720 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5721 {
5722 if (dump_enabled_p ())
5723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5724 "multiple types in nested loop.\n");
5725 return false;
5726 }
5727
5728 op = gimple_assign_rhs1 (stmt);
5729
5730 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
5731 {
5732 if (dump_enabled_p ())
5733 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5734 "use not simple.\n");
5735 return false;
5736 }
5737
5738 if (dt == vect_constant_def || dt == vect_external_def)
5739 vls_type = VLS_STORE_INVARIANT;
5740 else
5741 vls_type = VLS_STORE;
5742
5743 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
5744 return false;
5745
5746 elem_type = TREE_TYPE (vectype);
5747 vec_mode = TYPE_MODE (vectype);
5748
5749 /* FORNOW. In some cases can vectorize even if data-type not supported
5750 (e.g. array initialization with 0). */
5751 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5752 return false;
5753
5754 if (!STMT_VINFO_DATA_REF (stmt_info))
5755 return false;
5756
5757 vect_memory_access_type memory_access_type;
5758 if (!get_load_store_type (stmt, vectype, slp, vls_type, ncopies,
5759 &memory_access_type, &gs_info))
5760 return false;
5761
5762 if (!vec_stmt) /* transformation not required. */
5763 {
5764 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
5765 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5766 /* The SLP costs are calculated during SLP analysis. */
5767 if (!PURE_SLP_STMT (stmt_info))
5768 vect_model_store_cost (stmt_info, ncopies, memory_access_type, dt,
5769 NULL, NULL, NULL);
5770 return true;
5771 }
5772 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
5773
5774 /* Transform. */
5775
5776 ensure_base_align (stmt_info, dr);
5777
5778 if (memory_access_type == VMAT_GATHER_SCATTER)
5779 {
5780 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5781 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
5782 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5783 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5784 edge pe = loop_preheader_edge (loop);
5785 gimple_seq seq;
5786 basic_block new_bb;
5787 enum { NARROW, NONE, WIDEN } modifier;
5788 int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
5789
5790 if (nunits == (unsigned int) scatter_off_nunits)
5791 modifier = NONE;
5792 else if (nunits == (unsigned int) scatter_off_nunits / 2)
5793 {
5794 unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
5795 modifier = WIDEN;
5796
5797 for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
5798 sel[i] = i | nunits;
5799
5800 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel);
5801 gcc_assert (perm_mask != NULL_TREE);
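/* E.g. (illustrative): with nunits == 4 and scatter_off_nunits == 8
   this builds the selector {4, 5, 6, 7, 4, 5, 6, 7}, so odd-numbered
   copies scatter using the upper half of the offset vector.  */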
5802 }
5803 else if (nunits == (unsigned int) scatter_off_nunits * 2)
5804 {
5805 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
5806 modifier = NARROW;
5807
5808 for (i = 0; i < (unsigned int) nunits; ++i)
5809 sel[i] = i | scatter_off_nunits;
5810
5811 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
5812 gcc_assert (perm_mask != NULL_TREE);
5813 ncopies *= 2;
5814 }
5815 else
5816 gcc_unreachable ();
5817
5818 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
5819 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5820 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5821 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5822 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5823 scaletype = TREE_VALUE (arglist);
5824
5825 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5826 && TREE_CODE (rettype) == VOID_TYPE);
5827
5828 ptr = fold_convert (ptrtype, gs_info.base);
5829 if (!is_gimple_min_invariant (ptr))
5830 {
5831 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5832 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5833 gcc_assert (!new_bb);
5834 }
5835
5836 /* Currently we support only unconditional scatter stores,
5837 so mask should be all ones. */
5838 mask = build_int_cst (masktype, -1);
5839 mask = vect_init_vector (stmt, mask, masktype, NULL);
5840
5841 scale = build_int_cst (scaletype, gs_info.scale);
5842
5843 prev_stmt_info = NULL;
5844 for (j = 0; j < ncopies; ++j)
5845 {
5846 if (j == 0)
5847 {
5848 src = vec_oprnd1
5849 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5850 op = vec_oprnd0
5851 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
5852 }
5853 else if (modifier != NONE && (j & 1))
5854 {
5855 if (modifier == WIDEN)
5856 {
5857 src = vec_oprnd1
5858 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5859 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5860 stmt, gsi);
5861 }
5862 else if (modifier == NARROW)
5863 {
5864 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5865 stmt, gsi);
5866 op = vec_oprnd0
5867 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5868 vec_oprnd0);
5869 }
5870 else
5871 gcc_unreachable ();
5872 }
5873 else
5874 {
5875 src = vec_oprnd1
5876 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5877 op = vec_oprnd0
5878 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5879 vec_oprnd0);
5880 }
5881
5882 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5883 {
5884 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
5885 == TYPE_VECTOR_SUBPARTS (srctype));
5886 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5887 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5888 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5889 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5890 src = var;
5891 }
5892
5893 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5894 {
5895 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5896 == TYPE_VECTOR_SUBPARTS (idxtype));
5897 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5898 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5899 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5900 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5901 op = var;
5902 }
5903
5904 new_stmt
5905 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
5906
5907 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5908
5909 if (prev_stmt_info == NULL)
5910 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5911 else
5912 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5913 prev_stmt_info = vinfo_for_stmt (new_stmt);
5914 }
5915 return true;
5916 }
5917
5918 grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info);
5919 if (grouped_store)
5920 {
5921 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5922 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5923 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5924
5925 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5926
5927 /* FORNOW */
5928 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5929
5930 /* We vectorize all the stmts of the interleaving group when we
5931 reach the last stmt in the group. */
5932 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5933 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5934 && !slp)
5935 {
5936 *vec_stmt = NULL;
5937 return true;
5938 }
5939
5940 if (slp)
5941 {
5942 grouped_store = false;
5943 /* VEC_NUM is the number of vect stmts to be created for this
5944 group. */
5945 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5946 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5947 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
5948 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5949 op = gimple_assign_rhs1 (first_stmt);
5950 }
5951 else
5952 /* VEC_NUM is the number of vect stmts to be created for this
5953 group. */
5954 vec_num = group_size;
5955
5956 ref_type = get_group_alias_ptr_type (first_stmt);
5957 }
5958 else
5959 {
5960 first_stmt = stmt;
5961 first_dr = dr;
5962 group_size = vec_num = 1;
5963 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
5964 }
5965
5966 if (dump_enabled_p ())
5967 dump_printf_loc (MSG_NOTE, vect_location,
5968 "transform store. ncopies = %d\n", ncopies);
5969
5970 if (memory_access_type == VMAT_ELEMENTWISE
5971 || memory_access_type == VMAT_STRIDED_SLP)
5972 {
5973 gimple_stmt_iterator incr_gsi;
5974 bool insert_after;
5975 gimple *incr;
5976 tree offvar;
5977 tree ivstep;
5978 tree running_off;
5979 gimple_seq stmts = NULL;
5980 tree stride_base, stride_step, alias_off;
5981 tree vec_oprnd;
5982 unsigned int g;
5983
5984 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5985
5986 stride_base
5987 = fold_build_pointer_plus
5988 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
5989 size_binop (PLUS_EXPR,
5990 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
5991 convert_to_ptrofftype (DR_INIT (first_dr))));
5992 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
5993
5994 /* For a store with loop-invariant (but other than power-of-2)
5995 stride (i.e. not a grouped access) like so:
5996
5997 for (i = 0; i < n; i += stride)
5998 array[i] = ...;
5999
6000 we generate a new induction variable and new stores from
6001 the components of the (vectorized) rhs:
6002
6003 for (j = 0; ; j += VF*stride)
6004 vectemp = ...;
6005 tmp1 = vectemp[0];
6006 array[j] = tmp1;
6007 tmp2 = vectemp[1];
6008 array[j + stride] = tmp2;
6009 ...
6010 */
6011
6012 unsigned nstores = nunits;
6013 unsigned lnel = 1;
6014 tree ltype = elem_type;
6015 tree lvectype = vectype;
6016 if (slp)
6017 {
6018 if (group_size < nunits
6019 && nunits % group_size == 0)
6020 {
6021 nstores = nunits / group_size;
6022 lnel = group_size;
6023 ltype = build_vector_type (elem_type, group_size);
6024 lvectype = vectype;
6025
6026 /* First check whether the vec_extract optab supports extracting
6027 the vector elts directly; if not, try the fallback below. */
6028 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6029 machine_mode vmode = mode_for_vector (elmode, group_size);
6030 if (! VECTOR_MODE_P (vmode)
6031 || (convert_optab_handler (vec_extract_optab,
6032 TYPE_MODE (vectype), vmode)
6033 == CODE_FOR_nothing))
6034 {
6035 /* Try to avoid emitting an extract of vector elements
6036 by performing the extracts using an integer type of the
6037 same size, extracting from a vector of those and then
6038 re-interpreting it as the original vector type if
6039 supported. */
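/* E.g. (illustrative): extracting pairs of 32-bit floats from a
   V4SF vector can instead be done as two DImode extracts from the
   same data viewed as V2DI, halving the number of stores.  */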
6040 unsigned lsize
6041 = group_size * GET_MODE_BITSIZE (elmode);
6042 elmode = int_mode_for_size (lsize, 0).require ();
6043 vmode = mode_for_vector (elmode, nunits / group_size);
6044 /* If we can't construct such a vector fall back to
6045 element extracts from the original vector type and
6046 element size stores. */
6047 if (VECTOR_MODE_P (vmode)
6048 && (convert_optab_handler (vec_extract_optab,
6049 vmode, elmode)
6050 != CODE_FOR_nothing))
6051 {
6052 nstores = nunits / group_size;
6053 lnel = group_size;
6054 ltype = build_nonstandard_integer_type (lsize, 1);
6055 lvectype = build_vector_type (ltype, nstores);
6056 }
6057 /* Else fall back to vector extraction anyway.
6058 Fewer stores are more important than avoiding spilling
6059 of the vector we extract from. Compared to the
6060 construction case in vectorizable_load, no store-forwarding
6061 issue exists here for reasonable archs. */
6062 }
6063 }
6064 else if (group_size >= nunits
6065 && group_size % nunits == 0)
6066 {
6067 nstores = 1;
6068 lnel = nunits;
6069 ltype = vectype;
6070 lvectype = vectype;
6071 }
6072 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6073 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6074 }
6075
6076 ivstep = stride_step;
6077 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6078 build_int_cst (TREE_TYPE (ivstep), vf));
6079
6080 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6081
6082 create_iv (stride_base, ivstep, NULL,
6083 loop, &incr_gsi, insert_after,
6084 &offvar, NULL);
6085 incr = gsi_stmt (incr_gsi);
6086 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6087
6088 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
6089 if (stmts)
6090 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6091
6092 prev_stmt_info = NULL;
6093 alias_off = build_int_cst (ref_type, 0);
6094 next_stmt = first_stmt;
6095 for (g = 0; g < group_size; g++)
6096 {
6097 running_off = offvar;
6098 if (g)
6099 {
6100 tree size = TYPE_SIZE_UNIT (ltype);
6101 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6102 size);
6103 tree newoff = copy_ssa_name (running_off, NULL);
6104 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6105 running_off, pos);
6106 vect_finish_stmt_generation (stmt, incr, gsi);
6107 running_off = newoff;
6108 }
6109 unsigned int group_el = 0;
6110 unsigned HOST_WIDE_INT
6111 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6112 for (j = 0; j < ncopies; j++)
6113 {
6114 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
6115 and first_stmt == stmt. */
6116 if (j == 0)
6117 {
6118 if (slp)
6119 {
6120 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
6121 slp_node);
6122 vec_oprnd = vec_oprnds[0];
6123 }
6124 else
6125 {
6126 gcc_assert (gimple_assign_single_p (next_stmt));
6127 op = gimple_assign_rhs1 (next_stmt);
6128 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6129 }
6130 }
6131 else
6132 {
6133 if (slp)
6134 vec_oprnd = vec_oprnds[j];
6135 else
6136 {
6137 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
6138 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
6139 }
6140 }
6141 /* Pun the vector to extract from if necessary. */
6142 if (lvectype != vectype)
6143 {
6144 tree tem = make_ssa_name (lvectype);
6145 gimple *pun
6146 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6147 lvectype, vec_oprnd));
6148 vect_finish_stmt_generation (stmt, pun, gsi);
6149 vec_oprnd = tem;
6150 }
6151 for (i = 0; i < nstores; i++)
6152 {
6153 tree newref, newoff;
6154 gimple *incr, *assign;
6155 tree size = TYPE_SIZE (ltype);
6156 /* Extract the i'th component. */
6157 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6158 bitsize_int (i), size);
6159 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6160 size, pos);
6161
6162 elem = force_gimple_operand_gsi (gsi, elem, true,
6163 NULL_TREE, true,
6164 GSI_SAME_STMT);
6165
6166 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6167 group_el * elsz);
6168 newref = build2 (MEM_REF, ltype,
6169 running_off, this_off);
6170
6171 /* And store it to *running_off. */
6172 assign = gimple_build_assign (newref, elem);
6173 vect_finish_stmt_generation (stmt, assign, gsi);
6174
6175 group_el += lnel;
6176 if (! slp
6177 || group_el == group_size)
6178 {
6179 newoff = copy_ssa_name (running_off, NULL);
6180 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6181 running_off, stride_step);
6182 vect_finish_stmt_generation (stmt, incr, gsi);
6183
6184 running_off = newoff;
6185 group_el = 0;
6186 }
6187 if (g == group_size - 1
6188 && !slp)
6189 {
6190 if (j == 0 && i == 0)
6191 STMT_VINFO_VEC_STMT (stmt_info)
6192 = *vec_stmt = assign;
6193 else
6194 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
6195 prev_stmt_info = vinfo_for_stmt (assign);
6196 }
6197 }
6198 }
6199 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6200 if (slp)
6201 break;
6202 }
6203
6204 vec_oprnds.release ();
6205 return true;
6206 }
6207
6208 auto_vec<tree> dr_chain (group_size);
6209 oprnds.create (group_size);
6210
6211 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6212 gcc_assert (alignment_support_scheme);
6213 /* Targets with store-lane instructions must not require explicit
6214 realignment. */
6215 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
6216 || alignment_support_scheme == dr_aligned
6217 || alignment_support_scheme == dr_unaligned_supported);
6218
6219 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6220 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6221 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6222
6223 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6224 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6225 else
6226 aggr_type = vectype;
6227
6228 /* In case the vectorization factor (VF) is bigger than the number
6229 of elements that we can fit in a vectype (nunits), we have to generate
6230 more than one vector stmt - i.e., we need to "unroll" the
6231 vector stmt by a factor VF/nunits. For more details see documentation in
6232 vect_get_vec_def_for_stmt_copy. */
6233
6234 /* In case of interleaving (non-unit grouped access):
6235
6236 S1: &base + 2 = x2
6237 S2: &base = x0
6238 S3: &base + 1 = x1
6239 S4: &base + 3 = x3
6240
6241 We create vectorized stores starting from the base address (the access of
6242 the first stmt in the chain, S2 in the above example) when the last store
6243 stmt of the chain (S4) is reached:
6244
6245 VS1: &base = vx2
6246 VS2: &base + vec_size*1 = vx0
6247 VS3: &base + vec_size*2 = vx1
6248 VS4: &base + vec_size*3 = vx3
6249
6250 Then permutation statements are generated:
6251
6252 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6253 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6254 ...
6255
6256 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6257 (the order of the data-refs in the output of vect_permute_store_chain
6258 corresponds to the order of scalar stmts in the interleaving chain - see
6259 the documentation of vect_permute_store_chain()).
6260
6261 In case of both multiple types and interleaving, above vector stores and
6262 permutation stmts are created for every copy. The result vector stmts are
6263 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6264 STMT_VINFO_RELATED_STMT for the next copies.
6265 */
6266
6267 prev_stmt_info = NULL;
6268 for (j = 0; j < ncopies; j++)
6269 {
6270
6271 if (j == 0)
6272 {
6273 if (slp)
6274 {
6275 /* Get vectorized arguments for SLP_NODE. */
6276 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
6277 NULL, slp_node);
6278
6279 vec_oprnd = vec_oprnds[0];
6280 }
6281 else
6282 {
6283 /* For interleaved stores we collect vectorized defs for all the
6284 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6285 used as an input to vect_permute_store_chain(), and OPRNDS as
6286 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6287
6288 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6289 OPRNDS are of size 1. */
6290 next_stmt = first_stmt;
6291 for (i = 0; i < group_size; i++)
6292 {
6293 /* Since gaps are not supported for interleaved stores,
6294 GROUP_SIZE is the exact number of stmts in the chain.
6295 Therefore, NEXT_STMT can't be NULL. If there is no
6296 interleaving, GROUP_SIZE is 1, and only one
6297 iteration of the loop will be executed. */
6298 gcc_assert (next_stmt
6299 && gimple_assign_single_p (next_stmt));
6300 op = gimple_assign_rhs1 (next_stmt);
6301
6302 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6303 dr_chain.quick_push (vec_oprnd);
6304 oprnds.quick_push (vec_oprnd);
6305 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6306 }
6307 }
6308
6309 /* We should have caught mismatched types earlier. */
6310 gcc_assert (useless_type_conversion_p (vectype,
6311 TREE_TYPE (vec_oprnd)));
6312 bool simd_lane_access_p
6313 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6314 if (simd_lane_access_p
6315 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6316 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6317 && integer_zerop (DR_OFFSET (first_dr))
6318 && integer_zerop (DR_INIT (first_dr))
6319 && alias_sets_conflict_p (get_alias_set (aggr_type),
6320 get_alias_set (TREE_TYPE (ref_type))))
6321 {
6322 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6323 dataref_offset = build_int_cst (ref_type, 0);
6324 inv_p = false;
6325 }
6326 else
6327 dataref_ptr
6328 = vect_create_data_ref_ptr (first_stmt, aggr_type,
6329 simd_lane_access_p ? loop : NULL,
6330 offset, &dummy, gsi, &ptr_incr,
6331 simd_lane_access_p, &inv_p);
6332 gcc_assert (bb_vinfo || !inv_p);
6333 }
6334 else
6335 {
6336 /* For interleaved stores we created vectorized defs for all the
6337 defs stored in OPRNDS in the previous iteration (previous copy).
6338 DR_CHAIN is then used as an input to vect_permute_store_chain(),
6339 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
6340 next copy.
6341 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6342 OPRNDS are of size 1. */
6343 for (i = 0; i < group_size; i++)
6344 {
6345 op = oprnds[i];
6346 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
6347 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
6348 dr_chain[i] = vec_oprnd;
6349 oprnds[i] = vec_oprnd;
6350 }
6351 if (dataref_offset)
6352 dataref_offset
6353 = int_const_binop (PLUS_EXPR, dataref_offset,
6354 TYPE_SIZE_UNIT (aggr_type));
6355 else
6356 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6357 TYPE_SIZE_UNIT (aggr_type));
6358 }
6359
6360 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6361 {
6362 tree vec_array;
6363
6364 /* Combine all the vectors into an array. */
6365 vec_array = create_vector_array (vectype, vec_num);
6366 for (i = 0; i < vec_num; i++)
6367 {
6368 vec_oprnd = dr_chain[i];
6369 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
6370 }
6371
6372 /* Emit:
6373 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
6374 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
6375 gcall *call = gimple_build_call_internal (IFN_STORE_LANES, 1,
6376 vec_array);
6377 gimple_call_set_lhs (call, data_ref);
6378 gimple_call_set_nothrow (call, true);
6379 new_stmt = call;
6380 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6381 }
6382 else
6383 {
6384 new_stmt = NULL;
6385 if (grouped_store)
6386 {
6387 if (j == 0)
6388 result_chain.create (group_size);
6389 /* Permute. */
6390 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
6391 &result_chain);
6392 }
6393
6394 next_stmt = first_stmt;
6395 for (i = 0; i < vec_num; i++)
6396 {
6397 unsigned align, misalign;
6398
6399 if (i > 0)
6400 /* Bump the vector pointer. */
6401 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6402 stmt, NULL_TREE);
6403
6404 if (slp)
6405 vec_oprnd = vec_oprnds[i];
6406 else if (grouped_store)
6407 /* For grouped stores vectorized defs are interleaved in
6408 vect_permute_store_chain(). */
6409 vec_oprnd = result_chain[i];
6410
6411 data_ref = fold_build2 (MEM_REF, vectype,
6412 dataref_ptr,
6413 dataref_offset
6414 ? dataref_offset
6415 : build_int_cst (ref_type, 0));
6416 align = TYPE_ALIGN_UNIT (vectype);
6417 if (aligned_access_p (first_dr))
6418 misalign = 0;
6419 else if (DR_MISALIGNMENT (first_dr) == -1)
6420 {
6421 align = dr_alignment (vect_dr_behavior (first_dr));
6422 misalign = 0;
6423 TREE_TYPE (data_ref)
6424 = build_aligned_type (TREE_TYPE (data_ref),
6425 align * BITS_PER_UNIT);
6426 }
6427 else
6428 {
6429 TREE_TYPE (data_ref)
6430 = build_aligned_type (TREE_TYPE (data_ref),
6431 TYPE_ALIGN (elem_type));
6432 misalign = DR_MISALIGNMENT (first_dr);
6433 }
6434 if (dataref_offset == NULL_TREE
6435 && TREE_CODE (dataref_ptr) == SSA_NAME)
6436 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
6437 misalign);
6438
6439 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6440 {
6441 tree perm_mask = perm_mask_for_reverse (vectype);
6442 tree perm_dest
6443 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
6444 vectype);
6445 tree new_temp = make_ssa_name (perm_dest);
6446
6447 /* Generate the permute statement. */
6448 gimple *perm_stmt
6449 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
6450 vec_oprnd, perm_mask);
6451 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6452
6453 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
6454 vec_oprnd = new_temp;
6455 }
6456
6457 /* Arguments are ready. Create the new vector stmt. */
6458 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
6459 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6460
6461 if (slp)
6462 continue;
6463
6464 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6465 if (!next_stmt)
6466 break;
6467 }
6468 }
6469 if (!slp)
6470 {
6471 if (j == 0)
6472 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6473 else
6474 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6475 prev_stmt_info = vinfo_for_stmt (new_stmt);
6476 }
6477 }
6478
6479 oprnds.release ();
6480 result_chain.release ();
6481 vec_oprnds.release ();
6482
6483 return true;
6484 }
6485
6486 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
6487 VECTOR_CST mask. No checks are made that the target platform supports the
6488 mask, so callers may wish to test can_vec_perm_p separately, or use
6489 vect_gen_perm_mask_checked. */
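/* For instance (a sketch): for a 4-element vector type and
   SEL = {0, 4, 1, 5} this returns the VECTOR_CST {0, 4, 1, 5} of a
   matching integer vector type, suitable as the selector operand of a
   VEC_PERM_EXPR interleaving the low halves of two vectors.  */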
6490
6491 tree
6492 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
6493 {
6494 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
6495 int i, nunits;
6496
6497 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6498
6499 mask_elt_type = lang_hooks.types.type_for_mode
6500 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))).require (), 1);
6501 mask_type = get_vectype_for_scalar_type (mask_elt_type);
6502
6503 mask_elts = XALLOCAVEC (tree, nunits);
6504 for (i = nunits - 1; i >= 0; i--)
6505 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
6506 mask_vec = build_vector (mask_type, mask_elts);
6507
6508 return mask_vec;
6509 }
6510
6511 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
6512 i.e. that the target supports the pattern _for arbitrary input vectors_. */
6513
6514 tree
6515 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
6516 {
6517 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
6518 return vect_gen_perm_mask_any (vectype, sel);
6519 }
6520
6521 /* Given vector variables X and Y that were generated for the scalar
6522 STMT, generate instructions to permute the vector elements of X and Y
6523 using the permutation mask MASK_VEC, insert them at *GSI, and return
6524 the permuted vector variable. */
6525
6526 static tree
6527 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
6528 gimple_stmt_iterator *gsi)
6529 {
6530 tree vectype = TREE_TYPE (x);
6531 tree perm_dest, data_ref;
6532 gimple *perm_stmt;
6533
6534 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
6535 data_ref = make_ssa_name (perm_dest);
6536
6537 /* Generate the permute statement. */
6538 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
6539 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6540
6541 return data_ref;
6542 }
6543
6544 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
6545 inserting them on the loop's preheader edge. Returns true if we
6546 were successful in doing so (and thus STMT can then be moved),
6547 otherwise returns false. */
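/* For instance (a sketch): to hoist a load "x_3 = MEM[a_1]" where
   "a_1 = &b + o_2" is defined inside LOOP but o_2 is not, the def of
   a_1 is itself moved to the preheader first; had a_1's operands in
   turn been defined inside LOOP, we would give up rather than
   recurse.  */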
6548
6549 static bool
6550 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
6551 {
6552 ssa_op_iter i;
6553 tree op;
6554 bool any = false;
6555
6556 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6557 {
6558 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6559 if (!gimple_nop_p (def_stmt)
6560 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6561 {
6562 /* Make sure we don't need to recurse. While we could do
6563 so in simple cases, when there are more complex use webs
6564 we don't have an easy way to preserve stmt order to fulfil
6565 dependencies within them. */
6566 tree op2;
6567 ssa_op_iter i2;
6568 if (gimple_code (def_stmt) == GIMPLE_PHI)
6569 return false;
6570 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
6571 {
6572 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
6573 if (!gimple_nop_p (def_stmt2)
6574 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
6575 return false;
6576 }
6577 any = true;
6578 }
6579 }
6580
6581 if (!any)
6582 return true;
6583
6584 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6585 {
6586 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6587 if (!gimple_nop_p (def_stmt)
6588 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6589 {
6590 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
6591 gsi_remove (&gsi, false);
6592 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
6593 }
6594 }
6595
6596 return true;
6597 }
6598
6599 /* Function vectorizable_load.
6600
6601 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
6602 can be vectorized.
6603 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6604 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6605 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
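/* For instance (an illustrative sketch), a scalar load "x_1 = a[i]"
   vectorized with VF == 4 becomes, per copy, a single

     vect_x.0_2 = MEM[(int *)vectp_a.1_3];

   with the data-ref pointer bumped by the vector size between copies.  */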
6606
6607 static bool
6608 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6609 slp_tree slp_node, slp_instance slp_node_instance)
6610 {
6611 tree scalar_dest;
6612 tree vec_dest = NULL;
6613 tree data_ref = NULL;
6614 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6615 stmt_vec_info prev_stmt_info;
6616 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6617 struct loop *loop = NULL;
6618 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
6619 bool nested_in_vect_loop = false;
6620 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6621 tree elem_type;
6622 tree new_temp;
6623 machine_mode mode;
6624 gimple *new_stmt = NULL;
6625 tree dummy;
6626 enum dr_alignment_support alignment_support_scheme;
6627 tree dataref_ptr = NULL_TREE;
6628 tree dataref_offset = NULL_TREE;
6629 gimple *ptr_incr = NULL;
6630 int ncopies;
6631 int i, j, group_size, group_gap_adj;
6632 tree msq = NULL_TREE, lsq;
6633 tree offset = NULL_TREE;
6634 tree byte_offset = NULL_TREE;
6635 tree realignment_token = NULL_TREE;
6636 gphi *phi = NULL;
6637 vec<tree> dr_chain = vNULL;
6638 bool grouped_load = false;
6639 gimple *first_stmt;
6640 gimple *first_stmt_for_drptr = NULL;
6641 bool inv_p;
6642 bool compute_in_loop = false;
6643 struct loop *at_loop;
6644 int vec_num;
6645 bool slp = (slp_node != NULL);
6646 bool slp_perm = false;
6647 enum tree_code code;
6648 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6649 int vf;
6650 tree aggr_type;
6651 gather_scatter_info gs_info;
6652 vec_info *vinfo = stmt_info->vinfo;
6653 tree ref_type;
6654
6655 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6656 return false;
6657
6658 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6659 && ! vec_stmt)
6660 return false;
6661
6662 /* Is vectorizable load? */
6663 if (!is_gimple_assign (stmt))
6664 return false;
6665
6666 scalar_dest = gimple_assign_lhs (stmt);
6667 if (TREE_CODE (scalar_dest) != SSA_NAME)
6668 return false;
6669
6670 code = gimple_assign_rhs_code (stmt);
6671 if (code != ARRAY_REF
6672 && code != BIT_FIELD_REF
6673 && code != INDIRECT_REF
6674 && code != COMPONENT_REF
6675 && code != IMAGPART_EXPR
6676 && code != REALPART_EXPR
6677 && code != MEM_REF
6678 && TREE_CODE_CLASS (code) != tcc_declaration)
6679 return false;
6680
6681 if (!STMT_VINFO_DATA_REF (stmt_info))
6682 return false;
6683
6684 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6685 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6686
6687 if (loop_vinfo)
6688 {
6689 loop = LOOP_VINFO_LOOP (loop_vinfo);
6690 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6691 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6692 }
6693 else
6694 vf = 1;
6695
6696 /* Multiple types in SLP are handled by creating the appropriate number of
6697 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6698 case of SLP. */
6699 if (slp)
6700 ncopies = 1;
6701 else
6702 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6703
6704 gcc_assert (ncopies >= 1);
6705
6706 /* FORNOW. This restriction should be relaxed. */
6707 if (nested_in_vect_loop && ncopies > 1)
6708 {
6709 if (dump_enabled_p ())
6710 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6711 "multiple types in nested loop.\n");
6712 return false;
6713 }
6714
6715 /* Invalidate assumptions made by dependence analysis when vectorization
6716 on the unrolled body effectively re-orders stmts. */
6717 if (ncopies > 1
6718 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6719 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6720 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6721 {
6722 if (dump_enabled_p ())
6723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6724 "cannot perform implicit CSE when unrolling "
6725 "with negative dependence distance\n");
6726 return false;
6727 }
6728
6729 elem_type = TREE_TYPE (vectype);
6730 mode = TYPE_MODE (vectype);
6731
6732 /* FORNOW. In some cases can vectorize even if data-type not supported
6733 (e.g. data copies). */
6734 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6735 {
6736 if (dump_enabled_p ())
6737 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6738 "Aligned load, but unsupported type.\n");
6739 return false;
6740 }
6741
6742 /* Check if the load is a part of an interleaving chain. */
6743 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6744 {
6745 grouped_load = true;
6746 /* FORNOW */
6747 gcc_assert (!nested_in_vect_loop);
6748 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6749
6750 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6751 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6752
6753 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6754 slp_perm = true;
6755
6756 /* Invalidate assumptions made by dependence analysis when vectorization
6757 on the unrolled body effectively re-orders stmts. */
6758 if (!PURE_SLP_STMT (stmt_info)
6759 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6760 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6761 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6762 {
6763 if (dump_enabled_p ())
6764 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6765 "cannot perform implicit CSE when performing "
6766 "group loads with negative dependence distance\n");
6767 return false;
6768 }
6769
6770 /* Similarly, when the stmt is a load that is both part of an SLP
6771 instance and a loop-vectorized stmt via the same-dr mechanism,
6772 we have to give up. */
6773 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6774 && (STMT_SLP_TYPE (stmt_info)
6775 != STMT_SLP_TYPE (vinfo_for_stmt
6776 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6777 {
6778 if (dump_enabled_p ())
6779 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6780 "conflicting SLP types for CSEd load\n");
6781 return false;
6782 }
6783 }
6784
6785 vect_memory_access_type memory_access_type;
6786 if (!get_load_store_type (stmt, vectype, slp, VLS_LOAD, ncopies,
6787 &memory_access_type, &gs_info))
6788 return false;
6789
6790 if (!vec_stmt) /* transformation not required. */
6791 {
6792 if (!slp)
6793 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6794 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6795 /* The SLP costs are calculated during SLP analysis. */
6796 if (!PURE_SLP_STMT (stmt_info))
6797 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
6798 NULL, NULL, NULL);
6799 return true;
6800 }
6801
6802 if (!slp)
6803 gcc_assert (memory_access_type
6804 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6805
6806 if (dump_enabled_p ())
6807 dump_printf_loc (MSG_NOTE, vect_location,
6808 "transform load. ncopies = %d\n", ncopies);
6809
6810 /* Transform. */
6811
6812 ensure_base_align (stmt_info, dr);
6813
6814 if (memory_access_type == VMAT_GATHER_SCATTER)
6815 {
6816 tree vec_oprnd0 = NULL_TREE, op;
6817 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6818 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6819 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6820 edge pe = loop_preheader_edge (loop);
6821 gimple_seq seq;
6822 basic_block new_bb;
6823 enum { NARROW, NONE, WIDEN } modifier;
6824 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6825
6826 if (nunits == gather_off_nunits)
6827 modifier = NONE;
6828 else if (nunits == gather_off_nunits / 2)
6829 {
6830 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
6831 modifier = WIDEN;
6832
6833 for (i = 0; i < gather_off_nunits; ++i)
6834 sel[i] = i | nunits;
6835
6836 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel);
6837 }
6838 else if (nunits == gather_off_nunits * 2)
6839 {
6840 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
6841 modifier = NARROW;
6842
6843 for (i = 0; i < nunits; ++i)
6844 sel[i] = i < gather_off_nunits
6845 ? i : i + nunits - gather_off_nunits;
6846
6847 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
6848 ncopies *= 2;
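/* E.g. (illustrative): with nunits == 8 and gather_off_nunits == 4
   this builds the selector {0, 1, 2, 3, 8, 9, 10, 11}, which
   concatenates the low halves of two successive half-width gather
   results.  */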
6849 }
6850 else
6851 gcc_unreachable ();
6852
6853 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6854 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6855 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6856 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6857 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6858 scaletype = TREE_VALUE (arglist);
6859 gcc_checking_assert (types_compatible_p (srctype, rettype));
6860
6861 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6862
6863 ptr = fold_convert (ptrtype, gs_info.base);
6864 if (!is_gimple_min_invariant (ptr))
6865 {
6866 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6867 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6868 gcc_assert (!new_bb);
6869 }
6870
6871 /* Currently we support only unconditional gather loads,
6872 so mask should be all ones. */
6873 if (TREE_CODE (masktype) == INTEGER_TYPE)
6874 mask = build_int_cst (masktype, -1);
6875 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6876 {
6877 mask = build_int_cst (TREE_TYPE (masktype), -1);
6878 mask = build_vector_from_val (masktype, mask);
6879 mask = vect_init_vector (stmt, mask, masktype, NULL);
6880 }
6881 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6882 {
6883 REAL_VALUE_TYPE r;
6884 long tmp[6];
6885 for (j = 0; j < 6; ++j)
6886 tmp[j] = -1;
6887 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6888 mask = build_real (TREE_TYPE (masktype), r);
6889 mask = build_vector_from_val (masktype, mask);
6890 mask = vect_init_vector (stmt, mask, masktype, NULL);
6891 }
6892 else
6893 gcc_unreachable ();
6894
6895 scale = build_int_cst (scaletype, gs_info.scale);
6896
6897 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6898 merge = build_int_cst (TREE_TYPE (rettype), 0);
6899 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6900 {
6901 REAL_VALUE_TYPE r;
6902 long tmp[6];
6903 for (j = 0; j < 6; ++j)
6904 tmp[j] = 0;
6905 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6906 merge = build_real (TREE_TYPE (rettype), r);
6907 }
6908 else
6909 gcc_unreachable ();
6910 merge = build_vector_from_val (rettype, merge);
6911 merge = vect_init_vector (stmt, merge, rettype, NULL);
6912
6913 prev_stmt_info = NULL;
6914 for (j = 0; j < ncopies; ++j)
6915 {
6916 if (modifier == WIDEN && (j & 1))
6917 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6918 perm_mask, stmt, gsi);
6919 else if (j == 0)
6920 op = vec_oprnd0
6921 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
6922 else
6923 op = vec_oprnd0
6924 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
6925
6926 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6927 {
6928 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6929 == TYPE_VECTOR_SUBPARTS (idxtype));
6930 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6931 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6932 new_stmt
6933 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6934 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6935 op = var;
6936 }
6937
6938 new_stmt
6939 = gimple_build_call (gs_info.decl, 5, merge, ptr, op, mask, scale);
6940
6941 if (!useless_type_conversion_p (vectype, rettype))
6942 {
6943 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6944 == TYPE_VECTOR_SUBPARTS (rettype));
6945 op = vect_get_new_ssa_name (rettype, vect_simple_var);
6946 gimple_call_set_lhs (new_stmt, op);
6947 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6948 var = make_ssa_name (vec_dest);
6949 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6950 new_stmt
6951 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6952 }
6953 else
6954 {
6955 var = make_ssa_name (vec_dest, new_stmt);
6956 gimple_call_set_lhs (new_stmt, var);
6957 }
6958
6959 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6960
6961 if (modifier == NARROW)
6962 {
6963 if ((j & 1) == 0)
6964 {
6965 prev_res = var;
6966 continue;
6967 }
6968 var = permute_vec_elements (prev_res, var,
6969 perm_mask, stmt, gsi);
6970 new_stmt = SSA_NAME_DEF_STMT (var);
6971 }
6972
6973 if (prev_stmt_info == NULL)
6974 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6975 else
6976 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6977 prev_stmt_info = vinfo_for_stmt (new_stmt);
6978 }
6979 return true;
6980 }
6981
6982 if (memory_access_type == VMAT_ELEMENTWISE
6983 || memory_access_type == VMAT_STRIDED_SLP)
6984 {
6985 gimple_stmt_iterator incr_gsi;
6986 bool insert_after;
6987 gimple *incr;
6988 tree offvar;
6989 tree ivstep;
6990 tree running_off;
6991 vec<constructor_elt, va_gc> *v = NULL;
6992 gimple_seq stmts = NULL;
6993 tree stride_base, stride_step, alias_off;
6994
6995 gcc_assert (!nested_in_vect_loop);
6996
6997 if (slp && grouped_load)
6998 {
6999 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7000 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7001 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7002 ref_type = get_group_alias_ptr_type (first_stmt);
7003 }
7004 else
7005 {
7006 first_stmt = stmt;
7007 first_dr = dr;
7008 group_size = 1;
7009 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7010 }
7011
7012 stride_base
7013 = fold_build_pointer_plus
7014 (DR_BASE_ADDRESS (first_dr),
7015 size_binop (PLUS_EXPR,
7016 convert_to_ptrofftype (DR_OFFSET (first_dr)),
7017 convert_to_ptrofftype (DR_INIT (first_dr))));
7018 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
7019
7020 /* For a load with loop-invariant (but other than power-of-2)
7021 stride (i.e. not a grouped access) like so:
7022
7023 for (i = 0; i < n; i += stride)
7024 ... = array[i];
7025
7026 we generate a new induction variable and new accesses to
7027 form a new vector (or vectors, depending on ncopies):
7028
7029 for (j = 0; ; j += VF*stride)
7030 tmp1 = array[j];
7031 tmp2 = array[j + stride];
7032 ...
7033 vectemp = {tmp1, tmp2, ...}
7034 */
7035
7036 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7037 build_int_cst (TREE_TYPE (stride_step), vf));
7038
7039 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7040
7041 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
7042 loop, &incr_gsi, insert_after,
7043 &offvar, NULL);
7044 incr = gsi_stmt (incr_gsi);
7045 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
7046
7047 stride_step = force_gimple_operand (unshare_expr (stride_step),
7048 &stmts, true, NULL_TREE);
7049 if (stmts)
7050 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
7051
7052 prev_stmt_info = NULL;
7053 running_off = offvar;
7054 alias_off = build_int_cst (ref_type, 0);
7055 int nloads = nunits;
7056 int lnel = 1;
7057 tree ltype = TREE_TYPE (vectype);
7058 tree lvectype = vectype;
7059 auto_vec<tree> dr_chain;
7060 if (memory_access_type == VMAT_STRIDED_SLP)
7061 {
7062 if (group_size < nunits)
7063 {
7064 /* First check if vec_init optab supports construction from
7065 vector elts directly. */
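	      /* For example (shapes assumed for illustration): with a V8HI
		 vectype and group_size == 2, a V2HI mode may exist and
		 support vec_init, in which case nloads == 4 loads of
		 ltype V2HI are emitted and the constructor below assembles
		 the V8HI result directly from those subvectors.  */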
7066 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7067 machine_mode vmode = mode_for_vector (elmode, group_size);
7068 if (VECTOR_MODE_P (vmode)
7069 && (convert_optab_handler (vec_init_optab,
7070 TYPE_MODE (vectype), vmode)
7071 != CODE_FOR_nothing))
7072 {
7073 nloads = nunits / group_size;
7074 lnel = group_size;
7075 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7076 }
7077 else
7078 {
7079 /* Otherwise avoid emitting a constructor of vector elements
7080 by performing the loads using an integer type of the same
7081 size, constructing a vector of those and then
7082 re-interpreting it as the original vector type.
7083 This avoids a huge runtime penalty due to the general
7084 inability to perform store forwarding from smaller stores
7085 to a larger load. */
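		  /* A hypothetical example: for a V4HI vectype with
		     group_size == 2, lsize is 32 bits, so each group of two
		     elements is loaded as a single SImode integer, the two
		     integers are assembled into a V2SI vector, and that
		     vector is view-converted back to V4HI below.  */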
7086 unsigned lsize
7087 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7088 elmode = int_mode_for_size (lsize, 0).require ();
7089 vmode = mode_for_vector (elmode, nunits / group_size);
7090 /* If we can't construct such a vector fall back to
7091 element loads of the original vector type. */
7092 if (VECTOR_MODE_P (vmode)
7093 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7094 != CODE_FOR_nothing))
7095 {
7096 nloads = nunits / group_size;
7097 lnel = group_size;
7098 ltype = build_nonstandard_integer_type (lsize, 1);
7099 lvectype = build_vector_type (ltype, nloads);
7100 }
7101 }
7102 }
7103 else
7104 {
7105 nloads = 1;
7106 lnel = nunits;
7107 ltype = vectype;
7108 }
7109 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
7110 }
7111 if (slp)
7112 {
7113 /* For SLP permutation support we need to load the whole group,
7114 not only the number of vector stmts the permutation result
7115 fits in. */
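	      /* A worked instance (numbers assumed for illustration):
		 group_size == 3, vf == 8 and nunits == 4 give
		 ncopies == (3*8 + 4 - 1) / 4 == 6 vectors, i.e. all 24
		 group elements are loaded even if the permutation result
		 fits in fewer vector stmts.  */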
7116 if (slp_perm)
7117 {
7118 ncopies = (group_size * vf + nunits - 1) / nunits;
7119 dr_chain.create (ncopies);
7120 }
7121 else
7122 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7123 }
7124 int group_el = 0;
7125 unsigned HOST_WIDE_INT
7126 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
7127 for (j = 0; j < ncopies; j++)
7128 {
7129 if (nloads > 1)
7130 vec_alloc (v, nloads);
7131 for (i = 0; i < nloads; i++)
7132 {
7133 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7134 group_el * elsz);
7135 new_stmt = gimple_build_assign (make_ssa_name (ltype),
7136 build2 (MEM_REF, ltype,
7137 running_off, this_off));
7138 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7139 if (nloads > 1)
7140 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7141 gimple_assign_lhs (new_stmt));
7142
7143 group_el += lnel;
7144 if (! slp
7145 || group_el == group_size)
7146 {
7147 tree newoff = copy_ssa_name (running_off);
7148 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7149 running_off, stride_step);
7150 vect_finish_stmt_generation (stmt, incr, gsi);
7151
7152 running_off = newoff;
7153 group_el = 0;
7154 }
7155 }
7156 if (nloads > 1)
7157 {
7158 tree vec_inv = build_constructor (lvectype, v);
7159 new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
7160 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7161 if (lvectype != vectype)
7162 {
7163 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7164 VIEW_CONVERT_EXPR,
7165 build1 (VIEW_CONVERT_EXPR,
7166 vectype, new_temp));
7167 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7168 }
7169 }
7170
7171 if (slp)
7172 {
7173 if (slp_perm)
7174 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
7175 else
7176 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7177 }
7178 else
7179 {
7180 if (j == 0)
7181 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7182 else
7183 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7184 prev_stmt_info = vinfo_for_stmt (new_stmt);
7185 }
7186 }
7187 if (slp_perm)
7188 {
7189 unsigned n_perms;
7190 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7191 slp_node_instance, false, &n_perms);
7192 }
7193 return true;
7194 }
7195
7196 if (grouped_load)
7197 {
7198 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7199 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7200 int group_gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
7201 /* For SLP vectorization we directly vectorize a subchain
7202 without permutation. */
7203 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7204 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7205 /* For BB vectorization always use the first stmt to base
7206 the data ref pointer on. */
7207 if (bb_vinfo)
7208 first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7209
7210 /* Check if the chain of loads is already vectorized. */
7211 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
7212 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7213 ??? But we can only do so if there is exactly one
7214 as we have no way to get at the rest. Leave the CSE
7215 opportunity alone.
7216 ??? With the group load eventually participating
7217 in multiple different permutations (having multiple
7218 slp nodes which refer to the same group) the CSE
7219 is even wrong code. See PR56270. */
7220 && !slp)
7221 {
7222 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7223 return true;
7224 }
7225 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7226 group_gap_adj = 0;
7227
7228 /* VEC_NUM is the number of vect stmts to be created for this group. */
7229 if (slp)
7230 {
7231 grouped_load = false;
7232 /* For SLP permutation support we need to load the whole group,
7233 not only the number of vector stmts the permutation result
7234 fits in. */
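	      /* For instance (values assumed for illustration): with
		 group_size == 3, vf == 4 and nunits == 8 the computation
		 below gives vec_num == 2 and group_gap_adj == 12 - 16 == -4,
		 i.e. four excess lanes are loaded; the negative adjustment
		 later steps the data-ref pointer back by that amount.  */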
7235 if (slp_perm)
7236 {
7237 vec_num = (group_size * vf + nunits - 1) / nunits;
7238 group_gap_adj = vf * group_size - nunits * vec_num;
7239 }
7240 else
7241 {
7242 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7243 group_gap_adj = group_gap;
7244 }
7245 }
7246 else
7247 vec_num = group_size;
7248
7249 ref_type = get_group_alias_ptr_type (first_stmt);
7250 }
7251 else
7252 {
7253 first_stmt = stmt;
7254 first_dr = dr;
7255 group_size = vec_num = 1;
7256 group_gap_adj = 0;
7257 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7258 }
7259
7260 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
7261 gcc_assert (alignment_support_scheme);
7262 /* Targets with load-lane instructions must not require explicit
7263 realignment. */
7264 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
7265 || alignment_support_scheme == dr_aligned
7266 || alignment_support_scheme == dr_unaligned_supported);
7267
7268 /* In case the vectorization factor (VF) is bigger than the number
7269 of elements that we can fit in a vectype (nunits), we have to generate
7270 more than one vector stmt - i.e - we need to "unroll" the
7271 vector stmt by a factor VF/nunits. In doing so, we record a pointer
7272 from one copy of the vector stmt to the next, in the field
7273 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
7274 stages to find the correct vector defs to be used when vectorizing
7275 stmts that use the defs of the current stmt. The example below
7276 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
7277 need to create 4 vectorized stmts):
7278
7279 before vectorization:
7280 RELATED_STMT VEC_STMT
7281 S1: x = memref - -
7282 S2: z = x + 1 - -
7283
7284 step 1: vectorize stmt S1:
7285 We first create the vector stmt VS1_0, and, as usual, record a
7286 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
7287 Next, we create the vector stmt VS1_1, and record a pointer to
7288 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
7289 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
7290 stmts and pointers:
7291 RELATED_STMT VEC_STMT
7292 VS1_0: vx0 = memref0 VS1_1 -
7293 VS1_1: vx1 = memref1 VS1_2 -
7294 VS1_2: vx2 = memref2 VS1_3 -
7295 VS1_3: vx3 = memref3 - -
7296 S1: x = load - VS1_0
7297 S2: z = x + 1 - -
7298
7299 See the documentation of vect_get_vec_def_for_stmt_copy for how the
7300 information recorded in the RELATED_STMT field is used to vectorize
7301 stmt S2. */
7302
7303 /* In case of interleaving (non-unit grouped access):
7304
7305 S1: x2 = &base + 2
7306 S2: x0 = &base
7307 S3: x1 = &base + 1
7308 S4: x3 = &base + 3
7309
7310 Vectorized loads are created in the order of memory accesses
7311 starting from the access of the first stmt of the chain:
7312
7313 VS1: vx0 = &base
7314 VS2: vx1 = &base + vec_size*1
7315 VS3: vx3 = &base + vec_size*2
7316 VS4: vx4 = &base + vec_size*3
7317
7318 Then permutation statements are generated:
7319
7320 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
7321 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
7322 ...
7323
7324 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
7325 (the order of the data-refs in the output of vect_permute_load_chain
7326 corresponds to the order of scalar stmts in the interleaving chain - see
7327 the documentation of vect_permute_load_chain()).
7328 The generation of permutation stmts and recording them in
7329 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
7330
7331 In case of both multiple types and interleaving, the vector loads and
7332 permutation stmts above are created for every copy. The result vector
7333 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
7334 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
7335
7336 /* If the data reference is aligned (dr_aligned) or potentially unaligned
7337 on a target that supports unaligned accesses (dr_unaligned_supported)
7338 we generate the following code:
7339 p = initial_addr;
7340 indx = 0;
7341 loop {
7342 p = p + indx * vectype_size;
7343 vec_dest = *(p);
7344 indx = indx + 1;
7345 }
7346
7347 Otherwise, the data reference is potentially unaligned on a target that
7348 does not support unaligned accesses (dr_explicit_realign_optimized) -
7349 then generate the following code, in which the data in each iteration is
7350 obtained by two vector loads, one from the previous iteration, and one
7351 from the current iteration:
7352 p1 = initial_addr;
7353 msq_init = *(floor(p1))
7354 p2 = initial_addr + VS - 1;
7355 realignment_token = call target_builtin;
7356 indx = 0;
7357 loop {
7358 p2 = p2 + indx * vectype_size
7359 lsq = *(floor(p2))
7360 vec_dest = realign_load (msq, lsq, realignment_token)
7361 indx = indx + 1;
7362 msq = lsq;
7363 } */
7364
7365 /* If the misalignment remains the same throughout the execution of the
7366 loop, we can create the init_addr and permutation mask at the loop
7367 preheader. Otherwise, it needs to be created inside the loop.
7368 This can only occur when vectorizing memory accesses in the inner-loop
7369 nested within an outer-loop that is being vectorized. */
7370
7371 if (nested_in_vect_loop
7372 && (DR_STEP_ALIGNMENT (dr) % GET_MODE_SIZE (TYPE_MODE (vectype))) != 0)
7373 {
7374 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
7375 compute_in_loop = true;
7376 }
7377
7378 if ((alignment_support_scheme == dr_explicit_realign_optimized
7379 || alignment_support_scheme == dr_explicit_realign)
7380 && !compute_in_loop)
7381 {
7382 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
7383 alignment_support_scheme, NULL_TREE,
7384 &at_loop);
7385 if (alignment_support_scheme == dr_explicit_realign_optimized)
7386 {
7387 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
7388 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
7389 size_one_node);
7390 }
7391 }
7392 else
7393 at_loop = loop;
7394
7395 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7396 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
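  /* E.g. (a hypothetical V4SI case): OFFSET becomes -3, so each vector
     load starts at the lowest-addressed of the four elements covered by
     the negative-step access; the loaded vector is then reversed further
     below via perm_mask_for_reverse.  */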
7397
7398 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7399 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
7400 else
7401 aggr_type = vectype;
7402
7403 prev_stmt_info = NULL;
7404 int group_elt = 0;
7405 for (j = 0; j < ncopies; j++)
7406 {
7407 /* 1. Create the vector or array pointer update chain. */
7408 if (j == 0)
7409 {
7410 bool simd_lane_access_p
7411 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7412 if (simd_lane_access_p
7413 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
7414 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
7415 && integer_zerop (DR_OFFSET (first_dr))
7416 && integer_zerop (DR_INIT (first_dr))
7417 && alias_sets_conflict_p (get_alias_set (aggr_type),
7418 get_alias_set (TREE_TYPE (ref_type)))
7419 && (alignment_support_scheme == dr_aligned
7420 || alignment_support_scheme == dr_unaligned_supported))
7421 {
7422 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
7423 dataref_offset = build_int_cst (ref_type, 0);
7424 inv_p = false;
7425 }
7426 else if (first_stmt_for_drptr
7427 && first_stmt != first_stmt_for_drptr)
7428 {
7429 dataref_ptr
7430 = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
7431 at_loop, offset, &dummy, gsi,
7432 &ptr_incr, simd_lane_access_p,
7433 &inv_p, byte_offset);
7434 /* Adjust the pointer by the difference to first_stmt. */
7435 data_reference_p ptrdr
7436 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
7437 tree diff = fold_convert (sizetype,
7438 size_binop (MINUS_EXPR,
7439 DR_INIT (first_dr),
7440 DR_INIT (ptrdr)));
7441 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7442 stmt, diff);
7443 }
7444 else
7445 dataref_ptr
7446 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
7447 offset, &dummy, gsi, &ptr_incr,
7448 simd_lane_access_p, &inv_p,
7449 byte_offset);
7450 }
7451 else if (dataref_offset)
7452 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
7453 TYPE_SIZE_UNIT (aggr_type));
7454 else
7455 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
7456 TYPE_SIZE_UNIT (aggr_type));
7457
7458 if (grouped_load || slp_perm)
7459 dr_chain.create (vec_num);
7460
7461 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7462 {
7463 tree vec_array;
7464
7465 vec_array = create_vector_array (vectype, vec_num);
7466
7467 /* Emit:
7468 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
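	  /* On targets with structure-load instructions (e.g. AArch64
	     ld2/ld3/ld4, to give a likely but target-specific example)
	     this single call loads vec_num * nunits elements and
	     de-interleaves them; the vectors are read back out of
	     VEC_ARRAY below.  */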
7469 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
7470 gcall *call = gimple_build_call_internal (IFN_LOAD_LANES, 1,
7471 data_ref);
7472 gimple_call_set_lhs (call, vec_array);
7473 gimple_call_set_nothrow (call, true);
7474 new_stmt = call;
7475 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7476
7477 /* Extract each vector into an SSA_NAME. */
7478 for (i = 0; i < vec_num; i++)
7479 {
7480 new_temp = read_vector_array (stmt, gsi, scalar_dest,
7481 vec_array, i);
7482 dr_chain.quick_push (new_temp);
7483 }
7484
7485 /* Record the mapping between SSA_NAMEs and statements. */
7486 vect_record_grouped_load_vectors (stmt, dr_chain);
7487 }
7488 else
7489 {
7490 for (i = 0; i < vec_num; i++)
7491 {
7492 if (i > 0)
7493 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7494 stmt, NULL_TREE);
7495
7496 /* 2. Create the vector-load in the loop. */
7497 switch (alignment_support_scheme)
7498 {
7499 case dr_aligned:
7500 case dr_unaligned_supported:
7501 {
7502 unsigned int align, misalign;
7503
7504 data_ref
7505 = fold_build2 (MEM_REF, vectype, dataref_ptr,
7506 dataref_offset
7507 ? dataref_offset
7508 : build_int_cst (ref_type, 0));
7509 align = TYPE_ALIGN_UNIT (vectype);
7510 if (alignment_support_scheme == dr_aligned)
7511 {
7512 gcc_assert (aligned_access_p (first_dr));
7513 misalign = 0;
7514 }
7515 else if (DR_MISALIGNMENT (first_dr) == -1)
7516 {
7517 align = dr_alignment (vect_dr_behavior (first_dr));
7518 misalign = 0;
7519 TREE_TYPE (data_ref)
7520 = build_aligned_type (TREE_TYPE (data_ref),
7521 align * BITS_PER_UNIT);
7522 }
7523 else
7524 {
7525 TREE_TYPE (data_ref)
7526 = build_aligned_type (TREE_TYPE (data_ref),
7527 TYPE_ALIGN (elem_type));
7528 misalign = DR_MISALIGNMENT (first_dr);
7529 }
7530 if (dataref_offset == NULL_TREE
7531 && TREE_CODE (dataref_ptr) == SSA_NAME)
7532 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
7533 align, misalign);
7534 break;
7535 }
7536 case dr_explicit_realign:
7537 {
7538 tree ptr, bump;
7539
7540 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
7541
7542 if (compute_in_loop)
7543 msq = vect_setup_realignment (first_stmt, gsi,
7544 &realignment_token,
7545 dr_explicit_realign,
7546 dataref_ptr, NULL);
7547
7548 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7549 ptr = copy_ssa_name (dataref_ptr);
7550 else
7551 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
7552 new_stmt = gimple_build_assign
7553 (ptr, BIT_AND_EXPR, dataref_ptr,
7554 build_int_cst
7555 (TREE_TYPE (dataref_ptr),
7556 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
7557 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7558 data_ref
7559 = build2 (MEM_REF, vectype, ptr,
7560 build_int_cst (ref_type, 0));
7561 vec_dest = vect_create_destination_var (scalar_dest,
7562 vectype);
7563 new_stmt = gimple_build_assign (vec_dest, data_ref);
7564 new_temp = make_ssa_name (vec_dest, new_stmt);
7565 gimple_assign_set_lhs (new_stmt, new_temp);
7566 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
7567 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
7568 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7569 msq = new_temp;
7570
7571 bump = size_binop (MULT_EXPR, vs,
7572 TYPE_SIZE_UNIT (elem_type));
7573 bump = size_binop (MINUS_EXPR, bump, size_one_node);
7574 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
7575 new_stmt = gimple_build_assign
7576 (NULL_TREE, BIT_AND_EXPR, ptr,
7577 build_int_cst
7578 (TREE_TYPE (ptr),
7579 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
7580 ptr = copy_ssa_name (ptr, new_stmt);
7581 gimple_assign_set_lhs (new_stmt, ptr);
7582 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7583 data_ref
7584 = build2 (MEM_REF, vectype, ptr,
7585 build_int_cst (ref_type, 0));
7586 break;
7587 }
7588 case dr_explicit_realign_optimized:
7589 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7590 new_temp = copy_ssa_name (dataref_ptr);
7591 else
7592 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
7593 new_stmt = gimple_build_assign
7594 (new_temp, BIT_AND_EXPR, dataref_ptr,
7595 build_int_cst
7596 (TREE_TYPE (dataref_ptr),
7597 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
7598 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7599 data_ref
7600 = build2 (MEM_REF, vectype, new_temp,
7601 build_int_cst (ref_type, 0));
7602 break;
7603 default:
7604 gcc_unreachable ();
7605 }
7606 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7607 new_stmt = gimple_build_assign (vec_dest, data_ref);
7608 new_temp = make_ssa_name (vec_dest, new_stmt);
7609 gimple_assign_set_lhs (new_stmt, new_temp);
7610 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7611
7612 /* 3. Handle explicit realignment if necessary/supported.
7613 Create in loop:
7614 vec_dest = realign_load (msq, lsq, realignment_token) */
7615 if (alignment_support_scheme == dr_explicit_realign_optimized
7616 || alignment_support_scheme == dr_explicit_realign)
7617 {
7618 lsq = gimple_assign_lhs (new_stmt);
7619 if (!realignment_token)
7620 realignment_token = dataref_ptr;
7621 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7622 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7623 msq, lsq, realignment_token);
7624 new_temp = make_ssa_name (vec_dest, new_stmt);
7625 gimple_assign_set_lhs (new_stmt, new_temp);
7626 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7627
7628 if (alignment_support_scheme == dr_explicit_realign_optimized)
7629 {
7630 gcc_assert (phi);
7631 if (i == vec_num - 1 && j == ncopies - 1)
7632 add_phi_arg (phi, lsq,
7633 loop_latch_edge (containing_loop),
7634 UNKNOWN_LOCATION);
7635 msq = lsq;
7636 }
7637 }
7638
7639 /* 4. Handle invariant-load. */
7640 if (inv_p && !bb_vinfo)
7641 {
7642 gcc_assert (!grouped_load);
7643 /* If we have versioned for aliasing or the loop doesn't
7644 have any data dependencies that would preclude this,
7645 then we are sure this is a loop invariant load and
7646 thus we can insert it on the preheader edge. */
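	      /* Sketch of the effect (SSA names illustrative only): a
		 scalar load  x_1 = *p  of an invariant address is copied
		 to the preheader edge and vect_init_vector then splats the
		 copy, so the vectorized loop body no longer reads memory
		 for it:

		   preheader:
		     tem_2 = *p;
		     vx_3 = { tem_2, tem_2, tem_2, tem_2 };  */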
7647 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7648 && !nested_in_vect_loop
7649 && hoist_defs_of_uses (stmt, loop))
7650 {
7651 if (dump_enabled_p ())
7652 {
7653 dump_printf_loc (MSG_NOTE, vect_location,
7654 "hoisting out of the vectorized "
7655 "loop: ");
7656 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7657 }
7658 tree tem = copy_ssa_name (scalar_dest);
7659 gsi_insert_on_edge_immediate
7660 (loop_preheader_edge (loop),
7661 gimple_build_assign (tem,
7662 unshare_expr
7663 (gimple_assign_rhs1 (stmt))));
7664 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7665 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7666 set_vinfo_for_stmt (new_stmt,
7667 new_stmt_vec_info (new_stmt, vinfo));
7668 }
7669 else
7670 {
7671 gimple_stmt_iterator gsi2 = *gsi;
7672 gsi_next (&gsi2);
7673 new_temp = vect_init_vector (stmt, scalar_dest,
7674 vectype, &gsi2);
7675 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7676 }
7677 }
7678
7679 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7680 {
7681 tree perm_mask = perm_mask_for_reverse (vectype);
7682 new_temp = permute_vec_elements (new_temp, new_temp,
7683 perm_mask, stmt, gsi);
7684 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7685 }
7686
7687 /* Collect vector loads and later create their permutation in
7688 vect_transform_grouped_load (). */
7689 if (grouped_load || slp_perm)
7690 dr_chain.quick_push (new_temp);
7691
7692 /* Store vector loads in the corresponding SLP_NODE. */
7693 if (slp && !slp_perm)
7694 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7695
7696 /* With SLP permutation we load the gaps as well; without
7697 it we need to skip the gaps after we manage to fully load
7698 all elements. group_gap_adj is GROUP_SIZE here. */
7699 group_elt += nunits;
7700 if (group_gap_adj != 0 && ! slp_perm
7701 && group_elt == group_size - group_gap_adj)
7702 {
7703 bool ovf;
7704 tree bump
7705 = wide_int_to_tree (sizetype,
7706 wi::smul (TYPE_SIZE_UNIT (elem_type),
7707 group_gap_adj, &ovf));
7708 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7709 stmt, bump);
7710 group_elt = 0;
7711 }
7712 }
7713 /* Bump the vector pointer to account for a gap or for excess
7714 elements loaded for a permuted SLP load. */
7715 if (group_gap_adj != 0 && slp_perm)
7716 {
7717 bool ovf;
7718 tree bump
7719 = wide_int_to_tree (sizetype,
7720 wi::smul (TYPE_SIZE_UNIT (elem_type),
7721 group_gap_adj, &ovf));
7722 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7723 stmt, bump);
7724 }
7725 }
7726
7727 if (slp && !slp_perm)
7728 continue;
7729
7730 if (slp_perm)
7731 {
7732 unsigned n_perms;
7733 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7734 slp_node_instance, false,
7735 &n_perms))
7736 {
7737 dr_chain.release ();
7738 return false;
7739 }
7740 }
7741 else
7742 {
7743 if (grouped_load)
7744 {
7745 if (memory_access_type != VMAT_LOAD_STORE_LANES)
7746 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7747 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7748 }
7749 else
7750 {
7751 if (j == 0)
7752 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7753 else
7754 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7755 prev_stmt_info = vinfo_for_stmt (new_stmt);
7756 }
7757 }
7758 dr_chain.release ();
7759 }
7760
7761 return true;
7762 }
7763
7764 /* Function vect_is_simple_cond.
7765
7766 Input:
7767 VINFO - the vec_info for the loop or basic block that is being vectorized.
7768 COND - Condition that is checked for simple use.
7769
7770 Output:
7771 *COMP_VECTYPE - the vector type for the comparison.
7772 *DTS - The def types for the arguments of the comparison.
7773
7774 Returns whether a COND can be vectorized. Checks whether
7775 condition operands are supportable using vect_is_simple_use. */
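/* For instance, COND may be a comparison tree such as  a_1 < b_2,  or,
   in the mask case, a scalar boolean SSA name  m_3  whose vectorized
   definition is already a vector boolean; the SSA names here are
   illustrative only.  */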
7776
7777 static bool
7778 vect_is_simple_cond (tree cond, vec_info *vinfo,
7779 tree *comp_vectype, enum vect_def_type *dts)
7780 {
7781 tree lhs, rhs;
7782 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7783
7784 /* Mask case. */
7785 if (TREE_CODE (cond) == SSA_NAME
7786 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
7787 {
7788 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
7789 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
7790 &dts[0], comp_vectype)
7791 || !*comp_vectype
7792 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
7793 return false;
7794 return true;
7795 }
7796
7797 if (!COMPARISON_CLASS_P (cond))
7798 return false;
7799
7800 lhs = TREE_OPERAND (cond, 0);
7801 rhs = TREE_OPERAND (cond, 1);
7802
7803 if (TREE_CODE (lhs) == SSA_NAME)
7804 {
7805 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7806 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
7807 return false;
7808 }
7809 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
7810 || TREE_CODE (lhs) == FIXED_CST)
7811 dts[0] = vect_constant_def;
7812 else
7813 return false;
7814
7815 if (TREE_CODE (rhs) == SSA_NAME)
7816 {
7817 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7818 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
7819 return false;
7820 }
7821 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
7822 || TREE_CODE (rhs) == FIXED_CST)
7823 dts[1] = vect_constant_def;
7824 else
7825 return false;
7826
7827 if (vectype1 && vectype2
7828 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
7829 return false;
7830
7831 *comp_vectype = vectype1 ? vectype1 : vectype2;
7832 return true;
7833 }
7834
7835 /* vectorizable_condition.
7836
7837 Check if STMT is a conditional modify expression that can be vectorized.
7838 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7839 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7840 at GSI.
7841
7842 When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
7843 to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
7844 else clause if it is 2).
7845
7846 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7847
7848 bool
7849 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7850 gimple **vec_stmt, tree reduc_def, int reduc_index,
7851 slp_tree slp_node)
7852 {
7853 tree scalar_dest = NULL_TREE;
7854 tree vec_dest = NULL_TREE;
7855 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
7856 tree then_clause, else_clause;
7857 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7858 tree comp_vectype = NULL_TREE;
7859 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7860 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7861 tree vec_compare;
7862 tree new_temp;
7863 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7864 enum vect_def_type dts[4]
7865 = {vect_unknown_def_type, vect_unknown_def_type,
7866 vect_unknown_def_type, vect_unknown_def_type};
7867 int ndts = 4;
7868 int ncopies;
7869 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
7870 stmt_vec_info prev_stmt_info = NULL;
7871 int i, j;
7872 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7873 vec<tree> vec_oprnds0 = vNULL;
7874 vec<tree> vec_oprnds1 = vNULL;
7875 vec<tree> vec_oprnds2 = vNULL;
7876 vec<tree> vec_oprnds3 = vNULL;
7877 tree vec_cmp_type;
7878 bool masked = false;
7879
7880 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7881 return false;
7882
7883 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
7884 {
7885 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7886 return false;
7887
7888 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7889 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7890 && reduc_def))
7891 return false;
7892
7893 /* FORNOW: not yet supported. */
7894 if (STMT_VINFO_LIVE_P (stmt_info))
7895 {
7896 if (dump_enabled_p ())
7897 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7898 "value used after loop.\n");
7899 return false;
7900 }
7901 }
7902
7903 /* Is this a vectorizable conditional operation? */
7904 if (!is_gimple_assign (stmt))
7905 return false;
7906
7907 code = gimple_assign_rhs_code (stmt);
7908
7909 if (code != COND_EXPR)
7910 return false;
7911
7912 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7913 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
7914 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7915
7916 if (slp_node)
7917 ncopies = 1;
7918 else
7919 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7920
7921 gcc_assert (ncopies >= 1);
7922 if (reduc_index && ncopies > 1)
7923 return false; /* FORNOW */
7924
7925 cond_expr = gimple_assign_rhs1 (stmt);
7926 then_clause = gimple_assign_rhs2 (stmt);
7927 else_clause = gimple_assign_rhs3 (stmt);
7928
7929 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
7930 &comp_vectype, &dts[0])
7931 || !comp_vectype)
7932 return false;
7933
7934 gimple *def_stmt;
7935 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
7936 &vectype1))
7937 return false;
7938 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
7939 &vectype2))
7940 return false;
7941
7942 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
7943 return false;
7944
7945 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
7946 return false;
7947
7948 masked = !COMPARISON_CLASS_P (cond_expr);
7949 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
7950
7951 if (vec_cmp_type == NULL_TREE)
7952 return false;
7953
7954 cond_code = TREE_CODE (cond_expr);
7955 if (!masked)
7956 {
7957 cond_expr0 = TREE_OPERAND (cond_expr, 0);
7958 cond_expr1 = TREE_OPERAND (cond_expr, 1);
7959 }
7960
7961 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
7962 {
7963 /* Boolean values may have another representation in vectors
7964 and therefore we prefer bit operations over comparison for
7965 them (which also works for scalar masks). We store opcodes
7966 to use in bitop1 and bitop2. Statement is vectorized as
7967 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
7968 depending on bitop1 and bitop2 arity. */
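	  /* E.g. for GT below: on 0/1 masks  m1 > m2  holds exactly when
	     m1 == 1 && m2 == 0, so the condition is emitted as  m1 & ~m2
	     (bitop1 == BIT_NOT_EXPR applied to the rhs,
	     bitop2 == BIT_AND_EXPR).  */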
7969 switch (cond_code)
7970 {
7971 case GT_EXPR:
7972 bitop1 = BIT_NOT_EXPR;
7973 bitop2 = BIT_AND_EXPR;
7974 break;
7975 case GE_EXPR:
7976 bitop1 = BIT_NOT_EXPR;
7977 bitop2 = BIT_IOR_EXPR;
7978 break;
7979 case LT_EXPR:
7980 bitop1 = BIT_NOT_EXPR;
7981 bitop2 = BIT_AND_EXPR;
7982 std::swap (cond_expr0, cond_expr1);
7983 break;
7984 case LE_EXPR:
7985 bitop1 = BIT_NOT_EXPR;
7986 bitop2 = BIT_IOR_EXPR;
7987 std::swap (cond_expr0, cond_expr1);
7988 break;
7989 case NE_EXPR:
7990 bitop1 = BIT_XOR_EXPR;
7991 break;
7992 case EQ_EXPR:
7993 bitop1 = BIT_XOR_EXPR;
7994 bitop2 = BIT_NOT_EXPR;
7995 break;
7996 default:
7997 return false;
7998 }
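      /* The condition will now be consumed as a mask operand rather than
	 re-emitted as a comparison.  */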
7999 cond_code = SSA_NAME;
8000 }
8001
8002 if (!vec_stmt)
8003 {
8004 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8005 if (bitop1 != NOP_EXPR)
8006 {
8007 machine_mode mode = TYPE_MODE (comp_vectype);
8008 optab optab;
8009
8010 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8011 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8012 return false;
8013
8014 if (bitop2 != NOP_EXPR)
8015 {
8016 optab = optab_for_tree_code (bitop2, comp_vectype,
8017 optab_default);
8018 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8019 return false;
8020 }
8021 }
8022 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8023 cond_code))
8024 {
8025 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
8026 return true;
8027 }
8028 return false;
8029 }
8030
8031 /* Transform. */
8032
8033 if (!slp_node)
8034 {
8035 vec_oprnds0.create (1);
8036 vec_oprnds1.create (1);
8037 vec_oprnds2.create (1);
8038 vec_oprnds3.create (1);
8039 }
8040
8041 /* Handle def. */
8042 scalar_dest = gimple_assign_lhs (stmt);
8043 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8044
8045 /* Handle cond expr. */
8046 for (j = 0; j < ncopies; j++)
8047 {
8048 gassign *new_stmt = NULL;
8049 if (j == 0)
8050 {
8051 if (slp_node)
8052 {
8053 auto_vec<tree, 4> ops;
8054 auto_vec<vec<tree>, 4> vec_defs;
8055
8056 if (masked)
8057 ops.safe_push (cond_expr);
8058 else
8059 {
8060 ops.safe_push (cond_expr0);
8061 ops.safe_push (cond_expr1);
8062 }
8063 ops.safe_push (then_clause);
8064 ops.safe_push (else_clause);
8065 vect_get_slp_defs (ops, slp_node, &vec_defs);
8066 vec_oprnds3 = vec_defs.pop ();
8067 vec_oprnds2 = vec_defs.pop ();
8068 if (!masked)
8069 vec_oprnds1 = vec_defs.pop ();
8070 vec_oprnds0 = vec_defs.pop ();
8071 }
8072 else
8073 {
8074 gimple *gtemp;
8075 if (masked)
8076 {
8077 vec_cond_lhs
8078 = vect_get_vec_def_for_operand (cond_expr, stmt,
8079 comp_vectype);
8080 vect_is_simple_use (cond_expr, stmt_info->vinfo,
8081 &gtemp, &dts[0]);
8082 }
8083 else
8084 {
8085 vec_cond_lhs
8086 = vect_get_vec_def_for_operand (cond_expr0,
8087 stmt, comp_vectype);
8088 vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);
8089
8090 vec_cond_rhs
8091 = vect_get_vec_def_for_operand (cond_expr1,
8092 stmt, comp_vectype);
8093 vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
8094 }
8095 if (reduc_index == 1)
8096 vec_then_clause = reduc_def;
8097 else
8098 {
8099 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
8100 stmt);
8101 vect_is_simple_use (then_clause, loop_vinfo,
8102 &gtemp, &dts[2]);
8103 }
8104 if (reduc_index == 2)
8105 vec_else_clause = reduc_def;
8106 else
8107 {
8108 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
8109 stmt);
8110 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
8111 }
8112 }
8113 }
8114 else
8115 {
8116 vec_cond_lhs
8117 = vect_get_vec_def_for_stmt_copy (dts[0],
8118 vec_oprnds0.pop ());
8119 if (!masked)
8120 vec_cond_rhs
8121 = vect_get_vec_def_for_stmt_copy (dts[1],
8122 vec_oprnds1.pop ());
8123
8124 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
8125 vec_oprnds2.pop ());
8126 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
8127 vec_oprnds3.pop ());
8128 }
8129
8130 if (!slp_node)
8131 {
8132 vec_oprnds0.quick_push (vec_cond_lhs);
8133 if (!masked)
8134 vec_oprnds1.quick_push (vec_cond_rhs);
8135 vec_oprnds2.quick_push (vec_then_clause);
8136 vec_oprnds3.quick_push (vec_else_clause);
8137 }
8138
8139 /* Arguments are ready. Create the new vector stmt. */
8140 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
8141 {
8142 vec_then_clause = vec_oprnds2[i];
8143 vec_else_clause = vec_oprnds3[i];
8144
8145 if (masked)
8146 vec_compare = vec_cond_lhs;
8147 else
8148 {
8149 vec_cond_rhs = vec_oprnds1[i];
8150 if (bitop1 == NOP_EXPR)
8151 vec_compare = build2 (cond_code, vec_cmp_type,
8152 vec_cond_lhs, vec_cond_rhs);
8153 else
8154 {
8155 new_temp = make_ssa_name (vec_cmp_type);
8156 if (bitop1 == BIT_NOT_EXPR)
8157 new_stmt = gimple_build_assign (new_temp, bitop1,
8158 vec_cond_rhs);
8159 else
8160 new_stmt
8161 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
8162 vec_cond_rhs);
8163 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8164 if (bitop2 == NOP_EXPR)
8165 vec_compare = new_temp;
8166 else if (bitop2 == BIT_NOT_EXPR)
8167 {
8168 /* Instead of doing ~x ? y : z do x ? z : y. */
8169 vec_compare = new_temp;
8170 std::swap (vec_then_clause, vec_else_clause);
8171 }
8172 else
8173 {
8174 vec_compare = make_ssa_name (vec_cmp_type);
8175 new_stmt
8176 = gimple_build_assign (vec_compare, bitop2,
8177 vec_cond_lhs, new_temp);
8178 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8179 }
8180 }
8181 }
8182 new_temp = make_ssa_name (vec_dest);
8183 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
8184 vec_compare, vec_then_clause,
8185 vec_else_clause);
8186 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8187 if (slp_node)
8188 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8189 }
8190
8191 if (slp_node)
8192 continue;
8193
8194 if (j == 0)
8195 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8196 else
8197 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8198
8199 prev_stmt_info = vinfo_for_stmt (new_stmt);
8200 }
8201
8202 vec_oprnds0.release ();
8203 vec_oprnds1.release ();
8204 vec_oprnds2.release ();
8205 vec_oprnds3.release ();
8206
8207 return true;
8208 }
8209
8210 /* vectorizable_comparison.
8211
8212 Check if STMT is a comparison expression that can be vectorized.
8213 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
8214 comparison, put it in VEC_STMT, and insert it at GSI.
8215
8216 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
8217
8218 static bool
8219 vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
8220 gimple **vec_stmt, tree reduc_def,
8221 slp_tree slp_node)
8222 {
8223 tree lhs, rhs1, rhs2;
8224 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8225 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8226 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8227 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
8228 tree new_temp;
8229 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8230 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
8231 int ndts = 2;
8232 unsigned nunits;
8233 int ncopies;
8234 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8235 stmt_vec_info prev_stmt_info = NULL;
8236 int i, j;
8237 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8238 vec<tree> vec_oprnds0 = vNULL;
8239 vec<tree> vec_oprnds1 = vNULL;
8240 gimple *def_stmt;
8241 tree mask_type;
8242 tree mask;
8243
8244 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8245 return false;
8246
8247 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
8248 return false;
8249
8250 mask_type = vectype;
8251 nunits = TYPE_VECTOR_SUBPARTS (vectype);
8252
8253 if (slp_node)
8254 ncopies = 1;
8255 else
8256 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
8257
8258 gcc_assert (ncopies >= 1);
8259 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8260 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8261 && reduc_def))
8262 return false;
8263
8264 if (STMT_VINFO_LIVE_P (stmt_info))
8265 {
8266 if (dump_enabled_p ())
8267 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8268 "value used after loop.\n");
8269 return false;
8270 }
8271
8272 if (!is_gimple_assign (stmt))
8273 return false;
8274
8275 code = gimple_assign_rhs_code (stmt);
8276
8277 if (TREE_CODE_CLASS (code) != tcc_comparison)
8278 return false;
8279
8280 rhs1 = gimple_assign_rhs1 (stmt);
8281 rhs2 = gimple_assign_rhs2 (stmt);
8282
8283 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
8284 &dts[0], &vectype1))
8285 return false;
8286
8287 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
8288 &dts[1], &vectype2))
8289 return false;
8290
8291 if (vectype1 && vectype2
8292 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
8293 return false;
8294
8295 vectype = vectype1 ? vectype1 : vectype2;
8296
8297 /* Invariant comparison. */
8298 if (!vectype)
8299 {
8300 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
8301 if (TYPE_VECTOR_SUBPARTS (vectype) != nunits)
8302 return false;
8303 }
8304 else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
8305 return false;
8306
8307 /* Can't compare mask and non-mask types. */
8308 if (vectype1 && vectype2
8309 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
8310 return false;
8311
8312 /* Boolean values may have another representation in vectors
8313 and therefore we prefer bit operations over comparison for
8314 them (which also works for scalar masks). We store opcodes
8315 to use in bitop1 and bitop2. Statement is vectorized as
8316 BITOP2 (rhs1 BITOP1 rhs2) or
8317 rhs1 BITOP2 (BITOP1 rhs2)
8318 depending on bitop1 and bitop2 arity. */
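  /* For example, LE below sets bitop1 == BIT_NOT_EXPR and
     bitop2 == BIT_IOR_EXPR and swaps the operands, so on 0/1 masks
     a <= b  is emitted as  b | ~a,  the boolean implication.  */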
8319 if (VECTOR_BOOLEAN_TYPE_P (vectype))
8320 {
8321 if (code == GT_EXPR)
8322 {
8323 bitop1 = BIT_NOT_EXPR;
8324 bitop2 = BIT_AND_EXPR;
8325 }
8326 else if (code == GE_EXPR)
8327 {
8328 bitop1 = BIT_NOT_EXPR;
8329 bitop2 = BIT_IOR_EXPR;
8330 }
8331 else if (code == LT_EXPR)
8332 {
8333 bitop1 = BIT_NOT_EXPR;
8334 bitop2 = BIT_AND_EXPR;
8335 std::swap (rhs1, rhs2);
8336 std::swap (dts[0], dts[1]);
8337 }
8338 else if (code == LE_EXPR)
8339 {
8340 bitop1 = BIT_NOT_EXPR;
8341 bitop2 = BIT_IOR_EXPR;
8342 std::swap (rhs1, rhs2);
8343 std::swap (dts[0], dts[1]);
8344 }
8345 else
8346 {
8347 bitop1 = BIT_XOR_EXPR;
8348 if (code == EQ_EXPR)
8349 bitop2 = BIT_NOT_EXPR;
8350 }
8351 }
8352
8353 if (!vec_stmt)
8354 {
8355 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
8356 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
8357 dts, ndts, NULL, NULL);
8358 if (bitop1 == NOP_EXPR)
8359 return expand_vec_cmp_expr_p (vectype, mask_type, code);
8360 else
8361 {
8362 machine_mode mode = TYPE_MODE (vectype);
8363 optab optab;
8364
8365 optab = optab_for_tree_code (bitop1, vectype, optab_default);
8366 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8367 return false;
8368
8369 if (bitop2 != NOP_EXPR)
8370 {
8371 optab = optab_for_tree_code (bitop2, vectype, optab_default);
8372 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8373 return false;
8374 }
8375 return true;
8376 }
8377 }
8378
8379 /* Transform. */
8380 if (!slp_node)
8381 {
8382 vec_oprnds0.create (1);
8383 vec_oprnds1.create (1);
8384 }
8385
8386 /* Handle def. */
8387 lhs = gimple_assign_lhs (stmt);
8388 mask = vect_create_destination_var (lhs, mask_type);
8389
8390 /* Handle cmp expr. */
8391 for (j = 0; j < ncopies; j++)
8392 {
8393 gassign *new_stmt = NULL;
8394 if (j == 0)
8395 {
8396 if (slp_node)
8397 {
8398 auto_vec<tree, 2> ops;
8399 auto_vec<vec<tree>, 2> vec_defs;
8400
8401 ops.safe_push (rhs1);
8402 ops.safe_push (rhs2);
8403 vect_get_slp_defs (ops, slp_node, &vec_defs);
8404 vec_oprnds1 = vec_defs.pop ();
8405 vec_oprnds0 = vec_defs.pop ();
8406 }
8407 else
8408 {
8409 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
8410 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
8411 }
8412 }
8413 else
8414 {
8415 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
8416 vec_oprnds0.pop ());
8417 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
8418 vec_oprnds1.pop ());
8419 }
8420
8421 if (!slp_node)
8422 {
8423 vec_oprnds0.quick_push (vec_rhs1);
8424 vec_oprnds1.quick_push (vec_rhs2);
8425 }
8426
8427 /* Arguments are ready. Create the new vector stmt. */
8428 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
8429 {
8430 vec_rhs2 = vec_oprnds1[i];
8431
8432 new_temp = make_ssa_name (mask);
8433 if (bitop1 == NOP_EXPR)
8434 {
8435 new_stmt = gimple_build_assign (new_temp, code,
8436 vec_rhs1, vec_rhs2);
8437 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8438 }
8439 else
8440 {
8441 if (bitop1 == BIT_NOT_EXPR)
8442 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
8443 else
8444 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
8445 vec_rhs2);
8446 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8447 if (bitop2 != NOP_EXPR)
8448 {
8449 tree res = make_ssa_name (mask);
8450 if (bitop2 == BIT_NOT_EXPR)
8451 new_stmt = gimple_build_assign (res, bitop2, new_temp);
8452 else
8453 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
8454 new_temp);
8455 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8456 }
8457 }
8458 if (slp_node)
8459 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8460 }
8461
8462 if (slp_node)
8463 continue;
8464
8465 if (j == 0)
8466 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8467 else
8468 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8469
8470 prev_stmt_info = vinfo_for_stmt (new_stmt);
8471 }
8472
8473 vec_oprnds0.release ();
8474 vec_oprnds1.release ();
8475
8476 return true;
8477 }
8478
8479 /* Make sure the statement is vectorizable. */
8480
8481 bool
8482 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
8483 slp_instance node_instance)
8484 {
8485 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8486 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8487 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
8488 bool ok;
8489 gimple *pattern_stmt;
8490 gimple_seq pattern_def_seq;
8491
8492 if (dump_enabled_p ())
8493 {
8494 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
8495 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8496 }
8497
8498 if (gimple_has_volatile_ops (stmt))
8499 {
8500 if (dump_enabled_p ())
8501 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8502 "not vectorized: stmt has volatile operands\n");
8503
8504 return false;
8505 }
8506
8507 /* Skip stmts that do not need to be vectorized. In loops this is expected
8508 to include:
8509 - the COND_EXPR which is the loop exit condition
8510 - any LABEL_EXPRs in the loop
8511 - computations that are used only for array indexing or loop control.
8512 In basic blocks we only analyze statements that are a part of some SLP
8513 instance, therefore, all the statements are relevant.
8514
8515 A pattern statement needs to be analyzed instead of the original
8516 statement if the original statement is not relevant. Otherwise, we
8517 analyze both statements. In basic blocks we are called from some SLP
8518 instance traversal; there we don't analyze pattern stmts instead,
8519 since the pattern stmts will already be part of an SLP instance. */
8520
8521 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
8522 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8523 && !STMT_VINFO_LIVE_P (stmt_info))
8524 {
8525 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8526 && pattern_stmt
8527 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
8528 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
8529 {
8530 /* Analyze PATTERN_STMT instead of the original stmt. */
8531 stmt = pattern_stmt;
8532 stmt_info = vinfo_for_stmt (pattern_stmt);
8533 if (dump_enabled_p ())
8534 {
8535 dump_printf_loc (MSG_NOTE, vect_location,
8536 "==> examining pattern statement: ");
8537 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8538 }
8539 }
8540 else
8541 {
8542 if (dump_enabled_p ())
8543 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
8544
8545 return true;
8546 }
8547 }
8548 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8549 && node == NULL
8550 && pattern_stmt
8551 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
8552 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
8553 {
8554 /* Analyze PATTERN_STMT too. */
8555 if (dump_enabled_p ())
8556 {
8557 dump_printf_loc (MSG_NOTE, vect_location,
8558 "==> examining pattern statement: ");
8559 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8560 }
8561
8562 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
8563 node_instance))
8564 return false;
8565 }
8566
8567 if (is_pattern_stmt_p (stmt_info)
8568 && node == NULL
8569 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
8570 {
8571 gimple_stmt_iterator si;
8572
8573 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
8574 {
8575 gimple *pattern_def_stmt = gsi_stmt (si);
8576 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
8577 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
8578 {
8579 /* Analyze def stmt of STMT if it's a pattern stmt. */
8580 if (dump_enabled_p ())
8581 {
8582 dump_printf_loc (MSG_NOTE, vect_location,
8583 "==> examining pattern def statement: ");
8584 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
8585 }
8586
8587 if (!vect_analyze_stmt (pattern_def_stmt,
8588 need_to_vectorize, node, node_instance))
8589 return false;
8590 }
8591 }
8592 }
8593
8594 switch (STMT_VINFO_DEF_TYPE (stmt_info))
8595 {
8596 case vect_internal_def:
8597 break;
8598
8599 case vect_reduction_def:
8600 case vect_nested_cycle:
8601 gcc_assert (!bb_vinfo
8602 && (relevance == vect_used_in_outer
8603 || relevance == vect_used_in_outer_by_reduction
8604 || relevance == vect_used_by_reduction
8605 || relevance == vect_unused_in_scope
8606 || relevance == vect_used_only_live));
8607 break;
8608
8609 case vect_induction_def:
8610 gcc_assert (!bb_vinfo);
8611 break;
8612
8613 case vect_constant_def:
8614 case vect_external_def:
8615 case vect_unknown_def_type:
8616 default:
8617 gcc_unreachable ();
8618 }
8619
8620 if (STMT_VINFO_RELEVANT_P (stmt_info))
8621 {
8622 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
8623 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
8624 || (is_gimple_call (stmt)
8625 && gimple_call_lhs (stmt) == NULL_TREE));
8626 *need_to_vectorize = true;
8627 }
8628
8629 if (PURE_SLP_STMT (stmt_info) && !node)
8630 {
8631 dump_printf_loc (MSG_NOTE, vect_location,
8632 "handled only by SLP analysis\n");
8633 return true;
8634 }
8635
8636 ok = true;
8637 if (!bb_vinfo
8638 && (STMT_VINFO_RELEVANT_P (stmt_info)
8639 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
8640 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8641 || vectorizable_conversion (stmt, NULL, NULL, node)
8642 || vectorizable_shift (stmt, NULL, NULL, node)
8643 || vectorizable_operation (stmt, NULL, NULL, node)
8644 || vectorizable_assignment (stmt, NULL, NULL, node)
8645 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8646 || vectorizable_call (stmt, NULL, NULL, node)
8647 || vectorizable_store (stmt, NULL, NULL, node)
8648 || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
8649 || vectorizable_induction (stmt, NULL, NULL, node)
8650 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8651 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8652 else
8653 {
8654 if (bb_vinfo)
8655 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8656 || vectorizable_conversion (stmt, NULL, NULL, node)
8657 || vectorizable_shift (stmt, NULL, NULL, node)
8658 || vectorizable_operation (stmt, NULL, NULL, node)
8659 || vectorizable_assignment (stmt, NULL, NULL, node)
8660 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8661 || vectorizable_call (stmt, NULL, NULL, node)
8662 || vectorizable_store (stmt, NULL, NULL, node)
8663 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8664 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8665 }
8666
8667 if (!ok)
8668 {
8669 if (dump_enabled_p ())
8670 {
8671 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8672 "not vectorized: relevant stmt not ");
8673 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
8674 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8675 }
8676
8677 return false;
8678 }
8679
8680 if (bb_vinfo)
8681 return true;
8682
8683 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
8684 need extra handling, except for vectorizable reductions. */
8685 if (STMT_VINFO_LIVE_P (stmt_info)
8686 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
8687 ok = vectorizable_live_operation (stmt, NULL, NULL, -1, NULL);
8688
8689 if (!ok)
8690 {
8691 if (dump_enabled_p ())
8692 {
8693 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8694 "not vectorized: live stmt not ");
8695 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
8696 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8697 }
8698
8699 return false;
8700 }
8701
8702 return true;
8703 }
8704
8705
8706 /* Function vect_transform_stmt.
8707
8708 Create a vectorized stmt to replace STMT, and insert it at BSI. */
8709
8710 bool
8711 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
8712 bool *grouped_store, slp_tree slp_node,
8713 slp_instance slp_node_instance)
8714 {
8715 bool is_store = false;
8716 gimple *vec_stmt = NULL;
8717 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8718 bool done;
8719
8720 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
8721 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8722
8723 switch (STMT_VINFO_TYPE (stmt_info))
8724 {
8725 case type_demotion_vec_info_type:
8726 case type_promotion_vec_info_type:
8727 case type_conversion_vec_info_type:
8728 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
8729 gcc_assert (done);
8730 break;
8731
8732 case induc_vec_info_type:
8733 done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
8734 gcc_assert (done);
8735 break;
8736
8737 case shift_vec_info_type:
8738 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
8739 gcc_assert (done);
8740 break;
8741
8742 case op_vec_info_type:
8743 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
8744 gcc_assert (done);
8745 break;
8746
8747 case assignment_vec_info_type:
8748 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
8749 gcc_assert (done);
8750 break;
8751
8752 case load_vec_info_type:
8753 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
8754 slp_node_instance);
8755 gcc_assert (done);
8756 break;
8757
8758 case store_vec_info_type:
8759 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
8760 gcc_assert (done);
8761 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
8762 {
8763 /* In case of interleaving, the whole chain is vectorized when the
8764 last store in the chain is reached. Store stmts before the last
8765 one are skipped, and their stmt_vec_info shouldn't be freed
8766 meanwhile. */
8767 *grouped_store = true;
8768 if (STMT_VINFO_VEC_STMT (stmt_info))
8769 is_store = true;
8770 }
8771 else
8772 is_store = true;
8773 break;
8774
8775 case condition_vec_info_type:
8776 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
8777 gcc_assert (done);
8778 break;
8779
8780 case comparison_vec_info_type:
8781 done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
8782 gcc_assert (done);
8783 break;
8784
8785 case call_vec_info_type:
8786 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
8787 stmt = gsi_stmt (*gsi);
8788 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8789 is_store = true;
8790 break;
8791
8792 case call_simd_clone_vec_info_type:
8793 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
8794 stmt = gsi_stmt (*gsi);
8795 break;
8796
8797 case reduc_vec_info_type:
8798 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
8799 slp_node_instance);
8800 gcc_assert (done);
8801 break;
8802
8803 default:
8804 if (!STMT_VINFO_LIVE_P (stmt_info))
8805 {
8806 if (dump_enabled_p ())
8807 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8808 "stmt not supported.\n");
8809 gcc_unreachable ();
8810 }
8811 }
8812
8813 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
8814 This would break hybrid SLP vectorization. */
8815 if (slp_node)
8816 gcc_assert (!vec_stmt
8817 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
8818
8819 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
8820 is being vectorized, but outside the immediately enclosing loop. */
8821 if (vec_stmt
8822 && STMT_VINFO_LOOP_VINFO (stmt_info)
8823 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
8824 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
8825 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8826 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
8827 || STMT_VINFO_RELEVANT (stmt_info) ==
8828 vect_used_in_outer_by_reduction))
8829 {
8830 struct loop *innerloop = LOOP_VINFO_LOOP (
8831 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
8832 imm_use_iterator imm_iter;
8833 use_operand_p use_p;
8834 tree scalar_dest;
8835 gimple *exit_phi;
8836
8837 if (dump_enabled_p ())
8838 dump_printf_loc (MSG_NOTE, vect_location,
8839 "Record the vdef for outer-loop vectorization.\n");
8840
8841 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
8842 (to be used when vectorizing outer-loop stmts that use the DEF of
8843 STMT). */
8844 if (gimple_code (stmt) == GIMPLE_PHI)
8845 scalar_dest = PHI_RESULT (stmt);
8846 else
8847 scalar_dest = gimple_assign_lhs (stmt);
8848
8849 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
8850 {
8851 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
8852 {
8853 exit_phi = USE_STMT (use_p);
8854 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
8855 }
8856 }
8857 }
8858
8859 /* Handle stmts whose DEF is used outside the loop-nest that is
8860 being vectorized. */
8861 if (slp_node)
8862 {
8863 gimple *slp_stmt;
8864 int i;
8865 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
8866 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
8867 {
8868 stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
8869 if (STMT_VINFO_LIVE_P (slp_stmt_info))
8870 {
8871 done = vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
8872 &vec_stmt);
8873 gcc_assert (done);
8874 }
8875 }
8876 }
8877 else if (STMT_VINFO_LIVE_P (stmt_info)
8878 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
8879 {
8880 done = vectorizable_live_operation (stmt, gsi, slp_node, -1, &vec_stmt);
8881 gcc_assert (done);
8882 }
8883
8884 if (vec_stmt)
8885 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
8886
8887 return is_store;
8888 }
8889
8890
8891 /* Remove a group of stores (for SLP or interleaving), free their
8892 stmt_vec_info. */
8893
8894 void
8895 vect_remove_stores (gimple *first_stmt)
8896 {
8897 gimple *next = first_stmt;
8898 gimple *tmp;
8899 gimple_stmt_iterator next_si;
8900
8901 while (next)
8902 {
8903 stmt_vec_info stmt_info = vinfo_for_stmt (next);
8904
8905 tmp = GROUP_NEXT_ELEMENT (stmt_info);
8906 if (is_pattern_stmt_p (stmt_info))
8907 next = STMT_VINFO_RELATED_STMT (stmt_info);
8908 /* Free the attached stmt_vec_info and remove the stmt. */
8909 next_si = gsi_for_stmt (next);
8910 unlink_stmt_vdef (next);
8911 gsi_remove (&next_si, true);
8912 release_defs (next);
8913 free_stmt_vec_info (next);
8914 next = tmp;
8915 }
8916 }
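
/* Illustrative usage sketch: once the vectorized copies have been
   emitted, a caller typically removes the whole scalar store group via
   its chain head, e.g.

     vect_remove_stores (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));

   where STMT is any member of the group; GROUP_NEXT_ELEMENT then walks
   the rest of the interleaving chain in the loop above.  */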
8917
8918
8919 /* Function new_stmt_vec_info.
8920
8921 Create and initialize a new stmt_vec_info struct for STMT. */
8922
8923 stmt_vec_info
8924 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
8925 {
8926 stmt_vec_info res;
8927 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
8928
8929 STMT_VINFO_TYPE (res) = undef_vec_info_type;
8930 STMT_VINFO_STMT (res) = stmt;
8931 res->vinfo = vinfo;
8932 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
8933 STMT_VINFO_LIVE_P (res) = false;
8934 STMT_VINFO_VECTYPE (res) = NULL;
8935 STMT_VINFO_VEC_STMT (res) = NULL;
8936 STMT_VINFO_VECTORIZABLE (res) = true;
8937 STMT_VINFO_IN_PATTERN_P (res) = false;
8938 STMT_VINFO_RELATED_STMT (res) = NULL;
8939 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
8940 STMT_VINFO_DATA_REF (res) = NULL;
8941 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
8942 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;
8943
8944 if (gimple_code (stmt) == GIMPLE_PHI
8945 && is_loop_header_bb_p (gimple_bb (stmt)))
8946 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
8947 else
8948 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
8949
8950 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
8951 STMT_SLP_TYPE (res) = loop_vect;
8952 STMT_VINFO_NUM_SLP_USES (res) = 0;
8953
8954 GROUP_FIRST_ELEMENT (res) = NULL;
8955 GROUP_NEXT_ELEMENT (res) = NULL;
8956 GROUP_SIZE (res) = 0;
8957 GROUP_STORE_COUNT (res) = 0;
8958 GROUP_GAP (res) = 0;
8959 GROUP_SAME_DR_STMT (res) = NULL;
8960
8961 return res;
8962 }
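
/* Illustrative usage sketch: when a loop or basic-block region is set
   up for analysis, one of these is allocated per stmt and registered so
   that vinfo_for_stmt can find it later, roughly

     set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, vinfo));

   with VINFO the enclosing loop_vec_info or bb_vec_info.  */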
8963
8964
8965 /* Create the global stmt_vec_info vector.  */
8966
8967 void
8968 init_stmt_vec_info_vec (void)
8969 {
8970 gcc_assert (!stmt_vec_info_vec.exists ());
8971 stmt_vec_info_vec.create (50);
8972 }
8973
8974
8975 /* Free the global stmt_vec_info vector.  */
8976
8977 void
8978 free_stmt_vec_info_vec (void)
8979 {
8980 unsigned int i;
8981 stmt_vec_info info;
8982 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
8983 if (info != NULL)
8984 free_stmt_vec_info (STMT_VINFO_STMT (info));
8985 gcc_assert (stmt_vec_info_vec.exists ());
8986 stmt_vec_info_vec.release ();
8987 }
8988
8989
8990 /* Free stmt vectorization related info. */
8991
8992 void
8993 free_stmt_vec_info (gimple *stmt)
8994 {
8995 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8996
8997 if (!stmt_info)
8998 return;
8999
9000 /* Check if this statement has a related "pattern stmt"
9001 (introduced by the vectorizer during the pattern recognition
9002    pass).  Free the pattern's stmt_vec_info and the def stmt's stmt_vec_info
9003 too. */
9004 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
9005 {
9006 stmt_vec_info patt_info
9007 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9008 if (patt_info)
9009 {
9010 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
9011 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
9012 gimple_set_bb (patt_stmt, NULL);
9013 tree lhs = gimple_get_lhs (patt_stmt);
9014 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9015 release_ssa_name (lhs);
9016 if (seq)
9017 {
9018 gimple_stmt_iterator si;
9019 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
9020 {
9021 gimple *seq_stmt = gsi_stmt (si);
9022 gimple_set_bb (seq_stmt, NULL);
9023 lhs = gimple_get_lhs (seq_stmt);
9024 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9025 release_ssa_name (lhs);
9026 free_stmt_vec_info (seq_stmt);
9027 }
9028 }
9029 free_stmt_vec_info (patt_stmt);
9030 }
9031 }
9032
9033 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
9034 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
9035 set_vinfo_for_stmt (stmt, NULL);
9036 free (stmt_info);
9037 }
9038
9039
9040 /* Function get_vectype_for_scalar_type_and_size.
9041
9042 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
9043 by the target. */
9044
9045 static tree
9046 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
9047 {
9048 tree orig_scalar_type = scalar_type;
9049 scalar_mode inner_mode;
9050 machine_mode simd_mode;
9051 int nunits;
9052 tree vectype;
9053
9054 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
9055 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
9056 return NULL_TREE;
9057
9058 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
9059
9060 /* For vector types of elements whose mode precision doesn't
9061      match their type's precision we use an element type of mode
9062 precision. The vectorization routines will have to make sure
9063 they support the proper result truncation/extension.
9064 We also make sure to build vector types with INTEGER_TYPE
9065 component type only. */
9066 if (INTEGRAL_TYPE_P (scalar_type)
9067 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
9068 || TREE_CODE (scalar_type) != INTEGER_TYPE))
9069 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
9070 TYPE_UNSIGNED (scalar_type));
9071
9072 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9073      When the component mode passes the above test, simply use a type
9074 corresponding to that mode. The theory is that any use that
9075 would cause problems with this will disable vectorization anyway. */
9076 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
9077 && !INTEGRAL_TYPE_P (scalar_type))
9078 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
9079
9080 /* We can't build a vector type of elements with alignment bigger than
9081 their size. */
9082 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
9083 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
9084 TYPE_UNSIGNED (scalar_type));
9085
9086   /* If we fell back to using the mode, fail if there was
9087 no scalar type for it. */
9088 if (scalar_type == NULL_TREE)
9089 return NULL_TREE;
9090
9091 /* If no size was supplied use the mode the target prefers. Otherwise
9092      look up a vector mode of the specified size.  */
9093 if (size == 0)
9094 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
9095 else
9096 simd_mode = mode_for_vector (inner_mode, size / nbytes);
9097 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
9098 if (nunits <= 1)
9099 return NULL_TREE;
9100
9101 vectype = build_vector_type (scalar_type, nunits);
9102
9103 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
9104 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
9105 return NULL_TREE;
9106
9107 /* Re-attach the address-space qualifier if we canonicalized the scalar
9108 type. */
9109 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
9110 return build_qualified_type
9111 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
9112
9113 return vectype;
9114 }
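
/* Worked example (illustrative): for a C "bool" SCALAR_TYPE with QImode
   (mode precision 8, type precision 1) the code above canonicalizes the
   element to the 8-bit unsigned integer type via
   build_nonstandard_integer_type.  With SIZE == 16 it then asks for a
   16 / 1 == 16 element vector mode -- V16QImode on a typical 128-bit
   SIMD target -- and returns a 16-element vector of that integer
   type.  */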
9115
9116 unsigned int current_vector_size;
9117
9118 /* Function get_vectype_for_scalar_type.
9119
9120 Returns the vector type corresponding to SCALAR_TYPE as supported
9121 by the target. */
9122
9123 tree
9124 get_vectype_for_scalar_type (tree scalar_type)
9125 {
9126 tree vectype;
9127 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
9128 current_vector_size);
9129 if (vectype
9130 && current_vector_size == 0)
9131 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
9132 return vectype;
9133 }
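
/* Note the latching behaviour: the first successful lookup fixes
   current_vector_size for subsequent queries, so, illustratively,

     tree v1 = get_vectype_for_scalar_type (integer_type_node);
     tree v2 = get_vectype_for_scalar_type (short_integer_type_node);

   on a target preferring 128-bit vectors yields V4SI and then V8HI --
   both 16 bytes -- instead of mixing vector sizes within one region.  */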
9134
9135 /* Function get_mask_type_for_scalar_type.
9136
9137 Returns the mask type corresponding to a result of comparison
9138    of vectors of the specified SCALAR_TYPE, as supported by the target.  */
9139
9140 tree
9141 get_mask_type_for_scalar_type (tree scalar_type)
9142 {
9143 tree vectype = get_vectype_for_scalar_type (scalar_type);
9144
9145 if (!vectype)
9146 return NULL;
9147
9148 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
9149 current_vector_size);
9150 }
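
/* For example (illustrative): with 16-byte vectors and an "int"
   SCALAR_TYPE the comparison vectype is V4SI, so the call above
   requests a 4-element truth vector of total size 16.  Depending on
   the target this is either an integer vector with -1/0 lanes or a
   dedicated mask mode.  */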
9151
9152 /* Function get_same_sized_vectype
9153
9154    Returns a vector type corresponding to SCALAR_TYPE with the same
9155    size as VECTOR_TYPE, if supported by the target.  */
9156
9157 tree
9158 get_same_sized_vectype (tree scalar_type, tree vector_type)
9159 {
9160 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
9161 return build_same_sized_truth_vector_type (vector_type);
9162
9163 return get_vectype_for_scalar_type_and_size
9164 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
9165 }
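
/* Example (illustrative): get_same_sized_vectype (integer_type_node,
   a V8HI vector type) requests a vector of "int" elements occupying
   the same 16 bytes as V8HI, i.e. V4SI -- the shape needed when a
   short operand vector must pair with an int result vector in a
   widening operation.  */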
9166
9167 /* Function vect_is_simple_use.
9168
9169 Input:
9170 VINFO - the vect info of the loop or basic block that is being vectorized.
9171 OPERAND - operand in the loop or bb.
9172 Output:
9173 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
9174 DT - the type of definition
9175
9176 Returns whether a stmt with OPERAND can be vectorized.
9177 For loops, supportable operands are constants, loop invariants, and operands
9178 that are defined by the current iteration of the loop. Unsupportable
9179 operands are those that are defined by a previous iteration of the loop (as
9180 is the case in reduction/induction computations).
9181 For basic blocks, supportable operands are constants and bb invariants.
9182 For now, operands defined outside the basic block are not supported. */
9183
9184 bool
9185 vect_is_simple_use (tree operand, vec_info *vinfo,
9186 gimple **def_stmt, enum vect_def_type *dt)
9187 {
9188 *def_stmt = NULL;
9189 *dt = vect_unknown_def_type;
9190
9191 if (dump_enabled_p ())
9192 {
9193 dump_printf_loc (MSG_NOTE, vect_location,
9194 "vect_is_simple_use: operand ");
9195 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
9196 dump_printf (MSG_NOTE, "\n");
9197 }
9198
9199 if (CONSTANT_CLASS_P (operand))
9200 {
9201 *dt = vect_constant_def;
9202 return true;
9203 }
9204
9205 if (is_gimple_min_invariant (operand))
9206 {
9207 *dt = vect_external_def;
9208 return true;
9209 }
9210
9211 if (TREE_CODE (operand) != SSA_NAME)
9212 {
9213 if (dump_enabled_p ())
9214 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9215 "not ssa-name.\n");
9216 return false;
9217 }
9218
9219 if (SSA_NAME_IS_DEFAULT_DEF (operand))
9220 {
9221 *dt = vect_external_def;
9222 return true;
9223 }
9224
9225 *def_stmt = SSA_NAME_DEF_STMT (operand);
9226 if (dump_enabled_p ())
9227 {
9228 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
9229 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
9230 }
9231
9232 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
9233 *dt = vect_external_def;
9234 else
9235 {
9236 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
9237 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
9238 }
9239
9240 if (dump_enabled_p ())
9241 {
9242 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
9243 switch (*dt)
9244 {
9245 case vect_uninitialized_def:
9246 dump_printf (MSG_NOTE, "uninitialized\n");
9247 break;
9248 case vect_constant_def:
9249 dump_printf (MSG_NOTE, "constant\n");
9250 break;
9251 case vect_external_def:
9252 dump_printf (MSG_NOTE, "external\n");
9253 break;
9254 case vect_internal_def:
9255 dump_printf (MSG_NOTE, "internal\n");
9256 break;
9257 case vect_induction_def:
9258 dump_printf (MSG_NOTE, "induction\n");
9259 break;
9260 case vect_reduction_def:
9261 dump_printf (MSG_NOTE, "reduction\n");
9262 break;
9263 case vect_double_reduction_def:
9264 dump_printf (MSG_NOTE, "double reduction\n");
9265 break;
9266 case vect_nested_cycle:
9267 dump_printf (MSG_NOTE, "nested cycle\n");
9268 break;
9269 case vect_unknown_def_type:
9270 dump_printf (MSG_NOTE, "unknown\n");
9271 break;
9272 }
9273 }
9274
9275 if (*dt == vect_unknown_def_type)
9276 {
9277 if (dump_enabled_p ())
9278 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9279 "Unsupported pattern.\n");
9280 return false;
9281 }
9282
9283 switch (gimple_code (*def_stmt))
9284 {
9285 case GIMPLE_PHI:
9286 case GIMPLE_ASSIGN:
9287 case GIMPLE_CALL:
9288 break;
9289 default:
9290 if (dump_enabled_p ())
9291 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9292 "unsupported defining stmt:\n");
9293 return false;
9294 }
9295
9296 return true;
9297 }
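
/* Classification example (illustrative): in

     for (i = 0; i < n; i++)
       a[i] = b[i] + x;

   the SSA name holding "b[i]" is defined by a load inside the region
   and classifies as vect_internal_def, the invariant "x" defined
   before the loop is vect_external_def, and a literal constant operand
   would be vect_constant_def.  */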
9298
9299 /* Function vect_is_simple_use.
9300
9301 Same as vect_is_simple_use but also determines the vector operand
9302 type of OPERAND and stores it to *VECTYPE. If the definition of
9303 OPERAND is vect_uninitialized_def, vect_constant_def or
9304 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
9305    is responsible for computing the best suited vector type for the
9306 scalar operand. */
9307
9308 bool
9309 vect_is_simple_use (tree operand, vec_info *vinfo,
9310 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
9311 {
9312 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
9313 return false;
9314
9315 /* Now get a vector type if the def is internal, otherwise supply
9316 NULL_TREE and leave it up to the caller to figure out a proper
9317 type for the use stmt. */
9318 if (*dt == vect_internal_def
9319 || *dt == vect_induction_def
9320 || *dt == vect_reduction_def
9321 || *dt == vect_double_reduction_def
9322 || *dt == vect_nested_cycle)
9323 {
9324 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
9325
9326 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9327 && !STMT_VINFO_RELEVANT (stmt_info)
9328 && !STMT_VINFO_LIVE_P (stmt_info))
9329 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9330
9331 *vectype = STMT_VINFO_VECTYPE (stmt_info);
9332 gcc_assert (*vectype != NULL_TREE);
9333 }
9334 else if (*dt == vect_uninitialized_def
9335 || *dt == vect_constant_def
9336 || *dt == vect_external_def)
9337 *vectype = NULL_TREE;
9338 else
9339 gcc_unreachable ();
9340
9341 return true;
9342 }
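
/* Illustrative caller pattern for the overload above:

     gimple *def_stmt;
     enum vect_def_type dt;
     tree vectype;
     if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &vectype))
       return false;

   where VECTYPE comes back non-NULL only for definitions inside the
   vectorized region (internal, induction, reduction, ...); for
   constants and external defs the caller picks a suitable type.  */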
9343
9344
9345 /* Function supportable_widening_operation
9346
9347 Check whether an operation represented by the code CODE is a
9348 widening operation that is supported by the target platform in
9349 vector form (i.e., when operating on arguments of type VECTYPE_IN
9350 producing a result of type VECTYPE_OUT).
9351
9352 Widening operations we currently support are NOP (CONVERT), FLOAT
9353 and WIDEN_MULT. This function checks if these operations are supported
9354 by the target platform either directly (via vector tree-codes), or via
9355 target builtins.
9356
9357 Output:
9358 - CODE1 and CODE2 are codes of vector operations to be used when
9359 vectorizing the operation, if available.
9360 - MULTI_STEP_CVT determines the number of required intermediate steps in
9361 case of multi-step conversion (like char->short->int - in that case
9362 MULTI_STEP_CVT will be 1).
9363 - INTERM_TYPES contains the intermediate type required to perform the
9364 widening operation (short in the above example). */
9365
9366 bool
9367 supportable_widening_operation (enum tree_code code, gimple *stmt,
9368 tree vectype_out, tree vectype_in,
9369 enum tree_code *code1, enum tree_code *code2,
9370 int *multi_step_cvt,
9371 vec<tree> *interm_types)
9372 {
9373 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9374 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
9375 struct loop *vect_loop = NULL;
9376 machine_mode vec_mode;
9377 enum insn_code icode1, icode2;
9378 optab optab1, optab2;
9379 tree vectype = vectype_in;
9380 tree wide_vectype = vectype_out;
9381 enum tree_code c1, c2;
9382 int i;
9383 tree prev_type, intermediate_type;
9384 machine_mode intermediate_mode, prev_mode;
9385 optab optab3, optab4;
9386
9387 *multi_step_cvt = 0;
9388 if (loop_info)
9389 vect_loop = LOOP_VINFO_LOOP (loop_info);
9390
9391 switch (code)
9392 {
9393 case WIDEN_MULT_EXPR:
9394 /* The result of a vectorized widening operation usually requires
9395 two vectors (because the widened results do not fit into one vector).
9396 The generated vector results would normally be expected to be
9397 generated in the same order as in the original scalar computation,
9398 i.e. if 8 results are generated in each vector iteration, they are
9399 to be organized as follows:
9400 vect1: [res1,res2,res3,res4],
9401 vect2: [res5,res6,res7,res8].
9402
9403 However, in the special case that the result of the widening
9404 operation is used in a reduction computation only, the order doesn't
9405 matter (because when vectorizing a reduction we change the order of
9406 the computation). Some targets can take advantage of this and
9407 generate more efficient code. For example, targets like Altivec,
9408 that support widen_mult using a sequence of {mult_even,mult_odd}
9409 generate the following vectors:
9410 vect1: [res1,res3,res5,res7],
9411 vect2: [res2,res4,res6,res8].
9412
9413 When vectorizing outer-loops, we execute the inner-loop sequentially
9414 (each vectorized inner-loop iteration contributes to VF outer-loop
9415          iterations in parallel).  We therefore don't allow changing the
9416 order of the computation in the inner-loop during outer-loop
9417 vectorization. */
9418 /* TODO: Another case in which order doesn't *really* matter is when we
9419 widen and then contract again, e.g. (short)((int)x * y >> 8).
9420 Normally, pack_trunc performs an even/odd permute, whereas the
9421 repack from an even/odd expansion would be an interleave, which
9422 would be significantly simpler for e.g. AVX2. */
9423 /* In any case, in order to avoid duplicating the code below, recurse
9424 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
9425 are properly set up for the caller. If we fail, we'll continue with
9426 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
9427 if (vect_loop
9428 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
9429 && !nested_in_vect_loop_p (vect_loop, stmt)
9430 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
9431 stmt, vectype_out, vectype_in,
9432 code1, code2, multi_step_cvt,
9433 interm_types))
9434 {
9435          /* Elements in a vector with the vect_used_by_reduction property cannot
9436             be reordered if the use chain with this property does not have the
9437             same operation.  One such example is s += a * b, where elements
9438 in a and b cannot be reordered. Here we check if the vector defined
9439 by STMT is only directly used in the reduction statement. */
9440 tree lhs = gimple_assign_lhs (stmt);
9441 use_operand_p dummy;
9442 gimple *use_stmt;
9443 stmt_vec_info use_stmt_info = NULL;
9444 if (single_imm_use (lhs, &dummy, &use_stmt)
9445 && (use_stmt_info = vinfo_for_stmt (use_stmt))
9446 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
9447 return true;
9448 }
9449 c1 = VEC_WIDEN_MULT_LO_EXPR;
9450 c2 = VEC_WIDEN_MULT_HI_EXPR;
9451 break;
9452
9453 case DOT_PROD_EXPR:
9454 c1 = DOT_PROD_EXPR;
9455 c2 = DOT_PROD_EXPR;
9456 break;
9457
9458 case SAD_EXPR:
9459 c1 = SAD_EXPR;
9460 c2 = SAD_EXPR;
9461 break;
9462
9463 case VEC_WIDEN_MULT_EVEN_EXPR:
9464 /* Support the recursion induced just above. */
9465 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
9466 c2 = VEC_WIDEN_MULT_ODD_EXPR;
9467 break;
9468
9469 case WIDEN_LSHIFT_EXPR:
9470 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
9471 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
9472 break;
9473
9474 CASE_CONVERT:
9475 c1 = VEC_UNPACK_LO_EXPR;
9476 c2 = VEC_UNPACK_HI_EXPR;
9477 break;
9478
9479 case FLOAT_EXPR:
9480 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
9481 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
9482 break;
9483
9484 case FIX_TRUNC_EXPR:
9485 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
9486 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
9487 computing the operation. */
9488 return false;
9489
9490 default:
9491 gcc_unreachable ();
9492 }
9493
9494 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
9495 std::swap (c1, c2);
9496
9497 if (code == FIX_TRUNC_EXPR)
9498 {
9499       /* The signedness is determined from the output operand.  */
9500 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9501 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
9502 }
9503 else
9504 {
9505 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9506 optab2 = optab_for_tree_code (c2, vectype, optab_default);
9507 }
9508
9509 if (!optab1 || !optab2)
9510 return false;
9511
9512 vec_mode = TYPE_MODE (vectype);
9513 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
9514 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
9515 return false;
9516
9517 *code1 = c1;
9518 *code2 = c2;
9519
9520 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9521 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9522 /* For scalar masks we may have different boolean
9523 vector types having the same QImode. Thus we
9524        add an additional check on the number of elements.  */
9525 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9526 || (TYPE_VECTOR_SUBPARTS (vectype) / 2
9527 == TYPE_VECTOR_SUBPARTS (wide_vectype)));
9528
9529 /* Check if it's a multi-step conversion that can be done using intermediate
9530 types. */
9531
9532 prev_type = vectype;
9533 prev_mode = vec_mode;
9534
9535 if (!CONVERT_EXPR_CODE_P (code))
9536 return false;
9537
9538   /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9539      intermediate steps in the promotion sequence.  We try
9540      MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
9541      not.  */
9542 interm_types->create (MAX_INTERM_CVT_STEPS);
9543 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9544 {
9545 intermediate_mode = insn_data[icode1].operand[0].mode;
9546 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
9547 {
9548 intermediate_type
9549 = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2,
9550 current_vector_size);
9551 if (intermediate_mode != TYPE_MODE (intermediate_type))
9552 return false;
9553 }
9554 else
9555 intermediate_type
9556 = lang_hooks.types.type_for_mode (intermediate_mode,
9557 TYPE_UNSIGNED (prev_type));
9558
9559 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
9560 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
9561
9562 if (!optab3 || !optab4
9563 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
9564 || insn_data[icode1].operand[0].mode != intermediate_mode
9565 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
9566 || insn_data[icode2].operand[0].mode != intermediate_mode
9567 || ((icode1 = optab_handler (optab3, intermediate_mode))
9568 == CODE_FOR_nothing)
9569 || ((icode2 = optab_handler (optab4, intermediate_mode))
9570 == CODE_FOR_nothing))
9571 break;
9572
9573 interm_types->quick_push (intermediate_type);
9574 (*multi_step_cvt)++;
9575
9576 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9577 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9578 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9579 || (TYPE_VECTOR_SUBPARTS (intermediate_type) / 2
9580 == TYPE_VECTOR_SUBPARTS (wide_vectype)));
9581
9582 prev_type = intermediate_type;
9583 prev_mode = intermediate_mode;
9584 }
9585
9586 interm_types->release ();
9587 return false;
9588 }
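
/* Concrete sketch (illustrative): converting char to int with 128-bit
   vectors goes V16QI -> V8HI -> V4SI.  The first unpack pair produces
   V8HI rather than the wide V4SI, so the loop above pushes the V8HI
   intermediate type, sets MULTI_STEP_CVT to 1, and each step applies
   the returned VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR pair; one V16QI
   input thus expands into four V4SI result vectors.  */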
9589
9590
9591 /* Function supportable_narrowing_operation
9592
9593 Check whether an operation represented by the code CODE is a
9594 narrowing operation that is supported by the target platform in
9595 vector form (i.e., when operating on arguments of type VECTYPE_IN
9596 and producing a result of type VECTYPE_OUT).
9597
9598 Narrowing operations we currently support are NOP (CONVERT) and
9599 FIX_TRUNC. This function checks if these operations are supported by
9600 the target platform directly via vector tree-codes.
9601
9602 Output:
9603 - CODE1 is the code of a vector operation to be used when
9604 vectorizing the operation, if available.
9605 - MULTI_STEP_CVT determines the number of required intermediate steps in
9606 case of multi-step conversion (like int->short->char - in that case
9607 MULTI_STEP_CVT will be 1).
9608 - INTERM_TYPES contains the intermediate type required to perform the
9609 narrowing operation (short in the above example). */
9610
9611 bool
9612 supportable_narrowing_operation (enum tree_code code,
9613 tree vectype_out, tree vectype_in,
9614 enum tree_code *code1, int *multi_step_cvt,
9615 vec<tree> *interm_types)
9616 {
9617 machine_mode vec_mode;
9618 enum insn_code icode1;
9619 optab optab1, interm_optab;
9620 tree vectype = vectype_in;
9621 tree narrow_vectype = vectype_out;
9622 enum tree_code c1;
9623 tree intermediate_type, prev_type;
9624 machine_mode intermediate_mode, prev_mode;
9625 int i;
9626 bool uns;
9627
9628 *multi_step_cvt = 0;
9629 switch (code)
9630 {
9631 CASE_CONVERT:
9632 c1 = VEC_PACK_TRUNC_EXPR;
9633 break;
9634
9635 case FIX_TRUNC_EXPR:
9636 c1 = VEC_PACK_FIX_TRUNC_EXPR;
9637 break;
9638
9639 case FLOAT_EXPR:
9640 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
9641 tree code and optabs used for computing the operation. */
9642 return false;
9643
9644 default:
9645 gcc_unreachable ();
9646 }
9647
9648 if (code == FIX_TRUNC_EXPR)
9649     /* The signedness is determined from the output operand.  */
9650 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9651 else
9652 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9653
9654 if (!optab1)
9655 return false;
9656
9657 vec_mode = TYPE_MODE (vectype);
9658 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
9659 return false;
9660
9661 *code1 = c1;
9662
9663 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9664 /* For scalar masks we may have different boolean
9665 vector types having the same QImode. Thus we
9666        add an additional check on the number of elements.  */
9667 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9668 || (TYPE_VECTOR_SUBPARTS (vectype) * 2
9669 == TYPE_VECTOR_SUBPARTS (narrow_vectype)));
9670
9671 /* Check if it's a multi-step conversion that can be done using intermediate
9672 types. */
9673 prev_mode = vec_mode;
9674 prev_type = vectype;
9675 if (code == FIX_TRUNC_EXPR)
9676 uns = TYPE_UNSIGNED (vectype_out);
9677 else
9678 uns = TYPE_UNSIGNED (vectype);
9679
9680 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
9681 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
9682 costly than signed. */
9683 if (code == FIX_TRUNC_EXPR && uns)
9684 {
9685 enum insn_code icode2;
9686
9687 intermediate_type
9688 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
9689 interm_optab
9690 = optab_for_tree_code (c1, intermediate_type, optab_default);
9691 if (interm_optab != unknown_optab
9692 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
9693 && insn_data[icode1].operand[0].mode
9694 == insn_data[icode2].operand[0].mode)
9695 {
9696 uns = false;
9697 optab1 = interm_optab;
9698 icode1 = icode2;
9699 }
9700 }
9701
9702   /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9703      intermediate steps in the narrowing sequence.  We try
9704      MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
9705 interm_types->create (MAX_INTERM_CVT_STEPS);
9706 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9707 {
9708 intermediate_mode = insn_data[icode1].operand[0].mode;
9709 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
9710 {
9711 intermediate_type
9712 = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
9713 current_vector_size);
9714 if (intermediate_mode != TYPE_MODE (intermediate_type))
9715 return false;
9716 }
9717 else
9718 intermediate_type
9719 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
9720 interm_optab
9721 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
9722 optab_default);
9723 if (!interm_optab
9724 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
9725 || insn_data[icode1].operand[0].mode != intermediate_mode
9726 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
9727 == CODE_FOR_nothing))
9728 break;
9729
9730 interm_types->quick_push (intermediate_type);
9731 (*multi_step_cvt)++;
9732
9733 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9734 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9735 || (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2
9736 == TYPE_VECTOR_SUBPARTS (narrow_vectype)));
9737
9738 prev_mode = intermediate_mode;
9739 prev_type = intermediate_type;
9740 optab1 = interm_optab;
9741 }
9742
9743 interm_types->release ();
9744 return false;
9745 }
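
/* Concrete sketch (illustrative): narrowing int to char with 128-bit
   vectors is the mirror image, V4SI -> V8HI -> V16QI.  VEC_PACK_TRUNC
   combines two inputs into one narrower vector per step, so the loop
   above records the V8HI intermediate type, sets MULTI_STEP_CVT to 1,
   and four V4SI inputs end up packed into a single V16QI vector.  */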