[Vectorizer] Split vect_gen_perm_mask into _checked and _any variants
[gcc.git] gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2014 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "stor-layout.h"
29 #include "target.h"
30 #include "predict.h"
31 #include "vec.h"
32 #include "hashtab.h"
33 #include "hash-set.h"
34 #include "machmode.h"
35 #include "hard-reg-set.h"
36 #include "input.h"
37 #include "function.h"
38 #include "dominance.h"
39 #include "cfg.h"
40 #include "basic-block.h"
41 #include "gimple-pretty-print.h"
42 #include "tree-ssa-alias.h"
43 #include "internal-fn.h"
44 #include "tree-eh.h"
45 #include "gimple-expr.h"
46 #include "is-a.h"
47 #include "gimple.h"
48 #include "gimplify.h"
49 #include "gimple-iterator.h"
50 #include "gimplify-me.h"
51 #include "gimple-ssa.h"
52 #include "tree-cfg.h"
53 #include "tree-phinodes.h"
54 #include "ssa-iterators.h"
55 #include "stringpool.h"
56 #include "tree-ssanames.h"
57 #include "tree-ssa-loop-manip.h"
58 #include "cfgloop.h"
59 #include "tree-ssa-loop.h"
60 #include "tree-scalar-evolution.h"
61 #include "expr.h"
62 #include "recog.h" /* FIXME: for insn_data */
63 #include "insn-codes.h"
64 #include "optabs.h"
65 #include "diagnostic-core.h"
66 #include "tree-vectorizer.h"
67 #include "dumpfile.h"
68 #include "hash-map.h"
69 #include "plugin-api.h"
70 #include "ipa-ref.h"
71 #include "cgraph.h"
72 #include "builtins.h"
73
74 /* For lang_hooks.types.type_for_mode. */
75 #include "langhooks.h"
76
77 /* Return the vectorized type for the given statement. */
78
79 tree
80 stmt_vectype (struct _stmt_vec_info *stmt_info)
81 {
82 return STMT_VINFO_VECTYPE (stmt_info);
83 }
84
85 /* Return TRUE iff the given statement is in an inner loop relative to
86 the loop being vectorized. */
87 bool
88 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
89 {
90 gimple stmt = STMT_VINFO_STMT (stmt_info);
91 basic_block bb = gimple_bb (stmt);
92 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
93 struct loop* loop;
94
95 if (!loop_vinfo)
96 return false;
97
98 loop = LOOP_VINFO_LOOP (loop_vinfo);
99
100 return (bb->loop_father == loop->inner);
101 }
102
103 /* Record the cost of a statement, either by directly informing the
104 target model or by saving it in a vector for later processing.
105 Return a preliminary estimate of the statement's cost. */
106
107 unsigned
108 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
109 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
110 int misalign, enum vect_cost_model_location where)
111 {
112 if (body_cost_vec)
113 {
114 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
115 add_stmt_info_to_vec (body_cost_vec, count, kind,
116 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
117 misalign);
118 return (unsigned)
119 (builtin_vectorization_cost (kind, vectype, misalign) * count);
120
121 }
122 else
123 {
124 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
125 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
126 void *target_cost_data;
127
128 if (loop_vinfo)
129 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
130 else
131 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
132
133 return add_stmt_cost (target_cost_data, count, kind, stmt_info,
134 misalign, where);
135 }
136 }
137
138 /* Return a variable of type ELEM_TYPE[NELEMS]. */
139
140 static tree
141 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
142 {
143 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
144 "vect_array");
145 }
146
147 /* ARRAY is an array of vectors created by create_vector_array.
148 Return an SSA_NAME for the vector in index N. The reference
149 is part of the vectorization of STMT and the vector is associated
150 with scalar destination SCALAR_DEST. */
151
152 static tree
153 read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
154 tree array, unsigned HOST_WIDE_INT n)
155 {
156 tree vect_type, vect, vect_name, array_ref;
157 gimple new_stmt;
158
159 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
160 vect_type = TREE_TYPE (TREE_TYPE (array));
161 vect = vect_create_destination_var (scalar_dest, vect_type);
162 array_ref = build4 (ARRAY_REF, vect_type, array,
163 build_int_cst (size_type_node, n),
164 NULL_TREE, NULL_TREE);
165
166 new_stmt = gimple_build_assign (vect, array_ref);
167 vect_name = make_ssa_name (vect, new_stmt);
168 gimple_assign_set_lhs (new_stmt, vect_name);
169 vect_finish_stmt_generation (stmt, new_stmt, gsi);
170
171 return vect_name;
172 }
173
174 /* ARRAY is an array of vectors created by create_vector_array.
175 Emit code to store SSA_NAME VECT in index N of the array.
176 The store is part of the vectorization of STMT. */
177
178 static void
179 write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
180 tree array, unsigned HOST_WIDE_INT n)
181 {
182 tree array_ref;
183 gimple new_stmt;
184
185 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
186 build_int_cst (size_type_node, n),
187 NULL_TREE, NULL_TREE);
188
189 new_stmt = gimple_build_assign (array_ref, vect);
190 vect_finish_stmt_generation (stmt, new_stmt, gsi);
191 }
192
193 /* PTR is a pointer to an array of type TYPE. Return a representation
194 of *PTR. The memory reference replaces those in FIRST_DR
195 (and its group). */
196
197 static tree
198 create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
199 {
200 tree mem_ref, alias_ptr_type;
201
202 alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
203 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
204 /* Arrays have the same alignment as their type. */
205 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
206 return mem_ref;
207 }
208
209 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
210
211 /* Function vect_mark_relevant.
212
213 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
214
215 static void
216 vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
217 enum vect_relevant relevant, bool live_p,
218 bool used_in_pattern)
219 {
220 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
221 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
222 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
223 gimple pattern_stmt;
224
225 if (dump_enabled_p ())
226 dump_printf_loc (MSG_NOTE, vect_location,
227 "mark relevant %d, live %d.\n", relevant, live_p);
228
229 /* If this stmt is an original stmt in a pattern, we might need to mark its
230 related pattern stmt instead of the original stmt. However, such stmts
231 may have their own uses that are not in any pattern; in such cases the
232 stmt itself should be marked. */
233 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
234 {
235 bool found = false;
236 if (!used_in_pattern)
237 {
238 imm_use_iterator imm_iter;
239 use_operand_p use_p;
240 gimple use_stmt;
241 tree lhs;
242 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
243 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
244
245 if (is_gimple_assign (stmt))
246 lhs = gimple_assign_lhs (stmt);
247 else
248 lhs = gimple_call_lhs (stmt);
249
250 /* This use is outside the pattern. If LHS has other uses that are
251 pattern uses, we should mark the stmt itself, and not the pattern
252 stmt. */
253 if (lhs && TREE_CODE (lhs) == SSA_NAME)
254 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
255 {
256 if (is_gimple_debug (USE_STMT (use_p)))
257 continue;
258 use_stmt = USE_STMT (use_p);
259
260 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
261 continue;
262
263 if (vinfo_for_stmt (use_stmt)
264 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
265 {
266 found = true;
267 break;
268 }
269 }
270 }
271
272 if (!found)
273 {
274 /* This is the last stmt in a sequence that was detected as a
275 pattern that can potentially be vectorized. Don't mark the stmt
276 as relevant/live because it's not going to be vectorized.
277 Instead mark the pattern-stmt that replaces it. */
278
279 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
280
281 if (dump_enabled_p ())
282 dump_printf_loc (MSG_NOTE, vect_location,
283 "last stmt in pattern. don't mark"
284 " relevant/live.\n");
285 stmt_info = vinfo_for_stmt (pattern_stmt);
286 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
287 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
288 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
289 stmt = pattern_stmt;
290 }
291 }
292
293 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
294 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
295 STMT_VINFO_RELEVANT (stmt_info) = relevant;
296
297 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
298 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
299 {
300 if (dump_enabled_p ())
301 dump_printf_loc (MSG_NOTE, vect_location,
302 "already marked relevant/live.\n");
303 return;
304 }
305
306 worklist->safe_push (stmt);
307 }
308
309
310 /* Function vect_stmt_relevant_p.
311
312 Return true if STMT in loop that is represented by LOOP_VINFO is
313 "relevant for vectorization".
314
315 A stmt is considered "relevant for vectorization" if:
316 - it has uses outside the loop.
317 - it has vdefs (it alters memory).
318 - it is a control stmt in the loop (except for the exit condition).
319
320 CHECKME: what other side effects would the vectorizer allow? */
321
322 static bool
323 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
324 enum vect_relevant *relevant, bool *live_p)
325 {
326 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
327 ssa_op_iter op_iter;
328 imm_use_iterator imm_iter;
329 use_operand_p use_p;
330 def_operand_p def_p;
331
332 *relevant = vect_unused_in_scope;
333 *live_p = false;
334
335 /* cond stmt other than loop exit cond. */
336 if (is_ctrl_stmt (stmt)
337 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
338 != loop_exit_ctrl_vec_info_type)
339 *relevant = vect_used_in_scope;
340
341 /* changing memory. */
342 if (gimple_code (stmt) != GIMPLE_PHI)
343 if (gimple_vdef (stmt))
344 {
345 if (dump_enabled_p ())
346 dump_printf_loc (MSG_NOTE, vect_location,
347 "vec_stmt_relevant_p: stmt has vdefs.\n");
348 *relevant = vect_used_in_scope;
349 }
350
351 /* uses outside the loop. */
352 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
353 {
354 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
355 {
356 basic_block bb = gimple_bb (USE_STMT (use_p));
357 if (!flow_bb_inside_loop_p (loop, bb))
358 {
359 if (dump_enabled_p ())
360 dump_printf_loc (MSG_NOTE, vect_location,
361 "vec_stmt_relevant_p: used out of loop.\n");
362
363 if (is_gimple_debug (USE_STMT (use_p)))
364 continue;
365
366 /* We expect all such uses to be in the loop exit phis
367 (because of loop closed form) */
368 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
369 gcc_assert (bb == single_exit (loop)->dest);
370
371 *live_p = true;
372 }
373 }
374 }
375
376 return (*live_p || *relevant);
377 }
378
379
380 /* Function exist_non_indexing_operands_for_use_p
381
382 USE is one of the uses attached to STMT. Check if USE is
383 used in STMT for anything other than indexing an array. */
384
385 static bool
386 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
387 {
388 tree operand;
389 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
390
391 /* USE corresponds to some operand in STMT. If there is no data
392 reference in STMT, then any operand that corresponds to USE
393 is not indexing an array. */
394 if (!STMT_VINFO_DATA_REF (stmt_info))
395 return true;
396
397 /* STMT has a data_ref. FORNOW this means that it's of one of
398 the following forms:
399 -1- ARRAY_REF = var
400 -2- var = ARRAY_REF
401 (This should have been verified in analyze_data_refs).
402
403 'var' in the second case corresponds to a def, not a use,
404 so USE cannot correspond to any operands that are not used
405 for array indexing.
406
407 Therefore, all we need to check is if STMT falls into the
408 first case, and whether var corresponds to USE. */
409
410 if (!gimple_assign_copy_p (stmt))
411 {
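/* For IFN_MASK_LOAD/IFN_MASK_STORE calls the mask (and, for stores,
   the value being stored) are genuine uses rather than array-indexing
   operands, so check those call arguments explicitly.  */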
412 if (is_gimple_call (stmt)
413 && gimple_call_internal_p (stmt))
414 switch (gimple_call_internal_fn (stmt))
415 {
416 case IFN_MASK_STORE:
417 operand = gimple_call_arg (stmt, 3);
418 if (operand == use)
419 return true;
420 /* FALLTHRU */
421 case IFN_MASK_LOAD:
422 operand = gimple_call_arg (stmt, 2);
423 if (operand == use)
424 return true;
425 break;
426 default:
427 break;
428 }
429 return false;
430 }
431
432 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
433 return false;
434 operand = gimple_assign_rhs1 (stmt);
435 if (TREE_CODE (operand) != SSA_NAME)
436 return false;
437
438 if (operand == use)
439 return true;
440
441 return false;
442 }
443
444
445 /*
446 Function process_use.
447
448 Inputs:
449 - a USE in STMT in a loop represented by LOOP_VINFO
450 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
451 that defined USE. This is done by calling mark_relevant and passing it
452 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
453 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
454 be performed.
455
456 Outputs:
457 Generally, LIVE_P and RELEVANT are used to define the liveness and
458 relevance info of the DEF_STMT of this USE:
459 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
460 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
461 Exceptions:
462 - case 1: If USE is used only for address computations (e.g. array indexing),
463 which does not need to be directly vectorized, then the liveness/relevance
464 of the respective DEF_STMT is left unchanged.
465 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
466 skip DEF_STMT because it has already been processed.
467 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
468 be modified accordingly.
469
470 Return true if everything is as expected. Return false otherwise. */
471
472 static bool
473 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
474 enum vect_relevant relevant, vec<gimple> *worklist,
475 bool force)
476 {
477 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
478 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
479 stmt_vec_info dstmt_vinfo;
480 basic_block bb, def_bb;
481 tree def;
482 gimple def_stmt;
483 enum vect_def_type dt;
484
485 /* case 1: we are only interested in uses that need to be vectorized. Uses
486 that are used for address computation are not considered relevant. */
487 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
488 return true;
489
490 if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
491 {
492 if (dump_enabled_p ())
493 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
494 "not vectorized: unsupported use in stmt.\n");
495 return false;
496 }
497
498 if (!def_stmt || gimple_nop_p (def_stmt))
499 return true;
500
501 def_bb = gimple_bb (def_stmt);
502 if (!flow_bb_inside_loop_p (loop, def_bb))
503 {
504 if (dump_enabled_p ())
505 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
506 return true;
507 }
508
509 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
510 DEF_STMT must have already been processed, because this should be the
511 only way that STMT, which is a reduction-phi, was put in the worklist,
512 as there should be no other uses for DEF_STMT in the loop. So we just
513 check that everything is as expected, and we are done. */
514 dstmt_vinfo = vinfo_for_stmt (def_stmt);
515 bb = gimple_bb (stmt);
516 if (gimple_code (stmt) == GIMPLE_PHI
517 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
518 && gimple_code (def_stmt) != GIMPLE_PHI
519 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
520 && bb->loop_father == def_bb->loop_father)
521 {
522 if (dump_enabled_p ())
523 dump_printf_loc (MSG_NOTE, vect_location,
524 "reduc-stmt defining reduc-phi in the same nest.\n");
525 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
526 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
527 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
528 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
529 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
530 return true;
531 }
532
533 /* case 3a: outer-loop stmt defining an inner-loop stmt:
534 outer-loop-header-bb:
535 d = def_stmt
536 inner-loop:
537 stmt # use (d)
538 outer-loop-tail-bb:
539 ... */
540 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
541 {
542 if (dump_enabled_p ())
543 dump_printf_loc (MSG_NOTE, vect_location,
544 "outer-loop def-stmt defining inner-loop stmt.\n");
545
546 switch (relevant)
547 {
548 case vect_unused_in_scope:
549 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
550 vect_used_in_scope : vect_unused_in_scope;
551 break;
552
553 case vect_used_in_outer_by_reduction:
554 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
555 relevant = vect_used_by_reduction;
556 break;
557
558 case vect_used_in_outer:
559 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
560 relevant = vect_used_in_scope;
561 break;
562
563 case vect_used_in_scope:
564 break;
565
566 default:
567 gcc_unreachable ();
568 }
569 }
570
571 /* case 3b: inner-loop stmt defining an outer-loop stmt:
572 outer-loop-header-bb:
573 ...
574 inner-loop:
575 d = def_stmt
576 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
577 stmt # use (d) */
578 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
579 {
580 if (dump_enabled_p ())
581 dump_printf_loc (MSG_NOTE, vect_location,
582 "inner-loop def-stmt defining outer-loop stmt.\n");
583
584 switch (relevant)
585 {
586 case vect_unused_in_scope:
587 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
588 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
589 vect_used_in_outer_by_reduction : vect_unused_in_scope;
590 break;
591
592 case vect_used_by_reduction:
593 relevant = vect_used_in_outer_by_reduction;
594 break;
595
596 case vect_used_in_scope:
597 relevant = vect_used_in_outer;
598 break;
599
600 default:
601 gcc_unreachable ();
602 }
603 }
604
605 vect_mark_relevant (worklist, def_stmt, relevant, live_p,
606 is_pattern_stmt_p (stmt_vinfo));
607 return true;
608 }
609
610
611 /* Function vect_mark_stmts_to_be_vectorized.
612
613 Not all stmts in the loop need to be vectorized. For example:
614
615 for i...
616 for j...
617 1. T0 = i + j
618 2. T1 = a[T0]
619
620 3. j = j + 1
621
622 Stmts 1 and 3 do not need to be vectorized, because loop control and
623 addressing of vectorized data-refs are handled differently.
624
625 This pass detects such stmts. */
626
627 bool
628 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
629 {
630 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
631 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
632 unsigned int nbbs = loop->num_nodes;
633 gimple_stmt_iterator si;
634 gimple stmt;
635 unsigned int i;
636 stmt_vec_info stmt_vinfo;
637 basic_block bb;
638 gimple phi;
639 bool live_p;
640 enum vect_relevant relevant, tmp_relevant;
641 enum vect_def_type def_type;
642
643 if (dump_enabled_p ())
644 dump_printf_loc (MSG_NOTE, vect_location,
645 "=== vect_mark_stmts_to_be_vectorized ===\n");
646
647 auto_vec<gimple, 64> worklist;
648
649 /* 1. Init worklist. */
650 for (i = 0; i < nbbs; i++)
651 {
652 bb = bbs[i];
653 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
654 {
655 phi = gsi_stmt (si);
656 if (dump_enabled_p ())
657 {
658 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
659 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
660 dump_printf (MSG_NOTE, "\n");
661 }
662
663 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
664 vect_mark_relevant (&worklist, phi, relevant, live_p, false);
665 }
666 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
667 {
668 stmt = gsi_stmt (si);
669 if (dump_enabled_p ())
670 {
671 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
672 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
673 dump_printf (MSG_NOTE, "\n");
674 }
675
676 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
677 vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
678 }
679 }
680
681 /* 2. Process_worklist */
682 while (worklist.length () > 0)
683 {
684 use_operand_p use_p;
685 ssa_op_iter iter;
686
687 stmt = worklist.pop ();
688 if (dump_enabled_p ())
689 {
690 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
691 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
692 dump_printf (MSG_NOTE, "\n");
693 }
694
695 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
696 (DEF_STMT) as relevant/irrelevant and live/dead according to the
697 liveness and relevance properties of STMT. */
698 stmt_vinfo = vinfo_for_stmt (stmt);
699 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
700 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
701
702 /* Generally, the liveness and relevance properties of STMT are
703 propagated as is to the DEF_STMTs of its USEs:
704 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
705 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
706
707 One exception is when STMT has been identified as defining a reduction
708 variable; in this case we set the liveness/relevance as follows:
709 live_p = false
710 relevant = vect_used_by_reduction
711 This is because we distinguish between two kinds of relevant stmts -
712 those that are used by a reduction computation, and those that are
713 (also) used by a regular computation. This allows us later on to
714 identify stmts that are used solely by a reduction, and therefore the
715 order of the results that they produce does not have to be kept. */
716
717 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
718 tmp_relevant = relevant;
719 switch (def_type)
720 {
721 case vect_reduction_def:
722 switch (tmp_relevant)
723 {
724 case vect_unused_in_scope:
725 relevant = vect_used_by_reduction;
726 break;
727
728 case vect_used_by_reduction:
729 if (gimple_code (stmt) == GIMPLE_PHI)
730 break;
731 /* fall through */
732
733 default:
734 if (dump_enabled_p ())
735 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
736 "unsupported use of reduction.\n");
737 return false;
738 }
739
740 live_p = false;
741 break;
742
743 case vect_nested_cycle:
744 if (tmp_relevant != vect_unused_in_scope
745 && tmp_relevant != vect_used_in_outer_by_reduction
746 && tmp_relevant != vect_used_in_outer)
747 {
748 if (dump_enabled_p ())
749 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
750 "unsupported use of nested cycle.\n");
751
752 return false;
753 }
754
755 live_p = false;
756 break;
757
758 case vect_double_reduction_def:
759 if (tmp_relevant != vect_unused_in_scope
760 && tmp_relevant != vect_used_by_reduction)
761 {
762 if (dump_enabled_p ())
763 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
764 "unsupported use of double reduction.\n");
765
766 return false;
767 }
768
769 live_p = false;
770 break;
771
772 default:
773 break;
774 }
775
776 if (is_pattern_stmt_p (stmt_vinfo))
777 {
778 /* Pattern statements are not inserted into the code, so
779 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
780 have to scan the RHS or function arguments instead. */
781 if (is_gimple_assign (stmt))
782 {
783 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
784 tree op = gimple_assign_rhs1 (stmt);
785
786 i = 1;
787 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
788 {
789 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
790 live_p, relevant, &worklist, false)
791 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
792 live_p, relevant, &worklist, false))
793 return false;
794 i = 2;
795 }
796 for (; i < gimple_num_ops (stmt); i++)
797 {
798 op = gimple_op (stmt, i);
799 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
800 &worklist, false))
801 return false;
802 }
803 }
804 else if (is_gimple_call (stmt))
805 {
806 for (i = 0; i < gimple_call_num_args (stmt); i++)
807 {
808 tree arg = gimple_call_arg (stmt, i);
809 if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
810 &worklist, false))
811 return false;
812 }
813 }
814 }
815 else
816 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
817 {
818 tree op = USE_FROM_PTR (use_p);
819 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
820 &worklist, false))
821 return false;
822 }
823
824 if (STMT_VINFO_GATHER_P (stmt_vinfo))
825 {
826 tree off;
827 tree decl = vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
828 gcc_assert (decl);
829 if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
830 &worklist, true))
831 return false;
832 }
833 } /* while worklist */
834
835 return true;
836 }
837
838
839 /* Function vect_model_simple_cost.
840
841 Models cost for simple operations, i.e. those that only emit ncopies of a
842 single op. Right now, this does not account for multiple insns that could
843 be generated for the single vector op. We will handle that shortly. */
844
845 void
846 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
847 enum vect_def_type *dt,
848 stmt_vector_for_cost *prologue_cost_vec,
849 stmt_vector_for_cost *body_cost_vec)
850 {
851 int i;
852 int inside_cost = 0, prologue_cost = 0;
853
854 /* The SLP costs were already calculated during SLP tree build. */
855 if (PURE_SLP_STMT (stmt_info))
856 return;
857
858 /* FORNOW: Assuming maximum 2 args per stmt. */
859 for (i = 0; i < 2; i++)
860 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
861 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
862 stmt_info, 0, vect_prologue);
863
864 /* Pass the inside-of-loop statements to the target-specific cost model. */
865 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
866 stmt_info, 0, vect_body);
867
868 if (dump_enabled_p ())
869 dump_printf_loc (MSG_NOTE, vect_location,
870 "vect_model_simple_cost: inside_cost = %d, "
871 "prologue_cost = %d .\n", inside_cost, prologue_cost);
872 }
873
874
875 /* Model cost for type demotion and promotion operations. PWR is normally
876 zero for single-step promotions and demotions. It will be one if
877 two-step promotion/demotion is required, and so on. Each additional
878 step doubles the number of instructions required. */
879
880 static void
881 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
882 enum vect_def_type *dt, int pwr)
883 {
884 int i, tmp;
885 int inside_cost = 0, prologue_cost = 0;
886 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
887 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
888 void *target_cost_data;
889
890 /* The SLP costs were already calculated during SLP tree build. */
891 if (PURE_SLP_STMT (stmt_info))
892 return;
893
894 if (loop_vinfo)
895 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
896 else
897 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
898
899 for (i = 0; i < pwr + 1; i++)
900 {
901 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
902 (i + 1) : i;
903 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
904 vec_promote_demote, stmt_info, 0,
905 vect_body);
906 }
907
908 /* FORNOW: Assuming maximum 2 args per stmt. */
909 for (i = 0; i < 2; i++)
910 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
911 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
912 stmt_info, 0, vect_prologue);
913
914 if (dump_enabled_p ())
915 dump_printf_loc (MSG_NOTE, vect_location,
916 "vect_model_promotion_demotion_cost: inside_cost = %d, "
917 "prologue_cost = %d .\n", inside_cost, prologue_cost);
918 }
919
920 /* Function vect_cost_group_size
921
922 For grouped load or store, return the group_size only if it is the first
923 load or store of a group, else return 1. This ensures that group size is
924 only returned once per group. */
925
926 static int
927 vect_cost_group_size (stmt_vec_info stmt_info)
928 {
929 gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
930
931 if (first_stmt == STMT_VINFO_STMT (stmt_info))
932 return GROUP_SIZE (stmt_info);
933
934 return 1;
935 }
936
937
938 /* Function vect_model_store_cost
939
940 Models cost for stores. In the case of grouped accesses, one access
941 has the overhead of the grouped access attributed to it. */
942
943 void
944 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
945 bool store_lanes_p, enum vect_def_type dt,
946 slp_tree slp_node,
947 stmt_vector_for_cost *prologue_cost_vec,
948 stmt_vector_for_cost *body_cost_vec)
949 {
950 int group_size;
951 unsigned int inside_cost = 0, prologue_cost = 0;
952 struct data_reference *first_dr;
953 gimple first_stmt;
954
955 /* The SLP costs were already calculated during SLP tree build. */
956 if (PURE_SLP_STMT (stmt_info))
957 return;
958
959 if (dt == vect_constant_def || dt == vect_external_def)
960 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
961 stmt_info, 0, vect_prologue);
962
963 /* Grouped access? */
964 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
965 {
966 if (slp_node)
967 {
968 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
969 group_size = 1;
970 }
971 else
972 {
973 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
974 group_size = vect_cost_group_size (stmt_info);
975 }
976
977 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
978 }
979 /* Not a grouped access. */
980 else
981 {
982 group_size = 1;
983 first_dr = STMT_VINFO_DATA_REF (stmt_info);
984 }
985
986 /* We assume that the cost of a single store-lanes instruction is
987 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
988 access is instead being provided by a permute-and-store operation,
989 include the cost of the permutes. */
990 if (!store_lanes_p && group_size > 1)
991 {
992 /* Uses high and low interleave or shuffle operations for each
993 needed permute. */
994 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
995 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
996 stmt_info, 0, vect_body);
997
998 if (dump_enabled_p ())
999 dump_printf_loc (MSG_NOTE, vect_location,
1000 "vect_model_store_cost: strided group_size = %d .\n",
1001 group_size);
1002 }
1003
1004 /* Costs of the stores. */
1005 vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
1006
1007 if (dump_enabled_p ())
1008 dump_printf_loc (MSG_NOTE, vect_location,
1009 "vect_model_store_cost: inside_cost = %d, "
1010 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1011 }
1012
1013
1014 /* Calculate cost of DR's memory access. */
1015 void
1016 vect_get_store_cost (struct data_reference *dr, int ncopies,
1017 unsigned int *inside_cost,
1018 stmt_vector_for_cost *body_cost_vec)
1019 {
1020 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1021 gimple stmt = DR_STMT (dr);
1022 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1023
1024 switch (alignment_support_scheme)
1025 {
1026 case dr_aligned:
1027 {
1028 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1029 vector_store, stmt_info, 0,
1030 vect_body);
1031
1032 if (dump_enabled_p ())
1033 dump_printf_loc (MSG_NOTE, vect_location,
1034 "vect_model_store_cost: aligned.\n");
1035 break;
1036 }
1037
1038 case dr_unaligned_supported:
1039 {
1040 /* Here, we assign an additional cost for the unaligned store. */
1041 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1042 unaligned_store, stmt_info,
1043 DR_MISALIGNMENT (dr), vect_body);
1044 if (dump_enabled_p ())
1045 dump_printf_loc (MSG_NOTE, vect_location,
1046 "vect_model_store_cost: unaligned supported by "
1047 "hardware.\n");
1048 break;
1049 }
1050
1051 case dr_unaligned_unsupported:
1052 {
1053 *inside_cost = VECT_MAX_COST;
1054
1055 if (dump_enabled_p ())
1056 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1057 "vect_model_store_cost: unsupported access.\n");
1058 break;
1059 }
1060
1061 default:
1062 gcc_unreachable ();
1063 }
1064 }
1065
1066
1067 /* Function vect_model_load_cost
1068
1069 Models cost for loads. In the case of grouped accesses, the last access
1070 has the overhead of the grouped access attributed to it. Since unaligned
1071 accesses are supported for loads, we also account for the costs of the
1072 access scheme chosen. */
1073
1074 void
1075 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1076 bool load_lanes_p, slp_tree slp_node,
1077 stmt_vector_for_cost *prologue_cost_vec,
1078 stmt_vector_for_cost *body_cost_vec)
1079 {
1080 int group_size;
1081 gimple first_stmt;
1082 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
1083 unsigned int inside_cost = 0, prologue_cost = 0;
1084
1085 /* The SLP costs were already calculated during SLP tree build. */
1086 if (PURE_SLP_STMT (stmt_info))
1087 return;
1088
1089 /* Grouped accesses? */
1090 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1091 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
1092 {
1093 group_size = vect_cost_group_size (stmt_info);
1094 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1095 }
1096 /* Not a grouped access. */
1097 else
1098 {
1099 group_size = 1;
1100 first_dr = dr;
1101 }
1102
1103 /* We assume that the cost of a single load-lanes instruction is
1104 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1105 access is instead being provided by a load-and-permute operation,
1106 include the cost of the permutes. */
1107 if (!load_lanes_p && group_size > 1)
1108 {
1109 /* Uses even and odd extract operations or shuffle operations
1110 for each needed permute. */
1111 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1112 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1113 stmt_info, 0, vect_body);
1114
1115 if (dump_enabled_p ())
1116 dump_printf_loc (MSG_NOTE, vect_location,
1117 "vect_model_load_cost: strided group_size = %d .\n",
1118 group_size);
1119 }
1120
1121 /* The loads themselves. */
1122 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1123 {
1124 /* N scalar loads plus gathering them into a vector. */
1125 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1126 inside_cost += record_stmt_cost (body_cost_vec,
1127 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1128 scalar_load, stmt_info, 0, vect_body);
1129 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1130 stmt_info, 0, vect_body);
1131 }
1132 else
1133 vect_get_load_cost (first_dr, ncopies,
1134 ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1135 || group_size > 1 || slp_node),
1136 &inside_cost, &prologue_cost,
1137 prologue_cost_vec, body_cost_vec, true);
1138
1139 if (dump_enabled_p ())
1140 dump_printf_loc (MSG_NOTE, vect_location,
1141 "vect_model_load_cost: inside_cost = %d, "
1142 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1143 }
1144
1145
1146 /* Calculate cost of DR's memory access. */
1147 void
1148 vect_get_load_cost (struct data_reference *dr, int ncopies,
1149 bool add_realign_cost, unsigned int *inside_cost,
1150 unsigned int *prologue_cost,
1151 stmt_vector_for_cost *prologue_cost_vec,
1152 stmt_vector_for_cost *body_cost_vec,
1153 bool record_prologue_costs)
1154 {
1155 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1156 gimple stmt = DR_STMT (dr);
1157 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1158
1159 switch (alignment_support_scheme)
1160 {
1161 case dr_aligned:
1162 {
1163 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1164 stmt_info, 0, vect_body);
1165
1166 if (dump_enabled_p ())
1167 dump_printf_loc (MSG_NOTE, vect_location,
1168 "vect_model_load_cost: aligned.\n");
1169
1170 break;
1171 }
1172 case dr_unaligned_supported:
1173 {
1174 /* Here, we assign an additional cost for the unaligned load. */
1175 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1176 unaligned_load, stmt_info,
1177 DR_MISALIGNMENT (dr), vect_body);
1178
1179 if (dump_enabled_p ())
1180 dump_printf_loc (MSG_NOTE, vect_location,
1181 "vect_model_load_cost: unaligned supported by "
1182 "hardware.\n");
1183
1184 break;
1185 }
1186 case dr_explicit_realign:
1187 {
1188 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1189 vector_load, stmt_info, 0, vect_body);
1190 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1191 vec_perm, stmt_info, 0, vect_body);
1192
1193 /* FIXME: If the misalignment remains fixed across the iterations of
1194 the containing loop, the following cost should be added to the
1195 prologue costs. */
1196 if (targetm.vectorize.builtin_mask_for_load)
1197 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1198 stmt_info, 0, vect_body);
1199
1200 if (dump_enabled_p ())
1201 dump_printf_loc (MSG_NOTE, vect_location,
1202 "vect_model_load_cost: explicit realign\n");
1203
1204 break;
1205 }
1206 case dr_explicit_realign_optimized:
1207 {
1208 if (dump_enabled_p ())
1209 dump_printf_loc (MSG_NOTE, vect_location,
1210 "vect_model_load_cost: unaligned software "
1211 "pipelined.\n");
1212
1213 /* Unaligned software pipeline has a load of an address, an initial
1214 load, and possibly a mask operation to "prime" the loop. However,
1215 if this is an access in a group of loads, which provide grouped
1216 access, then the above cost should only be considered for one
1217 access in the group. Inside the loop, there is a load op
1218 and a realignment op. */
1219
1220 if (add_realign_cost && record_prologue_costs)
1221 {
1222 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1223 vector_stmt, stmt_info,
1224 0, vect_prologue);
1225 if (targetm.vectorize.builtin_mask_for_load)
1226 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1227 vector_stmt, stmt_info,
1228 0, vect_prologue);
1229 }
1230
1231 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1232 stmt_info, 0, vect_body);
1233 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1234 stmt_info, 0, vect_body);
1235
1236 if (dump_enabled_p ())
1237 dump_printf_loc (MSG_NOTE, vect_location,
1238 "vect_model_load_cost: explicit realign optimized"
1239 "\n");
1240
1241 break;
1242 }
1243
1244 case dr_unaligned_unsupported:
1245 {
1246 *inside_cost = VECT_MAX_COST;
1247
1248 if (dump_enabled_p ())
1249 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1250 "vect_model_load_cost: unsupported access.\n");
1251 break;
1252 }
1253
1254 default:
1255 gcc_unreachable ();
1256 }
1257 }
1258
1259 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1260 the loop preheader for the vectorized stmt STMT. */
1261
1262 static void
1263 vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
1264 {
1265 if (gsi)
1266 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1267 else
1268 {
1269 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1270 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1271
1272 if (loop_vinfo)
1273 {
1274 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1275 basic_block new_bb;
1276 edge pe;
1277
1278 if (nested_in_vect_loop_p (loop, stmt))
1279 loop = loop->inner;
1280
1281 pe = loop_preheader_edge (loop);
1282 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1283 gcc_assert (!new_bb);
1284 }
1285 else
1286 {
1287 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1288 basic_block bb;
1289 gimple_stmt_iterator gsi_bb_start;
1290
1291 gcc_assert (bb_vinfo);
1292 bb = BB_VINFO_BB (bb_vinfo);
1293 gsi_bb_start = gsi_after_labels (bb);
1294 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1295 }
1296 }
1297
1298 if (dump_enabled_p ())
1299 {
1300 dump_printf_loc (MSG_NOTE, vect_location,
1301 "created new init_stmt: ");
1302 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1303 dump_printf (MSG_NOTE, "\n");
1304 }
1305 }
1306
1307 /* Function vect_init_vector.
1308
1309 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1310 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1311 a vector type, a vector with all elements equal to VAL is created first.
1312 Place the initialization at BSI if it is not NULL. Otherwise, place the
1313 initialization at the loop preheader.
1314 Return the DEF of INIT_STMT.
1315 It will be used in the vectorization of STMT. */
1316
1317 tree
1318 vect_init_vector (gimple stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1319 {
1320 tree new_var;
1321 gimple init_stmt;
1322 tree vec_oprnd;
1323 tree new_temp;
1324
1325 if (TREE_CODE (type) == VECTOR_TYPE
1326 && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
1327 {
1328 if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1329 {
1330 if (CONSTANT_CLASS_P (val))
1331 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
1332 else
1333 {
1334 new_temp = make_ssa_name (TREE_TYPE (type), NULL);
1335 init_stmt = gimple_build_assign_with_ops (NOP_EXPR,
1336 new_temp, val,
1337 NULL_TREE);
1338 vect_init_vector_1 (stmt, init_stmt, gsi);
1339 val = new_temp;
1340 }
1341 }
1342 val = build_vector_from_val (type, val);
1343 }
1344
1345 new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
1346 init_stmt = gimple_build_assign (new_var, val);
1347 new_temp = make_ssa_name (new_var, init_stmt);
1348 gimple_assign_set_lhs (init_stmt, new_temp);
1349 vect_init_vector_1 (stmt, init_stmt, gsi);
1350 vec_oprnd = gimple_assign_lhs (init_stmt);
1351 return vec_oprnd;
1352 }
1353
1354
1355 /* Function vect_get_vec_def_for_operand.
1356
1357 OP is an operand in STMT. This function returns a (vector) def that will be
1358 used in the vectorized stmt for STMT.
1359
1360 In the case that OP is an SSA_NAME which is defined in the loop, then
1361 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1362
1363 In case OP is an invariant or constant, a new stmt that creates a vector def
1364 needs to be introduced. */
1365
1366 tree
1367 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
1368 {
1369 tree vec_oprnd;
1370 gimple vec_stmt;
1371 gimple def_stmt;
1372 stmt_vec_info def_stmt_info = NULL;
1373 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1374 unsigned int nunits;
1375 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1376 tree def;
1377 enum vect_def_type dt;
1378 bool is_simple_use;
1379 tree vector_type;
1380
1381 if (dump_enabled_p ())
1382 {
1383 dump_printf_loc (MSG_NOTE, vect_location,
1384 "vect_get_vec_def_for_operand: ");
1385 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1386 dump_printf (MSG_NOTE, "\n");
1387 }
1388
1389 is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
1390 &def_stmt, &def, &dt);
1391 gcc_assert (is_simple_use);
1392 if (dump_enabled_p ())
1393 {
1394 int loc_printed = 0;
1395 if (def)
1396 {
1397 dump_printf_loc (MSG_NOTE, vect_location, "def = ");
1398 loc_printed = 1;
1399 dump_generic_expr (MSG_NOTE, TDF_SLIM, def);
1400 dump_printf (MSG_NOTE, "\n");
1401 }
1402 if (def_stmt)
1403 {
1404 if (loc_printed)
1405 dump_printf (MSG_NOTE, " def_stmt = ");
1406 else
1407 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1408 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1409 dump_printf (MSG_NOTE, "\n");
1410 }
1411 }
1412
1413 switch (dt)
1414 {
1415 /* Case 1: operand is a constant. */
1416 case vect_constant_def:
1417 {
1418 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1419 gcc_assert (vector_type);
1420 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1421
1422 if (scalar_def)
1423 *scalar_def = op;
1424
1425 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1426 if (dump_enabled_p ())
1427 dump_printf_loc (MSG_NOTE, vect_location,
1428 "Create vector_cst. nunits = %d\n", nunits);
1429
1430 return vect_init_vector (stmt, op, vector_type, NULL);
1431 }
1432
1433 /* Case 2: operand is defined outside the loop - loop invariant. */
1434 case vect_external_def:
1435 {
1436 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1437 gcc_assert (vector_type);
1438
1439 if (scalar_def)
1440 *scalar_def = def;
1441
1442 /* Create 'vec_inv = {inv,inv,..,inv}' */
1443 if (dump_enabled_p ())
1444 dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.\n");
1445
1446 return vect_init_vector (stmt, def, vector_type, NULL);
1447 }
1448
1449 /* Case 3: operand is defined inside the loop. */
1450 case vect_internal_def:
1451 {
1452 if (scalar_def)
1453 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1454
1455 /* Get the def from the vectorized stmt. */
1456 def_stmt_info = vinfo_for_stmt (def_stmt);
1457
1458 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1459 /* Get vectorized pattern statement. */
1460 if (!vec_stmt
1461 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1462 && !STMT_VINFO_RELEVANT (def_stmt_info))
1463 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1464 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1465 gcc_assert (vec_stmt);
1466 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1467 vec_oprnd = PHI_RESULT (vec_stmt);
1468 else if (is_gimple_call (vec_stmt))
1469 vec_oprnd = gimple_call_lhs (vec_stmt);
1470 else
1471 vec_oprnd = gimple_assign_lhs (vec_stmt);
1472 return vec_oprnd;
1473 }
1474
1475 /* Case 4: operand is defined by a loop header phi - reduction */
1476 case vect_reduction_def:
1477 case vect_double_reduction_def:
1478 case vect_nested_cycle:
1479 {
1480 struct loop *loop;
1481
1482 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1483 loop = (gimple_bb (def_stmt))->loop_father;
1484
1485 /* Get the def before the loop */
1486 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1487 return get_initial_def_for_reduction (stmt, op, scalar_def);
1488 }
1489
1490 /* Case 5: operand is defined by loop-header phi - induction. */
1491 case vect_induction_def:
1492 {
1493 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1494
1495 /* Get the def from the vectorized stmt. */
1496 def_stmt_info = vinfo_for_stmt (def_stmt);
1497 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1498 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1499 vec_oprnd = PHI_RESULT (vec_stmt);
1500 else
1501 vec_oprnd = gimple_get_lhs (vec_stmt);
1502 return vec_oprnd;
1503 }
1504
1505 default:
1506 gcc_unreachable ();
1507 }
1508 }
1509
1510
1511 /* Function vect_get_vec_def_for_stmt_copy
1512
1513 Return a vector-def for an operand. This function is used when the
1514 vectorized stmt to be created (by the caller to this function) is a "copy"
1515 created in case the vectorized result cannot fit in one vector, and several
1516 copies of the vector-stmt are required. In this case the vector-def is
1517 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1518 of the stmt that defines VEC_OPRND.
1519 DT is the type of the vector def VEC_OPRND.
1520
1521 Context:
1522 In case the vectorization factor (VF) is bigger than the number
1523 of elements that can fit in a vectype (nunits), we have to generate
1524 more than one vector stmt to vectorize the scalar stmt. This situation
1525 arises when there are multiple data-types operated upon in the loop; the
1526 smallest data-type determines the VF, and as a result, when vectorizing
1527 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1528 vector stmt (each computing a vector of 'nunits' results, and together
1529 computing 'VF' results in each iteration). This function is called when
1530 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1531 which VF=16 and nunits=4, so the number of copies required is 4):
1532
1533 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1534
1535 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1536 VS1.1: vx.1 = memref1 VS1.2
1537 VS1.2: vx.2 = memref2 VS1.3
1538 VS1.3: vx.3 = memref3
1539
1540 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1541 VSnew.1: vz1 = vx.1 + ... VSnew.2
1542 VSnew.2: vz2 = vx.2 + ... VSnew.3
1543 VSnew.3: vz3 = vx.3 + ...
1544
1545 The vectorization of S1 is explained in vectorizable_load.
1546 The vectorization of S2:
1547 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1548 the function 'vect_get_vec_def_for_operand' is called to
1549 get the relevant vector-def for each operand of S2. For operand x it
1550 returns the vector-def 'vx.0'.
1551
1552 To create the remaining copies of the vector-stmt (VSnew.j), this
1553 function is called to get the relevant vector-def for each operand. It is
1554 obtained from the respective VS1.j stmt, which is recorded in the
1555 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1556
1557 For example, to obtain the vector-def 'vx.1' in order to create the
1558 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1559 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1560 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1561 and return its def ('vx.1').
1562 Overall, to create the above sequence this function will be called 3 times:
1563 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1564 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1565 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1566
1567 tree
1568 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1569 {
1570 gimple vec_stmt_for_operand;
1571 stmt_vec_info def_stmt_info;
1572
1573 /* Do nothing; can reuse same def. */
1574 if (dt == vect_external_def || dt == vect_constant_def )
1575 return vec_oprnd;
1576
1577 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1578 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1579 gcc_assert (def_stmt_info);
1580 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1581 gcc_assert (vec_stmt_for_operand);
1582 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1583 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1584 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1585 else
1586 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1587 return vec_oprnd;
1588 }
1589
1590
1591 /* Get vectorized definitions for the operands to create a copy of an original
1592 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1593
1594 static void
1595 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1596 vec<tree> *vec_oprnds0,
1597 vec<tree> *vec_oprnds1)
1598 {
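/* Replace the single def in each operand vector with the def to be
   used by the next copy of the vectorized stmt.  */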
1599 tree vec_oprnd = vec_oprnds0->pop ();
1600
1601 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1602 vec_oprnds0->quick_push (vec_oprnd);
1603
1604 if (vec_oprnds1 && vec_oprnds1->length ())
1605 {
1606 vec_oprnd = vec_oprnds1->pop ();
1607 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1608 vec_oprnds1->quick_push (vec_oprnd);
1609 }
1610 }
1611
1612
1613 /* Get vectorized definitions for OP0 and OP1.
1614 REDUC_INDEX is the index of reduction operand in case of reduction,
1615 and -1 otherwise. */
1616
1617 void
1618 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1619 vec<tree> *vec_oprnds0,
1620 vec<tree> *vec_oprnds1,
1621 slp_tree slp_node, int reduc_index)
1622 {
1623 if (slp_node)
1624 {
1625 int nops = (op1 == NULL_TREE) ? 1 : 2;
1626 auto_vec<tree> ops (nops);
1627 auto_vec<vec<tree> > vec_defs (nops);
1628
1629 ops.quick_push (op0);
1630 if (op1)
1631 ops.quick_push (op1);
1632
1633 vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);
1634
1635 *vec_oprnds0 = vec_defs[0];
1636 if (op1)
1637 *vec_oprnds1 = vec_defs[1];
1638 }
1639 else
1640 {
1641 tree vec_oprnd;
1642
1643 vec_oprnds0->create (1);
1644 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1645 vec_oprnds0->quick_push (vec_oprnd);
1646
1647 if (op1)
1648 {
1649 vec_oprnds1->create (1);
1650 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1651 vec_oprnds1->quick_push (vec_oprnd);
1652 }
1653 }
1654 }
1655
1656
1657 /* Function vect_finish_stmt_generation.
1658
1659 Insert a new stmt. */
1660
1661 void
1662 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1663 gimple_stmt_iterator *gsi)
1664 {
1665 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1666 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1667 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1668
1669 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1670
1671 if (!gsi_end_p (*gsi)
1672 && gimple_has_mem_ops (vec_stmt))
1673 {
1674 gimple at_stmt = gsi_stmt (*gsi);
1675 tree vuse = gimple_vuse (at_stmt);
1676 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1677 {
1678 tree vdef = gimple_vdef (at_stmt);
1679 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1680 /* If we have an SSA vuse and insert a store, update virtual
1681 SSA form to avoid triggering the renamer. Do so only
1682 if we can easily see all uses - which is what almost always
1683 happens with the way vectorized stmts are inserted. */
1684 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1685 && ((is_gimple_assign (vec_stmt)
1686 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1687 || (is_gimple_call (vec_stmt)
1688 && !(gimple_call_flags (vec_stmt)
1689 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1690 {
1691 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1692 gimple_set_vdef (vec_stmt, new_vdef);
1693 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1694 }
1695 }
1696 }
1697 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1698
1699 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1700 bb_vinfo));
1701
1702 if (dump_enabled_p ())
1703 {
1704 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1705 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1706 dump_printf (MSG_NOTE, "\n");
1707 }
1708
1709 gimple_set_location (vec_stmt, gimple_location (stmt));
1710
1711 /* While EH edges will generally prevent vectorization, stmt might
1712 e.g. be in a must-not-throw region. Ensure newly created stmts
1713 that could throw are part of the same region. */
1714 int lp_nr = lookup_stmt_eh_lp (stmt);
1715 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1716 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1717 }
1718
1719 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1720 a function declaration if the target has a vectorized version
1721 of the function, or NULL_TREE if the function cannot be vectorized. */
1722
1723 tree
1724 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1725 {
1726 tree fndecl = gimple_call_fndecl (call);
1727
1728 /* We only handle functions that do not read or clobber memory -- i.e.
1729 const or novops ones. */
1730 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1731 return NULL_TREE;
1732
1733 if (!fndecl
1734 || TREE_CODE (fndecl) != FUNCTION_DECL
1735 || !DECL_BUILT_IN (fndecl))
1736 return NULL_TREE;
1737
1738 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1739 vectype_in);
1740 }
1741
1742
1743 static tree permute_vec_elements (tree, tree, tree, gimple,
1744 gimple_stmt_iterator *);
1745
1746
1747 /* Function vectorizable_mask_load_store.
1748
1749 Check if STMT performs a conditional load or store that can be vectorized.
1750 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1751 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1752 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1753
1754 static bool
1755 vectorizable_mask_load_store (gimple stmt, gimple_stmt_iterator *gsi,
1756 gimple *vec_stmt, slp_tree slp_node)
1757 {
1758 tree vec_dest = NULL;
1759 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1760 stmt_vec_info prev_stmt_info;
1761 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1762 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1763 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
1764 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1765 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1766 tree elem_type;
1767 gimple new_stmt;
1768 tree dummy;
1769 tree dataref_ptr = NULL_TREE;
1770 gimple ptr_incr;
1771 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1772 int ncopies;
1773 int i, j;
1774 bool inv_p;
1775 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
1776 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
1777 int gather_scale = 1;
1778 enum vect_def_type gather_dt = vect_unknown_def_type;
1779 bool is_store;
1780 tree mask;
1781 gimple def_stmt;
1782 tree def;
1783 enum vect_def_type dt;
1784
1785 if (slp_node != NULL)
1786 return false;
1787
1788 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1789 gcc_assert (ncopies >= 1);
1790
1791 is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
1792 mask = gimple_call_arg (stmt, 2);
1793 if (TYPE_PRECISION (TREE_TYPE (mask))
1794 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))))
1795 return false;
1796
1797 /* FORNOW. This restriction should be relaxed. */
1798 if (nested_in_vect_loop && ncopies > 1)
1799 {
1800 if (dump_enabled_p ())
1801 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1802 					 "multiple types in nested loop.\n");
1803 return false;
1804 }
1805
1806 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1807 return false;
1808
1809 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1810 return false;
1811
1812 if (!STMT_VINFO_DATA_REF (stmt_info))
1813 return false;
1814
1815 elem_type = TREE_TYPE (vectype);
1816
1817 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1818 return false;
1819
1820 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1821 return false;
1822
1823 if (STMT_VINFO_GATHER_P (stmt_info))
1824 {
1825 gimple def_stmt;
1826 tree def;
1827 gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
1828 &gather_off, &gather_scale);
1829 gcc_assert (gather_decl);
1830 if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, NULL,
1831 &def_stmt, &def, &gather_dt,
1832 &gather_off_vectype))
1833 {
1834 if (dump_enabled_p ())
1835 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1836 				 "gather index use not simple.\n");
1837 return false;
1838 }
1839
1840 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1841 tree masktype
1842 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1843 if (TREE_CODE (masktype) == INTEGER_TYPE)
1844 {
1845 if (dump_enabled_p ())
1846 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1847 			 "masked gather with integer mask not supported.\n");
1848 return false;
1849 }
1850 }
1851 else if (tree_int_cst_compare (nested_in_vect_loop
1852 ? STMT_VINFO_DR_STEP (stmt_info)
1853 : DR_STEP (dr), size_zero_node) <= 0)
1854 return false;
1855 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1856 || !can_vec_mask_load_store_p (TYPE_MODE (vectype), !is_store))
1857 return false;
1858
1859 if (TREE_CODE (mask) != SSA_NAME)
1860 return false;
1861
1862 if (!vect_is_simple_use (mask, stmt, loop_vinfo, NULL,
1863 &def_stmt, &def, &dt))
1864 return false;
1865
1866 if (is_store)
1867 {
1868 tree rhs = gimple_call_arg (stmt, 3);
1869 if (!vect_is_simple_use (rhs, stmt, loop_vinfo, NULL,
1870 &def_stmt, &def, &dt))
1871 return false;
1872 }
1873
1874 if (!vec_stmt) /* transformation not required. */
1875 {
1876 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1877 if (is_store)
1878 vect_model_store_cost (stmt_info, ncopies, false, dt,
1879 NULL, NULL, NULL);
1880 else
1881 vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
1882 return true;
1883 }
1884
1885 /** Transform. **/
1886
1887 if (STMT_VINFO_GATHER_P (stmt_info))
1888 {
1889 tree vec_oprnd0 = NULL_TREE, op;
1890 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1891 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
1892 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
1893 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
1894 tree mask_perm_mask = NULL_TREE;
1895 edge pe = loop_preheader_edge (loop);
1896 gimple_seq seq;
1897 basic_block new_bb;
1898 enum { NARROW, NONE, WIDEN } modifier;
1899 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
1900
1901 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
1902 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1903 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1904 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1905 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1906 scaletype = TREE_VALUE (arglist);
1907 gcc_checking_assert (types_compatible_p (srctype, rettype)
1908 && types_compatible_p (srctype, masktype));
1909
1910 if (nunits == gather_off_nunits)
1911 modifier = NONE;
1912 else if (nunits == gather_off_nunits / 2)
1913 {
1914 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1915 modifier = WIDEN;
1916
1917 for (i = 0; i < gather_off_nunits; ++i)
1918 sel[i] = i | nunits;
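	  /* E.g. with NUNITS == 4 and GATHER_OFF_NUNITS == 8 this yields
	     SEL = { 4, 5, 6, 7, 4, 5, 6, 7 }, i.e. the permutation below
	     exposes the high half of the offset vector for the odd-numbered
	     copies.  */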
1919
1920 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
1921 }
1922 else if (nunits == gather_off_nunits * 2)
1923 {
1924 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1925 modifier = NARROW;
1926
1927 for (i = 0; i < nunits; ++i)
1928 sel[i] = i < gather_off_nunits
1929 ? i : i + nunits - gather_off_nunits;
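	  /* E.g. with NUNITS == 8 and GATHER_OFF_NUNITS == 4 this yields
	     SEL = { 0, 1, 2, 3, 8, 9, 10, 11 }, i.e. lanes 0-3 of the first
	     operand followed by lanes 0-3 of the second, concatenating the
	     two intermediate gather results into one vector.  */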
1930
1931 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
1932 ncopies *= 2;
1933 for (i = 0; i < nunits; ++i)
1934 sel[i] = i | gather_off_nunits;
1935 mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
1936 }
1937 else
1938 gcc_unreachable ();
1939
1940 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1941
1942 ptr = fold_convert (ptrtype, gather_base);
1943 if (!is_gimple_min_invariant (ptr))
1944 {
1945 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1946 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1947 gcc_assert (!new_bb);
1948 }
1949
1950 scale = build_int_cst (scaletype, gather_scale);
1951
1952 prev_stmt_info = NULL;
1953 for (j = 0; j < ncopies; ++j)
1954 {
1955 if (modifier == WIDEN && (j & 1))
1956 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1957 perm_mask, stmt, gsi);
1958 else if (j == 0)
1959 op = vec_oprnd0
1960 = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
1961 else
1962 op = vec_oprnd0
1963 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1964
1965 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1966 {
1967 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1968 == TYPE_VECTOR_SUBPARTS (idxtype));
1969 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
1970 var = make_ssa_name (var, NULL);
1971 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1972 new_stmt
1973 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
1974 op, NULL_TREE);
1975 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1976 op = var;
1977 }
1978
1979 if (mask_perm_mask && (j & 1))
1980 mask_op = permute_vec_elements (mask_op, mask_op,
1981 mask_perm_mask, stmt, gsi);
1982 else
1983 {
1984 if (j == 0)
1985 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
1986 else
1987 {
1988 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL,
1989 &def_stmt, &def, &dt);
1990 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1991 }
1992
1993 mask_op = vec_mask;
1994 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
1995 {
1996 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
1997 == TYPE_VECTOR_SUBPARTS (masktype));
1998 var = vect_get_new_vect_var (masktype, vect_simple_var,
1999 NULL);
2000 var = make_ssa_name (var, NULL);
2001 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2002 new_stmt
2003 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
2004 mask_op, NULL_TREE);
2005 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2006 mask_op = var;
2007 }
2008 }
2009
2010 new_stmt
2011 = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
2012 scale);
2013
2014 if (!useless_type_conversion_p (vectype, rettype))
2015 {
2016 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
2017 == TYPE_VECTOR_SUBPARTS (rettype));
2018 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
2019 op = make_ssa_name (var, new_stmt);
2020 gimple_call_set_lhs (new_stmt, op);
2021 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2022 var = make_ssa_name (vec_dest, NULL);
2023 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2024 new_stmt
2025 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var, op,
2026 NULL_TREE);
2027 }
2028 else
2029 {
2030 var = make_ssa_name (vec_dest, new_stmt);
2031 gimple_call_set_lhs (new_stmt, var);
2032 }
2033
2034 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2035
2036 if (modifier == NARROW)
2037 {
2038 if ((j & 1) == 0)
2039 {
2040 prev_res = var;
2041 continue;
2042 }
2043 var = permute_vec_elements (prev_res, var,
2044 perm_mask, stmt, gsi);
2045 new_stmt = SSA_NAME_DEF_STMT (var);
2046 }
2047
2048 if (prev_stmt_info == NULL)
2049 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2050 else
2051 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2052 prev_stmt_info = vinfo_for_stmt (new_stmt);
2053 }
2054
2055 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2056 from the IL. */
2057 tree lhs = gimple_call_lhs (stmt);
2058 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2059 set_vinfo_for_stmt (new_stmt, stmt_info);
2060 set_vinfo_for_stmt (stmt, NULL);
2061 STMT_VINFO_STMT (stmt_info) = new_stmt;
2062 gsi_replace (gsi, new_stmt, true);
2063 return true;
2064 }
2065 else if (is_store)
2066 {
2067 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2068 prev_stmt_info = NULL;
2069 for (i = 0; i < ncopies; i++)
2070 {
2071 unsigned align, misalign;
2072
2073 if (i == 0)
2074 {
2075 tree rhs = gimple_call_arg (stmt, 3);
2076 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt, NULL);
2077 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2078 	      /* We should have caught mismatched types earlier.  */
2079 gcc_assert (useless_type_conversion_p (vectype,
2080 TREE_TYPE (vec_rhs)));
2081 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2082 NULL_TREE, &dummy, gsi,
2083 &ptr_incr, false, &inv_p);
2084 gcc_assert (!inv_p);
2085 }
2086 else
2087 {
2088 vect_is_simple_use (vec_rhs, NULL, loop_vinfo, NULL, &def_stmt,
2089 &def, &dt);
2090 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2091 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2092 &def, &dt);
2093 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2094 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2095 TYPE_SIZE_UNIT (vectype));
2096 }
2097
2098 align = TYPE_ALIGN_UNIT (vectype);
2099 if (aligned_access_p (dr))
2100 misalign = 0;
2101 else if (DR_MISALIGNMENT (dr) == -1)
2102 {
2103 align = TYPE_ALIGN_UNIT (elem_type);
2104 misalign = 0;
2105 }
2106 else
2107 misalign = DR_MISALIGNMENT (dr);
2108 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2109 misalign);
2110 new_stmt
2111 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2112 gimple_call_arg (stmt, 1),
2113 vec_mask, vec_rhs);
2114 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2115 if (i == 0)
2116 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2117 else
2118 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2119 prev_stmt_info = vinfo_for_stmt (new_stmt);
2120 }
2121 }
2122 else
2123 {
2124 tree vec_mask = NULL_TREE;
2125 prev_stmt_info = NULL;
2126 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2127 for (i = 0; i < ncopies; i++)
2128 {
2129 unsigned align, misalign;
2130
2131 if (i == 0)
2132 {
2133 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2134 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2135 NULL_TREE, &dummy, gsi,
2136 &ptr_incr, false, &inv_p);
2137 gcc_assert (!inv_p);
2138 }
2139 else
2140 {
2141 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2142 &def, &dt);
2143 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2144 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2145 TYPE_SIZE_UNIT (vectype));
2146 }
2147
2148 align = TYPE_ALIGN_UNIT (vectype);
2149 if (aligned_access_p (dr))
2150 misalign = 0;
2151 else if (DR_MISALIGNMENT (dr) == -1)
2152 {
2153 align = TYPE_ALIGN_UNIT (elem_type);
2154 misalign = 0;
2155 }
2156 else
2157 misalign = DR_MISALIGNMENT (dr);
2158 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2159 misalign);
2160 new_stmt
2161 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2162 gimple_call_arg (stmt, 1),
2163 vec_mask);
2164 gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest, NULL));
2165 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2166 if (i == 0)
2167 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2168 else
2169 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2170 prev_stmt_info = vinfo_for_stmt (new_stmt);
2171 }
2172 }
2173
2174 if (!is_store)
2175 {
2176 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2177 from the IL. */
2178 tree lhs = gimple_call_lhs (stmt);
2179 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2180 set_vinfo_for_stmt (new_stmt, stmt_info);
2181 set_vinfo_for_stmt (stmt, NULL);
2182 STMT_VINFO_STMT (stmt_info) = new_stmt;
2183 gsi_replace (gsi, new_stmt, true);
2184 }
2185
2186 return true;
2187 }
2188
2189
2190 /* Function vectorizable_call.
2191
2192 Check if STMT performs a function call that can be vectorized.
2193 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2194 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2195 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
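
/* For instance (purely illustrative), with a target that provides a vector
   variant of the builtin via targetm.vectorize.builtin_vectorized_function,
   a statement such as

       y[i] = copysign (a[i], b[i]);

   is replaced by a call to the vector builtin taking whole vectors of A and
   B per copy.  */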
2196
2197 static bool
2198 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2199 slp_tree slp_node)
2200 {
2201 tree vec_dest;
2202 tree scalar_dest;
2203 tree op, type;
2204 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2205 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2206 tree vectype_out, vectype_in;
2207 int nunits_in;
2208 int nunits_out;
2209 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2210 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2211 tree fndecl, new_temp, def, rhs_type;
2212 gimple def_stmt;
2213 enum vect_def_type dt[3]
2214 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2215 gimple new_stmt = NULL;
2216 int ncopies, j;
2217 vec<tree> vargs = vNULL;
2218 enum { NARROW, NONE, WIDEN } modifier;
2219 size_t i, nargs;
2220 tree lhs;
2221
2222 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2223 return false;
2224
2225 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2226 return false;
2227
2228 /* Is STMT a vectorizable call? */
2229 if (!is_gimple_call (stmt))
2230 return false;
2231
2232 if (gimple_call_internal_p (stmt)
2233 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2234 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2235 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2236 slp_node);
2237
2238 if (gimple_call_lhs (stmt) == NULL_TREE
2239 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2240 return false;
2241
2242 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2243
2244 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2245
2246 /* Process function arguments. */
2247 rhs_type = NULL_TREE;
2248 vectype_in = NULL_TREE;
2249 nargs = gimple_call_num_args (stmt);
2250
2251   /* Bail out if the function has more than three arguments; we do not have
2252      interesting builtin functions to vectorize with more than two arguments
2253      except for fma.  A call with no arguments is not handled either.  */
2254 if (nargs == 0 || nargs > 3)
2255 return false;
2256
2257 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
2258 if (gimple_call_internal_p (stmt)
2259 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2260 {
2261 nargs = 0;
2262 rhs_type = unsigned_type_node;
2263 }
2264
2265 for (i = 0; i < nargs; i++)
2266 {
2267 tree opvectype;
2268
2269 op = gimple_call_arg (stmt, i);
2270
2271 /* We can only handle calls with arguments of the same type. */
2272 if (rhs_type
2273 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2274 {
2275 if (dump_enabled_p ())
2276 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2277 "argument types differ.\n");
2278 return false;
2279 }
2280 if (!rhs_type)
2281 rhs_type = TREE_TYPE (op);
2282
2283 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2284 &def_stmt, &def, &dt[i], &opvectype))
2285 {
2286 if (dump_enabled_p ())
2287 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2288 "use not simple.\n");
2289 return false;
2290 }
2291
2292 if (!vectype_in)
2293 vectype_in = opvectype;
2294 else if (opvectype
2295 && opvectype != vectype_in)
2296 {
2297 if (dump_enabled_p ())
2298 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2299 "argument vector types differ.\n");
2300 return false;
2301 }
2302 }
2303 /* If all arguments are external or constant defs use a vector type with
2304 the same size as the output vector type. */
2305 if (!vectype_in)
2306 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2307 if (vec_stmt)
2308 gcc_assert (vectype_in);
2309 if (!vectype_in)
2310 {
2311 if (dump_enabled_p ())
2312 {
2313 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2314 "no vectype for scalar type ");
2315 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2316 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2317 }
2318
2319 return false;
2320 }
2321
2322 /* FORNOW */
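  /* The modifier reflects the relative widths of the input and output vector
     types: e.g. with V4DF arguments (nunits_in == 4) and a V8SI result
     (nunits_out == 8) this is a NARROW operation, where each vectorized call
     consumes two input vectors per argument to produce one output vector.  */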
2323 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2324 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2325 if (nunits_in == nunits_out / 2)
2326 modifier = NARROW;
2327 else if (nunits_out == nunits_in)
2328 modifier = NONE;
2329 else if (nunits_out == nunits_in / 2)
2330 modifier = WIDEN;
2331 else
2332 return false;
2333
2334 /* For now, we only vectorize functions if a target specific builtin
2335 is available. TODO -- in some cases, it might be profitable to
2336 insert the calls for pieces of the vector, in order to be able
2337 to vectorize other operations in the loop. */
2338 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
2339 if (fndecl == NULL_TREE)
2340 {
2341 if (gimple_call_internal_p (stmt)
2342 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
2343 && !slp_node
2344 && loop_vinfo
2345 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2346 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2347 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2348 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2349 {
2350 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2351 { 0, 1, 2, ... vf - 1 } vector. */
2352 gcc_assert (nargs == 0);
2353 }
2354 else
2355 {
2356 if (dump_enabled_p ())
2357 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2358 "function is not vectorizable.\n");
2359 return false;
2360 }
2361 }
2362
2363 gcc_assert (!gimple_vuse (stmt));
2364
2365 if (slp_node || PURE_SLP_STMT (stmt_info))
2366 ncopies = 1;
2367 else if (modifier == NARROW)
2368 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2369 else
2370 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2371
2372 /* Sanity check: make sure that at least one copy of the vectorized stmt
2373 needs to be generated. */
2374 gcc_assert (ncopies >= 1);
2375
2376 if (!vec_stmt) /* transformation not required. */
2377 {
2378 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2379 if (dump_enabled_p ())
2380 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2381 "\n");
2382 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2383 return true;
2384 }
2385
2386 /** Transform. **/
2387
2388 if (dump_enabled_p ())
2389 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2390
2391 /* Handle def. */
2392 scalar_dest = gimple_call_lhs (stmt);
2393 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2394
2395 prev_stmt_info = NULL;
2396 switch (modifier)
2397 {
2398 case NONE:
2399 for (j = 0; j < ncopies; ++j)
2400 {
2401 /* Build argument list for the vectorized call. */
2402 if (j == 0)
2403 vargs.create (nargs);
2404 else
2405 vargs.truncate (0);
2406
2407 if (slp_node)
2408 {
2409 auto_vec<vec<tree> > vec_defs (nargs);
2410 vec<tree> vec_oprnds0;
2411
2412 for (i = 0; i < nargs; i++)
2413 vargs.quick_push (gimple_call_arg (stmt, i));
2414 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2415 vec_oprnds0 = vec_defs[0];
2416
2417 /* Arguments are ready. Create the new vector stmt. */
2418 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2419 {
2420 size_t k;
2421 for (k = 0; k < nargs; k++)
2422 {
2423 vec<tree> vec_oprndsk = vec_defs[k];
2424 vargs[k] = vec_oprndsk[i];
2425 }
2426 new_stmt = gimple_build_call_vec (fndecl, vargs);
2427 new_temp = make_ssa_name (vec_dest, new_stmt);
2428 gimple_call_set_lhs (new_stmt, new_temp);
2429 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2430 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2431 }
2432
2433 for (i = 0; i < nargs; i++)
2434 {
2435 vec<tree> vec_oprndsi = vec_defs[i];
2436 vec_oprndsi.release ();
2437 }
2438 continue;
2439 }
2440
2441 for (i = 0; i < nargs; i++)
2442 {
2443 op = gimple_call_arg (stmt, i);
2444 if (j == 0)
2445 vec_oprnd0
2446 = vect_get_vec_def_for_operand (op, stmt, NULL);
2447 else
2448 {
2449 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2450 vec_oprnd0
2451 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2452 }
2453
2454 vargs.quick_push (vec_oprnd0);
2455 }
2456
2457 if (gimple_call_internal_p (stmt)
2458 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2459 {
2460 tree *v = XALLOCAVEC (tree, nunits_out);
2461 int k;
2462 for (k = 0; k < nunits_out; ++k)
2463 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2464 tree cst = build_vector (vectype_out, v);
2465 tree new_var
2466 = vect_get_new_vect_var (vectype_out, vect_simple_var, "cst_");
2467 gimple init_stmt = gimple_build_assign (new_var, cst);
2468 new_temp = make_ssa_name (new_var, init_stmt);
2469 gimple_assign_set_lhs (init_stmt, new_temp);
2470 vect_init_vector_1 (stmt, init_stmt, NULL);
2471 new_temp = make_ssa_name (vec_dest, NULL);
2472 new_stmt = gimple_build_assign (new_temp,
2473 gimple_assign_lhs (init_stmt));
2474 }
2475 else
2476 {
2477 new_stmt = gimple_build_call_vec (fndecl, vargs);
2478 new_temp = make_ssa_name (vec_dest, new_stmt);
2479 gimple_call_set_lhs (new_stmt, new_temp);
2480 }
2481 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2482
2483 if (j == 0)
2484 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2485 else
2486 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2487
2488 prev_stmt_info = vinfo_for_stmt (new_stmt);
2489 }
2490
2491 break;
2492
2493 case NARROW:
2494 for (j = 0; j < ncopies; ++j)
2495 {
2496 /* Build argument list for the vectorized call. */
2497 if (j == 0)
2498 vargs.create (nargs * 2);
2499 else
2500 vargs.truncate (0);
2501
2502 if (slp_node)
2503 {
2504 auto_vec<vec<tree> > vec_defs (nargs);
2505 vec<tree> vec_oprnds0;
2506
2507 for (i = 0; i < nargs; i++)
2508 vargs.quick_push (gimple_call_arg (stmt, i));
2509 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2510 vec_oprnds0 = vec_defs[0];
2511
2512 /* Arguments are ready. Create the new vector stmt. */
2513 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2514 {
2515 size_t k;
2516 vargs.truncate (0);
2517 for (k = 0; k < nargs; k++)
2518 {
2519 vec<tree> vec_oprndsk = vec_defs[k];
2520 vargs.quick_push (vec_oprndsk[i]);
2521 vargs.quick_push (vec_oprndsk[i + 1]);
2522 }
2523 new_stmt = gimple_build_call_vec (fndecl, vargs);
2524 new_temp = make_ssa_name (vec_dest, new_stmt);
2525 gimple_call_set_lhs (new_stmt, new_temp);
2526 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2527 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2528 }
2529
2530 for (i = 0; i < nargs; i++)
2531 {
2532 vec<tree> vec_oprndsi = vec_defs[i];
2533 vec_oprndsi.release ();
2534 }
2535 continue;
2536 }
2537
2538 for (i = 0; i < nargs; i++)
2539 {
2540 op = gimple_call_arg (stmt, i);
2541 if (j == 0)
2542 {
2543 vec_oprnd0
2544 = vect_get_vec_def_for_operand (op, stmt, NULL);
2545 vec_oprnd1
2546 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2547 }
2548 else
2549 {
2550 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2551 vec_oprnd0
2552 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2553 vec_oprnd1
2554 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2555 }
2556
2557 vargs.quick_push (vec_oprnd0);
2558 vargs.quick_push (vec_oprnd1);
2559 }
2560
2561 new_stmt = gimple_build_call_vec (fndecl, vargs);
2562 new_temp = make_ssa_name (vec_dest, new_stmt);
2563 gimple_call_set_lhs (new_stmt, new_temp);
2564 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2565
2566 if (j == 0)
2567 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2568 else
2569 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2570
2571 prev_stmt_info = vinfo_for_stmt (new_stmt);
2572 }
2573
2574 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2575
2576 break;
2577
2578 case WIDEN:
2579 /* No current target implements this case. */
2580 return false;
2581 }
2582
2583 vargs.release ();
2584
2585 /* The call in STMT might prevent it from being removed in dce.
2586 We however cannot remove it here, due to the way the ssa name
2587      it defines is mapped to the new definition.  So just replace the
2588      rhs of the statement with something harmless.  */
2589
2590 if (slp_node)
2591 return true;
2592
2593 type = TREE_TYPE (scalar_dest);
2594 if (is_pattern_stmt_p (stmt_info))
2595 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2596 else
2597 lhs = gimple_call_lhs (stmt);
2598 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2599 set_vinfo_for_stmt (new_stmt, stmt_info);
2600 set_vinfo_for_stmt (stmt, NULL);
2601 STMT_VINFO_STMT (stmt_info) = new_stmt;
2602 gsi_replace (gsi, new_stmt, false);
2603
2604 return true;
2605 }
2606
2607
2608 struct simd_call_arg_info
2609 {
2610   tree vectype;		  /* Vector type of the definition, if any.  */
2611   tree op;			  /* Base of a linear argument, if any.  */
2612   enum vect_def_type dt;	  /* Definition type of the argument.  */
2613   HOST_WIDE_INT linear_step; /* Step of a linear argument, 0 otherwise.  */
2614   unsigned int align;	  /* Known pointer alignment in bytes, or 0.  */
2615 };
2616
2617 /* Function vectorizable_simd_clone_call.
2618
2619 Check if STMT performs a function call that can be vectorized
2620 by calling a simd clone of the function.
2621 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2622 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2623 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
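
/* As a sketch (the details depend on the clone's ABI): given

       #pragma omp declare simd notinbranch
       int foo (int x);

   a call  a[i] = foo (b[i])  in a vectorizable loop can be replaced by a
   call to one of foo's simd clones, which takes a vector of arguments and
   returns a vector of results, so that a single clone call covers simdlen
   consecutive iterations.  */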
2624
2625 static bool
2626 vectorizable_simd_clone_call (gimple stmt, gimple_stmt_iterator *gsi,
2627 gimple *vec_stmt, slp_tree slp_node)
2628 {
2629 tree vec_dest;
2630 tree scalar_dest;
2631 tree op, type;
2632 tree vec_oprnd0 = NULL_TREE;
2633 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2634 tree vectype;
2635 unsigned int nunits;
2636 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2637 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2638 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2639 tree fndecl, new_temp, def;
2640 gimple def_stmt;
2641 gimple new_stmt = NULL;
2642 int ncopies, j;
2643 vec<simd_call_arg_info> arginfo = vNULL;
2644 vec<tree> vargs = vNULL;
2645 size_t i, nargs;
2646 tree lhs, rtype, ratype;
2647 vec<constructor_elt, va_gc> *ret_ctor_elts;
2648
2649 /* Is STMT a vectorizable call? */
2650 if (!is_gimple_call (stmt))
2651 return false;
2652
2653 fndecl = gimple_call_fndecl (stmt);
2654 if (fndecl == NULL_TREE)
2655 return false;
2656
2657 struct cgraph_node *node = cgraph_node::get (fndecl);
2658 if (node == NULL || node->simd_clones == NULL)
2659 return false;
2660
2661 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2662 return false;
2663
2664 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2665 return false;
2666
2667 if (gimple_call_lhs (stmt)
2668 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2669 return false;
2670
2671 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2672
2673 vectype = STMT_VINFO_VECTYPE (stmt_info);
2674
2675 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2676 return false;
2677
2678 /* FORNOW */
2679 if (slp_node || PURE_SLP_STMT (stmt_info))
2680 return false;
2681
2682 /* Process function arguments. */
2683 nargs = gimple_call_num_args (stmt);
2684
2685 /* Bail out if the function has zero arguments. */
2686 if (nargs == 0)
2687 return false;
2688
2689 arginfo.create (nargs);
2690
2691 for (i = 0; i < nargs; i++)
2692 {
2693 simd_call_arg_info thisarginfo;
2694 affine_iv iv;
2695
2696 thisarginfo.linear_step = 0;
2697 thisarginfo.align = 0;
2698 thisarginfo.op = NULL_TREE;
2699
2700 op = gimple_call_arg (stmt, i);
2701 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2702 &def_stmt, &def, &thisarginfo.dt,
2703 &thisarginfo.vectype)
2704 || thisarginfo.dt == vect_uninitialized_def)
2705 {
2706 if (dump_enabled_p ())
2707 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2708 "use not simple.\n");
2709 arginfo.release ();
2710 return false;
2711 }
2712
2713 if (thisarginfo.dt == vect_constant_def
2714 || thisarginfo.dt == vect_external_def)
2715 gcc_assert (thisarginfo.vectype == NULL_TREE);
2716 else
2717 gcc_assert (thisarginfo.vectype != NULL_TREE);
2718
2719 if (thisarginfo.dt != vect_constant_def
2720 && thisarginfo.dt != vect_external_def
2721 && loop_vinfo
2722 && TREE_CODE (op) == SSA_NAME
2723 && simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false)
2724 && tree_fits_shwi_p (iv.step))
2725 {
2726 thisarginfo.linear_step = tree_to_shwi (iv.step);
2727 thisarginfo.op = iv.base;
2728 }
2729 else if ((thisarginfo.dt == vect_constant_def
2730 || thisarginfo.dt == vect_external_def)
2731 && POINTER_TYPE_P (TREE_TYPE (op)))
2732 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2733
2734 arginfo.quick_push (thisarginfo);
2735 }
2736
2737 unsigned int badness = 0;
2738 struct cgraph_node *bestn = NULL;
2739 if (STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info))
2740 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info));
2741 else
2742 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2743 n = n->simdclone->next_clone)
2744 {
2745 unsigned int this_badness = 0;
2746 if (n->simdclone->simdlen
2747 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2748 || n->simdclone->nargs != nargs)
2749 continue;
2750 if (n->simdclone->simdlen
2751 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2752 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2753 - exact_log2 (n->simdclone->simdlen)) * 1024;
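	/* E.g. a clone with simdlen 4 considered for a loop with
	   vectorization factor 16 gets (log2 (16) - log2 (4)) * 1024 == 2048
	   added here, since several clone calls would be needed per
	   vectorized iteration.  */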
2754 if (n->simdclone->inbranch)
2755 this_badness += 2048;
2756 int target_badness = targetm.simd_clone.usable (n);
2757 if (target_badness < 0)
2758 continue;
2759 this_badness += target_badness * 512;
2760 /* FORNOW: Have to add code to add the mask argument. */
2761 if (n->simdclone->inbranch)
2762 continue;
2763 for (i = 0; i < nargs; i++)
2764 {
2765 switch (n->simdclone->args[i].arg_type)
2766 {
2767 case SIMD_CLONE_ARG_TYPE_VECTOR:
2768 if (!useless_type_conversion_p
2769 (n->simdclone->args[i].orig_type,
2770 TREE_TYPE (gimple_call_arg (stmt, i))))
2771 i = -1;
2772 else if (arginfo[i].dt == vect_constant_def
2773 || arginfo[i].dt == vect_external_def
2774 || arginfo[i].linear_step)
2775 this_badness += 64;
2776 break;
2777 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2778 if (arginfo[i].dt != vect_constant_def
2779 && arginfo[i].dt != vect_external_def)
2780 i = -1;
2781 break;
2782 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2783 if (arginfo[i].dt == vect_constant_def
2784 || arginfo[i].dt == vect_external_def
2785 || (arginfo[i].linear_step
2786 != n->simdclone->args[i].linear_step))
2787 i = -1;
2788 break;
2789 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2790 /* FORNOW */
2791 i = -1;
2792 break;
2793 case SIMD_CLONE_ARG_TYPE_MASK:
2794 gcc_unreachable ();
2795 }
2796 if (i == (size_t) -1)
2797 break;
2798 if (n->simdclone->args[i].alignment > arginfo[i].align)
2799 {
2800 i = -1;
2801 break;
2802 }
2803 if (arginfo[i].align)
2804 this_badness += (exact_log2 (arginfo[i].align)
2805 - exact_log2 (n->simdclone->args[i].alignment));
2806 }
2807 if (i == (size_t) -1)
2808 continue;
2809 if (bestn == NULL || this_badness < badness)
2810 {
2811 bestn = n;
2812 badness = this_badness;
2813 }
2814 }
2815
2816 if (bestn == NULL)
2817 {
2818 arginfo.release ();
2819 return false;
2820 }
2821
2822 for (i = 0; i < nargs; i++)
2823 if ((arginfo[i].dt == vect_constant_def
2824 || arginfo[i].dt == vect_external_def)
2825 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2826 {
2827 arginfo[i].vectype
2828 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2829 i)));
2830 if (arginfo[i].vectype == NULL
2831 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2832 > bestn->simdclone->simdlen))
2833 {
2834 arginfo.release ();
2835 return false;
2836 }
2837 }
2838
2839 fndecl = bestn->decl;
2840 nunits = bestn->simdclone->simdlen;
2841 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2842
2843   /* If the function isn't const, only allow it in simd loops where the user
2844 has asserted that at least nunits consecutive iterations can be
2845 performed using SIMD instructions. */
2846 if ((loop == NULL || (unsigned) loop->safelen < nunits)
2847 && gimple_vuse (stmt))
2848 {
2849 arginfo.release ();
2850 return false;
2851 }
2852
2853 /* Sanity check: make sure that at least one copy of the vectorized stmt
2854 needs to be generated. */
2855 gcc_assert (ncopies >= 1);
2856
2857 if (!vec_stmt) /* transformation not required. */
2858 {
2859 STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info) = bestn->decl;
2860 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
2861 if (dump_enabled_p ())
2862 dump_printf_loc (MSG_NOTE, vect_location,
2863 "=== vectorizable_simd_clone_call ===\n");
2864 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2865 arginfo.release ();
2866 return true;
2867 }
2868
2869 /** Transform. **/
2870
2871 if (dump_enabled_p ())
2872 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2873
2874 /* Handle def. */
2875 scalar_dest = gimple_call_lhs (stmt);
2876 vec_dest = NULL_TREE;
2877 rtype = NULL_TREE;
2878 ratype = NULL_TREE;
2879 if (scalar_dest)
2880 {
2881 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2882 rtype = TREE_TYPE (TREE_TYPE (fndecl));
2883 if (TREE_CODE (rtype) == ARRAY_TYPE)
2884 {
2885 ratype = rtype;
2886 rtype = TREE_TYPE (ratype);
2887 }
2888 }
2889
2890 prev_stmt_info = NULL;
2891 for (j = 0; j < ncopies; ++j)
2892 {
2893 /* Build argument list for the vectorized call. */
2894 if (j == 0)
2895 vargs.create (nargs);
2896 else
2897 vargs.truncate (0);
2898
2899 for (i = 0; i < nargs; i++)
2900 {
2901 unsigned int k, l, m, o;
2902 tree atype;
2903 op = gimple_call_arg (stmt, i);
2904 switch (bestn->simdclone->args[i].arg_type)
2905 {
2906 case SIMD_CLONE_ARG_TYPE_VECTOR:
2907 atype = bestn->simdclone->args[i].vector_type;
2908 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
2909 for (m = j * o; m < (j + 1) * o; m++)
2910 {
2911 if (TYPE_VECTOR_SUBPARTS (atype)
2912 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
2913 {
2914 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
2915 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2916 / TYPE_VECTOR_SUBPARTS (atype));
2917 gcc_assert ((k & (k - 1)) == 0);
2918 if (m == 0)
2919 vec_oprnd0
2920 = vect_get_vec_def_for_operand (op, stmt, NULL);
2921 else
2922 {
2923 vec_oprnd0 = arginfo[i].op;
2924 if ((m & (k - 1)) == 0)
2925 vec_oprnd0
2926 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2927 vec_oprnd0);
2928 }
2929 arginfo[i].op = vec_oprnd0;
2930 vec_oprnd0
2931 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
2932 size_int (prec),
2933 bitsize_int ((m & (k - 1)) * prec));
2934 new_stmt
2935 = gimple_build_assign (make_ssa_name (atype, NULL),
2936 vec_oprnd0);
2937 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2938 vargs.safe_push (gimple_assign_lhs (new_stmt));
2939 }
2940 else
2941 {
2942 k = (TYPE_VECTOR_SUBPARTS (atype)
2943 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
2944 gcc_assert ((k & (k - 1)) == 0);
2945 vec<constructor_elt, va_gc> *ctor_elts;
2946 if (k != 1)
2947 vec_alloc (ctor_elts, k);
2948 else
2949 ctor_elts = NULL;
2950 for (l = 0; l < k; l++)
2951 {
2952 if (m == 0 && l == 0)
2953 vec_oprnd0
2954 = vect_get_vec_def_for_operand (op, stmt, NULL);
2955 else
2956 vec_oprnd0
2957 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2958 arginfo[i].op);
2959 arginfo[i].op = vec_oprnd0;
2960 if (k == 1)
2961 break;
2962 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
2963 vec_oprnd0);
2964 }
2965 if (k == 1)
2966 vargs.safe_push (vec_oprnd0);
2967 else
2968 {
2969 vec_oprnd0 = build_constructor (atype, ctor_elts);
2970 new_stmt
2971 = gimple_build_assign (make_ssa_name (atype, NULL),
2972 vec_oprnd0);
2973 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2974 vargs.safe_push (gimple_assign_lhs (new_stmt));
2975 }
2976 }
2977 }
2978 break;
2979 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2980 vargs.safe_push (op);
2981 break;
2982 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2983 if (j == 0)
2984 {
2985 gimple_seq stmts;
2986 arginfo[i].op
2987 = force_gimple_operand (arginfo[i].op, &stmts, true,
2988 NULL_TREE);
2989 if (stmts != NULL)
2990 {
2991 basic_block new_bb;
2992 edge pe = loop_preheader_edge (loop);
2993 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2994 gcc_assert (!new_bb);
2995 }
2996 tree phi_res = copy_ssa_name (op, NULL);
2997 gimple new_phi = create_phi_node (phi_res, loop->header);
2998 set_vinfo_for_stmt (new_phi,
2999 new_stmt_vec_info (new_phi, loop_vinfo,
3000 NULL));
3001 add_phi_arg (new_phi, arginfo[i].op,
3002 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3003 enum tree_code code
3004 = POINTER_TYPE_P (TREE_TYPE (op))
3005 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3006 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3007 ? sizetype : TREE_TYPE (op);
3008 widest_int cst
3009 = wi::mul (bestn->simdclone->args[i].linear_step,
3010 ncopies * nunits);
3011 tree tcst = wide_int_to_tree (type, cst);
3012 tree phi_arg = copy_ssa_name (op, NULL);
3013 new_stmt = gimple_build_assign_with_ops (code, phi_arg,
3014 phi_res, tcst);
3015 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3016 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3017 set_vinfo_for_stmt (new_stmt,
3018 new_stmt_vec_info (new_stmt, loop_vinfo,
3019 NULL));
3020 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3021 UNKNOWN_LOCATION);
3022 arginfo[i].op = phi_res;
3023 vargs.safe_push (phi_res);
3024 }
3025 else
3026 {
3027 enum tree_code code
3028 = POINTER_TYPE_P (TREE_TYPE (op))
3029 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3030 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3031 ? sizetype : TREE_TYPE (op);
3032 widest_int cst
3033 = wi::mul (bestn->simdclone->args[i].linear_step,
3034 j * nunits);
3035 tree tcst = wide_int_to_tree (type, cst);
3036 new_temp = make_ssa_name (TREE_TYPE (op), NULL);
3037 new_stmt
3038 = gimple_build_assign_with_ops (code, new_temp,
3039 arginfo[i].op, tcst);
3040 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3041 vargs.safe_push (new_temp);
3042 }
3043 break;
3044 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3045 default:
3046 gcc_unreachable ();
3047 }
3048 }
3049
3050 new_stmt = gimple_build_call_vec (fndecl, vargs);
3051 if (vec_dest)
3052 {
3053 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3054 if (ratype)
3055 new_temp = create_tmp_var (ratype, NULL);
3056 else if (TYPE_VECTOR_SUBPARTS (vectype)
3057 == TYPE_VECTOR_SUBPARTS (rtype))
3058 new_temp = make_ssa_name (vec_dest, new_stmt);
3059 else
3060 new_temp = make_ssa_name (rtype, new_stmt);
3061 gimple_call_set_lhs (new_stmt, new_temp);
3062 }
3063 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3064
3065 if (vec_dest)
3066 {
3067 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3068 {
3069 unsigned int k, l;
3070 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3071 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3072 gcc_assert ((k & (k - 1)) == 0);
3073 for (l = 0; l < k; l++)
3074 {
3075 tree t;
3076 if (ratype)
3077 {
3078 t = build_fold_addr_expr (new_temp);
3079 t = build2 (MEM_REF, vectype, t,
3080 build_int_cst (TREE_TYPE (t),
3081 l * prec / BITS_PER_UNIT));
3082 }
3083 else
3084 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3085 size_int (prec), bitsize_int (l * prec));
3086 new_stmt
3087 = gimple_build_assign (make_ssa_name (vectype, NULL), t);
3088 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3089 if (j == 0 && l == 0)
3090 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3091 else
3092 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3093
3094 prev_stmt_info = vinfo_for_stmt (new_stmt);
3095 }
3096
3097 if (ratype)
3098 {
3099 tree clobber = build_constructor (ratype, NULL);
3100 TREE_THIS_VOLATILE (clobber) = 1;
3101 new_stmt = gimple_build_assign (new_temp, clobber);
3102 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3103 }
3104 continue;
3105 }
3106 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3107 {
3108 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3109 / TYPE_VECTOR_SUBPARTS (rtype));
3110 gcc_assert ((k & (k - 1)) == 0);
3111 if ((j & (k - 1)) == 0)
3112 vec_alloc (ret_ctor_elts, k);
3113 if (ratype)
3114 {
3115 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3116 for (m = 0; m < o; m++)
3117 {
3118 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3119 size_int (m), NULL_TREE, NULL_TREE);
3120 new_stmt
3121 = gimple_build_assign (make_ssa_name (rtype, NULL),
3122 tem);
3123 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3124 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3125 gimple_assign_lhs (new_stmt));
3126 }
3127 tree clobber = build_constructor (ratype, NULL);
3128 TREE_THIS_VOLATILE (clobber) = 1;
3129 new_stmt = gimple_build_assign (new_temp, clobber);
3130 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3131 }
3132 else
3133 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3134 if ((j & (k - 1)) != k - 1)
3135 continue;
3136 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3137 new_stmt
3138 = gimple_build_assign (make_ssa_name (vec_dest, NULL),
3139 vec_oprnd0);
3140 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3141
3142 if ((unsigned) j == k - 1)
3143 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3144 else
3145 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3146
3147 prev_stmt_info = vinfo_for_stmt (new_stmt);
3148 continue;
3149 }
3150 else if (ratype)
3151 {
3152 tree t = build_fold_addr_expr (new_temp);
3153 t = build2 (MEM_REF, vectype, t,
3154 build_int_cst (TREE_TYPE (t), 0));
3155 new_stmt
3156 = gimple_build_assign (make_ssa_name (vec_dest, NULL), t);
3157 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3158 tree clobber = build_constructor (ratype, NULL);
3159 TREE_THIS_VOLATILE (clobber) = 1;
3160 vect_finish_stmt_generation (stmt,
3161 gimple_build_assign (new_temp,
3162 clobber), gsi);
3163 }
3164 }
3165
3166 if (j == 0)
3167 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3168 else
3169 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3170
3171 prev_stmt_info = vinfo_for_stmt (new_stmt);
3172 }
3173
3174 vargs.release ();
3175
3176 /* The call in STMT might prevent it from being removed in dce.
3177 We however cannot remove it here, due to the way the ssa name
3178      it defines is mapped to the new definition.  So just replace the
3179      rhs of the statement with something harmless.  */
3180
3181 if (slp_node)
3182 return true;
3183
3184 if (scalar_dest)
3185 {
3186 type = TREE_TYPE (scalar_dest);
3187 if (is_pattern_stmt_p (stmt_info))
3188 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3189 else
3190 lhs = gimple_call_lhs (stmt);
3191 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3192 }
3193 else
3194 new_stmt = gimple_build_nop ();
3195 set_vinfo_for_stmt (new_stmt, stmt_info);
3196 set_vinfo_for_stmt (stmt, NULL);
3197 STMT_VINFO_STMT (stmt_info) = new_stmt;
3198 gsi_replace (gsi, new_stmt, false);
3199 unlink_stmt_vdef (stmt);
3200
3201 return true;
3202 }
3203
3204
3205 /* Function vect_gen_widened_results_half
3206
3207    Create a vector stmt whose code, operand count and result variable are
3208    CODE, OP_TYPE and VEC_DEST respectively, and whose arguments are
3209    VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
3210 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3211 needs to be created (DECL is a function-decl of a target-builtin).
3212 STMT is the original scalar stmt that we are vectorizing. */
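
/* For example, when widening a V8HI operand into V4SI results the two halves
   are typically produced with VEC_UNPACK_LO_EXPR and VEC_UNPACK_HI_EXPR (or,
   for a widening multiplication, VEC_WIDEN_MULT_LO_EXPR and
   VEC_WIDEN_MULT_HI_EXPR); this function emits one such half at a time.  */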
3213
3214 static gimple
3215 vect_gen_widened_results_half (enum tree_code code,
3216 tree decl,
3217 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3218 tree vec_dest, gimple_stmt_iterator *gsi,
3219 gimple stmt)
3220 {
3221 gimple new_stmt;
3222 tree new_temp;
3223
3224 /* Generate half of the widened result: */
3225 if (code == CALL_EXPR)
3226 {
3227 /* Target specific support */
3228 if (op_type == binary_op)
3229 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3230 else
3231 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3232 new_temp = make_ssa_name (vec_dest, new_stmt);
3233 gimple_call_set_lhs (new_stmt, new_temp);
3234 }
3235 else
3236 {
3237 /* Generic support */
3238 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3239 if (op_type != binary_op)
3240 vec_oprnd1 = NULL;
3241 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
3242 vec_oprnd1);
3243 new_temp = make_ssa_name (vec_dest, new_stmt);
3244 gimple_assign_set_lhs (new_stmt, new_temp);
3245 }
3246 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3247
3248 return new_stmt;
3249 }
3250
3251
3252 /* Get vectorized definitions for loop-based vectorization. For the first
3253 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3254 scalar operand), and for the rest we get a copy with
3255 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3256 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3257 The vectors are collected into VEC_OPRNDS. */
3258
3259 static void
3260 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
3261 vec<tree> *vec_oprnds, int multi_step_cvt)
3262 {
3263 tree vec_oprnd;
3264
3265   /* Get the first vector operand.  */
3266   /* All the vector operands except the very first one (which is defined from
3267      the scalar operand) are stmt copies.  */
3268 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3269 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
3270 else
3271 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3272
3273 vec_oprnds->quick_push (vec_oprnd);
3274
3275 /* Get second vector operand. */
3276 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3277 vec_oprnds->quick_push (vec_oprnd);
3278
3279 *oprnd = vec_oprnd;
3280
3281 /* For conversion in multiple steps, continue to get operands
3282 recursively. */
3283 if (multi_step_cvt)
3284 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3285 }
3286
3287
3288 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3289 For multi-step conversions store the resulting vectors and call the function
3290 recursively. */
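
/* For example, demoting int to char with V4SI/V16QI vectors takes two rounds:
   pairs of V4SI operands are first packed into V8HI vectors with
   VEC_PACK_TRUNC_EXPR, and the recursive call then packs pairs of those V8HI
   vectors into the final V16QI results.  */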
3291
3292 static void
3293 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3294 int multi_step_cvt, gimple stmt,
3295 vec<tree> vec_dsts,
3296 gimple_stmt_iterator *gsi,
3297 slp_tree slp_node, enum tree_code code,
3298 stmt_vec_info *prev_stmt_info)
3299 {
3300 unsigned int i;
3301 tree vop0, vop1, new_tmp, vec_dest;
3302 gimple new_stmt;
3303 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3304
3305 vec_dest = vec_dsts.pop ();
3306
3307 for (i = 0; i < vec_oprnds->length (); i += 2)
3308 {
3309 /* Create demotion operation. */
3310 vop0 = (*vec_oprnds)[i];
3311 vop1 = (*vec_oprnds)[i + 1];
3312 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
3313 new_tmp = make_ssa_name (vec_dest, new_stmt);
3314 gimple_assign_set_lhs (new_stmt, new_tmp);
3315 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3316
3317 if (multi_step_cvt)
3318 /* Store the resulting vector for next recursive call. */
3319 (*vec_oprnds)[i/2] = new_tmp;
3320 else
3321 {
3322 /* This is the last step of the conversion sequence. Store the
3323 vectors in SLP_NODE or in vector info of the scalar statement
3324 (or in STMT_VINFO_RELATED_STMT chain). */
3325 if (slp_node)
3326 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3327 else
3328 {
3329 if (!*prev_stmt_info)
3330 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3331 else
3332 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3333
3334 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3335 }
3336 }
3337 }
3338
3339   /* For multi-step demotion operations we first generate demotion operations
3340      from the source type to the intermediate types, and then combine the
3341      results (stored in VEC_OPRNDS) with a demotion operation to the
3342      destination type.  */
3343 if (multi_step_cvt)
3344 {
3345 /* At each level of recursion we have half of the operands we had at the
3346 previous level. */
3347 vec_oprnds->truncate ((i+1)/2);
3348 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3349 stmt, vec_dsts, gsi, slp_node,
3350 VEC_PACK_TRUNC_EXPR,
3351 prev_stmt_info);
3352 }
3353
3354 vec_dsts.quick_push (vec_dest);
3355 }
3356
3357
3358 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3359 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3360 the resulting vectors and call the function recursively. */
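
/* For example, promoting short to int with V8HI/V4SI vectors turns each V8HI
   operand in VEC_OPRNDS0 into two V4SI results, the two halves being produced
   by the CODE1 and CODE2 operations (e.g. VEC_UNPACK_LO_EXPR and
   VEC_UNPACK_HI_EXPR), or by calls to DECL1 and DECL2 when the target
   provides builtins instead.  */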
3361
3362 static void
3363 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3364 vec<tree> *vec_oprnds1,
3365 gimple stmt, tree vec_dest,
3366 gimple_stmt_iterator *gsi,
3367 enum tree_code code1,
3368 enum tree_code code2, tree decl1,
3369 tree decl2, int op_type)
3370 {
3371 int i;
3372 tree vop0, vop1, new_tmp1, new_tmp2;
3373 gimple new_stmt1, new_stmt2;
3374 vec<tree> vec_tmp = vNULL;
3375
3376 vec_tmp.create (vec_oprnds0->length () * 2);
3377 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3378 {
3379 if (op_type == binary_op)
3380 vop1 = (*vec_oprnds1)[i];
3381 else
3382 vop1 = NULL_TREE;
3383
3384 /* Generate the two halves of promotion operation. */
3385 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3386 op_type, vec_dest, gsi, stmt);
3387 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3388 op_type, vec_dest, gsi, stmt);
3389 if (is_gimple_call (new_stmt1))
3390 {
3391 new_tmp1 = gimple_call_lhs (new_stmt1);
3392 new_tmp2 = gimple_call_lhs (new_stmt2);
3393 }
3394 else
3395 {
3396 new_tmp1 = gimple_assign_lhs (new_stmt1);
3397 new_tmp2 = gimple_assign_lhs (new_stmt2);
3398 }
3399
3400 /* Store the results for the next step. */
3401 vec_tmp.quick_push (new_tmp1);
3402 vec_tmp.quick_push (new_tmp2);
3403 }
3404
3405 vec_oprnds0->release ();
3406 *vec_oprnds0 = vec_tmp;
3407 }
3408
3409
3410 /* Check if STMT performs a conversion operation that can be vectorized.
3411 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3412 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3413 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
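
/* Examples (vector types purely illustrative): a float -> double conversion
   is a WIDEN operation, each input vector producing two output vectors;
   double -> float is the corresponding NARROW operation; and an int -> float
   conversion between types of equal width is a NONE operation handled by a
   single vector statement per copy.  */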
3414
3415 static bool
3416 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
3417 gimple *vec_stmt, slp_tree slp_node)
3418 {
3419 tree vec_dest;
3420 tree scalar_dest;
3421 tree op0, op1 = NULL_TREE;
3422 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3423 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3424 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3425 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3426 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3427 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3428 tree new_temp;
3429 tree def;
3430 gimple def_stmt;
3431 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3432 gimple new_stmt = NULL;
3433 stmt_vec_info prev_stmt_info;
3434 int nunits_in;
3435 int nunits_out;
3436 tree vectype_out, vectype_in;
3437 int ncopies, i, j;
3438 tree lhs_type, rhs_type;
3439 enum { NARROW, NONE, WIDEN } modifier;
3440 vec<tree> vec_oprnds0 = vNULL;
3441 vec<tree> vec_oprnds1 = vNULL;
3442 tree vop0;
3443 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3444 int multi_step_cvt = 0;
3445 vec<tree> vec_dsts = vNULL;
3446 vec<tree> interm_types = vNULL;
3447 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3448 int op_type;
3449 machine_mode rhs_mode;
3450 unsigned short fltsz;
3451
3452 /* Is STMT a vectorizable conversion? */
3453
3454 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3455 return false;
3456
3457 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3458 return false;
3459
3460 if (!is_gimple_assign (stmt))
3461 return false;
3462
3463 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3464 return false;
3465
3466 code = gimple_assign_rhs_code (stmt);
3467 if (!CONVERT_EXPR_CODE_P (code)
3468 && code != FIX_TRUNC_EXPR
3469 && code != FLOAT_EXPR
3470 && code != WIDEN_MULT_EXPR
3471 && code != WIDEN_LSHIFT_EXPR)
3472 return false;
3473
3474 op_type = TREE_CODE_LENGTH (code);
3475
3476 /* Check types of lhs and rhs. */
3477 scalar_dest = gimple_assign_lhs (stmt);
3478 lhs_type = TREE_TYPE (scalar_dest);
3479 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3480
3481 op0 = gimple_assign_rhs1 (stmt);
3482 rhs_type = TREE_TYPE (op0);
3483
3484 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3485 && !((INTEGRAL_TYPE_P (lhs_type)
3486 && INTEGRAL_TYPE_P (rhs_type))
3487 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3488 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3489 return false;
3490
3491 if ((INTEGRAL_TYPE_P (lhs_type)
3492 && (TYPE_PRECISION (lhs_type)
3493 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3494 || (INTEGRAL_TYPE_P (rhs_type)
3495 && (TYPE_PRECISION (rhs_type)
3496 != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
3497 {
3498 if (dump_enabled_p ())
3499 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3500 "type conversion to/from bit-precision unsupported."
3501 "\n");
3502 return false;
3503 }
3504
3505 /* Check the operands of the operation. */
3506 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
3507 &def_stmt, &def, &dt[0], &vectype_in))
3508 {
3509 if (dump_enabled_p ())
3510 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3511 "use not simple.\n");
3512 return false;
3513 }
3514 if (op_type == binary_op)
3515 {
3516 bool ok;
3517
3518 op1 = gimple_assign_rhs2 (stmt);
3519 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3520 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3521 OP1. */
3522 if (CONSTANT_CLASS_P (op0))
3523 ok = vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo,
3524 &def_stmt, &def, &dt[1], &vectype_in);
3525 else
3526 ok = vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
3527 &def, &dt[1]);
3528
3529 if (!ok)
3530 {
3531 if (dump_enabled_p ())
3532 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3533 "use not simple.\n");
3534 return false;
3535 }
3536 }
3537
3538   /* If op0 is an external or constant def, use a vector type of
3539      the same size as the output vector type.  */
3540 if (!vectype_in)
3541 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3542 if (vec_stmt)
3543 gcc_assert (vectype_in);
3544 if (!vectype_in)
3545 {
3546 if (dump_enabled_p ())
3547 {
3548 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3549 "no vectype for scalar type ");
3550 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3551 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3552 }
3553
3554 return false;
3555 }
3556
3557 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3558 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3559 if (nunits_in < nunits_out)
3560 modifier = NARROW;
3561 else if (nunits_out == nunits_in)
3562 modifier = NONE;
3563 else
3564 modifier = WIDEN;
3565
3566 /* Multiple types in SLP are handled by creating the appropriate number of
3567 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3568 case of SLP. */
3569 if (slp_node || PURE_SLP_STMT (stmt_info))
3570 ncopies = 1;
3571 else if (modifier == NARROW)
3572 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3573 else
3574 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3575
3576 /* Sanity check: make sure that at least one copy of the vectorized stmt
3577 needs to be generated. */
3578 gcc_assert (ncopies >= 1);
3579
3580 /* Supportable by target? */
3581 switch (modifier)
3582 {
3583 case NONE:
3584 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3585 return false;
3586 if (supportable_convert_operation (code, vectype_out, vectype_in,
3587 &decl1, &code1))
3588 break;
3589 /* FALLTHRU */
3590 unsupported:
3591 if (dump_enabled_p ())
3592 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3593 "conversion not supported by target.\n");
3594 return false;
3595
3596 case WIDEN:
3597 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3598 &code1, &code2, &multi_step_cvt,
3599 &interm_types))
3600 {
3601 /* Binary widening operation can only be supported directly by the
3602 architecture. */
3603 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3604 break;
3605 }
3606
3607 if (code != FLOAT_EXPR
3608 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3609 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3610 goto unsupported;
3611
3612 rhs_mode = TYPE_MODE (rhs_type);
3613 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3614 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3615 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3616 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3617 {
3618 cvt_type
3619 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3620 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3621 if (cvt_type == NULL_TREE)
3622 goto unsupported;
3623
3624 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3625 {
3626 if (!supportable_convert_operation (code, vectype_out,
3627 cvt_type, &decl1, &codecvt1))
3628 goto unsupported;
3629 }
3630 else if (!supportable_widening_operation (code, stmt, vectype_out,
3631 cvt_type, &codecvt1,
3632 &codecvt2, &multi_step_cvt,
3633 &interm_types))
3634 continue;
3635 else
3636 gcc_assert (multi_step_cvt == 0);
3637
3638 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3639 vectype_in, &code1, &code2,
3640 &multi_step_cvt, &interm_types))
3641 break;
3642 }
3643
3644 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3645 goto unsupported;
3646
3647 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3648 codecvt2 = ERROR_MARK;
3649 else
3650 {
3651 multi_step_cvt++;
3652 interm_types.safe_push (cvt_type);
3653 cvt_type = NULL_TREE;
3654 }
3655 break;
3656
3657 case NARROW:
3658 gcc_assert (op_type == unary_op);
3659 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3660 &code1, &multi_step_cvt,
3661 &interm_types))
3662 break;
3663
3664 if (code != FIX_TRUNC_EXPR
3665 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3666 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3667 goto unsupported;
3668
3669 rhs_mode = TYPE_MODE (rhs_type);
3670 cvt_type
3671 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3672 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3673 if (cvt_type == NULL_TREE)
3674 goto unsupported;
3675 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3676 &decl1, &codecvt1))
3677 goto unsupported;
3678 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3679 &code1, &multi_step_cvt,
3680 &interm_types))
3681 break;
3682 goto unsupported;
3683
3684 default:
3685 gcc_unreachable ();
3686 }
3687
3688 if (!vec_stmt) /* transformation not required. */
3689 {
3690 if (dump_enabled_p ())
3691 dump_printf_loc (MSG_NOTE, vect_location,
3692 "=== vectorizable_conversion ===\n");
3693 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3694 {
3695 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3696 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3697 }
3698 else if (modifier == NARROW)
3699 {
3700 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3701 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3702 }
3703 else
3704 {
3705 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3706 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3707 }
3708 interm_types.release ();
3709 return true;
3710 }
3711
3712 /** Transform. **/
3713 if (dump_enabled_p ())
3714 dump_printf_loc (MSG_NOTE, vect_location,
3715 "transform conversion. ncopies = %d.\n", ncopies);
3716
3717 if (op_type == binary_op)
3718 {
3719 if (CONSTANT_CLASS_P (op0))
3720 op0 = fold_convert (TREE_TYPE (op1), op0);
3721 else if (CONSTANT_CLASS_P (op1))
3722 op1 = fold_convert (TREE_TYPE (op0), op1);
3723 }
3724
3725 /* In case of multi-step conversion, we first generate conversion operations
3726 to the intermediate types, and then from those types to the final one.
3727 We create vector destinations for the intermediate type (TYPES) received
3728 from supportable_*_operation, and store them in the correct order
3729 for future use in vect_create_vectorized_*_stmts (). */
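/* A sketch of the bookkeeping, for illustration only (the concrete vector
   types depend on the target): for a two-step widening char -> int we might
   have interm_types = { short-vector } and multi_step_cvt = 1, so vec_dsts
   below ends up as { int-vector dest, short-vector dest } and
   vect_create_vectorized_promotion_stmts walks it from index multi_step_cvt
   (innermost type) down to index 0 (final type).  */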
3730 vec_dsts.create (multi_step_cvt + 1);
3731 vec_dest = vect_create_destination_var (scalar_dest,
3732 (cvt_type && modifier == WIDEN)
3733 ? cvt_type : vectype_out);
3734 vec_dsts.quick_push (vec_dest);
3735
3736 if (multi_step_cvt)
3737 {
3738 for (i = interm_types.length () - 1;
3739 interm_types.iterate (i, &intermediate_type); i--)
3740 {
3741 vec_dest = vect_create_destination_var (scalar_dest,
3742 intermediate_type);
3743 vec_dsts.quick_push (vec_dest);
3744 }
3745 }
3746
3747 if (cvt_type)
3748 vec_dest = vect_create_destination_var (scalar_dest,
3749 modifier == WIDEN
3750 ? vectype_out : cvt_type);
3751
3752 if (!slp_node)
3753 {
3754 if (modifier == WIDEN)
3755 {
3756 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3757 if (op_type == binary_op)
3758 vec_oprnds1.create (1);
3759 }
3760 else if (modifier == NARROW)
3761 vec_oprnds0.create (
3762 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3763 }
3764 else if (code == WIDEN_LSHIFT_EXPR)
3765 vec_oprnds1.create (slp_node->vec_stmts_size);
3766
3767 last_oprnd = op0;
3768 prev_stmt_info = NULL;
3769 switch (modifier)
3770 {
3771 case NONE:
3772 for (j = 0; j < ncopies; j++)
3773 {
3774 if (j == 0)
3775 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3776 -1);
3777 else
3778 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3779
3780 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3781 {
3782 /* Arguments are ready, create the new vector stmt. */
3783 if (code1 == CALL_EXPR)
3784 {
3785 new_stmt = gimple_build_call (decl1, 1, vop0);
3786 new_temp = make_ssa_name (vec_dest, new_stmt);
3787 gimple_call_set_lhs (new_stmt, new_temp);
3788 }
3789 else
3790 {
3791 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3792 new_stmt = gimple_build_assign_with_ops (code1, vec_dest,
3793 vop0, NULL);
3794 new_temp = make_ssa_name (vec_dest, new_stmt);
3795 gimple_assign_set_lhs (new_stmt, new_temp);
3796 }
3797
3798 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3799 if (slp_node)
3800 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3801 }
3802
3803 if (j == 0)
3804 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3805 else
3806 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3807 prev_stmt_info = vinfo_for_stmt (new_stmt);
3808 }
3809 break;
3810
3811 case WIDEN:
3812 /* In case the vectorization factor (VF) is bigger than the number
3813 of elements that we can fit in a vectype (nunits), we have to
3814 generate more than one vector stmt - i.e - we need to "unroll"
3815 the vector stmt by a factor VF/nunits. */
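/* For illustration: with VF = 16 and 8-element input vectors, ncopies = 2
   and the loop below runs twice; each iteration widens one input vector
   into two wider vectors (more for multi-step conversions).  */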
3816 for (j = 0; j < ncopies; j++)
3817 {
3818 /* Handle uses. */
3819 if (j == 0)
3820 {
3821 if (slp_node)
3822 {
3823 if (code == WIDEN_LSHIFT_EXPR)
3824 {
3825 unsigned int k;
3826
3827 vec_oprnd1 = op1;
3828 /* Store vec_oprnd1 for every vector stmt to be created
3829 for SLP_NODE. We check during the analysis that all
3830 the shift arguments are the same. */
3831 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3832 vec_oprnds1.quick_push (vec_oprnd1);
3833
3834 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3835 slp_node, -1);
3836 }
3837 else
3838 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3839 &vec_oprnds1, slp_node, -1);
3840 }
3841 else
3842 {
3843 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3844 vec_oprnds0.quick_push (vec_oprnd0);
3845 if (op_type == binary_op)
3846 {
3847 if (code == WIDEN_LSHIFT_EXPR)
3848 vec_oprnd1 = op1;
3849 else
3850 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt,
3851 NULL);
3852 vec_oprnds1.quick_push (vec_oprnd1);
3853 }
3854 }
3855 }
3856 else
3857 {
3858 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3859 vec_oprnds0.truncate (0);
3860 vec_oprnds0.quick_push (vec_oprnd0);
3861 if (op_type == binary_op)
3862 {
3863 if (code == WIDEN_LSHIFT_EXPR)
3864 vec_oprnd1 = op1;
3865 else
3866 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
3867 vec_oprnd1);
3868 vec_oprnds1.truncate (0);
3869 vec_oprnds1.quick_push (vec_oprnd1);
3870 }
3871 }
3872
3873 /* Arguments are ready. Create the new vector stmts. */
3874 for (i = multi_step_cvt; i >= 0; i--)
3875 {
3876 tree this_dest = vec_dsts[i];
3877 enum tree_code c1 = code1, c2 = code2;
3878 if (i == 0 && codecvt2 != ERROR_MARK)
3879 {
3880 c1 = codecvt1;
3881 c2 = codecvt2;
3882 }
3883 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
3884 &vec_oprnds1,
3885 stmt, this_dest, gsi,
3886 c1, c2, decl1, decl2,
3887 op_type);
3888 }
3889
3890 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3891 {
3892 if (cvt_type)
3893 {
3894 if (codecvt1 == CALL_EXPR)
3895 {
3896 new_stmt = gimple_build_call (decl1, 1, vop0);
3897 new_temp = make_ssa_name (vec_dest, new_stmt);
3898 gimple_call_set_lhs (new_stmt, new_temp);
3899 }
3900 else
3901 {
3902 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3903 new_temp = make_ssa_name (vec_dest, NULL);
3904 new_stmt = gimple_build_assign_with_ops (codecvt1,
3905 new_temp,
3906 vop0, NULL);
3907 }
3908
3909 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3910 }
3911 else
3912 new_stmt = SSA_NAME_DEF_STMT (vop0);
3913
3914 if (slp_node)
3915 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3916 else
3917 {
3918 if (!prev_stmt_info)
3919 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3920 else
3921 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3922 prev_stmt_info = vinfo_for_stmt (new_stmt);
3923 }
3924 }
3925 }
3926
3927 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3928 break;
3929
3930 case NARROW:
3931 /* In case the vectorization factor (VF) is bigger than the number
3932 of elements that we can fit in a vectype (nunits), we have to
3933 generate more than one vector stmt - i.e - we need to "unroll"
3934 the vector stmt by a factor VF/nunits. */
3935 for (j = 0; j < ncopies; j++)
3936 {
3937 /* Handle uses. */
3938 if (slp_node)
3939 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3940 slp_node, -1);
3941 else
3942 {
3943 vec_oprnds0.truncate (0);
3944 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
3945 vect_pow2 (multi_step_cvt) - 1);
3946 }
3947
3948 /* Arguments are ready. Create the new vector stmts. */
3949 if (cvt_type)
3950 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3951 {
3952 if (codecvt1 == CALL_EXPR)
3953 {
3954 new_stmt = gimple_build_call (decl1, 1, vop0);
3955 new_temp = make_ssa_name (vec_dest, new_stmt);
3956 gimple_call_set_lhs (new_stmt, new_temp);
3957 }
3958 else
3959 {
3960 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3961 new_temp = make_ssa_name (vec_dest, NULL);
3962 new_stmt = gimple_build_assign_with_ops (codecvt1, new_temp,
3963 vop0, NULL);
3964 }
3965
3966 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3967 vec_oprnds0[i] = new_temp;
3968 }
3969
3970 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
3971 stmt, vec_dsts, gsi,
3972 slp_node, code1,
3973 &prev_stmt_info);
3974 }
3975
3976 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3977 break;
3978 }
3979
3980 vec_oprnds0.release ();
3981 vec_oprnds1.release ();
3982 vec_dsts.release ();
3983 interm_types.release ();
3984
3985 return true;
3986 }
3987
3988
3989 /* Function vectorizable_assignment.
3990
3991 Check if STMT performs an assignment (copy) that can be vectorized.
3992 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3993 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3994 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3995
3996 static bool
3997 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
3998 gimple *vec_stmt, slp_tree slp_node)
3999 {
4000 tree vec_dest;
4001 tree scalar_dest;
4002 tree op;
4003 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4004 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4005 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4006 tree new_temp;
4007 tree def;
4008 gimple def_stmt;
4009 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4010 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4011 int ncopies;
4012 int i, j;
4013 vec<tree> vec_oprnds = vNULL;
4014 tree vop;
4015 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4016 gimple new_stmt = NULL;
4017 stmt_vec_info prev_stmt_info = NULL;
4018 enum tree_code code;
4019 tree vectype_in;
4020
4021 /* Multiple types in SLP are handled by creating the appropriate number of
4022 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4023 case of SLP. */
4024 if (slp_node || PURE_SLP_STMT (stmt_info))
4025 ncopies = 1;
4026 else
4027 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4028
4029 gcc_assert (ncopies >= 1);
4030
4031 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4032 return false;
4033
4034 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4035 return false;
4036
4037 /* Is vectorizable assignment? */
4038 if (!is_gimple_assign (stmt))
4039 return false;
4040
4041 scalar_dest = gimple_assign_lhs (stmt);
4042 if (TREE_CODE (scalar_dest) != SSA_NAME)
4043 return false;
4044
4045 code = gimple_assign_rhs_code (stmt);
4046 if (gimple_assign_single_p (stmt)
4047 || code == PAREN_EXPR
4048 || CONVERT_EXPR_CODE_P (code))
4049 op = gimple_assign_rhs1 (stmt);
4050 else
4051 return false;
4052
4053 if (code == VIEW_CONVERT_EXPR)
4054 op = TREE_OPERAND (op, 0);
4055
4056 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
4057 &def_stmt, &def, &dt[0], &vectype_in))
4058 {
4059 if (dump_enabled_p ())
4060 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4061 "use not simple.\n");
4062 return false;
4063 }
4064
4065 /* We can handle NOP_EXPR and VIEW_CONVERT_EXPR conversions that do not
4066 change the number of elements or the vector size. */
4067 if ((CONVERT_EXPR_CODE_P (code)
4068 || code == VIEW_CONVERT_EXPR)
4069 && (!vectype_in
4070 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4071 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4072 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4073 return false;
4074
4075 /* We do not handle bit-precision changes. */
4076 if ((CONVERT_EXPR_CODE_P (code)
4077 || code == VIEW_CONVERT_EXPR)
4078 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4079 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4080 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4081 || ((TYPE_PRECISION (TREE_TYPE (op))
4082 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4083 /* But a conversion that does not change the bit-pattern is ok. */
4084 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4085 > TYPE_PRECISION (TREE_TYPE (op)))
4086 && TYPE_UNSIGNED (TREE_TYPE (op))))
4087 {
4088 if (dump_enabled_p ())
4089 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4090 "type conversion to/from bit-precision "
4091 "unsupported.\n");
4092 return false;
4093 }
4094
4095 if (!vec_stmt) /* transformation not required. */
4096 {
4097 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4098 if (dump_enabled_p ())
4099 dump_printf_loc (MSG_NOTE, vect_location,
4100 "=== vectorizable_assignment ===\n");
4101 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4102 return true;
4103 }
4104
4105 /** Transform. **/
4106 if (dump_enabled_p ())
4107 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4108
4109 /* Handle def. */
4110 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4111
4112 /* Handle use. */
4113 for (j = 0; j < ncopies; j++)
4114 {
4115 /* Handle uses. */
4116 if (j == 0)
4117 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4118 else
4119 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4120
4121 /* Arguments are ready. Create the new vector stmt. */
4122 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4123 {
4124 if (CONVERT_EXPR_CODE_P (code)
4125 || code == VIEW_CONVERT_EXPR)
4126 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4127 new_stmt = gimple_build_assign (vec_dest, vop);
4128 new_temp = make_ssa_name (vec_dest, new_stmt);
4129 gimple_assign_set_lhs (new_stmt, new_temp);
4130 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4131 if (slp_node)
4132 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4133 }
4134
4135 if (slp_node)
4136 continue;
4137
4138 if (j == 0)
4139 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4140 else
4141 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4142
4143 prev_stmt_info = vinfo_for_stmt (new_stmt);
4144 }
4145
4146 vec_oprnds.release ();
4147 return true;
4148 }
4149
4150
4151 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4152 either as shift by a scalar or by a vector. */
4153
4154 bool
4155 vect_supportable_shift (enum tree_code code, tree scalar_type)
4156 {
4157
4158 machine_mode vec_mode;
4159 optab optab;
4160 int icode;
4161 tree vectype;
4162
4163 vectype = get_vectype_for_scalar_type (scalar_type);
4164 if (!vectype)
4165 return false;
4166
4167 optab = optab_for_tree_code (code, vectype, optab_scalar);
4168 if (!optab
4169 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4170 {
4171 optab = optab_for_tree_code (code, vectype, optab_vector);
4172 if (!optab
4173 || (optab_handler (optab, TYPE_MODE (vectype))
4174 == CODE_FOR_nothing))
4175 return false;
4176 }
4177
4178 vec_mode = TYPE_MODE (vectype);
4179 icode = (int) optab_handler (optab, vec_mode);
4180 if (icode == CODE_FOR_nothing)
4181 return false;
4182
4183 return true;
4184 }
4185
4186
4187 /* Function vectorizable_shift.
4188
4189 Check if STMT performs a shift operation that can be vectorized.
4190 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4191 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4192 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4193
4194 static bool
4195 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
4196 gimple *vec_stmt, slp_tree slp_node)
4197 {
4198 tree vec_dest;
4199 tree scalar_dest;
4200 tree op0, op1 = NULL;
4201 tree vec_oprnd1 = NULL_TREE;
4202 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4203 tree vectype;
4204 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4205 enum tree_code code;
4206 machine_mode vec_mode;
4207 tree new_temp;
4208 optab optab;
4209 int icode;
4210 machine_mode optab_op2_mode;
4211 tree def;
4212 gimple def_stmt;
4213 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4214 gimple new_stmt = NULL;
4215 stmt_vec_info prev_stmt_info;
4216 int nunits_in;
4217 int nunits_out;
4218 tree vectype_out;
4219 tree op1_vectype;
4220 int ncopies;
4221 int j, i;
4222 vec<tree> vec_oprnds0 = vNULL;
4223 vec<tree> vec_oprnds1 = vNULL;
4224 tree vop0, vop1;
4225 unsigned int k;
4226 bool scalar_shift_arg = true;
4227 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4228 int vf;
4229
4230 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4231 return false;
4232
4233 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4234 return false;
4235
4236 /* Is STMT a vectorizable binary/unary operation? */
4237 if (!is_gimple_assign (stmt))
4238 return false;
4239
4240 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4241 return false;
4242
4243 code = gimple_assign_rhs_code (stmt);
4244
4245 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4246 || code == RROTATE_EXPR))
4247 return false;
4248
4249 scalar_dest = gimple_assign_lhs (stmt);
4250 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4251 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4252 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4253 {
4254 if (dump_enabled_p ())
4255 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4256 "bit-precision shifts not supported.\n");
4257 return false;
4258 }
4259
4260 op0 = gimple_assign_rhs1 (stmt);
4261 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
4262 &def_stmt, &def, &dt[0], &vectype))
4263 {
4264 if (dump_enabled_p ())
4265 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4266 "use not simple.\n");
4267 return false;
4268 }
4269 /* If op0 is an external or constant def use a vector type with
4270 the same size as the output vector type. */
4271 if (!vectype)
4272 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4273 if (vec_stmt)
4274 gcc_assert (vectype);
4275 if (!vectype)
4276 {
4277 if (dump_enabled_p ())
4278 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4279 "no vectype for scalar type\n");
4280 return false;
4281 }
4282
4283 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4284 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4285 if (nunits_out != nunits_in)
4286 return false;
4287
4288 op1 = gimple_assign_rhs2 (stmt);
4289 if (!vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4290 &def, &dt[1], &op1_vectype))
4291 {
4292 if (dump_enabled_p ())
4293 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4294 "use not simple.\n");
4295 return false;
4296 }
4297
4298 if (loop_vinfo)
4299 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4300 else
4301 vf = 1;
4302
4303 /* Multiple types in SLP are handled by creating the appropriate number of
4304 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4305 case of SLP. */
4306 if (slp_node || PURE_SLP_STMT (stmt_info))
4307 ncopies = 1;
4308 else
4309 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4310
4311 gcc_assert (ncopies >= 1);
4312
4313 /* Determine whether the shift amount is a vector, or scalar. If the
4314 shift/rotate amount is a vector, use the vector/vector shift optabs. */
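/* For illustration: in a[i] = b[i] << 3 the shift amount is loop invariant
   and can remain a scalar, whereas a[i] = b[i] << c[i] needs the
   vector/vector shift optab because the amount differs per element.  */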
4315
4316 if (dt[1] == vect_internal_def && !slp_node)
4317 scalar_shift_arg = false;
4318 else if (dt[1] == vect_constant_def
4319 || dt[1] == vect_external_def
4320 || dt[1] == vect_internal_def)
4321 {
4322 /* In SLP we need to check whether the shift count is the same in all
4323 the stmts of the node; in loops, a constant or invariant shift
4324 count is always a scalar shift. */
4325 if (slp_node)
4326 {
4327 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4328 gimple slpstmt;
4329
4330 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4331 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4332 scalar_shift_arg = false;
4333 }
4334 }
4335 else
4336 {
4337 if (dump_enabled_p ())
4338 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4339 "operand mode requires invariant argument.\n");
4340 return false;
4341 }
4342
4343 /* Vector shifted by vector. */
4344 if (!scalar_shift_arg)
4345 {
4346 optab = optab_for_tree_code (code, vectype, optab_vector);
4347 if (dump_enabled_p ())
4348 dump_printf_loc (MSG_NOTE, vect_location,
4349 "vector/vector shift/rotate found.\n");
4350
4351 if (!op1_vectype)
4352 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4353 if (op1_vectype == NULL_TREE
4354 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4355 {
4356 if (dump_enabled_p ())
4357 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4358 "unusable type for last operand in"
4359 " vector/vector shift/rotate.\n");
4360 return false;
4361 }
4362 }
4363 /* See if the machine has a vector shifted by scalar insn and if not
4364 then see if it has a vector shifted by vector insn. */
4365 else
4366 {
4367 optab = optab_for_tree_code (code, vectype, optab_scalar);
4368 if (optab
4369 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4370 {
4371 if (dump_enabled_p ())
4372 dump_printf_loc (MSG_NOTE, vect_location,
4373 "vector/scalar shift/rotate found.\n");
4374 }
4375 else
4376 {
4377 optab = optab_for_tree_code (code, vectype, optab_vector);
4378 if (optab
4379 && (optab_handler (optab, TYPE_MODE (vectype))
4380 != CODE_FOR_nothing))
4381 {
4382 scalar_shift_arg = false;
4383
4384 if (dump_enabled_p ())
4385 dump_printf_loc (MSG_NOTE, vect_location,
4386 "vector/vector shift/rotate found.\n");
4387
4388 /* Unlike the other binary operators, shifts/rotates have
4389 the rhs being int, instead of the same type as the lhs,
4390 so make sure the scalar is the right type if we are
4391 dealing with vectors of long long/long/short/char. */
4392 if (dt[1] == vect_constant_def)
4393 op1 = fold_convert (TREE_TYPE (vectype), op1);
4394 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4395 TREE_TYPE (op1)))
4396 {
4397 if (slp_node
4398 && TYPE_MODE (TREE_TYPE (vectype))
4399 != TYPE_MODE (TREE_TYPE (op1)))
4400 {
4401 if (dump_enabled_p ())
4402 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4403 "unusable type for last operand in"
4404 " vector/vector shift/rotate.\n");
4405 return false;
4406 }
4407 if (vec_stmt && !slp_node)
4408 {
4409 op1 = fold_convert (TREE_TYPE (vectype), op1);
4410 op1 = vect_init_vector (stmt, op1,
4411 TREE_TYPE (vectype), NULL);
4412 }
4413 }
4414 }
4415 }
4416 }
4417
4418 /* Supportable by target? */
4419 if (!optab)
4420 {
4421 if (dump_enabled_p ())
4422 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4423 "no optab.\n");
4424 return false;
4425 }
4426 vec_mode = TYPE_MODE (vectype);
4427 icode = (int) optab_handler (optab, vec_mode);
4428 if (icode == CODE_FOR_nothing)
4429 {
4430 if (dump_enabled_p ())
4431 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4432 "op not supported by target.\n");
4433 /* Check only during analysis. */
4434 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4435 || (vf < vect_min_worthwhile_factor (code)
4436 && !vec_stmt))
4437 return false;
4438 if (dump_enabled_p ())
4439 dump_printf_loc (MSG_NOTE, vect_location,
4440 "proceeding using word mode.\n");
4441 }
4442
4443 /* Worthwhile without SIMD support? Check only during analysis. */
4444 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4445 && vf < vect_min_worthwhile_factor (code)
4446 && !vec_stmt)
4447 {
4448 if (dump_enabled_p ())
4449 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4450 "not worthwhile without SIMD support.\n");
4451 return false;
4452 }
4453
4454 if (!vec_stmt) /* transformation not required. */
4455 {
4456 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4457 if (dump_enabled_p ())
4458 dump_printf_loc (MSG_NOTE, vect_location,
4459 "=== vectorizable_shift ===\n");
4460 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4461 return true;
4462 }
4463
4464 /** Transform. **/
4465
4466 if (dump_enabled_p ())
4467 dump_printf_loc (MSG_NOTE, vect_location,
4468 "transform binary/unary operation.\n");
4469
4470 /* Handle def. */
4471 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4472
4473 prev_stmt_info = NULL;
4474 for (j = 0; j < ncopies; j++)
4475 {
4476 /* Handle uses. */
4477 if (j == 0)
4478 {
4479 if (scalar_shift_arg)
4480 {
4481 /* Vector shl and shr insn patterns can be defined with scalar
4482 operand 2 (shift operand). In this case, use constant or loop
4483 invariant op1 directly, without extending it to vector mode
4484 first. */
4485 optab_op2_mode = insn_data[icode].operand[2].mode;
4486 if (!VECTOR_MODE_P (optab_op2_mode))
4487 {
4488 if (dump_enabled_p ())
4489 dump_printf_loc (MSG_NOTE, vect_location,
4490 "operand 1 using scalar mode.\n");
4491 vec_oprnd1 = op1;
4492 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4493 vec_oprnds1.quick_push (vec_oprnd1);
4494 if (slp_node)
4495 {
4496 /* Store vec_oprnd1 for every vector stmt to be created
4497 for SLP_NODE. We check during the analysis that all
4498 the shift arguments are the same.
4499 TODO: Allow different constants for different vector
4500 stmts generated for an SLP instance. */
4501 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4502 vec_oprnds1.quick_push (vec_oprnd1);
4503 }
4504 }
4505 }
4506
4507 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4508 (a special case for certain kinds of vector shifts); otherwise,
4509 operand 1 should be of a vector type (the usual case). */
4510 if (vec_oprnd1)
4511 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4512 slp_node, -1);
4513 else
4514 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4515 slp_node, -1);
4516 }
4517 else
4518 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4519
4520 /* Arguments are ready. Create the new vector stmt. */
4521 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4522 {
4523 vop1 = vec_oprnds1[i];
4524 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
4525 new_temp = make_ssa_name (vec_dest, new_stmt);
4526 gimple_assign_set_lhs (new_stmt, new_temp);
4527 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4528 if (slp_node)
4529 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4530 }
4531
4532 if (slp_node)
4533 continue;
4534
4535 if (j == 0)
4536 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4537 else
4538 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4539 prev_stmt_info = vinfo_for_stmt (new_stmt);
4540 }
4541
4542 vec_oprnds0.release ();
4543 vec_oprnds1.release ();
4544
4545 return true;
4546 }
4547
4548
4549 /* Function vectorizable_operation.
4550
4551 Check if STMT performs a binary, unary or ternary operation that can
4552 be vectorized.
4553 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4554 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4555 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4556
4557 static bool
4558 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
4559 gimple *vec_stmt, slp_tree slp_node)
4560 {
4561 tree vec_dest;
4562 tree scalar_dest;
4563 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4564 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4565 tree vectype;
4566 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4567 enum tree_code code;
4568 machine_mode vec_mode;
4569 tree new_temp;
4570 int op_type;
4571 optab optab;
4572 int icode;
4573 tree def;
4574 gimple def_stmt;
4575 enum vect_def_type dt[3]
4576 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4577 gimple new_stmt = NULL;
4578 stmt_vec_info prev_stmt_info;
4579 int nunits_in;
4580 int nunits_out;
4581 tree vectype_out;
4582 int ncopies;
4583 int j, i;
4584 vec<tree> vec_oprnds0 = vNULL;
4585 vec<tree> vec_oprnds1 = vNULL;
4586 vec<tree> vec_oprnds2 = vNULL;
4587 tree vop0, vop1, vop2;
4588 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4589 int vf;
4590
4591 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4592 return false;
4593
4594 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4595 return false;
4596
4597 /* Is STMT a vectorizable binary/unary operation? */
4598 if (!is_gimple_assign (stmt))
4599 return false;
4600
4601 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4602 return false;
4603
4604 code = gimple_assign_rhs_code (stmt);
4605
4606 /* For pointer addition, we should use the normal plus for
4607 the vector addition. */
4608 if (code == POINTER_PLUS_EXPR)
4609 code = PLUS_EXPR;
4610
4611 /* Support only unary, binary and ternary operations. */
4612 op_type = TREE_CODE_LENGTH (code);
4613 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
4614 {
4615 if (dump_enabled_p ())
4616 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4617 "num. args = %d (not unary/binary/ternary op).\n",
4618 op_type);
4619 return false;
4620 }
4621
4622 scalar_dest = gimple_assign_lhs (stmt);
4623 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4624
4625 /* Most operations cannot handle bit-precision types without extra
4626 truncations. */
4627 if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4628 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4629 /* Exceptions are bitwise binary operations. */
4630 && code != BIT_IOR_EXPR
4631 && code != BIT_XOR_EXPR
4632 && code != BIT_AND_EXPR)
4633 {
4634 if (dump_enabled_p ())
4635 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4636 "bit-precision arithmetic not supported.\n");
4637 return false;
4638 }
4639
4640 op0 = gimple_assign_rhs1 (stmt);
4641 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
4642 &def_stmt, &def, &dt[0], &vectype))
4643 {
4644 if (dump_enabled_p ())
4645 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4646 "use not simple.\n");
4647 return false;
4648 }
4649 /* If op0 is an external or constant def use a vector type with
4650 the same size as the output vector type. */
4651 if (!vectype)
4652 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4653 if (vec_stmt)
4654 gcc_assert (vectype);
4655 if (!vectype)
4656 {
4657 if (dump_enabled_p ())
4658 {
4659 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4660 "no vectype for scalar type ");
4661 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4662 TREE_TYPE (op0));
4663 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4664 }
4665
4666 return false;
4667 }
4668
4669 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4670 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4671 if (nunits_out != nunits_in)
4672 return false;
4673
4674 if (op_type == binary_op || op_type == ternary_op)
4675 {
4676 op1 = gimple_assign_rhs2 (stmt);
4677 if (!vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4678 &def, &dt[1]))
4679 {
4680 if (dump_enabled_p ())
4681 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4682 "use not simple.\n");
4683 return false;
4684 }
4685 }
4686 if (op_type == ternary_op)
4687 {
4688 op2 = gimple_assign_rhs3 (stmt);
4689 if (!vect_is_simple_use (op2, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4690 &def, &dt[2]))
4691 {
4692 if (dump_enabled_p ())
4693 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4694 "use not simple.\n");
4695 return false;
4696 }
4697 }
4698
4699 if (loop_vinfo)
4700 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4701 else
4702 vf = 1;
4703
4704 /* Multiple types in SLP are handled by creating the appropriate number of
4705 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4706 case of SLP. */
4707 if (slp_node || PURE_SLP_STMT (stmt_info))
4708 ncopies = 1;
4709 else
4710 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4711
4712 gcc_assert (ncopies >= 1);
4713
4714 /* Shifts are handled in vectorizable_shift (). */
4715 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4716 || code == RROTATE_EXPR)
4717 return false;
4718
4719 /* Supportable by target? */
4720
4721 vec_mode = TYPE_MODE (vectype);
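/* A note on the special case below, for illustration: MULT_HIGHPART_EXPR is
   not looked up via optab_for_tree_code here; can_mult_highpart_p reports
   whether the target can compute the high part (directly or, on some
   targets, via widening multiplies), and LAST_INSN_CODE merely serves as a
   "supported" marker.  */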
4722 if (code == MULT_HIGHPART_EXPR)
4723 {
4724 if (can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype)))
4725 icode = LAST_INSN_CODE;
4726 else
4727 icode = CODE_FOR_nothing;
4728 }
4729 else
4730 {
4731 optab = optab_for_tree_code (code, vectype, optab_default);
4732 if (!optab)
4733 {
4734 if (dump_enabled_p ())
4735 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4736 "no optab.\n");
4737 return false;
4738 }
4739 icode = (int) optab_handler (optab, vec_mode);
4740 }
4741
4742 if (icode == CODE_FOR_nothing)
4743 {
4744 if (dump_enabled_p ())
4745 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4746 "op not supported by target.\n");
4747 /* Check only during analysis. */
4748 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4749 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
4750 return false;
4751 if (dump_enabled_p ())
4752 dump_printf_loc (MSG_NOTE, vect_location,
4753 "proceeding using word mode.\n");
4754 }
4755
4756 /* Worthwhile without SIMD support? Check only during analysis. */
4757 if (!VECTOR_MODE_P (vec_mode)
4758 && !vec_stmt
4759 && vf < vect_min_worthwhile_factor (code))
4760 {
4761 if (dump_enabled_p ())
4762 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4763 "not worthwhile without SIMD support.\n");
4764 return false;
4765 }
4766
4767 if (!vec_stmt) /* transformation not required. */
4768 {
4769 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
4770 if (dump_enabled_p ())
4771 dump_printf_loc (MSG_NOTE, vect_location,
4772 "=== vectorizable_operation ===\n");
4773 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4774 return true;
4775 }
4776
4777 /** Transform. **/
4778
4779 if (dump_enabled_p ())
4780 dump_printf_loc (MSG_NOTE, vect_location,
4781 "transform binary/unary operation.\n");
4782
4783 /* Handle def. */
4784 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4785
4786 /* In case the vectorization factor (VF) is bigger than the number
4787 of elements that we can fit in a vectype (nunits), we have to generate
4788 more than one vector stmt - i.e - we need to "unroll" the
4789 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4790 from one copy of the vector stmt to the next, in the field
4791 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4792 stages to find the correct vector defs to be used when vectorizing
4793 stmts that use the defs of the current stmt. The example below
4794 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4795 we need to create 4 vectorized stmts):
4796
4797 before vectorization:
4798 RELATED_STMT VEC_STMT
4799 S1: x = memref - -
4800 S2: z = x + 1 - -
4801
4802 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4803 there):
4804 RELATED_STMT VEC_STMT
4805 VS1_0: vx0 = memref0 VS1_1 -
4806 VS1_1: vx1 = memref1 VS1_2 -
4807 VS1_2: vx2 = memref2 VS1_3 -
4808 VS1_3: vx3 = memref3 - -
4809 S1: x = load - VS1_0
4810 S2: z = x + 1 - -
4811
4812 step2: vectorize stmt S2 (done here):
4813 To vectorize stmt S2 we first need to find the relevant vector
4814 def for the first operand 'x'. This is, as usual, obtained from
4815 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4816 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4817 relevant vector def 'vx0'. Having found 'vx0' we can generate
4818 the vector stmt VS2_0, and as usual, record it in the
4819 STMT_VINFO_VEC_STMT of stmt S2.
4820 When creating the second copy (VS2_1), we obtain the relevant vector
4821 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4822 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4823 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4824 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4825 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4826 chain of stmts and pointers:
4827 RELATED_STMT VEC_STMT
4828 VS1_0: vx0 = memref0 VS1_1 -
4829 VS1_1: vx1 = memref1 VS1_2 -
4830 VS1_2: vx2 = memref2 VS1_3 -
4831 VS1_3: vx3 = memref3 - -
4832 S1: x = load - VS1_0
4833 VS2_0: vz0 = vx0 + v1 VS2_1 -
4834 VS2_1: vz1 = vx1 + v1 VS2_2 -
4835 VS2_2: vz2 = vx2 + v1 VS2_3 -
4836 VS2_3: vz3 = vx3 + v1 - -
4837 S2: z = x + 1 - VS2_0 */
4838
4839 prev_stmt_info = NULL;
4840 for (j = 0; j < ncopies; j++)
4841 {
4842 /* Handle uses. */
4843 if (j == 0)
4844 {
4845 if (op_type == binary_op || op_type == ternary_op)
4846 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4847 slp_node, -1);
4848 else
4849 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4850 slp_node, -1);
4851 if (op_type == ternary_op)
4852 {
4853 vec_oprnds2.create (1);
4854 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
4855 stmt,
4856 NULL));
4857 }
4858 }
4859 else
4860 {
4861 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4862 if (op_type == ternary_op)
4863 {
4864 tree vec_oprnd = vec_oprnds2.pop ();
4865 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
4866 vec_oprnd));
4867 }
4868 }
4869
4870 /* Arguments are ready. Create the new vector stmt. */
4871 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4872 {
4873 vop1 = ((op_type == binary_op || op_type == ternary_op)
4874 ? vec_oprnds1[i] : NULL_TREE);
4875 vop2 = ((op_type == ternary_op)
4876 ? vec_oprnds2[i] : NULL_TREE);
4877 new_stmt = gimple_build_assign_with_ops (code, vec_dest,
4878 vop0, vop1, vop2);
4879 new_temp = make_ssa_name (vec_dest, new_stmt);
4880 gimple_assign_set_lhs (new_stmt, new_temp);
4881 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4882 if (slp_node)
4883 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4884 }
4885
4886 if (slp_node)
4887 continue;
4888
4889 if (j == 0)
4890 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4891 else
4892 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4893 prev_stmt_info = vinfo_for_stmt (new_stmt);
4894 }
4895
4896 vec_oprnds0.release ();
4897 vec_oprnds1.release ();
4898 vec_oprnds2.release ();
4899
4900 return true;
4901 }
4902
4903 /* A helper function to ensure data reference DR's base alignment
4904 for STMT_INFO. */
4905
4906 static void
4907 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
4908 {
4909 if (!dr->aux)
4910 return;
4911
4912 if (((dataref_aux *)dr->aux)->base_misaligned)
4913 {
4914 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4915 tree base_decl = ((dataref_aux *)dr->aux)->base_decl;
4916
4917 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
4918 DECL_USER_ALIGN (base_decl) = 1;
4919 ((dataref_aux *)dr->aux)->base_misaligned = false;
4920 }
4921 }
4922
4923
4924 /* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
4925 reversal of the vector elements. If that is impossible to do,
4926 returns NULL. */
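/* For illustration: with a 4-element vector the selector built below is
   { 3, 2, 1, 0 }, so VEC_PERM_EXPR <v, v, mask> yields
   { v[3], v[2], v[1], v[0] }.  */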
4927
4928 static tree
4929 perm_mask_for_reverse (tree vectype)
4930 {
4931 int i, nunits;
4932 unsigned char *sel;
4933
4934 nunits = TYPE_VECTOR_SUBPARTS (vectype);
4935 sel = XALLOCAVEC (unsigned char, nunits);
4936
4937 for (i = 0; i < nunits; ++i)
4938 sel[i] = nunits - 1 - i;
4939
4940 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
4941 return NULL_TREE;
4942 return vect_gen_perm_mask_checked (vectype, sel);
4943 }
4944
4945 /* Function vectorizable_store.
4946
4947 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
4948 can be vectorized.
4949 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4950 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4951 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4952
4953 static bool
4954 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
4955 slp_tree slp_node)
4956 {
4957 tree scalar_dest;
4958 tree data_ref;
4959 tree op;
4960 tree vec_oprnd = NULL_TREE;
4961 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4962 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
4963 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4964 tree elem_type;
4965 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4966 struct loop *loop = NULL;
4967 machine_mode vec_mode;
4968 tree dummy;
4969 enum dr_alignment_support alignment_support_scheme;
4970 tree def;
4971 gimple def_stmt;
4972 enum vect_def_type dt;
4973 stmt_vec_info prev_stmt_info = NULL;
4974 tree dataref_ptr = NULL_TREE;
4975 tree dataref_offset = NULL_TREE;
4976 gimple ptr_incr = NULL;
4977 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4978 int ncopies;
4979 int j;
4980 gimple next_stmt, first_stmt = NULL;
4981 bool grouped_store = false;
4982 bool store_lanes_p = false;
4983 unsigned int group_size, i;
4984 vec<tree> dr_chain = vNULL;
4985 vec<tree> oprnds = vNULL;
4986 vec<tree> result_chain = vNULL;
4987 bool inv_p;
4988 bool negative = false;
4989 tree offset = NULL_TREE;
4990 vec<tree> vec_oprnds = vNULL;
4991 bool slp = (slp_node != NULL);
4992 unsigned int vec_num;
4993 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4994 tree aggr_type;
4995
4996 if (loop_vinfo)
4997 loop = LOOP_VINFO_LOOP (loop_vinfo);
4998
4999 /* Multiple types in SLP are handled by creating the appropriate number of
5000 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5001 case of SLP. */
5002 if (slp || PURE_SLP_STMT (stmt_info))
5003 ncopies = 1;
5004 else
5005 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5006
5007 gcc_assert (ncopies >= 1);
5008
5009 /* FORNOW. This restriction should be relaxed. */
5010 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5011 {
5012 if (dump_enabled_p ())
5013 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5014 "multiple types in nested loop.\n");
5015 return false;
5016 }
5017
5018 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5019 return false;
5020
5021 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5022 return false;
5023
5024 /* Is vectorizable store? */
5025
5026 if (!is_gimple_assign (stmt))
5027 return false;
5028
5029 scalar_dest = gimple_assign_lhs (stmt);
5030 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5031 && is_pattern_stmt_p (stmt_info))
5032 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5033 if (TREE_CODE (scalar_dest) != ARRAY_REF
5034 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5035 && TREE_CODE (scalar_dest) != INDIRECT_REF
5036 && TREE_CODE (scalar_dest) != COMPONENT_REF
5037 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5038 && TREE_CODE (scalar_dest) != REALPART_EXPR
5039 && TREE_CODE (scalar_dest) != MEM_REF)
5040 return false;
5041
5042 gcc_assert (gimple_assign_single_p (stmt));
5043 op = gimple_assign_rhs1 (stmt);
5044 if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo, &def_stmt,
5045 &def, &dt))
5046 {
5047 if (dump_enabled_p ())
5048 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5049 "use not simple.\n");
5050 return false;
5051 }
5052
5053 elem_type = TREE_TYPE (vectype);
5054 vec_mode = TYPE_MODE (vectype);
5055
5056 /* FORNOW. In some cases we can vectorize even if the data-type is not
5057 supported (e.g. array initialization with 0). */
5058 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5059 return false;
5060
5061 if (!STMT_VINFO_DATA_REF (stmt_info))
5062 return false;
5063
5064 negative =
5065 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5066 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5067 size_zero_node) < 0;
5068 if (negative && ncopies > 1)
5069 {
5070 if (dump_enabled_p ())
5071 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5072 "multiple types with negative step.\n");
5073 return false;
5074 }
5075
5076 if (negative)
5077 {
5078 gcc_assert (!grouped_store);
5079 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5080 if (alignment_support_scheme != dr_aligned
5081 && alignment_support_scheme != dr_unaligned_supported)
5082 {
5083 if (dump_enabled_p ())
5084 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5085 "negative step but alignment required.\n");
5086 return false;
5087 }
5088 if (dt != vect_constant_def
5089 && dt != vect_external_def
5090 && !perm_mask_for_reverse (vectype))
5091 {
5092 if (dump_enabled_p ())
5093 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5094 "negative step and reversing not supported.\n");
5095 return false;
5096 }
5097 }
5098
5099 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5100 {
5101 grouped_store = true;
5102 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5103 if (!slp && !PURE_SLP_STMT (stmt_info))
5104 {
5105 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5106 if (vect_store_lanes_supported (vectype, group_size))
5107 store_lanes_p = true;
5108 else if (!vect_grouped_store_supported (vectype, group_size))
5109 return false;
5110 }
5111
5112 if (first_stmt == stmt)
5113 {
5114 /* STMT is the leader of the group. Check the operands of all the
5115 stmts of the group. */
5116 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5117 while (next_stmt)
5118 {
5119 gcc_assert (gimple_assign_single_p (next_stmt));
5120 op = gimple_assign_rhs1 (next_stmt);
5121 if (!vect_is_simple_use (op, next_stmt, loop_vinfo, bb_vinfo,
5122 &def_stmt, &def, &dt))
5123 {
5124 if (dump_enabled_p ())
5125 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5126 "use not simple.\n");
5127 return false;
5128 }
5129 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5130 }
5131 }
5132 }
5133
5134 if (!vec_stmt) /* transformation not required. */
5135 {
5136 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5137 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5138 NULL, NULL, NULL);
5139 return true;
5140 }
5141
5142 /** Transform. **/
5143
5144 ensure_base_align (stmt_info, dr);
5145
5146 if (grouped_store)
5147 {
5148 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5149 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5150
5151 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5152
5153 /* FORNOW */
5154 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5155
5156 /* We vectorize all the stmts of the interleaving group when we
5157 reach the last stmt in the group. */
5158 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5159 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5160 && !slp)
5161 {
5162 *vec_stmt = NULL;
5163 return true;
5164 }
5165
5166 if (slp)
5167 {
5168 grouped_store = false;
5169 /* VEC_NUM is the number of vect stmts to be created for this
5170 group. */
5171 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5172 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5173 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5174 op = gimple_assign_rhs1 (first_stmt);
5175 }
5176 else
5177 /* VEC_NUM is the number of vect stmts to be created for this
5178 group. */
5179 vec_num = group_size;
5180 }
5181 else
5182 {
5183 first_stmt = stmt;
5184 first_dr = dr;
5185 group_size = vec_num = 1;
5186 }
5187
5188 if (dump_enabled_p ())
5189 dump_printf_loc (MSG_NOTE, vect_location,
5190 "transform store. ncopies = %d\n", ncopies);
5191
5192 dr_chain.create (group_size);
5193 oprnds.create (group_size);
5194
5195 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
5196 gcc_assert (alignment_support_scheme);
5197 /* Targets with store-lane instructions must not require explicit
5198 realignment. */
5199 gcc_assert (!store_lanes_p
5200 || alignment_support_scheme == dr_aligned
5201 || alignment_support_scheme == dr_unaligned_supported);
5202
5203 if (negative)
5204 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
5205
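/* For store-lanes the aggregate data reference spans the whole group,
   i.e. an array holding the VEC_NUM vectors of the group; otherwise it is
   a single vector.  */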
5206 if (store_lanes_p)
5207 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5208 else
5209 aggr_type = vectype;
5210
5211 /* In case the vectorization factor (VF) is bigger than the number
5212 of elements that we can fit in a vectype (nunits), we have to generate
5213 more than one vector stmt - i.e - we need to "unroll" the
5214 vector stmt by a factor VF/nunits. For more details see documentation in
5215 vect_get_vec_def_for_copy_stmt. */
5216
5217 /* In case of interleaving (non-unit grouped access):
5218
5219 S1: &base + 2 = x2
5220 S2: &base = x0
5221 S3: &base + 1 = x1
5222 S4: &base + 3 = x3
5223
5224 We create vectorized stores starting from base address (the access of the
5225 first stmt in the chain (S2 in the above example), when the last store stmt
5226 of the chain (S4) is reached:
5227
5228 VS1: &base = vx2
5229 VS2: &base + vec_size*1 = vx0
5230 VS3: &base + vec_size*2 = vx1
5231 VS4: &base + vec_size*3 = vx3
5232
5233 Then permutation statements are generated:
5234
5235 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5236 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5237 ...
5238
5239 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5240 (the order of the data-refs in the output of vect_permute_store_chain
5241 corresponds to the order of scalar stmts in the interleaving chain - see
5242 the documentation of vect_permute_store_chain()).
5243
5244 In case of both multiple types and interleaving, above vector stores and
5245 permutation stmts are created for every copy. The result vector stmts are
5246 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5247 STMT_VINFO_RELATED_STMT for the next copies.
5248 */
5249
5250 prev_stmt_info = NULL;
5251 for (j = 0; j < ncopies; j++)
5252 {
5253 gimple new_stmt;
5254
5255 if (j == 0)
5256 {
5257 if (slp)
5258 {
5259 /* Get vectorized arguments for SLP_NODE. */
5260 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5261 NULL, slp_node, -1);
5262
5263 vec_oprnd = vec_oprnds[0];
5264 }
5265 else
5266 {
5267 /* For interleaved stores we collect vectorized defs for all the
5268 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5269 used as an input to vect_permute_store_chain(), and OPRNDS as
5270 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5271
5272 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5273 OPRNDS are of size 1. */
5274 next_stmt = first_stmt;
5275 for (i = 0; i < group_size; i++)
5276 {
5277 /* Since gaps are not supported for interleaved stores,
5278 GROUP_SIZE is the exact number of stmts in the chain.
5279 Therefore, NEXT_STMT can't be NULL_TREE. In case there is
5280 no interleaving, GROUP_SIZE is 1, and only one
5281 iteration of the loop will be executed. */
5282 gcc_assert (next_stmt
5283 && gimple_assign_single_p (next_stmt));
5284 op = gimple_assign_rhs1 (next_stmt);
5285
5286 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
5287 NULL);
5288 dr_chain.quick_push (vec_oprnd);
5289 oprnds.quick_push (vec_oprnd);
5290 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5291 }
5292 }
5293
5294 /* We should have caught mismatched types earlier. */
5295 gcc_assert (useless_type_conversion_p (vectype,
5296 TREE_TYPE (vec_oprnd)));
5297 bool simd_lane_access_p
5298 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
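/* If this is a SIMD-lane access whose base is a plain variable accessed at
   offset zero (and whose alias set is compatible with the vectorized
   access), address it directly via a constant offset instead of creating a
   separate data-ref pointer.  */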
5299 if (simd_lane_access_p
5300 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5301 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5302 && integer_zerop (DR_OFFSET (first_dr))
5303 && integer_zerop (DR_INIT (first_dr))
5304 && alias_sets_conflict_p (get_alias_set (aggr_type),
5305 get_alias_set (DR_REF (first_dr))))
5306 {
5307 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5308 dataref_offset = build_int_cst (reference_alias_ptr_type
5309 (DR_REF (first_dr)), 0);
5310 inv_p = false;
5311 }
5312 else
5313 dataref_ptr
5314 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5315 simd_lane_access_p ? loop : NULL,
5316 offset, &dummy, gsi, &ptr_incr,
5317 simd_lane_access_p, &inv_p);
5318 gcc_assert (bb_vinfo || !inv_p);
5319 }
5320 else
5321 {
5322 /* For interleaved stores we created vectorized defs for all the
5323 defs stored in OPRNDS in the previous iteration (previous copy).
5324 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5325 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5326 next copy.
5327 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5328 OPRNDS are of size 1. */
5329 for (i = 0; i < group_size; i++)
5330 {
5331 op = oprnds[i];
5332 vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo, &def_stmt,
5333 &def, &dt);
5334 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5335 dr_chain[i] = vec_oprnd;
5336 oprnds[i] = vec_oprnd;
5337 }
5338 if (dataref_offset)
5339 dataref_offset
5340 = int_const_binop (PLUS_EXPR, dataref_offset,
5341 TYPE_SIZE_UNIT (aggr_type));
5342 else
5343 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5344 TYPE_SIZE_UNIT (aggr_type));
5345 }
5346
5347 if (store_lanes_p)
5348 {
5349 tree vec_array;
5350
5351 /* Combine all the vectors into an array. */
5352 vec_array = create_vector_array (vectype, vec_num);
5353 for (i = 0; i < vec_num; i++)
5354 {
5355 vec_oprnd = dr_chain[i];
5356 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
5357 }
5358
5359 /* Emit:
5360 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5361 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5362 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5363 gimple_call_set_lhs (new_stmt, data_ref);
5364 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5365 }
5366 else
5367 {
5368 new_stmt = NULL;
5369 if (grouped_store)
5370 {
5371 if (j == 0)
5372 result_chain.create (group_size);
5373 /* Permute. */
5374 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5375 &result_chain);
5376 }
5377
5378 next_stmt = first_stmt;
5379 for (i = 0; i < vec_num; i++)
5380 {
5381 unsigned align, misalign;
5382
5383 if (i > 0)
5384 /* Bump the vector pointer. */
5385 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5386 stmt, NULL_TREE);
5387
5388 if (slp)
5389 vec_oprnd = vec_oprnds[i];
5390 else if (grouped_store)
5391 /* For grouped stores vectorized defs are interleaved in
5392 vect_permute_store_chain(). */
5393 vec_oprnd = result_chain[i];
5394
5395 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
5396 dataref_offset
5397 ? dataref_offset
5398 : build_int_cst (reference_alias_ptr_type
5399 (DR_REF (first_dr)), 0));
5400 align = TYPE_ALIGN_UNIT (vectype);
5401 if (aligned_access_p (first_dr))
5402 misalign = 0;
5403 else if (DR_MISALIGNMENT (first_dr) == -1)
5404 {
5405 TREE_TYPE (data_ref)
5406 = build_aligned_type (TREE_TYPE (data_ref),
5407 TYPE_ALIGN (elem_type));
5408 align = TYPE_ALIGN_UNIT (elem_type);
5409 misalign = 0;
5410 }
5411 else
5412 {
5413 TREE_TYPE (data_ref)
5414 = build_aligned_type (TREE_TYPE (data_ref),
5415 TYPE_ALIGN (elem_type));
5416 misalign = DR_MISALIGNMENT (first_dr);
5417 }
5418 if (dataref_offset == NULL_TREE)
5419 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
5420 misalign);
5421
5422 if (negative
5423 && dt != vect_constant_def
5424 && dt != vect_external_def)
5425 {
5426 tree perm_mask = perm_mask_for_reverse (vectype);
5427 tree perm_dest
5428 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
5429 vectype);
5430 tree new_temp = make_ssa_name (perm_dest, NULL);
5431
5432 /* Generate the permute statement. */
5433 gimple perm_stmt
5434 = gimple_build_assign_with_ops (VEC_PERM_EXPR, new_temp,
5435 vec_oprnd, vec_oprnd,
5436 perm_mask);
5437 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5438
5439 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
5440 vec_oprnd = new_temp;
5441 }
5442
5443 /* Arguments are ready. Create the new vector stmt. */
5444 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5445 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5446
5447 if (slp)
5448 continue;
5449
5450 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5451 if (!next_stmt)
5452 break;
5453 }
5454 }
5455 if (!slp)
5456 {
5457 if (j == 0)
5458 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5459 else
5460 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5461 prev_stmt_info = vinfo_for_stmt (new_stmt);
5462 }
5463 }
5464
5465 dr_chain.release ();
5466 oprnds.release ();
5467 result_chain.release ();
5468 vec_oprnds.release ();
5469
5470 return true;
5471 }
5472
5473 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
5474 VECTOR_CST mask. No checks are made that the target platform supports the
5475 mask, so callers may wish to test can_vec_perm_p separately, or use
5476 vect_gen_perm_mask_checked. */
5477
5478 tree
5479 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
5480 {
5481 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
5482 int i, nunits;
5483
5484 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5485
5486 mask_elt_type = lang_hooks.types.type_for_mode
5487 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
5488 mask_type = get_vectype_for_scalar_type (mask_elt_type);
5489
5490 mask_elts = XALLOCAVEC (tree, nunits);
5491 for (i = nunits - 1; i >= 0; i--)
5492 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5493 mask_vec = build_vector (mask_type, mask_elts);
5494
5495 return mask_vec;
5496 }
5497
5498 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p. */
5499
5500 tree
5501 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
5502 {
5503 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
5504 return vect_gen_perm_mask_any (vectype, sel);
5505 }
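/* A usage sketch (illustrative only, mirroring how callers such as
   perm_mask_for_reverse build their selector): to reverse a four-element
   vector a caller fills SEL on the stack and asks for the mask:

     unsigned char sel[4];
     for (i = 0; i < 4; ++i)
       sel[i] = 4 - 1 - i;                      selects { 3, 2, 1, 0 }
     mask = vect_gen_perm_mask_checked (vectype, sel);

   The _checked variant asserts can_vec_perm_p; the _any variant is for
   callers that test can_vec_perm_p themselves.  */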
5506
5507 /* Given vector variables X and Y that were generated for the scalar
5508 STMT, generate instructions to permute the vector elements of X and Y
5509 using permutation mask MASK_VEC, insert them at *GSI, and return the
5510 permuted vector variable. */
5511
5512 static tree
5513 permute_vec_elements (tree x, tree y, tree mask_vec, gimple stmt,
5514 gimple_stmt_iterator *gsi)
5515 {
5516 tree vectype = TREE_TYPE (x);
5517 tree perm_dest, data_ref;
5518 gimple perm_stmt;
5519
5520 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
5521 data_ref = make_ssa_name (perm_dest, NULL);
5522
5523 /* Generate the permute statement. */
5524 perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref,
5525 x, y, mask_vec);
5526 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5527
5528 return data_ref;
5529 }
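/* For example (illustrative only): interleaving the low halves of two
   four-element vectors X and Y would pass MASK_VEC = { 0, 4, 1, 5 } and
   emit a statement of the form

     new_x_6 = VEC_PERM_EXPR <x_2, y_3, { 0, 4, 1, 5 }>;

   where new_x_6 is an SSA name of a destination variable derived from
   the LHS of STMT.  */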
5530
5531 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5532 inserting them on the loop's preheader edge. Returns true if we
5533 were successful in doing so (and thus STMT itself can then be moved),
5534 otherwise returns false. */
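/* An illustrative example (not compiled code):

     loop:
       off_1 = n_2 * 4;
       x_3 = MEM[base_4 + off_1];    <-- STMT, an invariant load

   The definition off_1 = n_2 * 4 can be moved to the preheader edge
   because n_2 is defined outside the loop.  If off_1 were instead defined
   by a PHI node, or depended on another in-loop definition, we return
   false and the caller does not hoist STMT.  */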
5535
5536 static bool
5537 hoist_defs_of_uses (gimple stmt, struct loop *loop)
5538 {
5539 ssa_op_iter i;
5540 tree op;
5541 bool any = false;
5542
5543 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5544 {
5545 gimple def_stmt = SSA_NAME_DEF_STMT (op);
5546 if (!gimple_nop_p (def_stmt)
5547 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5548 {
5549 /* Make sure we don't need to recurse. While we could do
5550 so in simple cases, when there are more complex use webs
5551 we don't have an easy way to preserve stmt order to fulfil
5552 dependencies within them. */
5553 tree op2;
5554 ssa_op_iter i2;
5555 if (gimple_code (def_stmt) == GIMPLE_PHI)
5556 return false;
5557 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
5558 {
5559 gimple def_stmt2 = SSA_NAME_DEF_STMT (op2);
5560 if (!gimple_nop_p (def_stmt2)
5561 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
5562 return false;
5563 }
5564 any = true;
5565 }
5566 }
5567
5568 if (!any)
5569 return true;
5570
5571 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5572 {
5573 gimple def_stmt = SSA_NAME_DEF_STMT (op);
5574 if (!gimple_nop_p (def_stmt)
5575 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5576 {
5577 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5578 gsi_remove (&gsi, false);
5579 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
5580 }
5581 }
5582
5583 return true;
5584 }
5585
5586 /* vectorizable_load.
5587
5588 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
5589 can be vectorized.
5590 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5591 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5592 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
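/* A rough sketch of the common contiguous, aligned case (illustrative
   only): the scalar load

     x_1 = a[i_2];

   is replaced, for a four-element vector type, by something like

     vect_x_3 = MEM[(vector(4) int *) vectp_a_4];

   with the vector pointer bumped by the vector size between copies.
   Grouped, strided and gather accesses take the longer paths handled
   below.  */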
5593
5594 static bool
5595 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
5596 slp_tree slp_node, slp_instance slp_node_instance)
5597 {
5598 tree scalar_dest;
5599 tree vec_dest = NULL;
5600 tree data_ref = NULL;
5601 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5602 stmt_vec_info prev_stmt_info;
5603 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5604 struct loop *loop = NULL;
5605 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
5606 bool nested_in_vect_loop = false;
5607 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5608 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5609 tree elem_type;
5610 tree new_temp;
5611 machine_mode mode;
5612 gimple new_stmt = NULL;
5613 tree dummy;
5614 enum dr_alignment_support alignment_support_scheme;
5615 tree dataref_ptr = NULL_TREE;
5616 tree dataref_offset = NULL_TREE;
5617 gimple ptr_incr = NULL;
5618 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5619 int ncopies;
5620 int i, j, group_size, group_gap;
5621 tree msq = NULL_TREE, lsq;
5622 tree offset = NULL_TREE;
5623 tree byte_offset = NULL_TREE;
5624 tree realignment_token = NULL_TREE;
5625 gimple phi = NULL;
5626 vec<tree> dr_chain = vNULL;
5627 bool grouped_load = false;
5628 bool load_lanes_p = false;
5629 gimple first_stmt;
5630 bool inv_p;
5631 bool negative = false;
5632 bool compute_in_loop = false;
5633 struct loop *at_loop;
5634 int vec_num;
5635 bool slp = (slp_node != NULL);
5636 bool slp_perm = false;
5637 enum tree_code code;
5638 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5639 int vf;
5640 tree aggr_type;
5641 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
5642 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
5643 int gather_scale = 1;
5644 enum vect_def_type gather_dt = vect_unknown_def_type;
5645
5646 if (loop_vinfo)
5647 {
5648 loop = LOOP_VINFO_LOOP (loop_vinfo);
5649 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
5650 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5651 }
5652 else
5653 vf = 1;
5654
5655 /* Multiple types in SLP are handled by creating the appropriate number of
5656 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5657 case of SLP. */
5658 if (slp || PURE_SLP_STMT (stmt_info))
5659 ncopies = 1;
5660 else
5661 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5662
5663 gcc_assert (ncopies >= 1);
5664
5665 /* FORNOW. This restriction should be relaxed. */
5666 if (nested_in_vect_loop && ncopies > 1)
5667 {
5668 if (dump_enabled_p ())
5669 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5670 "multiple types in nested loop.\n");
5671 return false;
5672 }
5673
5674 /* Invalidate assumptions made by dependence analysis when vectorization
5675 on the unrolled body effectively re-orders stmts. */
5676 if (ncopies > 1
5677 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
5678 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5679 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
5680 {
5681 if (dump_enabled_p ())
5682 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5683 "cannot perform implicit CSE when unrolling "
5684 "with negative dependence distance\n");
5685 return false;
5686 }
5687
5688 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5689 return false;
5690
5691 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5692 return false;
5693
5694 /* Is vectorizable load? */
5695 if (!is_gimple_assign (stmt))
5696 return false;
5697
5698 scalar_dest = gimple_assign_lhs (stmt);
5699 if (TREE_CODE (scalar_dest) != SSA_NAME)
5700 return false;
5701
5702 code = gimple_assign_rhs_code (stmt);
5703 if (code != ARRAY_REF
5704 && code != BIT_FIELD_REF
5705 && code != INDIRECT_REF
5706 && code != COMPONENT_REF
5707 && code != IMAGPART_EXPR
5708 && code != REALPART_EXPR
5709 && code != MEM_REF
5710 && TREE_CODE_CLASS (code) != tcc_declaration)
5711 return false;
5712
5713 if (!STMT_VINFO_DATA_REF (stmt_info))
5714 return false;
5715
5716 elem_type = TREE_TYPE (vectype);
5717 mode = TYPE_MODE (vectype);
5718
5719 /* FORNOW. In some cases we can vectorize even if the data type is not
5720 supported (e.g. data copies). */
5721 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
5722 {
5723 if (dump_enabled_p ())
5724 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5725 "Aligned load, but unsupported type.\n");
5726 return false;
5727 }
5728
5729 /* Check if the load is a part of an interleaving chain. */
5730 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5731 {
5732 grouped_load = true;
5733 /* FORNOW */
5734 gcc_assert (! nested_in_vect_loop && !STMT_VINFO_GATHER_P (stmt_info));
5735
5736 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5737 if (!slp && !PURE_SLP_STMT (stmt_info))
5738 {
5739 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5740 if (vect_load_lanes_supported (vectype, group_size))
5741 load_lanes_p = true;
5742 else if (!vect_grouped_load_supported (vectype, group_size))
5743 return false;
5744 }
5745
5746 /* Invalidate assumptions made by dependence analysis when vectorization
5747 on the unrolled body effectively re-orders stmts. */
5748 if (!PURE_SLP_STMT (stmt_info)
5749 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
5750 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5751 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
5752 {
5753 if (dump_enabled_p ())
5754 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5755 "cannot perform implicit CSE when performing "
5756 "group loads with negative dependence distance\n");
5757 return false;
5758 }
5759 }
5760
5761
5762 if (STMT_VINFO_GATHER_P (stmt_info))
5763 {
5764 gimple def_stmt;
5765 tree def;
5766 gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
5767 &gather_off, &gather_scale);
5768 gcc_assert (gather_decl);
5769 if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, bb_vinfo,
5770 &def_stmt, &def, &gather_dt,
5771 &gather_off_vectype))
5772 {
5773 if (dump_enabled_p ())
5774 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5775 "gather index use not simple.\n");
5776 return false;
5777 }
5778 }
5779 else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
5780 ;
5781 else
5782 {
5783 negative = tree_int_cst_compare (nested_in_vect_loop
5784 ? STMT_VINFO_DR_STEP (stmt_info)
5785 : DR_STEP (dr),
5786 size_zero_node) < 0;
5787 if (negative && ncopies > 1)
5788 {
5789 if (dump_enabled_p ())
5790 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5791 "multiple types with negative step.\n");
5792 return false;
5793 }
5794
5795 if (negative)
5796 {
5797 if (grouped_load)
5798 {
5799 if (dump_enabled_p ())
5800 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5801 "negative step for group load not supported"
5802 "\n");
5803 return false;
5804 }
5805 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5806 if (alignment_support_scheme != dr_aligned
5807 && alignment_support_scheme != dr_unaligned_supported)
5808 {
5809 if (dump_enabled_p ())
5810 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5811 "negative step but alignment required.\n");
5812 return false;
5813 }
5814 if (!perm_mask_for_reverse (vectype))
5815 {
5816 if (dump_enabled_p ())
5817 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5818 "negative step and reversing not supported."
5819 "\n");
5820 return false;
5821 }
5822 }
5823 }
5824
5825 if (!vec_stmt) /* transformation not required. */
5826 {
5827 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
5828 vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL, NULL, NULL);
5829 return true;
5830 }
5831
5832 if (dump_enabled_p ())
5833 dump_printf_loc (MSG_NOTE, vect_location,
5834 "transform load. ncopies = %d\n", ncopies);
5835
5836 /** Transform. **/
5837
5838 ensure_base_align (stmt_info, dr);
5839
5840 if (STMT_VINFO_GATHER_P (stmt_info))
5841 {
5842 tree vec_oprnd0 = NULL_TREE, op;
5843 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
5844 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5845 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
5846 edge pe = loop_preheader_edge (loop);
5847 gimple_seq seq;
5848 basic_block new_bb;
5849 enum { NARROW, NONE, WIDEN } modifier;
5850 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
5851
5852 if (nunits == gather_off_nunits)
5853 modifier = NONE;
5854 else if (nunits == gather_off_nunits / 2)
5855 {
5856 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
5857 modifier = WIDEN;
5858
5859 for (i = 0; i < gather_off_nunits; ++i)
5860 sel[i] = i | nunits;
5861
5862 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
5863 }
5864 else if (nunits == gather_off_nunits * 2)
5865 {
5866 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
5867 modifier = NARROW;
5868
5869 for (i = 0; i < nunits; ++i)
5870 sel[i] = i < gather_off_nunits
5871 ? i : i + nunits - gather_off_nunits;
5872
5873 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
5874 ncopies *= 2;
5875 }
5876 else
5877 gcc_unreachable ();
5878
5879 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
5880 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5881 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5882 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5883 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5884 scaletype = TREE_VALUE (arglist);
5885 gcc_checking_assert (types_compatible_p (srctype, rettype));
5886
5887 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5888
5889 ptr = fold_convert (ptrtype, gather_base);
5890 if (!is_gimple_min_invariant (ptr))
5891 {
5892 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5893 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5894 gcc_assert (!new_bb);
5895 }
5896
5897 /* Currently we support only unconditional gather loads,
5898 so mask should be all ones. */
5899 if (TREE_CODE (masktype) == INTEGER_TYPE)
5900 mask = build_int_cst (masktype, -1);
5901 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
5902 {
5903 mask = build_int_cst (TREE_TYPE (masktype), -1);
5904 mask = build_vector_from_val (masktype, mask);
5905 mask = vect_init_vector (stmt, mask, masktype, NULL);
5906 }
5907 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
5908 {
5909 REAL_VALUE_TYPE r;
5910 long tmp[6];
5911 for (j = 0; j < 6; ++j)
5912 tmp[j] = -1;
5913 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
5914 mask = build_real (TREE_TYPE (masktype), r);
5915 mask = build_vector_from_val (masktype, mask);
5916 mask = vect_init_vector (stmt, mask, masktype, NULL);
5917 }
5918 else
5919 gcc_unreachable ();
5920
5921 scale = build_int_cst (scaletype, gather_scale);
5922
5923 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
5924 merge = build_int_cst (TREE_TYPE (rettype), 0);
5925 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
5926 {
5927 REAL_VALUE_TYPE r;
5928 long tmp[6];
5929 for (j = 0; j < 6; ++j)
5930 tmp[j] = 0;
5931 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
5932 merge = build_real (TREE_TYPE (rettype), r);
5933 }
5934 else
5935 gcc_unreachable ();
5936 merge = build_vector_from_val (rettype, merge);
5937 merge = vect_init_vector (stmt, merge, rettype, NULL);
5938
5939 prev_stmt_info = NULL;
5940 for (j = 0; j < ncopies; ++j)
5941 {
5942 if (modifier == WIDEN && (j & 1))
5943 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
5944 perm_mask, stmt, gsi);
5945 else if (j == 0)
5946 op = vec_oprnd0
5947 = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
5948 else
5949 op = vec_oprnd0
5950 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
5951
5952 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5953 {
5954 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5955 == TYPE_VECTOR_SUBPARTS (idxtype));
5956 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
5957 var = make_ssa_name (var, NULL);
5958 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5959 new_stmt
5960 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
5961 op, NULL_TREE);
5962 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5963 op = var;
5964 }
5965
5966 new_stmt
5967 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
5968
5969 if (!useless_type_conversion_p (vectype, rettype))
5970 {
5971 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
5972 == TYPE_VECTOR_SUBPARTS (rettype));
5973 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
5974 op = make_ssa_name (var, new_stmt);
5975 gimple_call_set_lhs (new_stmt, op);
5976 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5977 var = make_ssa_name (vec_dest, NULL);
5978 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
5979 new_stmt
5980 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var, op,
5981 NULL_TREE);
5982 }
5983 else
5984 {
5985 var = make_ssa_name (vec_dest, new_stmt);
5986 gimple_call_set_lhs (new_stmt, var);
5987 }
5988
5989 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5990
5991 if (modifier == NARROW)
5992 {
5993 if ((j & 1) == 0)
5994 {
5995 prev_res = var;
5996 continue;
5997 }
5998 var = permute_vec_elements (prev_res, var,
5999 perm_mask, stmt, gsi);
6000 new_stmt = SSA_NAME_DEF_STMT (var);
6001 }
6002
6003 if (prev_stmt_info == NULL)
6004 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6005 else
6006 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6007 prev_stmt_info = vinfo_for_stmt (new_stmt);
6008 }
6009 return true;
6010 }
6011 else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
6012 {
6013 gimple_stmt_iterator incr_gsi;
6014 bool insert_after;
6015 gimple incr;
6016 tree offvar;
6017 tree ivstep;
6018 tree running_off;
6019 vec<constructor_elt, va_gc> *v = NULL;
6020 gimple_seq stmts = NULL;
6021 tree stride_base, stride_step, alias_off;
6022
6023 gcc_assert (!nested_in_vect_loop);
6024
6025 stride_base
6026 = fold_build_pointer_plus
6027 (unshare_expr (DR_BASE_ADDRESS (dr)),
6028 size_binop (PLUS_EXPR,
6029 convert_to_ptrofftype (unshare_expr (DR_OFFSET (dr))),
6030 convert_to_ptrofftype (DR_INIT (dr))));
6031 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (dr)));
6032
6033 /* For a load with loop-invariant (but other than power-of-2)
6034 stride (i.e. not a grouped access) like so:
6035
6036 for (i = 0; i < n; i += stride)
6037 ... = array[i];
6038
6039 we generate a new induction variable and new accesses to
6040 form a new vector (or vectors, depending on ncopies):
6041
6042 for (j = 0; ; j += VF*stride)
6043 tmp1 = array[j];
6044 tmp2 = array[j + stride];
6045 ...
6046 vectemp = {tmp1, tmp2, ...}
6047 */
6048
6049 ivstep = stride_step;
6050 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6051 build_int_cst (TREE_TYPE (ivstep), vf));
6052
6053 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6054
6055 create_iv (stride_base, ivstep, NULL,
6056 loop, &incr_gsi, insert_after,
6057 &offvar, NULL);
6058 incr = gsi_stmt (incr_gsi);
6059 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
6060
6061 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
6062 if (stmts)
6063 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6064
6065 prev_stmt_info = NULL;
6066 running_off = offvar;
6067 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0);
6068 for (j = 0; j < ncopies; j++)
6069 {
6070 tree vec_inv;
6071
6072 vec_alloc (v, nunits);
6073 for (i = 0; i < nunits; i++)
6074 {
6075 tree newref, newoff;
6076 gimple incr;
6077 newref = build2 (MEM_REF, TREE_TYPE (vectype),
6078 running_off, alias_off);
6079
6080 newref = force_gimple_operand_gsi (gsi, newref, true,
6081 NULL_TREE, true,
6082 GSI_SAME_STMT);
6083 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6084 newoff = copy_ssa_name (running_off, NULL);
6085 incr = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, newoff,
6086 running_off, stride_step);
6087 vect_finish_stmt_generation (stmt, incr, gsi);
6088
6089 running_off = newoff;
6090 }
6091
6092 vec_inv = build_constructor (vectype, v);
6093 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6094 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6095
6096 if (j == 0)
6097 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6098 else
6099 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6100 prev_stmt_info = vinfo_for_stmt (new_stmt);
6101 }
6102 return true;
6103 }
6104
6105 if (grouped_load)
6106 {
6107 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6108 if (slp
6109 && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
6110 && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
6111 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6112
6113 /* Check if the chain of loads is already vectorized. */
6114 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6115 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6116 ??? But we can only do so if there is exactly one
6117 as we have no way to get at the rest. Leave the CSE
6118 opportunity alone.
6119 ??? With the group load eventually participating
6120 in multiple different permutations (having multiple
6121 slp nodes which refer to the same group) the CSE
6122 is even wrong code. See PR56270. */
6123 && !slp)
6124 {
6125 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6126 return true;
6127 }
6128 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6129 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6130
6131 /* VEC_NUM is the number of vect stmts to be created for this group. */
6132 if (slp)
6133 {
6134 grouped_load = false;
6135 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6136 if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6137 slp_perm = true;
6138 group_gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
6139 }
6140 else
6141 {
6142 vec_num = group_size;
6143 group_gap = 0;
6144 }
6145 }
6146 else
6147 {
6148 first_stmt = stmt;
6149 first_dr = dr;
6150 group_size = vec_num = 1;
6151 group_gap = 0;
6152 }
6153
6154 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6155 gcc_assert (alignment_support_scheme);
6156 /* Targets with load-lane instructions must not require explicit
6157 realignment. */
6158 gcc_assert (!load_lanes_p
6159 || alignment_support_scheme == dr_aligned
6160 || alignment_support_scheme == dr_unaligned_supported);
6161
6162 /* In case the vectorization factor (VF) is bigger than the number
6163 of elements that we can fit in a vectype (nunits), we have to generate
6164 more than one vector stmt - i.e., we need to "unroll" the
6165 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6166 from one copy of the vector stmt to the next, in the field
6167 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6168 stages to find the correct vector defs to be used when vectorizing
6169 stmts that use the defs of the current stmt. The example below
6170 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6171 need to create 4 vectorized stmts):
6172
6173 before vectorization:
6174 RELATED_STMT VEC_STMT
6175 S1: x = memref - -
6176 S2: z = x + 1 - -
6177
6178 step 1: vectorize stmt S1:
6179 We first create the vector stmt VS1_0, and, as usual, record a
6180 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6181 Next, we create the vector stmt VS1_1, and record a pointer to
6182 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6183 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6184 stmts and pointers:
6185 RELATED_STMT VEC_STMT
6186 VS1_0: vx0 = memref0 VS1_1 -
6187 VS1_1: vx1 = memref1 VS1_2 -
6188 VS1_2: vx2 = memref2 VS1_3 -
6189 VS1_3: vx3 = memref3 - -
6190 S1: x = load - VS1_0
6191 S2: z = x + 1 - -
6192
6193 See in documentation in vect_get_vec_def_for_stmt_copy for how the
6194 information we recorded in RELATED_STMT field is used to vectorize
6195 stmt S2. */
6196
6197 /* In case of interleaving (non-unit grouped access):
6198
6199 S1: x2 = &base + 2
6200 S2: x0 = &base
6201 S3: x1 = &base + 1
6202 S4: x3 = &base + 3
6203
6204 Vectorized loads are created in the order of memory accesses
6205 starting from the access of the first stmt of the chain:
6206
6207 VS1: vx0 = &base
6208 VS2: vx1 = &base + vec_size*1
6209 VS3: vx3 = &base + vec_size*2
6210 VS4: vx4 = &base + vec_size*3
6211
6212 Then permutation statements are generated:
6213
6214 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6215 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6216 ...
6217
6218 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6219 (the order of the data-refs in the output of vect_permute_load_chain
6220 corresponds to the order of scalar stmts in the interleaving chain - see
6221 the documentation of vect_permute_load_chain()).
6222 The generation of permutation stmts and recording them in
6223 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6224
6225 In case of both multiple types and interleaving, the vector loads and
6226 permutation stmts above are created for every copy. The result vector
6227 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6228 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
6229
6230 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6231 on a target that supports unaligned accesses (dr_unaligned_supported)
6232 we generate the following code:
6233 p = initial_addr;
6234 indx = 0;
6235 loop {
6236 p = p + indx * vectype_size;
6237 vec_dest = *(p);
6238 indx = indx + 1;
6239 }
6240
6241 Otherwise, the data reference is potentially unaligned on a target that
6242 does not support unaligned accesses (dr_explicit_realign_optimized) -
6243 then generate the following code, in which the data in each iteration is
6244 obtained by two vector loads, one from the previous iteration, and one
6245 from the current iteration:
6246 p1 = initial_addr;
6247 msq_init = *(floor(p1))
6248 p2 = initial_addr + VS - 1;
6249 realignment_token = call target_builtin;
6250 indx = 0;
6251 loop {
6252 p2 = p2 + indx * vectype_size
6253 lsq = *(floor(p2))
6254 vec_dest = realign_load (msq, lsq, realignment_token)
6255 indx = indx + 1;
6256 msq = lsq;
6257 } */
6258
6259 /* If the misalignment remains the same throughout the execution of the
6260 loop, we can create the init_addr and permutation mask at the loop
6261 preheader. Otherwise, it needs to be created inside the loop.
6262 This can only occur when vectorizing memory accesses in the inner-loop
6263 nested within an outer-loop that is being vectorized. */
6264
6265 if (nested_in_vect_loop
6266 && (TREE_INT_CST_LOW (DR_STEP (dr))
6267 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6268 {
6269 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6270 compute_in_loop = true;
6271 }
6272
6273 if ((alignment_support_scheme == dr_explicit_realign_optimized
6274 || alignment_support_scheme == dr_explicit_realign)
6275 && !compute_in_loop)
6276 {
6277 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6278 alignment_support_scheme, NULL_TREE,
6279 &at_loop);
6280 if (alignment_support_scheme == dr_explicit_realign_optimized)
6281 {
6282 phi = SSA_NAME_DEF_STMT (msq);
6283 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6284 size_one_node);
6285 }
6286 }
6287 else
6288 at_loop = loop;
6289
6290 if (negative)
6291 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6292
6293 if (load_lanes_p)
6294 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6295 else
6296 aggr_type = vectype;
6297
6298 prev_stmt_info = NULL;
6299 for (j = 0; j < ncopies; j++)
6300 {
6301 /* 1. Create the vector or array pointer update chain. */
6302 if (j == 0)
6303 {
6304 bool simd_lane_access_p
6305 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6306 if (simd_lane_access_p
6307 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6308 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6309 && integer_zerop (DR_OFFSET (first_dr))
6310 && integer_zerop (DR_INIT (first_dr))
6311 && alias_sets_conflict_p (get_alias_set (aggr_type),
6312 get_alias_set (DR_REF (first_dr)))
6313 && (alignment_support_scheme == dr_aligned
6314 || alignment_support_scheme == dr_unaligned_supported))
6315 {
6316 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6317 dataref_offset = build_int_cst (reference_alias_ptr_type
6318 (DR_REF (first_dr)), 0);
6319 inv_p = false;
6320 }
6321 else
6322 dataref_ptr
6323 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6324 offset, &dummy, gsi, &ptr_incr,
6325 simd_lane_access_p, &inv_p,
6326 byte_offset);
6327 }
6328 else if (dataref_offset)
6329 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6330 TYPE_SIZE_UNIT (aggr_type));
6331 else
6332 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6333 TYPE_SIZE_UNIT (aggr_type));
6334
6335 if (grouped_load || slp_perm)
6336 dr_chain.create (vec_num);
6337
6338 if (load_lanes_p)
6339 {
6340 tree vec_array;
6341
6342 vec_array = create_vector_array (vectype, vec_num);
6343
6344 /* Emit:
6345 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6346 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6347 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6348 gimple_call_set_lhs (new_stmt, vec_array);
6349 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6350
6351 /* Extract each vector into an SSA_NAME. */
6352 for (i = 0; i < vec_num; i++)
6353 {
6354 new_temp = read_vector_array (stmt, gsi, scalar_dest,
6355 vec_array, i);
6356 dr_chain.quick_push (new_temp);
6357 }
6358
6359 /* Record the mapping between SSA_NAMEs and statements. */
6360 vect_record_grouped_load_vectors (stmt, dr_chain);
6361 }
6362 else
6363 {
6364 for (i = 0; i < vec_num; i++)
6365 {
6366 if (i > 0)
6367 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6368 stmt, NULL_TREE);
6369
6370 /* 2. Create the vector-load in the loop. */
6371 switch (alignment_support_scheme)
6372 {
6373 case dr_aligned:
6374 case dr_unaligned_supported:
6375 {
6376 unsigned int align, misalign;
6377
6378 data_ref
6379 = build2 (MEM_REF, vectype, dataref_ptr,
6380 dataref_offset
6381 ? dataref_offset
6382 : build_int_cst (reference_alias_ptr_type
6383 (DR_REF (first_dr)), 0));
6384 align = TYPE_ALIGN_UNIT (vectype);
6385 if (alignment_support_scheme == dr_aligned)
6386 {
6387 gcc_assert (aligned_access_p (first_dr));
6388 misalign = 0;
6389 }
6390 else if (DR_MISALIGNMENT (first_dr) == -1)
6391 {
6392 TREE_TYPE (data_ref)
6393 = build_aligned_type (TREE_TYPE (data_ref),
6394 TYPE_ALIGN (elem_type));
6395 align = TYPE_ALIGN_UNIT (elem_type);
6396 misalign = 0;
6397 }
6398 else
6399 {
6400 TREE_TYPE (data_ref)
6401 = build_aligned_type (TREE_TYPE (data_ref),
6402 TYPE_ALIGN (elem_type));
6403 misalign = DR_MISALIGNMENT (first_dr);
6404 }
6405 if (dataref_offset == NULL_TREE)
6406 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6407 align, misalign);
6408 break;
6409 }
6410 case dr_explicit_realign:
6411 {
6412 tree ptr, bump;
6413 tree vs_minus_1;
6414
6415 vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
6416
6417 if (compute_in_loop)
6418 msq = vect_setup_realignment (first_stmt, gsi,
6419 &realignment_token,
6420 dr_explicit_realign,
6421 dataref_ptr, NULL);
6422
6423 ptr = copy_ssa_name (dataref_ptr, NULL);
6424 new_stmt = gimple_build_assign_with_ops
6425 (BIT_AND_EXPR, ptr, dataref_ptr,
6426 build_int_cst
6427 (TREE_TYPE (dataref_ptr),
6428 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6429 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6430 data_ref
6431 = build2 (MEM_REF, vectype, ptr,
6432 build_int_cst (reference_alias_ptr_type
6433 (DR_REF (first_dr)), 0));
6434 vec_dest = vect_create_destination_var (scalar_dest,
6435 vectype);
6436 new_stmt = gimple_build_assign (vec_dest, data_ref);
6437 new_temp = make_ssa_name (vec_dest, new_stmt);
6438 gimple_assign_set_lhs (new_stmt, new_temp);
6439 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
6440 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
6441 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6442 msq = new_temp;
6443
6444 bump = size_binop (MULT_EXPR, vs_minus_1,
6445 TYPE_SIZE_UNIT (elem_type));
6446 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
6447 new_stmt = gimple_build_assign_with_ops
6448 (BIT_AND_EXPR, NULL_TREE, ptr,
6449 build_int_cst
6450 (TREE_TYPE (ptr),
6451 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6452 ptr = copy_ssa_name (dataref_ptr, new_stmt);
6453 gimple_assign_set_lhs (new_stmt, ptr);
6454 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6455 data_ref
6456 = build2 (MEM_REF, vectype, ptr,
6457 build_int_cst (reference_alias_ptr_type
6458 (DR_REF (first_dr)), 0));
6459 break;
6460 }
6461 case dr_explicit_realign_optimized:
6462 new_temp = copy_ssa_name (dataref_ptr, NULL);
6463 new_stmt = gimple_build_assign_with_ops
6464 (BIT_AND_EXPR, new_temp, dataref_ptr,
6465 build_int_cst
6466 (TREE_TYPE (dataref_ptr),
6467 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6468 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6469 data_ref
6470 = build2 (MEM_REF, vectype, new_temp,
6471 build_int_cst (reference_alias_ptr_type
6472 (DR_REF (first_dr)), 0));
6473 break;
6474 default:
6475 gcc_unreachable ();
6476 }
6477 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6478 new_stmt = gimple_build_assign (vec_dest, data_ref);
6479 new_temp = make_ssa_name (vec_dest, new_stmt);
6480 gimple_assign_set_lhs (new_stmt, new_temp);
6481 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6482
6483 /* 3. Handle explicit realignment if necessary/supported.
6484 Create in loop:
6485 vec_dest = realign_load (msq, lsq, realignment_token) */
6486 if (alignment_support_scheme == dr_explicit_realign_optimized
6487 || alignment_support_scheme == dr_explicit_realign)
6488 {
6489 lsq = gimple_assign_lhs (new_stmt);
6490 if (!realignment_token)
6491 realignment_token = dataref_ptr;
6492 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6493 new_stmt
6494 = gimple_build_assign_with_ops (REALIGN_LOAD_EXPR,
6495 vec_dest, msq, lsq,
6496 realignment_token);
6497 new_temp = make_ssa_name (vec_dest, new_stmt);
6498 gimple_assign_set_lhs (new_stmt, new_temp);
6499 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6500
6501 if (alignment_support_scheme == dr_explicit_realign_optimized)
6502 {
6503 gcc_assert (phi);
6504 if (i == vec_num - 1 && j == ncopies - 1)
6505 add_phi_arg (phi, lsq,
6506 loop_latch_edge (containing_loop),
6507 UNKNOWN_LOCATION);
6508 msq = lsq;
6509 }
6510 }
6511
6512 /* 4. Handle invariant-load. */
6513 if (inv_p && !bb_vinfo)
6514 {
6515 gcc_assert (!grouped_load);
6516 /* If we have versioned for aliasing or the loop doesn't
6517 have any data dependencies that would preclude this,
6518 then we are sure this is a loop invariant load and
6519 thus we can insert it on the preheader edge. */
6520 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
6521 && !nested_in_vect_loop
6522 && hoist_defs_of_uses (stmt, loop))
6523 {
6524 if (dump_enabled_p ())
6525 {
6526 dump_printf_loc (MSG_NOTE, vect_location,
6527 "hoisting out of the vectorized "
6528 "loop: ");
6529 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
6530 dump_printf (MSG_NOTE, "\n");
6531 }
6532 tree tem = copy_ssa_name (scalar_dest, NULL);
6533 gsi_insert_on_edge_immediate
6534 (loop_preheader_edge (loop),
6535 gimple_build_assign (tem,
6536 unshare_expr
6537 (gimple_assign_rhs1 (stmt))));
6538 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
6539 }
6540 else
6541 {
6542 gimple_stmt_iterator gsi2 = *gsi;
6543 gsi_next (&gsi2);
6544 new_temp = vect_init_vector (stmt, scalar_dest,
6545 vectype, &gsi2);
6546 }
6547 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6548 set_vinfo_for_stmt (new_stmt,
6549 new_stmt_vec_info (new_stmt, loop_vinfo,
6550 bb_vinfo));
6551 }
6552
6553 if (negative)
6554 {
6555 tree perm_mask = perm_mask_for_reverse (vectype);
6556 new_temp = permute_vec_elements (new_temp, new_temp,
6557 perm_mask, stmt, gsi);
6558 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6559 }
6560
6561 /* Collect vector loads and later create their permutation in
6562 vect_transform_grouped_load (). */
6563 if (grouped_load || slp_perm)
6564 dr_chain.quick_push (new_temp);
6565
6566 /* Store vector loads in the corresponding SLP_NODE. */
6567 if (slp && !slp_perm)
6568 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6569 }
6570 /* Bump the vector pointer to account for a gap. */
6571 if (slp && group_gap != 0)
6572 {
6573 tree bump = size_binop (MULT_EXPR,
6574 TYPE_SIZE_UNIT (elem_type),
6575 size_int (group_gap));
6576 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6577 stmt, bump);
6578 }
6579 }
6580
6581 if (slp && !slp_perm)
6582 continue;
6583
6584 if (slp_perm)
6585 {
6586 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6587 slp_node_instance, false))
6588 {
6589 dr_chain.release ();
6590 return false;
6591 }
6592 }
6593 else
6594 {
6595 if (grouped_load)
6596 {
6597 if (!load_lanes_p)
6598 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
6599 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6600 }
6601 else
6602 {
6603 if (j == 0)
6604 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6605 else
6606 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6607 prev_stmt_info = vinfo_for_stmt (new_stmt);
6608 }
6609 }
6610 dr_chain.release ();
6611 }
6612
6613 return true;
6614 }
6615
6616 /* Function vect_is_simple_cond.
6617
6618 Input:
6619 LOOP - the loop that is being vectorized.
6620 COND - Condition that is checked for simple use.
6621
6622 Output:
6623 *COMP_VECTYPE - the vector type for the comparison.
6624
6625 Returns whether a COND can be vectorized. Checks whether
6626 condition operands are supportable using vect_is_simple_use_1. */
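/* For instance (illustrative): for the condition  a_5 < b_6  both
   operands are SSA names and must be simple uses; for  a_5 < 3  the
   constant operand needs no further check.  *COMP_VECTYPE is taken from
   whichever operand supplied a vector type.  */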
6627
6628 static bool
6629 vect_is_simple_cond (tree cond, gimple stmt, loop_vec_info loop_vinfo,
6630 bb_vec_info bb_vinfo, tree *comp_vectype)
6631 {
6632 tree lhs, rhs;
6633 tree def;
6634 enum vect_def_type dt;
6635 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
6636
6637 if (!COMPARISON_CLASS_P (cond))
6638 return false;
6639
6640 lhs = TREE_OPERAND (cond, 0);
6641 rhs = TREE_OPERAND (cond, 1);
6642
6643 if (TREE_CODE (lhs) == SSA_NAME)
6644 {
6645 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
6646 if (!vect_is_simple_use_1 (lhs, stmt, loop_vinfo, bb_vinfo,
6647 &lhs_def_stmt, &def, &dt, &vectype1))
6648 return false;
6649 }
6650 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
6651 && TREE_CODE (lhs) != FIXED_CST)
6652 return false;
6653
6654 if (TREE_CODE (rhs) == SSA_NAME)
6655 {
6656 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
6657 if (!vect_is_simple_use_1 (rhs, stmt, loop_vinfo, bb_vinfo,
6658 &rhs_def_stmt, &def, &dt, &vectype2))
6659 return false;
6660 }
6661 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
6662 && TREE_CODE (rhs) != FIXED_CST)
6663 return false;
6664
6665 *comp_vectype = vectype1 ? vectype1 : vectype2;
6666 return true;
6667 }
6668
6669 /* vectorizable_condition.
6670
6671 Check if STMT is conditional modify expression that can be vectorized.
6672 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6673 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
6674 at GSI.
6675
6676 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
6677 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
6678 the else clause if it is 2).
6679
6680 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
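/* A sketch of the transformation (illustrative only): the scalar

     x_1 = a_2 < b_3 ? c_4 : d_5;

   becomes, per vector copy, a single statement of the form

     vect_x_6 = VEC_COND_EXPR <a_vec_7 < b_vec_8, c_vec_9, d_vec_10>;

   with REDUC_DEF substituted for the then- or else-clause operand when
   the statement participates in a nested reduction cycle.  */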
6681
6682 bool
6683 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
6684 gimple *vec_stmt, tree reduc_def, int reduc_index,
6685 slp_tree slp_node)
6686 {
6687 tree scalar_dest = NULL_TREE;
6688 tree vec_dest = NULL_TREE;
6689 tree cond_expr, then_clause, else_clause;
6690 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6691 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6692 tree comp_vectype = NULL_TREE;
6693 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
6694 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
6695 tree vec_compare, vec_cond_expr;
6696 tree new_temp;
6697 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6698 tree def;
6699 enum vect_def_type dt, dts[4];
6700 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6701 int ncopies;
6702 enum tree_code code;
6703 stmt_vec_info prev_stmt_info = NULL;
6704 int i, j;
6705 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6706 vec<tree> vec_oprnds0 = vNULL;
6707 vec<tree> vec_oprnds1 = vNULL;
6708 vec<tree> vec_oprnds2 = vNULL;
6709 vec<tree> vec_oprnds3 = vNULL;
6710 tree vec_cmp_type;
6711
6712 if (slp_node || PURE_SLP_STMT (stmt_info))
6713 ncopies = 1;
6714 else
6715 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6716
6717 gcc_assert (ncopies >= 1);
6718 if (reduc_index && ncopies > 1)
6719 return false; /* FORNOW */
6720
6721 if (reduc_index && STMT_SLP_TYPE (stmt_info))
6722 return false;
6723
6724 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6725 return false;
6726
6727 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6728 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
6729 && reduc_def))
6730 return false;
6731
6732 /* FORNOW: not yet supported. */
6733 if (STMT_VINFO_LIVE_P (stmt_info))
6734 {
6735 if (dump_enabled_p ())
6736 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6737 "value used after loop.\n");
6738 return false;
6739 }
6740
6741 /* Is vectorizable conditional operation? */
6742 if (!is_gimple_assign (stmt))
6743 return false;
6744
6745 code = gimple_assign_rhs_code (stmt);
6746
6747 if (code != COND_EXPR)
6748 return false;
6749
6750 cond_expr = gimple_assign_rhs1 (stmt);
6751 then_clause = gimple_assign_rhs2 (stmt);
6752 else_clause = gimple_assign_rhs3 (stmt);
6753
6754 if (!vect_is_simple_cond (cond_expr, stmt, loop_vinfo, bb_vinfo,
6755 &comp_vectype)
6756 || !comp_vectype)
6757 return false;
6758
6759 if (TREE_CODE (then_clause) == SSA_NAME)
6760 {
6761 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
6762 if (!vect_is_simple_use (then_clause, stmt, loop_vinfo, bb_vinfo,
6763 &then_def_stmt, &def, &dt))
6764 return false;
6765 }
6766 else if (TREE_CODE (then_clause) != INTEGER_CST
6767 && TREE_CODE (then_clause) != REAL_CST
6768 && TREE_CODE (then_clause) != FIXED_CST)
6769 return false;
6770
6771 if (TREE_CODE (else_clause) == SSA_NAME)
6772 {
6773 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
6774 if (!vect_is_simple_use (else_clause, stmt, loop_vinfo, bb_vinfo,
6775 &else_def_stmt, &def, &dt))
6776 return false;
6777 }
6778 else if (TREE_CODE (else_clause) != INTEGER_CST
6779 && TREE_CODE (else_clause) != REAL_CST
6780 && TREE_CODE (else_clause) != FIXED_CST)
6781 return false;
6782
6783 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype)));
6784 /* The result of a vector comparison should be a signed type. */
6785 tree cmp_type = build_nonstandard_integer_type (prec, 0);
6786 vec_cmp_type = get_same_sized_vectype (cmp_type, vectype);
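/* For example (illustrative): with a four-element single-precision float
   VECTYPE the element precision is 32, so CMP_TYPE is a signed 32-bit
   integer type and VEC_CMP_TYPE is the matching four-element integer
   vector type.  */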
6787 if (vec_cmp_type == NULL_TREE)
6788 return false;
6789
6790 if (!vec_stmt)
6791 {
6792 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
6793 return expand_vec_cond_expr_p (vectype, comp_vectype);
6794 }
6795
6796 /* Transform. */
6797
6798 if (!slp_node)
6799 {
6800 vec_oprnds0.create (1);
6801 vec_oprnds1.create (1);
6802 vec_oprnds2.create (1);
6803 vec_oprnds3.create (1);
6804 }
6805
6806 /* Handle def. */
6807 scalar_dest = gimple_assign_lhs (stmt);
6808 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6809
6810 /* Handle cond expr. */
6811 for (j = 0; j < ncopies; j++)
6812 {
6813 gimple new_stmt = NULL;
6814 if (j == 0)
6815 {
6816 if (slp_node)
6817 {
6818 auto_vec<tree, 4> ops;
6819 auto_vec<vec<tree>, 4> vec_defs;
6820
6821 ops.safe_push (TREE_OPERAND (cond_expr, 0));
6822 ops.safe_push (TREE_OPERAND (cond_expr, 1));
6823 ops.safe_push (then_clause);
6824 ops.safe_push (else_clause);
6825 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
6826 vec_oprnds3 = vec_defs.pop ();
6827 vec_oprnds2 = vec_defs.pop ();
6828 vec_oprnds1 = vec_defs.pop ();
6829 vec_oprnds0 = vec_defs.pop ();
6830
6831 ops.release ();
6832 vec_defs.release ();
6833 }
6834 else
6835 {
6836 gimple gtemp;
6837 vec_cond_lhs =
6838 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
6839 stmt, NULL);
6840 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), stmt,
6841 loop_vinfo, NULL, &gtemp, &def, &dts[0]);
6842
6843 vec_cond_rhs =
6844 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
6845 stmt, NULL);
6846 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), stmt,
6847 loop_vinfo, NULL, &gtemp, &def, &dts[1]);
6848 if (reduc_index == 1)
6849 vec_then_clause = reduc_def;
6850 else
6851 {
6852 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
6853 stmt, NULL);
6854 vect_is_simple_use (then_clause, stmt, loop_vinfo,
6855 NULL, &gtemp, &def, &dts[2]);
6856 }
6857 if (reduc_index == 2)
6858 vec_else_clause = reduc_def;
6859 else
6860 {
6861 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
6862 stmt, NULL);
6863 vect_is_simple_use (else_clause, stmt, loop_vinfo,
6864 NULL, &gtemp, &def, &dts[3]);
6865 }
6866 }
6867 }
6868 else
6869 {
6870 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
6871 vec_oprnds0.pop ());
6872 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
6873 vec_oprnds1.pop ());
6874 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
6875 vec_oprnds2.pop ());
6876 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
6877 vec_oprnds3.pop ());
6878 }
6879
6880 if (!slp_node)
6881 {
6882 vec_oprnds0.quick_push (vec_cond_lhs);
6883 vec_oprnds1.quick_push (vec_cond_rhs);
6884 vec_oprnds2.quick_push (vec_then_clause);
6885 vec_oprnds3.quick_push (vec_else_clause);
6886 }
6887
6888 /* Arguments are ready. Create the new vector stmt. */
6889 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
6890 {
6891 vec_cond_rhs = vec_oprnds1[i];
6892 vec_then_clause = vec_oprnds2[i];
6893 vec_else_clause = vec_oprnds3[i];
6894
6895 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
6896 vec_cond_lhs, vec_cond_rhs);
6897 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
6898 vec_compare, vec_then_clause, vec_else_clause);
6899
6900 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
6901 new_temp = make_ssa_name (vec_dest, new_stmt);
6902 gimple_assign_set_lhs (new_stmt, new_temp);
6903 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6904 if (slp_node)
6905 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6906 }
6907
6908 if (slp_node)
6909 continue;
6910
6911 if (j == 0)
6912 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6913 else
6914 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6915
6916 prev_stmt_info = vinfo_for_stmt (new_stmt);
6917 }
6918
6919 vec_oprnds0.release ();
6920 vec_oprnds1.release ();
6921 vec_oprnds2.release ();
6922 vec_oprnds3.release ();
6923
6924 return true;
6925 }
6926
6927
6928 /* Make sure the statement is vectorizable. */
6929
6930 bool
6931 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
6932 {
6933 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6934 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6935 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
6936 bool ok;
6937 tree scalar_type, vectype;
6938 gimple pattern_stmt;
6939 gimple_seq pattern_def_seq;
6940
6941 if (dump_enabled_p ())
6942 {
6943 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
6944 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
6945 dump_printf (MSG_NOTE, "\n");
6946 }
6947
6948 if (gimple_has_volatile_ops (stmt))
6949 {
6950 if (dump_enabled_p ())
6951 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6952 "not vectorized: stmt has volatile operands\n");
6953
6954 return false;
6955 }
6956
6957 /* Skip stmts that do not need to be vectorized. In loops this is expected
6958 to include:
6959 - the COND_EXPR which is the loop exit condition
6960 - any LABEL_EXPRs in the loop
6961 - computations that are used only for array indexing or loop control.
6962 In basic blocks we only analyze statements that are a part of some SLP
6963 instance, therefore, all the statements are relevant.
6964
6965 The pattern statement needs to be analyzed instead of the original statement
6966 if the original statement is not relevant. Otherwise, we analyze both
6967 statements. In basic blocks we are called from some SLP instance
6968 traversal; don't analyze pattern stmts instead, since the pattern stmts
6969 will already be part of the SLP instance. */
6970
6971 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
6972 if (!STMT_VINFO_RELEVANT_P (stmt_info)
6973 && !STMT_VINFO_LIVE_P (stmt_info))
6974 {
6975 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
6976 && pattern_stmt
6977 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
6978 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
6979 {
6980 /* Analyze PATTERN_STMT instead of the original stmt. */
6981 stmt = pattern_stmt;
6982 stmt_info = vinfo_for_stmt (pattern_stmt);
6983 if (dump_enabled_p ())
6984 {
6985 dump_printf_loc (MSG_NOTE, vect_location,
6986 "==> examining pattern statement: ");
6987 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
6988 dump_printf (MSG_NOTE, "\n");
6989 }
6990 }
6991 else
6992 {
6993 if (dump_enabled_p ())
6994 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
6995
6996 return true;
6997 }
6998 }
6999 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7000 && node == NULL
7001 && pattern_stmt
7002 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7003 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7004 {
7005 /* Analyze PATTERN_STMT too. */
7006 if (dump_enabled_p ())
7007 {
7008 dump_printf_loc (MSG_NOTE, vect_location,
7009 "==> examining pattern statement: ");
7010 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7011 dump_printf (MSG_NOTE, "\n");
7012 }
7013
7014 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7015 return false;
7016 }
7017
7018 if (is_pattern_stmt_p (stmt_info)
7019 && node == NULL
7020 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
7021 {
7022 gimple_stmt_iterator si;
7023
7024 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
7025 {
7026 gimple pattern_def_stmt = gsi_stmt (si);
7027 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
7028 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
7029 {
7030 /* Analyze def stmt of STMT if it's a pattern stmt. */
7031 if (dump_enabled_p ())
7032 {
7033 dump_printf_loc (MSG_NOTE, vect_location,
7034 "==> examining pattern def statement: ");
7035 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
7036 dump_printf (MSG_NOTE, "\n");
7037 }
7038
7039 if (!vect_analyze_stmt (pattern_def_stmt,
7040 need_to_vectorize, node))
7041 return false;
7042 }
7043 }
7044 }
7045
7046 switch (STMT_VINFO_DEF_TYPE (stmt_info))
7047 {
7048 case vect_internal_def:
7049 break;
7050
7051 case vect_reduction_def:
7052 case vect_nested_cycle:
7053 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
7054 || relevance == vect_used_in_outer_by_reduction
7055 || relevance == vect_unused_in_scope));
7056 break;
7057
7058 case vect_induction_def:
7059 case vect_constant_def:
7060 case vect_external_def:
7061 case vect_unknown_def_type:
7062 default:
7063 gcc_unreachable ();
7064 }
7065
7066 if (bb_vinfo)
7067 {
7068 gcc_assert (PURE_SLP_STMT (stmt_info));
7069
7070 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
7071 if (dump_enabled_p ())
7072 {
7073 dump_printf_loc (MSG_NOTE, vect_location,
7074 "get vectype for scalar type: ");
7075 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
7076 dump_printf (MSG_NOTE, "\n");
7077 }
7078
7079 vectype = get_vectype_for_scalar_type (scalar_type);
7080 if (!vectype)
7081 {
7082 if (dump_enabled_p ())
7083 {
7084 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7085 "not SLPed: unsupported data-type ");
7086 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
7087 scalar_type);
7088 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7089 }
7090 return false;
7091 }
7092
7093 if (dump_enabled_p ())
7094 {
7095 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
7096 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
7097 dump_printf (MSG_NOTE, "\n");
7098 }
7099
7100 STMT_VINFO_VECTYPE (stmt_info) = vectype;
7101 }
7102
7103 if (STMT_VINFO_RELEVANT_P (stmt_info))
7104 {
7105 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
7106 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
7107 || (is_gimple_call (stmt)
7108 && gimple_call_lhs (stmt) == NULL_TREE));
7109 *need_to_vectorize = true;
7110 }
7111
7112 ok = true;
7113 if (!bb_vinfo
7114 && (STMT_VINFO_RELEVANT_P (stmt_info)
7115 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
7116 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, NULL)
7117 || vectorizable_conversion (stmt, NULL, NULL, NULL)
7118 || vectorizable_shift (stmt, NULL, NULL, NULL)
7119 || vectorizable_operation (stmt, NULL, NULL, NULL)
7120 || vectorizable_assignment (stmt, NULL, NULL, NULL)
7121 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
7122 || vectorizable_call (stmt, NULL, NULL, NULL)
7123 || vectorizable_store (stmt, NULL, NULL, NULL)
7124 || vectorizable_reduction (stmt, NULL, NULL, NULL)
7125 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, NULL));
7126 else
7127 {
7128 if (bb_vinfo)
7129 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7130 || vectorizable_conversion (stmt, NULL, NULL, node)
7131 || vectorizable_shift (stmt, NULL, NULL, node)
7132 || vectorizable_operation (stmt, NULL, NULL, node)
7133 || vectorizable_assignment (stmt, NULL, NULL, node)
7134 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7135 || vectorizable_call (stmt, NULL, NULL, node)
7136 || vectorizable_store (stmt, NULL, NULL, node)
7137 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7138 }
7139
7140 if (!ok)
7141 {
7142 if (dump_enabled_p ())
7143 {
7144 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7145 "not vectorized: relevant stmt not ");
7146 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7147 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7148 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7149 }
7150
7151 return false;
7152 }
7153
7154 if (bb_vinfo)
7155 return true;
7156
7157 /* Stmts that are (also) "live" (i.e. used outside of the loop)
7158 need extra handling, except for vectorizable reductions. */
7159 if (STMT_VINFO_LIVE_P (stmt_info)
7160 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7161 ok = vectorizable_live_operation (stmt, NULL, NULL);
7162
7163 if (!ok)
7164 {
7165 if (dump_enabled_p ())
7166 {
7167 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7168 "not vectorized: live stmt not ");
7169 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7170 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7171 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7172 }
7173
7174 return false;
7175 }
7176
7177 return true;
7178 }
7179
7180
7181 /* Function vect_transform_stmt.
7182
7183 Create a vectorized stmt to replace STMT, and insert it at BSI. */
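/* Illustrative sketch only (not part of the compiler; the local names
   are hypothetical): a transform-phase caller such as the per-bb
   statement walk would drive this function roughly as follows.
   Non-grouped stores are removed right away, while grouped stores are
   removed only once the whole chain is vectorized (vect_remove_stores):

     gimple_stmt_iterator si = gsi_start_bb (bb);
     while (!gsi_end_p (si))
       {
         gimple stmt = gsi_stmt (si);
         bool grouped_store = false;
         bool is_store
           = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
         if (is_store && !grouped_store)
           {
             gsi_remove (&si, true);
             continue;
           }
         gsi_next (&si);
       }  */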
7184
7185 bool
7186 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
7187 bool *grouped_store, slp_tree slp_node,
7188 slp_instance slp_node_instance)
7189 {
7190 bool is_store = false;
7191 gimple vec_stmt = NULL;
7192 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7193 bool done;
7194
7195 switch (STMT_VINFO_TYPE (stmt_info))
7196 {
7197 case type_demotion_vec_info_type:
7198 case type_promotion_vec_info_type:
7199 case type_conversion_vec_info_type:
7200 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7201 gcc_assert (done);
7202 break;
7203
7204 case induc_vec_info_type:
7205 gcc_assert (!slp_node);
7206 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7207 gcc_assert (done);
7208 break;
7209
7210 case shift_vec_info_type:
7211 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
7212 gcc_assert (done);
7213 break;
7214
7215 case op_vec_info_type:
7216 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7217 gcc_assert (done);
7218 break;
7219
7220 case assignment_vec_info_type:
7221 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7222 gcc_assert (done);
7223 break;
7224
7225 case load_vec_info_type:
7226 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
7227 slp_node_instance);
7228 gcc_assert (done);
7229 break;
7230
7231 case store_vec_info_type:
7232 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
7233 gcc_assert (done);
7234 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
7235 {
7236 /* In case of interleaving, the whole chain is vectorized when the
7237 last store in the chain is reached. Store stmts before the last
7238 one are skipped, and their stmt_vec_info shouldn't be freed
7239 meanwhile. */
7240 *grouped_store = true;
7241 if (STMT_VINFO_VEC_STMT (stmt_info))
7242 is_store = true;
7243 }
7244 else
7245 is_store = true;
7246 break;
7247
7248 case condition_vec_info_type:
7249 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
7250 gcc_assert (done);
7251 break;
7252
7253 case call_vec_info_type:
7254 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
7255 stmt = gsi_stmt (*gsi);
7256 if (is_gimple_call (stmt)
7257 && gimple_call_internal_p (stmt)
7258 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
7259 is_store = true;
7260 break;
7261
7262 case call_simd_clone_vec_info_type:
7263 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
7264 stmt = gsi_stmt (*gsi);
7265 break;
7266
7267 case reduc_vec_info_type:
7268 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
7269 gcc_assert (done);
7270 break;
7271
7272 default:
7273 if (!STMT_VINFO_LIVE_P (stmt_info))
7274 {
7275 if (dump_enabled_p ())
7276 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7277 "stmt not supported.\n");
7278 gcc_unreachable ();
7279 }
7280 }
7281
7282 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
7283 is being vectorized, but outside the immediately enclosing loop. */
7284 if (vec_stmt
7285 && STMT_VINFO_LOOP_VINFO (stmt_info)
7286 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
7287 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
7288 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
7289 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
7290 || STMT_VINFO_RELEVANT (stmt_info) ==
7291 vect_used_in_outer_by_reduction))
7292 {
7293 struct loop *innerloop = LOOP_VINFO_LOOP (
7294 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
7295 imm_use_iterator imm_iter;
7296 use_operand_p use_p;
7297 tree scalar_dest;
7298 gimple exit_phi;
7299
7300 if (dump_enabled_p ())
7301 dump_printf_loc (MSG_NOTE, vect_location,
7302 "Record the vdef for outer-loop vectorization.\n");
7303
7304 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
7305 (to be used when vectorizing outer-loop stmts that use the DEF of
7306 STMT). */
7307 if (gimple_code (stmt) == GIMPLE_PHI)
7308 scalar_dest = PHI_RESULT (stmt);
7309 else
7310 scalar_dest = gimple_assign_lhs (stmt);
7311
7312 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
7313 {
7314 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
7315 {
7316 exit_phi = USE_STMT (use_p);
7317 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
7318 }
7319 }
7320 }
7321
7322 /* Handle stmts whose DEF is used outside the loop-nest that is
7323 being vectorized. */
7324 if (STMT_VINFO_LIVE_P (stmt_info)
7325 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7326 {
7327 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
7328 gcc_assert (done);
7329 }
7330
7331 if (vec_stmt)
7332 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
7333
7334 return is_store;
7335 }
7336
7337
7338 /* Remove a group of stores (for SLP or interleaving), free their
7339 stmt_vec_info. */
7340
7341 void
7342 vect_remove_stores (gimple first_stmt)
7343 {
7344 gimple next = first_stmt;
7345 gimple tmp;
7346 gimple_stmt_iterator next_si;
7347
7348 while (next)
7349 {
7350 stmt_vec_info stmt_info = vinfo_for_stmt (next);
7351
7352 tmp = GROUP_NEXT_ELEMENT (stmt_info);
7353 if (is_pattern_stmt_p (stmt_info))
7354 next = STMT_VINFO_RELATED_STMT (stmt_info);
7355 /* Free the attached stmt_vec_info and remove the stmt. */
7356 next_si = gsi_for_stmt (next);
7357 unlink_stmt_vdef (next);
7358 gsi_remove (&next_si, true);
7359 release_defs (next);
7360 free_stmt_vec_info (next);
7361 next = tmp;
7362 }
7363 }
7364
7365
7366 /* Function new_stmt_vec_info.
7367
7368 Create and initialize a new stmt_vec_info struct for STMT. */
7369
7370 stmt_vec_info
7371 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
7372 bb_vec_info bb_vinfo)
7373 {
7374 stmt_vec_info res;
7375 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
7376
7377 STMT_VINFO_TYPE (res) = undef_vec_info_type;
7378 STMT_VINFO_STMT (res) = stmt;
7379 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
7380 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
7381 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
7382 STMT_VINFO_LIVE_P (res) = false;
7383 STMT_VINFO_VECTYPE (res) = NULL;
7384 STMT_VINFO_VEC_STMT (res) = NULL;
7385 STMT_VINFO_VECTORIZABLE (res) = true;
7386 STMT_VINFO_IN_PATTERN_P (res) = false;
7387 STMT_VINFO_RELATED_STMT (res) = NULL;
7388 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
7389 STMT_VINFO_DATA_REF (res) = NULL;
7390
7391 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
7392 STMT_VINFO_DR_OFFSET (res) = NULL;
7393 STMT_VINFO_DR_INIT (res) = NULL;
7394 STMT_VINFO_DR_STEP (res) = NULL;
7395 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
7396
7397 if (gimple_code (stmt) == GIMPLE_PHI
7398 && is_loop_header_bb_p (gimple_bb (stmt)))
7399 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
7400 else
7401 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
7402
7403 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
7404 STMT_SLP_TYPE (res) = loop_vect;
7405 GROUP_FIRST_ELEMENT (res) = NULL;
7406 GROUP_NEXT_ELEMENT (res) = NULL;
7407 GROUP_SIZE (res) = 0;
7408 GROUP_STORE_COUNT (res) = 0;
7409 GROUP_GAP (res) = 0;
7410 GROUP_SAME_DR_STMT (res) = NULL;
7411
7412 return res;
7413 }
7414
7415
7416 /* Create the vector that holds the stmt_vec_info structs. */
7417
7418 void
7419 init_stmt_vec_info_vec (void)
7420 {
7421 gcc_assert (!stmt_vec_info_vec.exists ());
7422 stmt_vec_info_vec.create (50);
7423 }
7424
7425
7426 /* Free the vector that holds the stmt_vec_info structs. */
7427
7428 void
7429 free_stmt_vec_info_vec (void)
7430 {
7431 unsigned int i;
7432 vec_void_p info;
7433 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
7434 if (info != NULL)
7435 free_stmt_vec_info (STMT_VINFO_STMT ((stmt_vec_info) info));
7436 gcc_assert (stmt_vec_info_vec.exists ());
7437 stmt_vec_info_vec.release ();
7438 }
7439
7440
7441 /* Free stmt vectorization related info. */
7442
7443 void
7444 free_stmt_vec_info (gimple stmt)
7445 {
7446 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7447
7448 if (!stmt_info)
7449 return;
7450
7451 /* Check if this statement has a related "pattern stmt"
7452 (introduced by the vectorizer during the pattern recognition
7453 pass). Free the pattern's stmt_vec_info and the def stmts'
7454 stmt_vec_infos too. */
7455 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
7456 {
7457 stmt_vec_info patt_info
7458 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7459 if (patt_info)
7460 {
7461 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
7462 gimple patt_stmt = STMT_VINFO_STMT (patt_info);
7463 gimple_set_bb (patt_stmt, NULL);
7464 tree lhs = gimple_get_lhs (patt_stmt);
7465 if (lhs && TREE_CODE (lhs) == SSA_NAME)
7466 release_ssa_name (lhs);
7467 if (seq)
7468 {
7469 gimple_stmt_iterator si;
7470 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
7471 {
7472 gimple seq_stmt = gsi_stmt (si);
7473 gimple_set_bb (seq_stmt, NULL);
7474 lhs = gimple_get_lhs (seq_stmt);
7475 if (lhs && TREE_CODE (lhs) == SSA_NAME)
7476 release_ssa_name (lhs);
7477 free_stmt_vec_info (seq_stmt);
7478 }
7479 }
7480 free_stmt_vec_info (patt_stmt);
7481 }
7482 }
7483
7484 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
7485 set_vinfo_for_stmt (stmt, NULL);
7486 free (stmt_info);
7487 }
7488
7489
7490 /* Function get_vectype_for_scalar_type_and_size.
7491
7492 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
7493 by the target. */
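/* Illustration (hedged, target-dependent): on a target that provides
   16-byte vector modes one would expect, for example,

     get_vectype_for_scalar_type_and_size (intSI_type_node, 16)
       to yield a vector of 4 ints (V4SImode), and
     get_vectype_for_scalar_type_and_size (double_type_node, 16)
       to yield a vector of 2 doubles (V2DFmode),

   while NULL_TREE is returned whenever no suitable vector mode exists,
   e.g. when SIZE divided by the element size is <= 1.  */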
7494
7495 static tree
7496 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
7497 {
7498 machine_mode inner_mode = TYPE_MODE (scalar_type);
7499 machine_mode simd_mode;
7500 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
7501 int nunits;
7502 tree vectype;
7503
7504 if (nbytes == 0)
7505 return NULL_TREE;
7506
7507 if (GET_MODE_CLASS (inner_mode) != MODE_INT
7508 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
7509 return NULL_TREE;
7510
7511 /* For vector types of elements whose mode precision doesn't
7512 match their type's precision we use an element type of mode
7513 precision. The vectorization routines will have to make sure
7514 they support the proper result truncation/extension.
7515 We also make sure to build vector types with INTEGER_TYPE
7516 component type only. */
7517 if (INTEGRAL_TYPE_P (scalar_type)
7518 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
7519 || TREE_CODE (scalar_type) != INTEGER_TYPE))
7520 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
7521 TYPE_UNSIGNED (scalar_type));
7522
7523 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
7524 When the component mode passes the above test simply use a type
7525 corresponding to that mode. The theory is that any use that
7526 would cause problems with this will disable vectorization anyway. */
7527 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
7528 && !INTEGRAL_TYPE_P (scalar_type))
7529 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
7530
7531 /* We can't build a vector type of elements with alignment bigger than
7532 their size. */
7533 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
7534 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
7535 TYPE_UNSIGNED (scalar_type));
7536
7537 /* If we fell back to using the mode, fail if there was
7538 no scalar type for it. */
7539 if (scalar_type == NULL_TREE)
7540 return NULL_TREE;
7541
7542 /* If no size was supplied use the mode the target prefers. Otherwise
7543 lookup a vector mode of the specified size. */
7544 if (size == 0)
7545 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
7546 else
7547 simd_mode = mode_for_vector (inner_mode, size / nbytes);
7548 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
7549 if (nunits <= 1)
7550 return NULL_TREE;
7551
7552 vectype = build_vector_type (scalar_type, nunits);
7553
7554 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
7555 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
7556 return NULL_TREE;
7557
7558 return vectype;
7559 }
7560
7561 unsigned int current_vector_size;
7562
7563 /* Function get_vectype_for_scalar_type.
7564
7565 Returns the vector type corresponding to SCALAR_TYPE as supported
7566 by the target. */
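/* Usage note (illustrative; SCALAR_DEST is a hypothetical name): the
   first successful call latches current_vector_size, and later calls
   return vector types of that same size.  A typical analysis-time use
   is simply

     tree vectype = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
     if (!vectype)
       return false;

   giving up on the statement when the scalar type has no supported
   vector counterpart on the target.  */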
7567
7568 tree
7569 get_vectype_for_scalar_type (tree scalar_type)
7570 {
7571 tree vectype;
7572 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
7573 current_vector_size);
7574 if (vectype
7575 && current_vector_size == 0)
7576 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
7577 return vectype;
7578 }
7579
7580 /* Function get_same_sized_vectype
7581
7582 Returns a vector type corresponding to SCALAR_TYPE of size
7583 VECTOR_TYPE if supported by the target. */
7584
7585 tree
7586 get_same_sized_vectype (tree scalar_type, tree vector_type)
7587 {
7588 return get_vectype_for_scalar_type_and_size
7589 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
7590 }
7591
7592 /* Function vect_is_simple_use.
7593
7594 Input:
7595 LOOP_VINFO - the vect info of the loop that is being vectorized.
7596 BB_VINFO - the vect info of the basic block that is being vectorized.
7597 OPERAND - operand of STMT in the loop or bb.
7598 DEF_STMT, DEF - the defining stmt and the value it defines, in case OPERAND is an SSA_NAME.
7599
7600 Returns whether a stmt with OPERAND can be vectorized.
7601 For loops, supportable operands are constants, loop invariants, and operands
7602 that are defined by the current iteration of the loop. Unsupportable
7603 operands are those that are defined by a previous iteration of the loop (as
7604 is the case in reduction/induction computations).
7605 For basic blocks, supportable operands are constants and bb invariants.
7606 For now, operands defined outside the basic block are not supported. */
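/* Usage sketch (illustrative only; OP and the locals are hypothetical):
   a vectorizable_* routine checking one of its operands would call

     gimple def_stmt;
     tree def;
     enum vect_def_type dt;
     if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo,
                              &def_stmt, &def, &dt))
       return false;

   passing NULL for whichever of LOOP_VINFO/BB_VINFO does not apply, and
   then dispatching on DT, e.g. broadcasting the operand when it is
   vect_constant_def or vect_external_def.  */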
7607
7608 bool
7609 vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
7610 bb_vec_info bb_vinfo, gimple *def_stmt,
7611 tree *def, enum vect_def_type *dt)
7612 {
7613 basic_block bb;
7614 stmt_vec_info stmt_vinfo;
7615 struct loop *loop = NULL;
7616
7617 if (loop_vinfo)
7618 loop = LOOP_VINFO_LOOP (loop_vinfo);
7619
7620 *def_stmt = NULL;
7621 *def = NULL_TREE;
7622
7623 if (dump_enabled_p ())
7624 {
7625 dump_printf_loc (MSG_NOTE, vect_location,
7626 "vect_is_simple_use: operand ");
7627 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
7628 dump_printf (MSG_NOTE, "\n");
7629 }
7630
7631 if (CONSTANT_CLASS_P (operand))
7632 {
7633 *dt = vect_constant_def;
7634 return true;
7635 }
7636
7637 if (is_gimple_min_invariant (operand))
7638 {
7639 *def = operand;
7640 *dt = vect_external_def;
7641 return true;
7642 }
7643
7644 if (TREE_CODE (operand) == PAREN_EXPR)
7645 {
7646 if (dump_enabled_p ())
7647 dump_printf_loc (MSG_NOTE, vect_location, "non-associatable copy.\n");
7648 operand = TREE_OPERAND (operand, 0);
7649 }
7650
7651 if (TREE_CODE (operand) != SSA_NAME)
7652 {
7653 if (dump_enabled_p ())
7654 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7655 "not ssa-name.\n");
7656 return false;
7657 }
7658
7659 *def_stmt = SSA_NAME_DEF_STMT (operand);
7660 if (*def_stmt == NULL)
7661 {
7662 if (dump_enabled_p ())
7663 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7664 "no def_stmt.\n");
7665 return false;
7666 }
7667
7668 if (dump_enabled_p ())
7669 {
7670 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
7671 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
7672 dump_printf (MSG_NOTE, "\n");
7673 }
7674
7675 /* Empty stmt is expected only in case of a function argument.
7676 (Otherwise we expect a PHI node or a GIMPLE_ASSIGN.) */
7677 if (gimple_nop_p (*def_stmt))
7678 {
7679 *def = operand;
7680 *dt = vect_external_def;
7681 return true;
7682 }
7683
7684 bb = gimple_bb (*def_stmt);
7685
7686 if ((loop && !flow_bb_inside_loop_p (loop, bb))
7687 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
7688 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
7689 *dt = vect_external_def;
7690 else
7691 {
7692 stmt_vinfo = vinfo_for_stmt (*def_stmt);
7693 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
7694 }
7695
7696 if (*dt == vect_unknown_def_type
7697 || (stmt
7698 && *dt == vect_double_reduction_def
7699 && gimple_code (stmt) != GIMPLE_PHI))
7700 {
7701 if (dump_enabled_p ())
7702 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7703 "Unsupported pattern.\n");
7704 return false;
7705 }
7706
7707 if (dump_enabled_p ())
7708 dump_printf_loc (MSG_NOTE, vect_location, "type of def: %d.\n", *dt);
7709
7710 switch (gimple_code (*def_stmt))
7711 {
7712 case GIMPLE_PHI:
7713 *def = gimple_phi_result (*def_stmt);
7714 break;
7715
7716 case GIMPLE_ASSIGN:
7717 *def = gimple_assign_lhs (*def_stmt);
7718 break;
7719
7720 case GIMPLE_CALL:
7721 *def = gimple_call_lhs (*def_stmt);
7722 if (*def != NULL)
7723 break;
7724 /* FALLTHRU */
7725 default:
7726 if (dump_enabled_p ())
7727 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7728 "unsupported defining stmt:\n");
7729 return false;
7730 }
7731
7732 return true;
7733 }
7734
7735 /* Function vect_is_simple_use_1.
7736
7737 Same as vect_is_simple_use but also determines the vector operand
7738 type of OPERAND and stores it to *VECTYPE. If the definition of
7739 OPERAND is vect_uninitialized_def, vect_constant_def or
7740 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
7741 is responsible for computing the best suited vector type for the
7742 scalar operand. */
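/* Usage sketch (illustrative only, same hypothetical locals as for
   vect_is_simple_use), showing the extra VECTYPE output and the usual
   fallback for constant/external defs:

     tree vectype_in;
     if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
                                &def_stmt, &def, &dt, &vectype_in))
       return false;
     if (!vectype_in)
       vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op));  */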
7743
7744 bool
7745 vect_is_simple_use_1 (tree operand, gimple stmt, loop_vec_info loop_vinfo,
7746 bb_vec_info bb_vinfo, gimple *def_stmt,
7747 tree *def, enum vect_def_type *dt, tree *vectype)
7748 {
7749 if (!vect_is_simple_use (operand, stmt, loop_vinfo, bb_vinfo, def_stmt,
7750 def, dt))
7751 return false;
7752
7753 /* Now get a vector type if the def is internal, otherwise supply
7754 NULL_TREE and leave it up to the caller to figure out a proper
7755 type for the use stmt. */
7756 if (*dt == vect_internal_def
7757 || *dt == vect_induction_def
7758 || *dt == vect_reduction_def
7759 || *dt == vect_double_reduction_def
7760 || *dt == vect_nested_cycle)
7761 {
7762 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
7763
7764 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7765 && !STMT_VINFO_RELEVANT (stmt_info)
7766 && !STMT_VINFO_LIVE_P (stmt_info))
7767 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7768
7769 *vectype = STMT_VINFO_VECTYPE (stmt_info);
7770 gcc_assert (*vectype != NULL_TREE);
7771 }
7772 else if (*dt == vect_uninitialized_def
7773 || *dt == vect_constant_def
7774 || *dt == vect_external_def)
7775 *vectype = NULL_TREE;
7776 else
7777 gcc_unreachable ();
7778
7779 return true;
7780 }
7781
7782
7783 /* Function supportable_widening_operation
7784
7785 Check whether an operation represented by the code CODE is a
7786 widening operation that is supported by the target platform in
7787 vector form (i.e., when operating on arguments of type VECTYPE_IN
7788 producing a result of type VECTYPE_OUT).
7789
7790 Widening operations we currently support are NOP (CONVERT), FLOAT
7791 and WIDEN_MULT. This function checks if these operations are supported
7792 by the target platform either directly (via vector tree-codes), or via
7793 target builtins.
7794
7795 Output:
7796 - CODE1 and CODE2 are codes of vector operations to be used when
7797 vectorizing the operation, if available.
7798 - MULTI_STEP_CVT determines the number of required intermediate steps in
7799 case of multi-step conversion (like char->short->int - in that case
7800 MULTI_STEP_CVT will be 1).
7801 - INTERM_TYPES contains the intermediate type required to perform the
7802 widening operation (short in the above example). */
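/* Worked example (hedged; the exact codes depend on the target and on
   endianness): for the char -> int conversion mentioned above, with
   CHAR_VECTYPE/INT_VECTYPE as hypothetical placeholders for the input
   and output vector types,

     supportable_widening_operation (NOP_EXPR, stmt, int_vectype,
                                     char_vectype, &code1, &code2,
                                     &multi_step_cvt, &interm_types)

   would typically return true with CODE1/CODE2 being
   VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR, MULTI_STEP_CVT = 1 and
   INTERM_TYPES holding a vector-of-short type, i.e. the widening is
   performed char -> short -> int with an unpack step per level.  */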
7803
7804 bool
7805 supportable_widening_operation (enum tree_code code, gimple stmt,
7806 tree vectype_out, tree vectype_in,
7807 enum tree_code *code1, enum tree_code *code2,
7808 int *multi_step_cvt,
7809 vec<tree> *interm_types)
7810 {
7811 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7812 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
7813 struct loop *vect_loop = NULL;
7814 machine_mode vec_mode;
7815 enum insn_code icode1, icode2;
7816 optab optab1, optab2;
7817 tree vectype = vectype_in;
7818 tree wide_vectype = vectype_out;
7819 enum tree_code c1, c2;
7820 int i;
7821 tree prev_type, intermediate_type;
7822 machine_mode intermediate_mode, prev_mode;
7823 optab optab3, optab4;
7824
7825 *multi_step_cvt = 0;
7826 if (loop_info)
7827 vect_loop = LOOP_VINFO_LOOP (loop_info);
7828
7829 switch (code)
7830 {
7831 case WIDEN_MULT_EXPR:
7832 /* The result of a vectorized widening operation usually requires
7833 two vectors (because the widened results do not fit into one vector).
7834 The generated vector results would normally be expected to be
7835 generated in the same order as in the original scalar computation,
7836 i.e. if 8 results are generated in each vector iteration, they are
7837 to be organized as follows:
7838 vect1: [res1,res2,res3,res4],
7839 vect2: [res5,res6,res7,res8].
7840
7841 However, in the special case that the result of the widening
7842 operation is used in a reduction computation only, the order doesn't
7843 matter (because when vectorizing a reduction we change the order of
7844 the computation). Some targets can take advantage of this and
7845 generate more efficient code. For example, targets like Altivec,
7846 that support widen_mult using a sequence of {mult_even,mult_odd}
7847 generate the following vectors:
7848 vect1: [res1,res3,res5,res7],
7849 vect2: [res2,res4,res6,res8].
7850
7851 When vectorizing outer-loops, we execute the inner-loop sequentially
7852 (each vectorized inner-loop iteration contributes to VF outer-loop
7853 iterations in parallel). We therefore don't allow changing the
7854 order of the computation in the inner-loop during outer-loop
7855 vectorization. */
7856 /* TODO: Another case in which order doesn't *really* matter is when we
7857 widen and then contract again, e.g. (short)((int)x * y >> 8).
7858 Normally, pack_trunc performs an even/odd permute, whereas the
7859 repack from an even/odd expansion would be an interleave, which
7860 would be significantly simpler for e.g. AVX2. */
7861 /* In any case, in order to avoid duplicating the code below, recurse
7862 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
7863 are properly set up for the caller. If we fail, we'll continue with
7864 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
7865 if (vect_loop
7866 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
7867 && !nested_in_vect_loop_p (vect_loop, stmt)
7868 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
7869 stmt, vectype_out, vectype_in,
7870 code1, code2, multi_step_cvt,
7871 interm_types))
7872 {
7873 /* Elements in a vector with vect_used_by_reduction property cannot
7874 be reordered if the use chain with this property does not have the
7875 same operation. One such example is s += a * b, where elements
7876 in a and b cannot be reordered. Here we check if the vector defined
7877 by STMT is only directly used in the reduction statement. */
7878 tree lhs = gimple_assign_lhs (stmt);
7879 use_operand_p dummy;
7880 gimple use_stmt;
7881 stmt_vec_info use_stmt_info = NULL;
7882 if (single_imm_use (lhs, &dummy, &use_stmt)
7883 && (use_stmt_info = vinfo_for_stmt (use_stmt))
7884 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
7885 return true;
7886 }
7887 c1 = VEC_WIDEN_MULT_LO_EXPR;
7888 c2 = VEC_WIDEN_MULT_HI_EXPR;
7889 break;
7890
7891 case VEC_WIDEN_MULT_EVEN_EXPR:
7892 /* Support the recursion induced just above. */
7893 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
7894 c2 = VEC_WIDEN_MULT_ODD_EXPR;
7895 break;
7896
7897 case WIDEN_LSHIFT_EXPR:
7898 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
7899 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
7900 break;
7901
7902 CASE_CONVERT:
7903 c1 = VEC_UNPACK_LO_EXPR;
7904 c2 = VEC_UNPACK_HI_EXPR;
7905 break;
7906
7907 case FLOAT_EXPR:
7908 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
7909 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
7910 break;
7911
7912 case FIX_TRUNC_EXPR:
7913 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
7914 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
7915 computing the operation. */
7916 return false;
7917
7918 default:
7919 gcc_unreachable ();
7920 }
7921
7922 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
7923 {
7924 enum tree_code ctmp = c1;
7925 c1 = c2;
7926 c2 = ctmp;
7927 }
7928
7929 if (code == FIX_TRUNC_EXPR)
7930 {
7931 /* The signedness is determined from output operand. */
7932 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
7933 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
7934 }
7935 else
7936 {
7937 optab1 = optab_for_tree_code (c1, vectype, optab_default);
7938 optab2 = optab_for_tree_code (c2, vectype, optab_default);
7939 }
7940
7941 if (!optab1 || !optab2)
7942 return false;
7943
7944 vec_mode = TYPE_MODE (vectype);
7945 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
7946 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
7947 return false;
7948
7949 *code1 = c1;
7950 *code2 = c2;
7951
7952 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
7953 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
7954 return true;
7955
7956 /* Check if it's a multi-step conversion that can be done using intermediate
7957 types. */
7958
7959 prev_type = vectype;
7960 prev_mode = vec_mode;
7961
7962 if (!CONVERT_EXPR_CODE_P (code))
7963 return false;
7964
7965 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
7966 intermediate steps in the promotion sequence. We try
7967 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
7968 not. */
7969 interm_types->create (MAX_INTERM_CVT_STEPS);
7970 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
7971 {
7972 intermediate_mode = insn_data[icode1].operand[0].mode;
7973 intermediate_type
7974 = lang_hooks.types.type_for_mode (intermediate_mode,
7975 TYPE_UNSIGNED (prev_type));
7976 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
7977 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
7978
7979 if (!optab3 || !optab4
7980 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
7981 || insn_data[icode1].operand[0].mode != intermediate_mode
7982 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
7983 || insn_data[icode2].operand[0].mode != intermediate_mode
7984 || ((icode1 = optab_handler (optab3, intermediate_mode))
7985 == CODE_FOR_nothing)
7986 || ((icode2 = optab_handler (optab4, intermediate_mode))
7987 == CODE_FOR_nothing))
7988 break;
7989
7990 interm_types->quick_push (intermediate_type);
7991 (*multi_step_cvt)++;
7992
7993 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
7994 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
7995 return true;
7996
7997 prev_type = intermediate_type;
7998 prev_mode = intermediate_mode;
7999 }
8000
8001 interm_types->release ();
8002 return false;
8003 }
8004
8005
8006 /* Function supportable_narrowing_operation
8007
8008 Check whether an operation represented by the code CODE is a
8009 narrowing operation that is supported by the target platform in
8010 vector form (i.e., when operating on arguments of type VECTYPE_IN
8011 and producing a result of type VECTYPE_OUT).
8012
8013 Narrowing operations we currently support are NOP (CONVERT) and
8014 FIX_TRUNC. This function checks if these operations are supported by
8015 the target platform directly via vector tree-codes.
8016
8017 Output:
8018 - CODE1 is the code of a vector operation to be used when
8019 vectorizing the operation, if available.
8020 - MULTI_STEP_CVT determines the number of required intermediate steps in
8021 case of multi-step conversion (like int->short->char - in that case
8022 MULTI_STEP_CVT will be 1).
8023 - INTERM_TYPES contains the intermediate type required to perform the
8024 narrowing operation (short in the above example). */
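/* Worked example (hedged; the exact codes depend on the target): for
   the int -> char conversion mentioned above, with INT_VECTYPE and
   CHAR_VECTYPE as hypothetical placeholders for the input and output
   vector types,

     supportable_narrowing_operation (NOP_EXPR, char_vectype, int_vectype,
                                      &code1, &multi_step_cvt,
                                      &interm_types)

   would typically return true with CODE1 = VEC_PACK_TRUNC_EXPR,
   MULTI_STEP_CVT = 1 and INTERM_TYPES holding a vector-of-short type,
   i.e. the narrowing is performed int -> short -> char with a pack
   step per level.  */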
8025
8026 bool
8027 supportable_narrowing_operation (enum tree_code code,
8028 tree vectype_out, tree vectype_in,
8029 enum tree_code *code1, int *multi_step_cvt,
8030 vec<tree> *interm_types)
8031 {
8032 machine_mode vec_mode;
8033 enum insn_code icode1;
8034 optab optab1, interm_optab;
8035 tree vectype = vectype_in;
8036 tree narrow_vectype = vectype_out;
8037 enum tree_code c1;
8038 tree intermediate_type;
8039 machine_mode intermediate_mode, prev_mode;
8040 int i;
8041 bool uns;
8042
8043 *multi_step_cvt = 0;
8044 switch (code)
8045 {
8046 CASE_CONVERT:
8047 c1 = VEC_PACK_TRUNC_EXPR;
8048 break;
8049
8050 case FIX_TRUNC_EXPR:
8051 c1 = VEC_PACK_FIX_TRUNC_EXPR;
8052 break;
8053
8054 case FLOAT_EXPR:
8055 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
8056 tree code and optabs used for computing the operation. */
8057 return false;
8058
8059 default:
8060 gcc_unreachable ();
8061 }
8062
8063 if (code == FIX_TRUNC_EXPR)
8064 /* The signedness is determined from output operand. */
8065 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8066 else
8067 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8068
8069 if (!optab1)
8070 return false;
8071
8072 vec_mode = TYPE_MODE (vectype);
8073 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
8074 return false;
8075
8076 *code1 = c1;
8077
8078 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8079 return true;
8080
8081 /* Check if it's a multi-step conversion that can be done using intermediate
8082 types. */
8083 prev_mode = vec_mode;
8084 if (code == FIX_TRUNC_EXPR)
8085 uns = TYPE_UNSIGNED (vectype_out);
8086 else
8087 uns = TYPE_UNSIGNED (vectype);
8088
8089 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
8090 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
8091 costly than signed. */
8092 if (code == FIX_TRUNC_EXPR && uns)
8093 {
8094 enum insn_code icode2;
8095
8096 intermediate_type
8097 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
8098 interm_optab
8099 = optab_for_tree_code (c1, intermediate_type, optab_default);
8100 if (interm_optab != unknown_optab
8101 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
8102 && insn_data[icode1].operand[0].mode
8103 == insn_data[icode2].operand[0].mode)
8104 {
8105 uns = false;
8106 optab1 = interm_optab;
8107 icode1 = icode2;
8108 }
8109 }
8110
8111 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8112 intermediate steps in the narrowing sequence. We try
8113 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
8114 interm_types->create (MAX_INTERM_CVT_STEPS);
8115 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8116 {
8117 intermediate_mode = insn_data[icode1].operand[0].mode;
8118 intermediate_type
8119 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
8120 interm_optab
8121 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
8122 optab_default);
8123 if (!interm_optab
8124 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
8125 || insn_data[icode1].operand[0].mode != intermediate_mode
8126 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
8127 == CODE_FOR_nothing))
8128 break;
8129
8130 interm_types->quick_push (intermediate_type);
8131 (*multi_step_cvt)++;
8132
8133 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8134 return true;
8135
8136 prev_mode = intermediate_mode;
8137 optab1 = interm_optab;
8138 }
8139
8140 interm_types->release ();
8141 return false;
8142 }