1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "backend.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "rtl.h"
30 #include "ssa.h"
31 #include "alias.h"
32 #include "fold-const.h"
33 #include "stor-layout.h"
34 #include "target.h"
35 #include "gimple-pretty-print.h"
36 #include "internal-fn.h"
37 #include "tree-eh.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-cfg.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "cfgloop.h"
44 #include "tree-ssa-loop.h"
45 #include "tree-scalar-evolution.h"
46 #include "flags.h"
47 #include "insn-config.h"
48 #include "recog.h" /* FIXME: for insn_data */
49 #include "insn-codes.h"
50 #include "optabs-tree.h"
51 #include "diagnostic-core.h"
52 #include "tree-vectorizer.h"
53 #include "cgraph.h"
54 #include "builtins.h"
55
56 /* For lang_hooks.types.type_for_mode. */
57 #include "langhooks.h"
58
59 /* Return the vectorized type for the given statement. */
60
61 tree
62 stmt_vectype (struct _stmt_vec_info *stmt_info)
63 {
64 return STMT_VINFO_VECTYPE (stmt_info);
65 }
66
67 /* Return TRUE iff the given statement is in an inner loop relative to
68 the loop being vectorized. */
69 bool
70 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
71 {
72 gimple *stmt = STMT_VINFO_STMT (stmt_info);
73 basic_block bb = gimple_bb (stmt);
74 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
75 struct loop* loop;
76
77 if (!loop_vinfo)
78 return false;
79
80 loop = LOOP_VINFO_LOOP (loop_vinfo);
81
82 return (bb->loop_father == loop->inner);
83 }
84
85 /* Record the cost of a statement, either by directly informing the
86 target model or by saving it in a vector for later processing.
87 Return a preliminary estimate of the statement's cost. */
88
89 unsigned
90 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
91 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
92 int misalign, enum vect_cost_model_location where)
93 {
94 if (body_cost_vec)
95 {
96 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
97 stmt_info_for_cost si = { count, kind,
98 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
99 misalign };
100 body_cost_vec->safe_push (si);
101 return (unsigned)
102 (builtin_vectorization_cost (kind, vectype, misalign) * count);
103 }
104 else
105 return add_stmt_cost (stmt_info->vinfo->target_cost_data,
106 count, kind, stmt_info, misalign, where);
107 }
108
109 /* Return a variable of type ELEM_TYPE[NELEMS]. */
110
111 static tree
112 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
113 {
114 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
115 "vect_array");
116 }
117
118 /* ARRAY is an array of vectors created by create_vector_array.
119 Return an SSA_NAME for the vector in index N. The reference
120 is part of the vectorization of STMT and the vector is associated
121 with scalar destination SCALAR_DEST. */
122
123 static tree
124 read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
125 tree array, unsigned HOST_WIDE_INT n)
126 {
127 tree vect_type, vect, vect_name, array_ref;
128 gimple *new_stmt;
129
130 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
131 vect_type = TREE_TYPE (TREE_TYPE (array));
132 vect = vect_create_destination_var (scalar_dest, vect_type);
133 array_ref = build4 (ARRAY_REF, vect_type, array,
134 build_int_cst (size_type_node, n),
135 NULL_TREE, NULL_TREE);
136
137 new_stmt = gimple_build_assign (vect, array_ref);
138 vect_name = make_ssa_name (vect, new_stmt);
139 gimple_assign_set_lhs (new_stmt, vect_name);
140 vect_finish_stmt_generation (stmt, new_stmt, gsi);
141
142 return vect_name;
143 }
144
145 /* ARRAY is an array of vectors created by create_vector_array.
146 Emit code to store SSA_NAME VECT in index N of the array.
147 The store is part of the vectorization of STMT. */
148
149 static void
150 write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
151 tree array, unsigned HOST_WIDE_INT n)
152 {
153 tree array_ref;
154 gimple *new_stmt;
155
156 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
157 build_int_cst (size_type_node, n),
158 NULL_TREE, NULL_TREE);
159
160 new_stmt = gimple_build_assign (array_ref, vect);
161 vect_finish_stmt_generation (stmt, new_stmt, gsi);
162 }
163
164 /* PTR is a pointer to an array of type TYPE. Return a representation
165 of *PTR. The memory reference replaces those in FIRST_DR
166 (and its group). */
167
168 static tree
169 create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
170 {
171 tree mem_ref, alias_ptr_type;
172
173 alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
174 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
175 /* Arrays have the same alignment as their type. */
176 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
177 return mem_ref;
178 }
179
180 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
181
182 /* Function vect_mark_relevant.
183
184 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
185
186 static void
187 vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
188 enum vect_relevant relevant, bool live_p,
189 bool used_in_pattern)
190 {
191 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
192 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
193 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
194 gimple *pattern_stmt;
195
196 if (dump_enabled_p ())
197 dump_printf_loc (MSG_NOTE, vect_location,
198 "mark relevant %d, live %d.\n", relevant, live_p);
199
200 /* If this stmt is an original stmt in a pattern, we might need to mark its
201 related pattern stmt instead of the original stmt. However, such stmts
202 may have their own uses that are not in any pattern; in such cases the
203 stmt itself should be marked. */
204 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
205 {
206 bool found = false;
207 if (!used_in_pattern)
208 {
209 imm_use_iterator imm_iter;
210 use_operand_p use_p;
211 gimple *use_stmt;
212 tree lhs;
213 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
214 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
215
216 if (is_gimple_assign (stmt))
217 lhs = gimple_assign_lhs (stmt);
218 else
219 lhs = gimple_call_lhs (stmt);
220
221 /* This use is outside the pattern; if LHS has other uses that are
222 pattern uses, we should mark the stmt itself, and not the pattern
223 stmt. */
224 if (lhs && TREE_CODE (lhs) == SSA_NAME)
225 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
226 {
227 if (is_gimple_debug (USE_STMT (use_p)))
228 continue;
229 use_stmt = USE_STMT (use_p);
230
231 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
232 continue;
233
234 if (vinfo_for_stmt (use_stmt)
235 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
236 {
237 found = true;
238 break;
239 }
240 }
241 }
242
243 if (!found)
244 {
245 /* This is the last stmt in a sequence that was detected as a
246 pattern that can potentially be vectorized. Don't mark the stmt
247 as relevant/live because it's not going to be vectorized.
248 Instead mark the pattern-stmt that replaces it. */
249
250 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
251
252 if (dump_enabled_p ())
253 dump_printf_loc (MSG_NOTE, vect_location,
254 "last stmt in pattern. don't mark"
255 " relevant/live.\n");
256 stmt_info = vinfo_for_stmt (pattern_stmt);
257 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
258 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
259 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
260 stmt = pattern_stmt;
261 }
262 }
263
264 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
265 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
266 STMT_VINFO_RELEVANT (stmt_info) = relevant;
267
268 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
269 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
270 {
271 if (dump_enabled_p ())
272 dump_printf_loc (MSG_NOTE, vect_location,
273 "already marked relevant/live.\n");
274 return;
275 }
276
277 worklist->safe_push (stmt);
278 }
279
280
281 /* Function vect_stmt_relevant_p.
282
283 Return true if STMT, in the loop that is represented by LOOP_VINFO, is
284 "relevant for vectorization".
285
286 A stmt is considered "relevant for vectorization" if:
287 - it has uses outside the loop.
288 - it has vdefs (it alters memory).
289 - it is a control stmt in the loop (except for the exit condition).
290
291 CHECKME: what other side effects would the vectorizer allow? */
292
293 static bool
294 vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
295 enum vect_relevant *relevant, bool *live_p)
296 {
297 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
298 ssa_op_iter op_iter;
299 imm_use_iterator imm_iter;
300 use_operand_p use_p;
301 def_operand_p def_p;
302
303 *relevant = vect_unused_in_scope;
304 *live_p = false;
305
306 /* cond stmt other than loop exit cond. */
307 if (is_ctrl_stmt (stmt)
308 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
309 != loop_exit_ctrl_vec_info_type)
310 *relevant = vect_used_in_scope;
311
312 /* changing memory. */
313 if (gimple_code (stmt) != GIMPLE_PHI)
314 if (gimple_vdef (stmt)
315 && !gimple_clobber_p (stmt))
316 {
317 if (dump_enabled_p ())
318 dump_printf_loc (MSG_NOTE, vect_location,
319 "vec_stmt_relevant_p: stmt has vdefs.\n");
320 *relevant = vect_used_in_scope;
321 }
322
323 /* uses outside the loop. */
324 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
325 {
326 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
327 {
328 basic_block bb = gimple_bb (USE_STMT (use_p));
329 if (!flow_bb_inside_loop_p (loop, bb))
330 {
331 if (dump_enabled_p ())
332 dump_printf_loc (MSG_NOTE, vect_location,
333 "vec_stmt_relevant_p: used out of loop.\n");
334
335 if (is_gimple_debug (USE_STMT (use_p)))
336 continue;
337
338 /* We expect all such uses to be in the loop exit phis
339 (because of loop-closed SSA form). */
340 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
341 gcc_assert (bb == single_exit (loop)->dest);
342
343 *live_p = true;
344 }
345 }
346 }
347
348 return (*live_p || *relevant);
349 }
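
/* As an illustration, consider a hypothetical loop:

     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + 1;    <-- has a vdef: relevant (vect_used_in_scope)
         s = s + b[i];       <-- s is used after the loop, see below
       }
     ... = s;                <-- use in the loop-closed exit phi: live

   The store is relevant because it alters memory; the stmt defining s is
   considered live because its value is used outside the loop.  Other stmts
   become relevant only when a relevant stmt uses them (see process_use).  */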
350
351
352 /* Function exist_non_indexing_operands_for_use_p
353
354 USE is one of the uses attached to STMT. Check if USE is
355 used in STMT for anything other than indexing an array. */
356
357 static bool
358 exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
359 {
360 tree operand;
361 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
362
363 /* USE corresponds to some operand in STMT. If there is no data
364 reference in STMT, then any operand that corresponds to USE
365 is not indexing an array. */
366 if (!STMT_VINFO_DATA_REF (stmt_info))
367 return true;
368
369 /* STMT has a data_ref. FORNOW this means that it's of one of
370 the following forms:
371 -1- ARRAY_REF = var
372 -2- var = ARRAY_REF
373 (This should have been verified in analyze_data_refs).
374
375 'var' in the second case corresponds to a def, not a use,
376 so USE cannot correspond to any operands that are not used
377 for array indexing.
378
379 Therefore, all we need to check is if STMT falls into the
380 first case, and whether var corresponds to USE. */
381
382 if (!gimple_assign_copy_p (stmt))
383 {
384 if (is_gimple_call (stmt)
385 && gimple_call_internal_p (stmt))
386 switch (gimple_call_internal_fn (stmt))
387 {
388 case IFN_MASK_STORE:
389 operand = gimple_call_arg (stmt, 3);
390 if (operand == use)
391 return true;
392 /* FALLTHRU */
393 case IFN_MASK_LOAD:
394 operand = gimple_call_arg (stmt, 2);
395 if (operand == use)
396 return true;
397 break;
398 default:
399 break;
400 }
401 return false;
402 }
403
404 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
405 return false;
406 operand = gimple_assign_rhs1 (stmt);
407 if (TREE_CODE (operand) != SSA_NAME)
408 return false;
409
410 if (operand == use)
411 return true;
412
413 return false;
414 }
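
/* For example (hypothetical GIMPLE): for the store  a[i_1] = x_2  the use of
   x_2 is the stored value, so true is returned for it, while i_1 only feeds
   the address computation and false is returned; for the load  y_3 = a[i_1]
   the LHS is an SSA_NAME, so false is returned for any use.  */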
415
416
417 /*
418 Function process_use.
419
420 Inputs:
421 - a USE in STMT in a loop represented by LOOP_VINFO
422 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
423 that defined USE. This is done by calling mark_relevant and passing it
424 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
425 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
426 be performed.
427
428 Outputs:
429 Generally, LIVE_P and RELEVANT are used to define the liveness and
430 relevance info of the DEF_STMT of this USE:
431 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
432 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
433 Exceptions:
434 - case 1: If USE is used only for address computations (e.g. array indexing),
435 which does not need to be directly vectorized, then the liveness/relevance
436 of the respective DEF_STMT is left unchanged.
437 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
438 skip DEF_STMT because it has already been processed.
439 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
440 be modified accordingly.
441
442 Return true if everything is as expected. Return false otherwise. */
443
444 static bool
445 process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
446 enum vect_relevant relevant, vec<gimple *> *worklist,
447 bool force)
448 {
449 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
450 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
451 stmt_vec_info dstmt_vinfo;
452 basic_block bb, def_bb;
453 gimple *def_stmt;
454 enum vect_def_type dt;
455
456 /* case 1: we are only interested in uses that need to be vectorized. Uses
457 that are used for address computation are not considered relevant. */
458 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
459 return true;
460
461 if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
462 {
463 if (dump_enabled_p ())
464 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
465 "not vectorized: unsupported use in stmt.\n");
466 return false;
467 }
468
469 if (!def_stmt || gimple_nop_p (def_stmt))
470 return true;
471
472 def_bb = gimple_bb (def_stmt);
473 if (!flow_bb_inside_loop_p (loop, def_bb))
474 {
475 if (dump_enabled_p ())
476 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
477 return true;
478 }
479
480 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
481 DEF_STMT must have already been processed, because this should be the
482 only way that STMT, which is a reduction-phi, was put in the worklist,
483 as there should be no other uses for DEF_STMT in the loop. So we just
484 check that everything is as expected, and we are done. */
485 dstmt_vinfo = vinfo_for_stmt (def_stmt);
486 bb = gimple_bb (stmt);
487 if (gimple_code (stmt) == GIMPLE_PHI
488 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
489 && gimple_code (def_stmt) != GIMPLE_PHI
490 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
491 && bb->loop_father == def_bb->loop_father)
492 {
493 if (dump_enabled_p ())
494 dump_printf_loc (MSG_NOTE, vect_location,
495 "reduc-stmt defining reduc-phi in the same nest.\n");
496 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
497 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
498 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
499 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
500 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
501 return true;
502 }
503
504 /* case 3a: outer-loop stmt defining an inner-loop stmt:
505 outer-loop-header-bb:
506 d = def_stmt
507 inner-loop:
508 stmt # use (d)
509 outer-loop-tail-bb:
510 ... */
511 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
512 {
513 if (dump_enabled_p ())
514 dump_printf_loc (MSG_NOTE, vect_location,
515 "outer-loop def-stmt defining inner-loop stmt.\n");
516
517 switch (relevant)
518 {
519 case vect_unused_in_scope:
520 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
521 vect_used_in_scope : vect_unused_in_scope;
522 break;
523
524 case vect_used_in_outer_by_reduction:
525 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
526 relevant = vect_used_by_reduction;
527 break;
528
529 case vect_used_in_outer:
530 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
531 relevant = vect_used_in_scope;
532 break;
533
534 case vect_used_in_scope:
535 break;
536
537 default:
538 gcc_unreachable ();
539 }
540 }
541
542 /* case 3b: inner-loop stmt defining an outer-loop stmt:
543 outer-loop-header-bb:
544 ...
545 inner-loop:
546 d = def_stmt
547 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
548 stmt # use (d) */
549 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
550 {
551 if (dump_enabled_p ())
552 dump_printf_loc (MSG_NOTE, vect_location,
553 "inner-loop def-stmt defining outer-loop stmt.\n");
554
555 switch (relevant)
556 {
557 case vect_unused_in_scope:
558 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
559 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
560 vect_used_in_outer_by_reduction : vect_unused_in_scope;
561 break;
562
563 case vect_used_by_reduction:
564 relevant = vect_used_in_outer_by_reduction;
565 break;
566
567 case vect_used_in_scope:
568 relevant = vect_used_in_outer;
569 break;
570
571 default:
572 gcc_unreachable ();
573 }
574 }
575
576 vect_mark_relevant (worklist, def_stmt, relevant, live_p,
577 is_pattern_stmt_p (stmt_vinfo));
578 return true;
579 }
580
581
582 /* Function vect_mark_stmts_to_be_vectorized.
583
584 Not all stmts in the loop need to be vectorized. For example:
585
586 for i...
587 for j...
588 1. T0 = i + j
589 2. T1 = a[T0]
590
591 3. j = j + 1
592
593 Stmts 1 and 3 do not need to be vectorized, because loop control and
594 addressing of vectorized data-refs are handled differently.
595
596 This pass detects such stmts. */
597
598 bool
599 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
600 {
601 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
602 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
603 unsigned int nbbs = loop->num_nodes;
604 gimple_stmt_iterator si;
605 gimple *stmt;
606 unsigned int i;
607 stmt_vec_info stmt_vinfo;
608 basic_block bb;
609 gimple *phi;
610 bool live_p;
611 enum vect_relevant relevant, tmp_relevant;
612 enum vect_def_type def_type;
613
614 if (dump_enabled_p ())
615 dump_printf_loc (MSG_NOTE, vect_location,
616 "=== vect_mark_stmts_to_be_vectorized ===\n");
617
618 auto_vec<gimple *, 64> worklist;
619
620 /* 1. Init worklist. */
621 for (i = 0; i < nbbs; i++)
622 {
623 bb = bbs[i];
624 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
625 {
626 phi = gsi_stmt (si);
627 if (dump_enabled_p ())
628 {
629 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
630 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
631 }
632
633 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
634 vect_mark_relevant (&worklist, phi, relevant, live_p, false);
635 }
636 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
637 {
638 stmt = gsi_stmt (si);
639 if (dump_enabled_p ())
640 {
641 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
642 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
643 }
644
645 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
646 vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
647 }
648 }
649
650 /* 2. Process_worklist */
651 while (worklist.length () > 0)
652 {
653 use_operand_p use_p;
654 ssa_op_iter iter;
655
656 stmt = worklist.pop ();
657 if (dump_enabled_p ())
658 {
659 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
660 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
661 }
662
663 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
664 (DEF_STMT) as relevant/irrelevant and live/dead according to the
665 liveness and relevance properties of STMT. */
666 stmt_vinfo = vinfo_for_stmt (stmt);
667 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
668 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
669
670 /* Generally, the liveness and relevance properties of STMT are
671 propagated as is to the DEF_STMTs of its USEs:
672 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
673 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
674
675 One exception is when STMT has been identified as defining a reduction
676 variable; in this case we set the liveness/relevance as follows:
677 live_p = false
678 relevant = vect_used_by_reduction
679 This is because we distinguish between two kinds of relevant stmts -
680 those that are used by a reduction computation, and those that are
681 (also) used by a regular computation. This allows us later on to
682 identify stmts that are used solely by a reduction, and therefore the
683 order of the results that they produce does not have to be kept. */
684
685 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
686 tmp_relevant = relevant;
687 switch (def_type)
688 {
689 case vect_reduction_def:
690 switch (tmp_relevant)
691 {
692 case vect_unused_in_scope:
693 relevant = vect_used_by_reduction;
694 break;
695
696 case vect_used_by_reduction:
697 if (gimple_code (stmt) == GIMPLE_PHI)
698 break;
699 /* fall through */
700
701 default:
702 if (dump_enabled_p ())
703 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
704 "unsupported use of reduction.\n");
705 return false;
706 }
707
708 live_p = false;
709 break;
710
711 case vect_nested_cycle:
712 if (tmp_relevant != vect_unused_in_scope
713 && tmp_relevant != vect_used_in_outer_by_reduction
714 && tmp_relevant != vect_used_in_outer)
715 {
716 if (dump_enabled_p ())
717 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
718 "unsupported use of nested cycle.\n");
719
720 return false;
721 }
722
723 live_p = false;
724 break;
725
726 case vect_double_reduction_def:
727 if (tmp_relevant != vect_unused_in_scope
728 && tmp_relevant != vect_used_by_reduction)
729 {
730 if (dump_enabled_p ())
731 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
732 "unsupported use of double reduction.\n");
733
734 return false;
735 }
736
737 live_p = false;
738 break;
739
740 default:
741 break;
742 }
743
744 if (is_pattern_stmt_p (stmt_vinfo))
745 {
746 /* Pattern statements are not inserted into the code, so
747 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
748 have to scan the RHS or function arguments instead. */
749 if (is_gimple_assign (stmt))
750 {
751 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
752 tree op = gimple_assign_rhs1 (stmt);
753
754 i = 1;
755 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
756 {
757 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
758 live_p, relevant, &worklist, false)
759 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
760 live_p, relevant, &worklist, false))
761 return false;
762 i = 2;
763 }
764 for (; i < gimple_num_ops (stmt); i++)
765 {
766 op = gimple_op (stmt, i);
767 if (TREE_CODE (op) == SSA_NAME
768 && !process_use (stmt, op, loop_vinfo, live_p, relevant,
769 &worklist, false))
770 return false;
771 }
772 }
773 else if (is_gimple_call (stmt))
774 {
775 for (i = 0; i < gimple_call_num_args (stmt); i++)
776 {
777 tree arg = gimple_call_arg (stmt, i);
778 if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
779 &worklist, false))
780 return false;
781 }
782 }
783 }
784 else
785 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
786 {
787 tree op = USE_FROM_PTR (use_p);
788 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
789 &worklist, false))
790 return false;
791 }
792
793 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
794 {
795 tree off;
796 tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
797 gcc_assert (decl);
798 if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
799 &worklist, true))
800 return false;
801 }
802 } /* while worklist */
803
804 return true;
805 }
806
807
808 /* Function vect_model_simple_cost.
809
810 Models cost for simple operations, i.e. those that only emit ncopies of a
811 single op. Right now, this does not account for multiple insns that could
812 be generated for the single vector op. We will handle that shortly. */
813
814 void
815 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
816 enum vect_def_type *dt,
817 stmt_vector_for_cost *prologue_cost_vec,
818 stmt_vector_for_cost *body_cost_vec)
819 {
820 int i;
821 int inside_cost = 0, prologue_cost = 0;
822
823 /* The SLP costs were already calculated during SLP tree build. */
824 if (PURE_SLP_STMT (stmt_info))
825 return;
826
827 /* FORNOW: Assuming maximum 2 args per stmt. */
828 for (i = 0; i < 2; i++)
829 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
830 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
831 stmt_info, 0, vect_prologue);
832
833 /* Pass the inside-of-loop statements to the target-specific cost model. */
834 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
835 stmt_info, 0, vect_body);
836
837 if (dump_enabled_p ())
838 dump_printf_loc (MSG_NOTE, vect_location,
839 "vect_model_simple_cost: inside_cost = %d, "
840 "prologue_cost = %d .\n", inside_cost, prologue_cost);
841 }
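
/* A worked example with made-up inputs: for NCOPIES == 2 and
   DT == { vect_internal_def, vect_constant_def }, the constant operand adds
   one vector_stmt to the prologue (the invariant vector is built once) and
   the operation itself adds two vector_stmt entries to the loop body; the
   numbers actually reported depend on the target's cost for vector_stmt.  */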
842
843
844 /* Model cost for type demotion and promotion operations. PWR is normally
845 zero for single-step promotions and demotions. It will be one if
846 two-step promotion/demotion is required, and so on. Each additional
847 step doubles the number of instructions required. */
848
849 static void
850 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
851 enum vect_def_type *dt, int pwr)
852 {
853 int i, tmp;
854 int inside_cost = 0, prologue_cost = 0;
855 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
856 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
857 void *target_cost_data;
858
859 /* The SLP costs were already calculated during SLP tree build. */
860 if (PURE_SLP_STMT (stmt_info))
861 return;
862
863 if (loop_vinfo)
864 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
865 else
866 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
867
868 for (i = 0; i < pwr + 1; i++)
869 {
870 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
871 (i + 1) : i;
872 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
873 vec_promote_demote, stmt_info, 0,
874 vect_body);
875 }
876
877 /* FORNOW: Assuming maximum 2 args per stmt. */
878 for (i = 0; i < 2; i++)
879 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
880 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
881 stmt_info, 0, vect_prologue);
882
883 if (dump_enabled_p ())
884 dump_printf_loc (MSG_NOTE, vect_location,
885 "vect_model_promotion_demotion_cost: inside_cost = %d, "
886 "prologue_cost = %d .\n", inside_cost, prologue_cost);
887 }
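
/* A worked example: for a two-step promotion (PWR == 1) the loop above costs
   vect_pow2 (1) + vect_pow2 (2) == 2 + 4 == 6 vec_promote_demote stmts in the
   body, while a two-step demotion costs vect_pow2 (0) + vect_pow2 (1)
   == 1 + 2 == 3, the difference being the (i + 1) exponent used for
   promotions.  */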
888
889 /* Function vect_cost_group_size
890
891 For grouped load or store, return the group_size only if it is the first
892 load or store of a group, else return 1. This ensures that group size is
893 only returned once per group. */
894
895 static int
896 vect_cost_group_size (stmt_vec_info stmt_info)
897 {
898 gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
899
900 if (first_stmt == STMT_VINFO_STMT (stmt_info))
901 return GROUP_SIZE (stmt_info);
902
903 return 1;
904 }
905
906
907 /* Function vect_model_store_cost
908
909 Models cost for stores. In the case of grouped accesses, one access
910 has the overhead of the grouped access attributed to it. */
911
912 void
913 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
914 bool store_lanes_p, enum vect_def_type dt,
915 slp_tree slp_node,
916 stmt_vector_for_cost *prologue_cost_vec,
917 stmt_vector_for_cost *body_cost_vec)
918 {
919 int group_size;
920 unsigned int inside_cost = 0, prologue_cost = 0;
921 struct data_reference *first_dr;
922 gimple *first_stmt;
923
924 if (dt == vect_constant_def || dt == vect_external_def)
925 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
926 stmt_info, 0, vect_prologue);
927
928 /* Grouped access? */
929 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
930 {
931 if (slp_node)
932 {
933 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
934 group_size = 1;
935 }
936 else
937 {
938 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
939 group_size = vect_cost_group_size (stmt_info);
940 }
941
942 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
943 }
944 /* Not a grouped access. */
945 else
946 {
947 group_size = 1;
948 first_dr = STMT_VINFO_DATA_REF (stmt_info);
949 }
950
951 /* We assume that the cost of a single store-lanes instruction is
952 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
953 access is instead being provided by a permute-and-store operation,
954 include the cost of the permutes. */
955 if (!store_lanes_p && group_size > 1
956 && !STMT_VINFO_STRIDED_P (stmt_info))
957 {
958 /* Uses high and low interleave or shuffle operations for each
959 needed permute. */
960 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
961 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
962 stmt_info, 0, vect_body);
963
964 if (dump_enabled_p ())
965 dump_printf_loc (MSG_NOTE, vect_location,
966 "vect_model_store_cost: strided group_size = %d .\n",
967 group_size);
968 }
969
970 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
971 /* Costs of the stores. */
972 if (STMT_VINFO_STRIDED_P (stmt_info)
973 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
974 {
975 /* N scalar stores plus extracting the elements. */
976 inside_cost += record_stmt_cost (body_cost_vec,
977 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
978 scalar_store, stmt_info, 0, vect_body);
979 }
980 else
981 vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
982
983 if (STMT_VINFO_STRIDED_P (stmt_info))
984 inside_cost += record_stmt_cost (body_cost_vec,
985 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
986 vec_to_scalar, stmt_info, 0, vect_body);
987
988 if (dump_enabled_p ())
989 dump_printf_loc (MSG_NOTE, vect_location,
990 "vect_model_store_cost: inside_cost = %d, "
991 "prologue_cost = %d .\n", inside_cost, prologue_cost);
992 }
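
/* A worked example of the permute accounting above (hypothetical numbers):
   for a grouped store with GROUP_SIZE == 4, NCOPIES == 1 and no store-lanes
   support, ceil_log2 (4) == 2, so nstmts == 1 * 2 * 4 == 8 vec_perm stmts
   are costed for the interleaving, in addition to the stores themselves
   costed by vect_get_store_cost.  */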
993
994
995 /* Calculate cost of DR's memory access. */
996 void
997 vect_get_store_cost (struct data_reference *dr, int ncopies,
998 unsigned int *inside_cost,
999 stmt_vector_for_cost *body_cost_vec)
1000 {
1001 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1002 gimple *stmt = DR_STMT (dr);
1003 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1004
1005 switch (alignment_support_scheme)
1006 {
1007 case dr_aligned:
1008 {
1009 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1010 vector_store, stmt_info, 0,
1011 vect_body);
1012
1013 if (dump_enabled_p ())
1014 dump_printf_loc (MSG_NOTE, vect_location,
1015 "vect_model_store_cost: aligned.\n");
1016 break;
1017 }
1018
1019 case dr_unaligned_supported:
1020 {
1021 /* Here, we assign an additional cost for the unaligned store. */
1022 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1023 unaligned_store, stmt_info,
1024 DR_MISALIGNMENT (dr), vect_body);
1025 if (dump_enabled_p ())
1026 dump_printf_loc (MSG_NOTE, vect_location,
1027 "vect_model_store_cost: unaligned supported by "
1028 "hardware.\n");
1029 break;
1030 }
1031
1032 case dr_unaligned_unsupported:
1033 {
1034 *inside_cost = VECT_MAX_COST;
1035
1036 if (dump_enabled_p ())
1037 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1038 "vect_model_store_cost: unsupported access.\n");
1039 break;
1040 }
1041
1042 default:
1043 gcc_unreachable ();
1044 }
1045 }
1046
1047
1048 /* Function vect_model_load_cost
1049
1050 Models cost for loads. In the case of grouped accesses, the last access
1051 has the overhead of the grouped access attributed to it. Since unaligned
1052 accesses are supported for loads, we also account for the costs of the
1053 access scheme chosen. */
1054
1055 void
1056 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1057 bool load_lanes_p, slp_tree slp_node,
1058 stmt_vector_for_cost *prologue_cost_vec,
1059 stmt_vector_for_cost *body_cost_vec)
1060 {
1061 int group_size;
1062 gimple *first_stmt;
1063 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
1064 unsigned int inside_cost = 0, prologue_cost = 0;
1065
1066 /* Grouped accesses? */
1067 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1068 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
1069 {
1070 group_size = vect_cost_group_size (stmt_info);
1071 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1072 }
1073 /* Not a grouped access. */
1074 else
1075 {
1076 group_size = 1;
1077 first_dr = dr;
1078 }
1079
1080 /* We assume that the cost of a single load-lanes instruction is
1081 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1082 access is instead being provided by a load-and-permute operation,
1083 include the cost of the permutes. */
1084 if (!load_lanes_p && group_size > 1
1085 && !STMT_VINFO_STRIDED_P (stmt_info))
1086 {
1087 /* Uses even and odd extract operations or shuffle operations
1088 for each needed permute. */
1089 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1090 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1091 stmt_info, 0, vect_body);
1092
1093 if (dump_enabled_p ())
1094 dump_printf_loc (MSG_NOTE, vect_location,
1095 "vect_model_load_cost: strided group_size = %d .\n",
1096 group_size);
1097 }
1098
1099 /* The loads themselves. */
1100 if (STMT_VINFO_STRIDED_P (stmt_info)
1101 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1102 {
1103 /* N scalar loads plus gathering them into a vector. */
1104 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1105 inside_cost += record_stmt_cost (body_cost_vec,
1106 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1107 scalar_load, stmt_info, 0, vect_body);
1108 }
1109 else
1110 vect_get_load_cost (first_dr, ncopies,
1111 ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1112 || group_size > 1 || slp_node),
1113 &inside_cost, &prologue_cost,
1114 prologue_cost_vec, body_cost_vec, true);
1115 if (STMT_VINFO_STRIDED_P (stmt_info))
1116 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1117 stmt_info, 0, vect_body);
1118
1119 if (dump_enabled_p ())
1120 dump_printf_loc (MSG_NOTE, vect_location,
1121 "vect_model_load_cost: inside_cost = %d, "
1122 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1123 }
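
/* A worked example for the strided, non-grouped path above (hypothetical
   numbers): with a four-element vectype and NCOPIES == 2, 2 * 4 == 8
   scalar_load entries plus 2 vec_construct entries are recorded, modelling
   four element loads gathered into each of the two vectors built per
   iteration.  */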
1124
1125
1126 /* Calculate cost of DR's memory access. */
1127 void
1128 vect_get_load_cost (struct data_reference *dr, int ncopies,
1129 bool add_realign_cost, unsigned int *inside_cost,
1130 unsigned int *prologue_cost,
1131 stmt_vector_for_cost *prologue_cost_vec,
1132 stmt_vector_for_cost *body_cost_vec,
1133 bool record_prologue_costs)
1134 {
1135 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1136 gimple *stmt = DR_STMT (dr);
1137 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1138
1139 switch (alignment_support_scheme)
1140 {
1141 case dr_aligned:
1142 {
1143 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1144 stmt_info, 0, vect_body);
1145
1146 if (dump_enabled_p ())
1147 dump_printf_loc (MSG_NOTE, vect_location,
1148 "vect_model_load_cost: aligned.\n");
1149
1150 break;
1151 }
1152 case dr_unaligned_supported:
1153 {
1154 /* Here, we assign an additional cost for the unaligned load. */
1155 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1156 unaligned_load, stmt_info,
1157 DR_MISALIGNMENT (dr), vect_body);
1158
1159 if (dump_enabled_p ())
1160 dump_printf_loc (MSG_NOTE, vect_location,
1161 "vect_model_load_cost: unaligned supported by "
1162 "hardware.\n");
1163
1164 break;
1165 }
1166 case dr_explicit_realign:
1167 {
1168 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1169 vector_load, stmt_info, 0, vect_body);
1170 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1171 vec_perm, stmt_info, 0, vect_body);
1172
1173 /* FIXME: If the misalignment remains fixed across the iterations of
1174 the containing loop, the following cost should be added to the
1175 prologue costs. */
1176 if (targetm.vectorize.builtin_mask_for_load)
1177 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1178 stmt_info, 0, vect_body);
1179
1180 if (dump_enabled_p ())
1181 dump_printf_loc (MSG_NOTE, vect_location,
1182 "vect_model_load_cost: explicit realign\n");
1183
1184 break;
1185 }
1186 case dr_explicit_realign_optimized:
1187 {
1188 if (dump_enabled_p ())
1189 dump_printf_loc (MSG_NOTE, vect_location,
1190 "vect_model_load_cost: unaligned software "
1191 "pipelined.\n");
1192
1193 /* Unaligned software pipeline has a load of an address, an initial
1194 load, and possibly a mask operation to "prime" the loop. However,
1195 if this is an access in a group of loads, which provide grouped
1196 access, then the above cost should only be considered for one
1197 access in the group. Inside the loop, there is a load op
1198 and a realignment op. */
1199
1200 if (add_realign_cost && record_prologue_costs)
1201 {
1202 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1203 vector_stmt, stmt_info,
1204 0, vect_prologue);
1205 if (targetm.vectorize.builtin_mask_for_load)
1206 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1207 vector_stmt, stmt_info,
1208 0, vect_prologue);
1209 }
1210
1211 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1212 stmt_info, 0, vect_body);
1213 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1214 stmt_info, 0, vect_body);
1215
1216 if (dump_enabled_p ())
1217 dump_printf_loc (MSG_NOTE, vect_location,
1218 "vect_model_load_cost: explicit realign optimized"
1219 "\n");
1220
1221 break;
1222 }
1223
1224 case dr_unaligned_unsupported:
1225 {
1226 *inside_cost = VECT_MAX_COST;
1227
1228 if (dump_enabled_p ())
1229 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1230 "vect_model_load_cost: unsupported access.\n");
1231 break;
1232 }
1233
1234 default:
1235 gcc_unreachable ();
1236 }
1237 }
1238
1239 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1240 the loop preheader for the vectorized stmt STMT. */
1241
1242 static void
1243 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1244 {
1245 if (gsi)
1246 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1247 else
1248 {
1249 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1250 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1251
1252 if (loop_vinfo)
1253 {
1254 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1255 basic_block new_bb;
1256 edge pe;
1257
1258 if (nested_in_vect_loop_p (loop, stmt))
1259 loop = loop->inner;
1260
1261 pe = loop_preheader_edge (loop);
1262 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1263 gcc_assert (!new_bb);
1264 }
1265 else
1266 {
1267 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1268 basic_block bb;
1269 gimple_stmt_iterator gsi_bb_start;
1270
1271 gcc_assert (bb_vinfo);
1272 bb = BB_VINFO_BB (bb_vinfo);
1273 gsi_bb_start = gsi_after_labels (bb);
1274 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1275 }
1276 }
1277
1278 if (dump_enabled_p ())
1279 {
1280 dump_printf_loc (MSG_NOTE, vect_location,
1281 "created new init_stmt: ");
1282 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1283 }
1284 }
1285
1286 /* Function vect_init_vector.
1287
1288 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1289 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1290 a vector type, a vector with all elements equal to VAL is created first.
1291 Place the initialization at GSI if it is not NULL. Otherwise, place the
1292 initialization at the loop preheader.
1293 Return the DEF of INIT_STMT.
1294 It will be used in the vectorization of STMT. */
1295
1296 tree
1297 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1298 {
1299 tree new_var;
1300 gimple *init_stmt;
1301 tree vec_oprnd;
1302 tree new_temp;
1303
1304 if (TREE_CODE (type) == VECTOR_TYPE
1305 && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
1306 {
1307 if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1308 {
1309 if (CONSTANT_CLASS_P (val))
1310 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
1311 else
1312 {
1313 new_temp = make_ssa_name (TREE_TYPE (type));
1314 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1315 vect_init_vector_1 (stmt, init_stmt, gsi);
1316 val = new_temp;
1317 }
1318 }
1319 val = build_vector_from_val (type, val);
1320 }
1321
1322 new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
1323 init_stmt = gimple_build_assign (new_var, val);
1324 new_temp = make_ssa_name (new_var, init_stmt);
1325 gimple_assign_set_lhs (init_stmt, new_temp);
1326 vect_init_vector_1 (stmt, init_stmt, gsi);
1327 vec_oprnd = gimple_assign_lhs (init_stmt);
1328 return vec_oprnd;
1329 }
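
/* For example (hypothetical operands): called with VAL == 3, TYPE == a
   four-element integer vector type and GSI == NULL while vectorizing a loop,
   the code above builds the vector constant { 3, 3, 3, 3 }, assigns it to a
   new "cst_" variable in the loop preheader via vect_init_vector_1, and
   returns the SSA name of that vector.  */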
1330
1331
1332 /* Function vect_get_vec_def_for_operand.
1333
1334 OP is an operand in STMT. This function returns a (vector) def that will be
1335 used in the vectorized stmt for STMT.
1336
1337 In the case that OP is an SSA_NAME which is defined in the loop, then
1338 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1339
1340 In case OP is an invariant or constant, a new stmt that creates a vector def
1341 needs to be introduced. */
1342
1343 tree
1344 vect_get_vec_def_for_operand (tree op, gimple *stmt)
1345 {
1346 tree vec_oprnd;
1347 gimple *vec_stmt;
1348 gimple *def_stmt;
1349 stmt_vec_info def_stmt_info = NULL;
1350 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1351 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1352 enum vect_def_type dt;
1353 bool is_simple_use;
1354 tree vector_type;
1355
1356 if (dump_enabled_p ())
1357 {
1358 dump_printf_loc (MSG_NOTE, vect_location,
1359 "vect_get_vec_def_for_operand: ");
1360 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1361 dump_printf (MSG_NOTE, "\n");
1362 }
1363
1364 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1365 gcc_assert (is_simple_use);
1366 if (dump_enabled_p ())
1367 {
1368 int loc_printed = 0;
1369 if (def_stmt)
1370 {
1371 if (loc_printed)
1372 dump_printf (MSG_NOTE, " def_stmt = ");
1373 else
1374 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1375 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1376 }
1377 }
1378
1379 switch (dt)
1380 {
1381 /* operand is a constant or a loop invariant. */
1382 case vect_constant_def:
1383 case vect_external_def:
1384 {
1385 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1386 gcc_assert (vector_type);
1387 return vect_init_vector (stmt, op, vector_type, NULL);
1388 }
1389
1390 /* operand is defined inside the loop. */
1391 case vect_internal_def:
1392 {
1393 /* Get the def from the vectorized stmt. */
1394 def_stmt_info = vinfo_for_stmt (def_stmt);
1395
1396 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1397 /* Get vectorized pattern statement. */
1398 if (!vec_stmt
1399 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1400 && !STMT_VINFO_RELEVANT (def_stmt_info))
1401 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1402 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1403 gcc_assert (vec_stmt);
1404 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1405 vec_oprnd = PHI_RESULT (vec_stmt);
1406 else if (is_gimple_call (vec_stmt))
1407 vec_oprnd = gimple_call_lhs (vec_stmt);
1408 else
1409 vec_oprnd = gimple_assign_lhs (vec_stmt);
1410 return vec_oprnd;
1411 }
1412
1413 /* operand is defined by a loop header phi - reduction */
1414 case vect_reduction_def:
1415 case vect_double_reduction_def:
1416 case vect_nested_cycle:
1417 /* Code should use get_initial_def_for_reduction. */
1418 gcc_unreachable ();
1419
1420 /* operand is defined by loop-header phi - induction. */
1421 case vect_induction_def:
1422 {
1423 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1424
1425 /* Get the def from the vectorized stmt. */
1426 def_stmt_info = vinfo_for_stmt (def_stmt);
1427 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1428 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1429 vec_oprnd = PHI_RESULT (vec_stmt);
1430 else
1431 vec_oprnd = gimple_get_lhs (vec_stmt);
1432 return vec_oprnd;
1433 }
1434
1435 default:
1436 gcc_unreachable ();
1437 }
1438 }
1439
1440
1441 /* Function vect_get_vec_def_for_stmt_copy
1442
1443 Return a vector-def for an operand. This function is used when the
1444 vectorized stmt to be created (by the caller to this function) is a "copy"
1445 created in case the vectorized result cannot fit in one vector, and several
1446 copies of the vector-stmt are required. In this case the vector-def is
1447 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1448 of the stmt that defines VEC_OPRND.
1449 DT is the type of the vector def VEC_OPRND.
1450
1451 Context:
1452 In case the vectorization factor (VF) is bigger than the number
1453 of elements that can fit in a vectype (nunits), we have to generate
1454 more than one vector stmt to vectorize the scalar stmt. This situation
1455 arises when there are multiple data-types operated upon in the loop; the
1456 smallest data-type determines the VF, and as a result, when vectorizing
1457 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1458 vector stmt (each computing a vector of 'nunits' results, and together
1459 computing 'VF' results in each iteration). This function is called when
1460 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1461 which VF=16 and nunits=4, so the number of copies required is 4):
1462
1463 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1464
1465 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1466 VS1.1: vx.1 = memref1 VS1.2
1467 VS1.2: vx.2 = memref2 VS1.3
1468 VS1.3: vx.3 = memref3
1469
1470 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1471 VSnew.1: vz1 = vx.1 + ... VSnew.2
1472 VSnew.2: vz2 = vx.2 + ... VSnew.3
1473 VSnew.3: vz3 = vx.3 + ...
1474
1475 The vectorization of S1 is explained in vectorizable_load.
1476 The vectorization of S2:
1477 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1478 the function 'vect_get_vec_def_for_operand' is called to
1479 get the relevant vector-def for each operand of S2. For operand x it
1480 returns the vector-def 'vx.0'.
1481
1482 To create the remaining copies of the vector-stmt (VSnew.j), this
1483 function is called to get the relevant vector-def for each operand. It is
1484 obtained from the respective VS1.j stmt, which is recorded in the
1485 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1486
1487 For example, to obtain the vector-def 'vx.1' in order to create the
1488 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1489 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1490 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1491 and return its def ('vx.1').
1492 Overall, to create the above sequence this function will be called 3 times:
1493 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1494 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1495 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1496
1497 tree
1498 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1499 {
1500 gimple *vec_stmt_for_operand;
1501 stmt_vec_info def_stmt_info;
1502
1503 /* Do nothing; can reuse same def. */
1504 if (dt == vect_external_def || dt == vect_constant_def)
1505 return vec_oprnd;
1506
1507 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1508 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1509 gcc_assert (def_stmt_info);
1510 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1511 gcc_assert (vec_stmt_for_operand);
1513 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1514 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1515 else
1516 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1517 return vec_oprnd;
1518 }
1519
1520
1521 /* Get vectorized definitions for the operands to create a copy of an original
1522 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1523
1524 static void
1525 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1526 vec<tree> *vec_oprnds0,
1527 vec<tree> *vec_oprnds1)
1528 {
1529 tree vec_oprnd = vec_oprnds0->pop ();
1530
1531 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1532 vec_oprnds0->quick_push (vec_oprnd);
1533
1534 if (vec_oprnds1 && vec_oprnds1->length ())
1535 {
1536 vec_oprnd = vec_oprnds1->pop ();
1537 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1538 vec_oprnds1->quick_push (vec_oprnd);
1539 }
1540 }
1541
1542
1543 /* Get vectorized definitions for OP0 and OP1.
1544 REDUC_INDEX is the index of reduction operand in case of reduction,
1545 and -1 otherwise. */
1546
1547 void
1548 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1549 vec<tree> *vec_oprnds0,
1550 vec<tree> *vec_oprnds1,
1551 slp_tree slp_node, int reduc_index)
1552 {
1553 if (slp_node)
1554 {
1555 int nops = (op1 == NULL_TREE) ? 1 : 2;
1556 auto_vec<tree> ops (nops);
1557 auto_vec<vec<tree> > vec_defs (nops);
1558
1559 ops.quick_push (op0);
1560 if (op1)
1561 ops.quick_push (op1);
1562
1563 vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);
1564
1565 *vec_oprnds0 = vec_defs[0];
1566 if (op1)
1567 *vec_oprnds1 = vec_defs[1];
1568 }
1569 else
1570 {
1571 tree vec_oprnd;
1572
1573 vec_oprnds0->create (1);
1574 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1575 vec_oprnds0->quick_push (vec_oprnd);
1576
1577 if (op1)
1578 {
1579 vec_oprnds1->create (1);
1580 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1581 vec_oprnds1->quick_push (vec_oprnd);
1582 }
1583 }
1584 }
1585
1586
1587 /* Function vect_finish_stmt_generation.
1588
1589 Insert a new stmt. */
1590
1591 void
1592 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1593 gimple_stmt_iterator *gsi)
1594 {
1595 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1596 vec_info *vinfo = stmt_info->vinfo;
1597
1598 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1599
1600 if (!gsi_end_p (*gsi)
1601 && gimple_has_mem_ops (vec_stmt))
1602 {
1603 gimple *at_stmt = gsi_stmt (*gsi);
1604 tree vuse = gimple_vuse (at_stmt);
1605 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1606 {
1607 tree vdef = gimple_vdef (at_stmt);
1608 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1609 /* If we have an SSA vuse and insert a store, update virtual
1610 SSA form to avoid triggering the renamer. Do so only
1611 if we can easily see all uses - which is what almost always
1612 happens with the way vectorized stmts are inserted. */
1613 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1614 && ((is_gimple_assign (vec_stmt)
1615 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1616 || (is_gimple_call (vec_stmt)
1617 && !(gimple_call_flags (vec_stmt)
1618 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1619 {
1620 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1621 gimple_set_vdef (vec_stmt, new_vdef);
1622 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1623 }
1624 }
1625 }
1626 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1627
1628 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1629
1630 if (dump_enabled_p ())
1631 {
1632 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1633 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1634 }
1635
1636 gimple_set_location (vec_stmt, gimple_location (stmt));
1637
1638 /* While EH edges will generally prevent vectorization, stmt might
1639 e.g. be in a must-not-throw region. Ensure newly created stmts
1640 that could throw are part of the same region. */
1641 int lp_nr = lookup_stmt_eh_lp (stmt);
1642 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1643 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1644 }
1645
1646 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1647 a function declaration if the target has a vectorized version
1648 of the function, or NULL_TREE if the function cannot be vectorized. */
1649
1650 tree
1651 vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
1652 {
1653 tree fndecl = gimple_call_fndecl (call);
1654
1655 /* We only handle functions that do not read or clobber memory -- i.e.
1656 const or novops ones. */
1657 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1658 return NULL_TREE;
1659
1660 if (!fndecl
1661 || TREE_CODE (fndecl) != FUNCTION_DECL
1662 || !DECL_BUILT_IN (fndecl))
1663 return NULL_TREE;
1664
1665 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1666 vectype_in);
1667 }
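
/* For instance, a call to a const math builtin such as sqrtf may be mapped
   by targetm.vectorize.builtin_vectorized_function to a target-provided
   vector square-root builtin taking VECTYPE_IN and returning VECTYPE_OUT,
   whereas an indirect call (no fndecl) or a call to a non-builtin yields
   NULL_TREE here.  */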
1668
1669
1670 static tree permute_vec_elements (tree, tree, tree, gimple *,
1671 gimple_stmt_iterator *);
1672
1673
1674 /* Function vectorizable_mask_load_store.
1675
1676 Check if STMT performs a conditional load or store that can be vectorized.
1677 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1678 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1679 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1680
1681 static bool
1682 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
1683 gimple **vec_stmt, slp_tree slp_node)
1684 {
1685 tree vec_dest = NULL;
1686 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1687 stmt_vec_info prev_stmt_info;
1688 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1689 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1690 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
1691 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1692 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1693 tree elem_type;
1694 gimple *new_stmt;
1695 tree dummy;
1696 tree dataref_ptr = NULL_TREE;
1697 gimple *ptr_incr;
1698 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1699 int ncopies;
1700 int i, j;
1701 bool inv_p;
1702 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
1703 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
1704 int gather_scale = 1;
1705 enum vect_def_type gather_dt = vect_unknown_def_type;
1706 bool is_store;
1707 tree mask;
1708 gimple *def_stmt;
1709 enum vect_def_type dt;
1710
1711 if (slp_node != NULL)
1712 return false;
1713
1714 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1715 gcc_assert (ncopies >= 1);
1716
1717 is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
1718 mask = gimple_call_arg (stmt, 2);
1719 if (TYPE_PRECISION (TREE_TYPE (mask))
1720 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))))
1721 return false;
1722
1723 /* FORNOW. This restriction should be relaxed. */
1724 if (nested_in_vect_loop && ncopies > 1)
1725 {
1726 if (dump_enabled_p ())
1727 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1728 "multiple types in nested loop.");
1729 return false;
1730 }
1731
1732 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1733 return false;
1734
1735 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1736 return false;
1737
1738 if (!STMT_VINFO_DATA_REF (stmt_info))
1739 return false;
1740
1741 elem_type = TREE_TYPE (vectype);
1742
1743 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1744 return false;
1745
1746 if (STMT_VINFO_STRIDED_P (stmt_info))
1747 return false;
1748
1749 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1750 {
1751 gimple *def_stmt;
1752 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
1753 &gather_off, &gather_scale);
1754 gcc_assert (gather_decl);
1755 if (!vect_is_simple_use (gather_off, loop_vinfo, &def_stmt, &gather_dt,
1756 &gather_off_vectype))
1757 {
1758 if (dump_enabled_p ())
1759 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1760 "gather index use not simple.");
1761 return false;
1762 }
1763
1764 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1765 tree masktype
1766 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1767 if (TREE_CODE (masktype) == INTEGER_TYPE)
1768 {
1769 if (dump_enabled_p ())
1770 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1771 "masked gather with integer mask not supported.");
1772 return false;
1773 }
1774 }
1775 else if (tree_int_cst_compare (nested_in_vect_loop
1776 ? STMT_VINFO_DR_STEP (stmt_info)
1777 : DR_STEP (dr), size_zero_node) <= 0)
1778 return false;
1779 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1780 || !can_vec_mask_load_store_p (TYPE_MODE (vectype), !is_store))
1781 return false;
1782
1783 if (TREE_CODE (mask) != SSA_NAME)
1784 return false;
1785
1786 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt))
1787 return false;
1788
1789 if (is_store)
1790 {
1791 tree rhs = gimple_call_arg (stmt, 3);
1792 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt))
1793 return false;
1794 }
1795
1796 if (!vec_stmt) /* transformation not required. */
1797 {
1798 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1799 if (is_store)
1800 vect_model_store_cost (stmt_info, ncopies, false, dt,
1801 NULL, NULL, NULL);
1802 else
1803 vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
1804 return true;
1805 }
1806
1807 /** Transform. **/
1808
1809 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1810 {
1811 tree vec_oprnd0 = NULL_TREE, op;
1812 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1813 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
1814 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
1815 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
1816 tree mask_perm_mask = NULL_TREE;
1817 edge pe = loop_preheader_edge (loop);
1818 gimple_seq seq;
1819 basic_block new_bb;
1820 enum { NARROW, NONE, WIDEN } modifier;
1821 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
1822
1823 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
1824 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1825 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1826 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1827 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1828 scaletype = TREE_VALUE (arglist);
1829 gcc_checking_assert (types_compatible_p (srctype, rettype)
1830 && types_compatible_p (srctype, masktype));
1831
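/* Bridge a possible mismatch between the number of data elements per
   vector (nunits) and the number of gather offsets per offset vector:
   equal counts need no adjustment (NONE); half as many data elements
   means one offset vector serves two copies, the odd copy taking its
   high half via a permute (WIDEN); twice as many data elements means
   two gather results are combined into one data vector (NARROW).  */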
1832 if (nunits == gather_off_nunits)
1833 modifier = NONE;
1834 else if (nunits == gather_off_nunits / 2)
1835 {
1836 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1837 modifier = WIDEN;
1838
1839 for (i = 0; i < gather_off_nunits; ++i)
1840 sel[i] = i | nunits;
1841
1842 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
1843 }
1844 else if (nunits == gather_off_nunits * 2)
1845 {
1846 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1847 modifier = NARROW;
1848
1849 for (i = 0; i < nunits; ++i)
1850 sel[i] = i < gather_off_nunits
1851 ? i : i + nunits - gather_off_nunits;
1852
1853 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
1854 ncopies *= 2;
1855 for (i = 0; i < nunits; ++i)
1856 sel[i] = i | gather_off_nunits;
1857 mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
1858 }
1859 else
1860 gcc_unreachable ();
1861
1862 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1863
1864 ptr = fold_convert (ptrtype, gather_base);
1865 if (!is_gimple_min_invariant (ptr))
1866 {
1867 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1868 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1869 gcc_assert (!new_bb);
1870 }
1871
1872 scale = build_int_cst (scaletype, gather_scale);
1873
1874 prev_stmt_info = NULL;
1875 for (j = 0; j < ncopies; ++j)
1876 {
1877 if (modifier == WIDEN && (j & 1))
1878 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1879 perm_mask, stmt, gsi);
1880 else if (j == 0)
1881 op = vec_oprnd0
1882 = vect_get_vec_def_for_operand (gather_off, stmt);
1883 else
1884 op = vec_oprnd0
1885 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1886
1887 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1888 {
1889 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1890 == TYPE_VECTOR_SUBPARTS (idxtype));
1891 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
1892 var = make_ssa_name (var);
1893 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1894 new_stmt
1895 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1896 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1897 op = var;
1898 }
1899
1900 if (mask_perm_mask && (j & 1))
1901 mask_op = permute_vec_elements (mask_op, mask_op,
1902 mask_perm_mask, stmt, gsi);
1903 else
1904 {
1905 if (j == 0)
1906 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
1907 else
1908 {
1909 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
1910 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1911 }
1912
1913 mask_op = vec_mask;
1914 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
1915 {
1916 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
1917 == TYPE_VECTOR_SUBPARTS (masktype));
1918 var = vect_get_new_vect_var (masktype, vect_simple_var,
1919 NULL);
1920 var = make_ssa_name (var);
1921 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
1922 new_stmt
1923 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
1924 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1925 mask_op = var;
1926 }
1927 }
1928
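/* Emit the gather as a call to the target builtin; its operands are the
   source vector, base pointer, index vector, mask and scale, with the
   mask vector doubling as the pass-through source operand here.  */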
1929 new_stmt
1930 = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
1931 scale);
1932
1933 if (!useless_type_conversion_p (vectype, rettype))
1934 {
1935 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
1936 == TYPE_VECTOR_SUBPARTS (rettype));
1937 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
1938 op = make_ssa_name (var, new_stmt);
1939 gimple_call_set_lhs (new_stmt, op);
1940 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1941 var = make_ssa_name (vec_dest);
1942 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
1943 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1944 }
1945 else
1946 {
1947 var = make_ssa_name (vec_dest, new_stmt);
1948 gimple_call_set_lhs (new_stmt, var);
1949 }
1950
1951 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1952
1953 if (modifier == NARROW)
1954 {
1955 if ((j & 1) == 0)
1956 {
1957 prev_res = var;
1958 continue;
1959 }
1960 var = permute_vec_elements (prev_res, var,
1961 perm_mask, stmt, gsi);
1962 new_stmt = SSA_NAME_DEF_STMT (var);
1963 }
1964
1965 if (prev_stmt_info == NULL)
1966 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1967 else
1968 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1969 prev_stmt_info = vinfo_for_stmt (new_stmt);
1970 }
1971
1972 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
1973 from the IL. */
1974 tree lhs = gimple_call_lhs (stmt);
1975 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
1976 set_vinfo_for_stmt (new_stmt, stmt_info);
1977 set_vinfo_for_stmt (stmt, NULL);
1978 STMT_VINFO_STMT (stmt_info) = new_stmt;
1979 gsi_replace (gsi, new_stmt, true);
1980 return true;
1981 }
1982 else if (is_store)
1983 {
1984 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
1985 prev_stmt_info = NULL;
1986 for (i = 0; i < ncopies; i++)
1987 {
1988 unsigned align, misalign;
1989
1990 if (i == 0)
1991 {
1992 tree rhs = gimple_call_arg (stmt, 3);
1993 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
1994 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
1995 /* We should have caught mismatched types earlier. */
1996 gcc_assert (useless_type_conversion_p (vectype,
1997 TREE_TYPE (vec_rhs)));
1998 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
1999 NULL_TREE, &dummy, gsi,
2000 &ptr_incr, false, &inv_p);
2001 gcc_assert (!inv_p);
2002 }
2003 else
2004 {
2005 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
2006 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2007 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2008 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2009 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2010 TYPE_SIZE_UNIT (vectype));
2011 }
2012
2013 align = TYPE_ALIGN_UNIT (vectype);
2014 if (aligned_access_p (dr))
2015 misalign = 0;
2016 else if (DR_MISALIGNMENT (dr) == -1)
2017 {
2018 align = TYPE_ALIGN_UNIT (elem_type);
2019 misalign = 0;
2020 }
2021 else
2022 misalign = DR_MISALIGNMENT (dr);
2023 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2024 misalign);
2025 new_stmt
2026 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2027 gimple_call_arg (stmt, 1),
2028 vec_mask, vec_rhs);
2029 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2030 if (i == 0)
2031 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2032 else
2033 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2034 prev_stmt_info = vinfo_for_stmt (new_stmt);
2035 }
2036 }
2037 else
2038 {
2039 tree vec_mask = NULL_TREE;
2040 prev_stmt_info = NULL;
2041 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2042 for (i = 0; i < ncopies; i++)
2043 {
2044 unsigned align, misalign;
2045
2046 if (i == 0)
2047 {
2048 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2049 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2050 NULL_TREE, &dummy, gsi,
2051 &ptr_incr, false, &inv_p);
2052 gcc_assert (!inv_p);
2053 }
2054 else
2055 {
2056 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2057 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2058 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2059 TYPE_SIZE_UNIT (vectype));
2060 }
2061
2062 align = TYPE_ALIGN_UNIT (vectype);
2063 if (aligned_access_p (dr))
2064 misalign = 0;
2065 else if (DR_MISALIGNMENT (dr) == -1)
2066 {
2067 align = TYPE_ALIGN_UNIT (elem_type);
2068 misalign = 0;
2069 }
2070 else
2071 misalign = DR_MISALIGNMENT (dr);
2072 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2073 misalign);
2074 new_stmt
2075 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2076 gimple_call_arg (stmt, 1),
2077 vec_mask);
2078 gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
2079 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2080 if (i == 0)
2081 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2082 else
2083 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2084 prev_stmt_info = vinfo_for_stmt (new_stmt);
2085 }
2086 }
2087
2088 if (!is_store)
2089 {
2090 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2091 from the IL. */
2092 tree lhs = gimple_call_lhs (stmt);
2093 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2094 set_vinfo_for_stmt (new_stmt, stmt_info);
2095 set_vinfo_for_stmt (stmt, NULL);
2096 STMT_VINFO_STMT (stmt_info) = new_stmt;
2097 gsi_replace (gsi, new_stmt, true);
2098 }
2099
2100 return true;
2101 }
2102
2103
2104 /* Function vectorizable_call.
2105
2106 Check if GS performs a function call that can be vectorized.
2107 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2108 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2109 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
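/* Purely as an illustration, a statement such as x = sqrtf (y) inside the
   loop can be vectorized into a call to a target-provided vector builtin
   whenever targetm.vectorize.builtin_vectorized_function reports one for
   the chosen input and output vector types.  */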
2110
2111 static bool
2112 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2113 slp_tree slp_node)
2114 {
2115 gcall *stmt;
2116 tree vec_dest;
2117 tree scalar_dest;
2118 tree op, type;
2119 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2120 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2121 tree vectype_out, vectype_in;
2122 int nunits_in;
2123 int nunits_out;
2124 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2125 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2126 vec_info *vinfo = stmt_info->vinfo;
2127 tree fndecl, new_temp, rhs_type;
2128 gimple *def_stmt;
2129 enum vect_def_type dt[3]
2130 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2131 gimple *new_stmt = NULL;
2132 int ncopies, j;
2133 vec<tree> vargs = vNULL;
2134 enum { NARROW, NONE, WIDEN } modifier;
2135 size_t i, nargs;
2136 tree lhs;
2137
2138 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2139 return false;
2140
2141 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2142 return false;
2143
2144 /* Is GS a vectorizable call? */
2145 stmt = dyn_cast <gcall *> (gs);
2146 if (!stmt)
2147 return false;
2148
2149 if (gimple_call_internal_p (stmt)
2150 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2151 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2152 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2153 slp_node);
2154
2155 if (gimple_call_lhs (stmt) == NULL_TREE
2156 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2157 return false;
2158
2159 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2160
2161 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2162
2163 /* Process function arguments. */
2164 rhs_type = NULL_TREE;
2165 vectype_in = NULL_TREE;
2166 nargs = gimple_call_num_args (stmt);
2167
2168 /* Bail out if the function has more than three arguments; we do not have
2169 interesting builtin functions to vectorize with more than two arguments
2170 except for fma. Having no arguments is not good either. */
2171 if (nargs == 0 || nargs > 3)
2172 return false;
2173
2174 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2175 if (gimple_call_internal_p (stmt)
2176 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2177 {
2178 nargs = 0;
2179 rhs_type = unsigned_type_node;
2180 }
2181
2182 for (i = 0; i < nargs; i++)
2183 {
2184 tree opvectype;
2185
2186 op = gimple_call_arg (stmt, i);
2187
2188 /* We can only handle calls with arguments of the same type. */
2189 if (rhs_type
2190 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2191 {
2192 if (dump_enabled_p ())
2193 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2194 "argument types differ.\n");
2195 return false;
2196 }
2197 if (!rhs_type)
2198 rhs_type = TREE_TYPE (op);
2199
2200 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2201 {
2202 if (dump_enabled_p ())
2203 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2204 "use not simple.\n");
2205 return false;
2206 }
2207
2208 if (!vectype_in)
2209 vectype_in = opvectype;
2210 else if (opvectype
2211 && opvectype != vectype_in)
2212 {
2213 if (dump_enabled_p ())
2214 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2215 "argument vector types differ.\n");
2216 return false;
2217 }
2218 }
2219 /* If all arguments are external or constant defs, use a vector type with
2220 the same size as the output vector type. */
2221 if (!vectype_in)
2222 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2223 if (vec_stmt)
2224 gcc_assert (vectype_in);
2225 if (!vectype_in)
2226 {
2227 if (dump_enabled_p ())
2228 {
2229 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2230 "no vectype for scalar type ");
2231 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2232 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2233 }
2234
2235 return false;
2236 }
2237
2238 /* FORNOW */
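/* Relate the element counts of the input and output vector types: NONE
   when they match, NARROW when each call consumes two input vectors to
   produce one output vector of narrower elements, and WIDEN for the
   opposite direction (no current target implements that case).  */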
2239 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2240 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2241 if (nunits_in == nunits_out / 2)
2242 modifier = NARROW;
2243 else if (nunits_out == nunits_in)
2244 modifier = NONE;
2245 else if (nunits_out == nunits_in / 2)
2246 modifier = WIDEN;
2247 else
2248 return false;
2249
2250 /* For now, we only vectorize functions if a target specific builtin
2251 is available. TODO -- in some cases, it might be profitable to
2252 insert the calls for pieces of the vector, in order to be able
2253 to vectorize other operations in the loop. */
2254 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
2255 if (fndecl == NULL_TREE)
2256 {
2257 if (gimple_call_internal_p (stmt)
2258 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
2259 && !slp_node
2260 && loop_vinfo
2261 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2262 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2263 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2264 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2265 {
2266 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2267 { 0, 1, 2, ... vf - 1 } vector. */
2268 gcc_assert (nargs == 0);
2269 }
2270 else
2271 {
2272 if (dump_enabled_p ())
2273 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2274 "function is not vectorizable.\n");
2275 return false;
2276 }
2277 }
2278
2279 gcc_assert (!gimple_vuse (stmt));
2280
2281 if (slp_node || PURE_SLP_STMT (stmt_info))
2282 ncopies = 1;
2283 else if (modifier == NARROW)
2284 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2285 else
2286 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2287
2288 /* Sanity check: make sure that at least one copy of the vectorized stmt
2289 needs to be generated. */
2290 gcc_assert (ncopies >= 1);
2291
2292 if (!vec_stmt) /* transformation not required. */
2293 {
2294 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2295 if (dump_enabled_p ())
2296 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2297 "\n");
2298 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2299 return true;
2300 }
2301
2302 /** Transform. **/
2303
2304 if (dump_enabled_p ())
2305 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2306
2307 /* Handle def. */
2308 scalar_dest = gimple_call_lhs (stmt);
2309 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2310
2311 prev_stmt_info = NULL;
2312 switch (modifier)
2313 {
2314 case NONE:
2315 for (j = 0; j < ncopies; ++j)
2316 {
2317 /* Build argument list for the vectorized call. */
2318 if (j == 0)
2319 vargs.create (nargs);
2320 else
2321 vargs.truncate (0);
2322
2323 if (slp_node)
2324 {
2325 auto_vec<vec<tree> > vec_defs (nargs);
2326 vec<tree> vec_oprnds0;
2327
2328 for (i = 0; i < nargs; i++)
2329 vargs.quick_push (gimple_call_arg (stmt, i));
2330 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2331 vec_oprnds0 = vec_defs[0];
2332
2333 /* Arguments are ready. Create the new vector stmt. */
2334 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2335 {
2336 size_t k;
2337 for (k = 0; k < nargs; k++)
2338 {
2339 vec<tree> vec_oprndsk = vec_defs[k];
2340 vargs[k] = vec_oprndsk[i];
2341 }
2342 new_stmt = gimple_build_call_vec (fndecl, vargs);
2343 new_temp = make_ssa_name (vec_dest, new_stmt);
2344 gimple_call_set_lhs (new_stmt, new_temp);
2345 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2346 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2347 }
2348
2349 for (i = 0; i < nargs; i++)
2350 {
2351 vec<tree> vec_oprndsi = vec_defs[i];
2352 vec_oprndsi.release ();
2353 }
2354 continue;
2355 }
2356
2357 for (i = 0; i < nargs; i++)
2358 {
2359 op = gimple_call_arg (stmt, i);
2360 if (j == 0)
2361 vec_oprnd0
2362 = vect_get_vec_def_for_operand (op, stmt);
2363 else
2364 {
2365 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2366 vec_oprnd0
2367 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2368 }
2369
2370 vargs.quick_push (vec_oprnd0);
2371 }
2372
2373 if (gimple_call_internal_p (stmt)
2374 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2375 {
2376 tree *v = XALLOCAVEC (tree, nunits_out);
2377 int k;
2378 for (k = 0; k < nunits_out; ++k)
2379 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2380 tree cst = build_vector (vectype_out, v);
2381 tree new_var
2382 = vect_get_new_vect_var (vectype_out, vect_simple_var, "cst_");
2383 gimple *init_stmt = gimple_build_assign (new_var, cst);
2384 new_temp = make_ssa_name (new_var, init_stmt);
2385 gimple_assign_set_lhs (init_stmt, new_temp);
2386 vect_init_vector_1 (stmt, init_stmt, NULL);
2387 new_temp = make_ssa_name (vec_dest);
2388 new_stmt = gimple_build_assign (new_temp,
2389 gimple_assign_lhs (init_stmt));
2390 }
2391 else
2392 {
2393 new_stmt = gimple_build_call_vec (fndecl, vargs);
2394 new_temp = make_ssa_name (vec_dest, new_stmt);
2395 gimple_call_set_lhs (new_stmt, new_temp);
2396 }
2397 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2398
2399 if (j == 0)
2400 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2401 else
2402 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2403
2404 prev_stmt_info = vinfo_for_stmt (new_stmt);
2405 }
2406
2407 break;
2408
2409 case NARROW:
2410 for (j = 0; j < ncopies; ++j)
2411 {
2412 /* Build argument list for the vectorized call. */
2413 if (j == 0)
2414 vargs.create (nargs * 2);
2415 else
2416 vargs.truncate (0);
2417
2418 if (slp_node)
2419 {
2420 auto_vec<vec<tree> > vec_defs (nargs);
2421 vec<tree> vec_oprnds0;
2422
2423 for (i = 0; i < nargs; i++)
2424 vargs.quick_push (gimple_call_arg (stmt, i));
2425 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2426 vec_oprnds0 = vec_defs[0];
2427
2428 /* Arguments are ready. Create the new vector stmt. */
2429 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2430 {
2431 size_t k;
2432 vargs.truncate (0);
2433 for (k = 0; k < nargs; k++)
2434 {
2435 vec<tree> vec_oprndsk = vec_defs[k];
2436 vargs.quick_push (vec_oprndsk[i]);
2437 vargs.quick_push (vec_oprndsk[i + 1]);
2438 }
2439 new_stmt = gimple_build_call_vec (fndecl, vargs);
2440 new_temp = make_ssa_name (vec_dest, new_stmt);
2441 gimple_call_set_lhs (new_stmt, new_temp);
2442 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2443 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2444 }
2445
2446 for (i = 0; i < nargs; i++)
2447 {
2448 vec<tree> vec_oprndsi = vec_defs[i];
2449 vec_oprndsi.release ();
2450 }
2451 continue;
2452 }
2453
2454 for (i = 0; i < nargs; i++)
2455 {
2456 op = gimple_call_arg (stmt, i);
2457 if (j == 0)
2458 {
2459 vec_oprnd0
2460 = vect_get_vec_def_for_operand (op, stmt);
2461 vec_oprnd1
2462 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2463 }
2464 else
2465 {
2466 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2467 vec_oprnd0
2468 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2469 vec_oprnd1
2470 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2471 }
2472
2473 vargs.quick_push (vec_oprnd0);
2474 vargs.quick_push (vec_oprnd1);
2475 }
2476
2477 new_stmt = gimple_build_call_vec (fndecl, vargs);
2478 new_temp = make_ssa_name (vec_dest, new_stmt);
2479 gimple_call_set_lhs (new_stmt, new_temp);
2480 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2481
2482 if (j == 0)
2483 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2484 else
2485 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2486
2487 prev_stmt_info = vinfo_for_stmt (new_stmt);
2488 }
2489
2490 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2491
2492 break;
2493
2494 case WIDEN:
2495 /* No current target implements this case. */
2496 return false;
2497 }
2498
2499 vargs.release ();
2500
2501 /* The call in STMT might prevent it from being removed in dce.
2502 We cannot remove it here, however, because of the way the ssa name
2503 it defines is mapped to the new definition. So just replace the
2504 rhs of the statement with something harmless. */
2505
2506 if (slp_node)
2507 return true;
2508
2509 type = TREE_TYPE (scalar_dest);
2510 if (is_pattern_stmt_p (stmt_info))
2511 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2512 else
2513 lhs = gimple_call_lhs (stmt);
2514
2515 if (gimple_call_internal_p (stmt)
2516 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2517 {
2518 /* Replace uses of the lhs of the GOMP_SIMD_LANE call outside the loop
2519 with vf - 1 rather than 0, i.e. the last iteration of the
2520 vectorized loop. */
2521 imm_use_iterator iter;
2522 use_operand_p use_p;
2523 gimple *use_stmt;
2524 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2525 {
2526 basic_block use_bb = gimple_bb (use_stmt);
2527 if (use_bb
2528 && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
2529 {
2530 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2531 SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
2532 ncopies * nunits_out - 1));
2533 update_stmt (use_stmt);
2534 }
2535 }
2536 }
2537
2538 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2539 set_vinfo_for_stmt (new_stmt, stmt_info);
2540 set_vinfo_for_stmt (stmt, NULL);
2541 STMT_VINFO_STMT (stmt_info) = new_stmt;
2542 gsi_replace (gsi, new_stmt, false);
2543
2544 return true;
2545 }
2546
2547
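/* Per-argument information collected while analyzing a call for
   vectorization via a simd clone: the argument's vector type and def
   kind, its (base) value, the linear step if the argument is linear in
   the loop, the known pointer alignment, and whether the argument is
   linear only within a simd lane.  */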
2548 struct simd_call_arg_info
2549 {
2550 tree vectype;
2551 tree op;
2552 enum vect_def_type dt;
2553 HOST_WIDE_INT linear_step;
2554 unsigned int align;
2555 bool simd_lane_linear;
2556 };
2557
2558 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
2559 is linear within a simd lane (but not within the whole loop), note it in
2560 *ARGINFO. */
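/* An illustrative case (SSA names made up): given
     _5 = GOMP_SIMD_LANE (simduid.0_4);
     _6 = (sizetype) _5;
     _7 = _6 * 8;
     op_8 = base_1 p+ _7;
   OP is linear within the simd lane with base base_1 and step 8.  */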
2561
2562 static void
2563 vect_simd_lane_linear (tree op, struct loop *loop,
2564 struct simd_call_arg_info *arginfo)
2565 {
2566 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
2567
2568 if (!is_gimple_assign (def_stmt)
2569 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
2570 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
2571 return;
2572
2573 tree base = gimple_assign_rhs1 (def_stmt);
2574 HOST_WIDE_INT linear_step = 0;
2575 tree v = gimple_assign_rhs2 (def_stmt);
2576 while (TREE_CODE (v) == SSA_NAME)
2577 {
2578 tree t;
2579 def_stmt = SSA_NAME_DEF_STMT (v);
2580 if (is_gimple_assign (def_stmt))
2581 switch (gimple_assign_rhs_code (def_stmt))
2582 {
2583 case PLUS_EXPR:
2584 t = gimple_assign_rhs2 (def_stmt);
2585 if (linear_step || TREE_CODE (t) != INTEGER_CST)
2586 return;
2587 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
2588 v = gimple_assign_rhs1 (def_stmt);
2589 continue;
2590 case MULT_EXPR:
2591 t = gimple_assign_rhs2 (def_stmt);
2592 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
2593 return;
2594 linear_step = tree_to_shwi (t);
2595 v = gimple_assign_rhs1 (def_stmt);
2596 continue;
2597 CASE_CONVERT:
2598 t = gimple_assign_rhs1 (def_stmt);
2599 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
2600 || (TYPE_PRECISION (TREE_TYPE (v))
2601 < TYPE_PRECISION (TREE_TYPE (t))))
2602 return;
2603 if (!linear_step)
2604 linear_step = 1;
2605 v = t;
2606 continue;
2607 default:
2608 return;
2609 }
2610 else if (is_gimple_call (def_stmt)
2611 && gimple_call_internal_p (def_stmt)
2612 && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
2613 && loop->simduid
2614 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
2615 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
2616 == loop->simduid))
2617 {
2618 if (!linear_step)
2619 linear_step = 1;
2620 arginfo->linear_step = linear_step;
2621 arginfo->op = base;
2622 arginfo->simd_lane_linear = true;
2623 return;
2624 }
2625 }
2626 }
2627
2628 /* Function vectorizable_simd_clone_call.
2629
2630 Check if STMT performs a function call that can be vectorized
2631 by calling a simd clone of the function.
2632 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2633 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2634 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
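/* Illustrative sketch: for a function declared with something like
   "#pragma omp declare simd", the compiler emits simd clones that process
   several lanes per invocation; this routine picks a suitable clone and
   replaces the scalar call with a call to it operating on vectors (or on
   pieces of them, depending on the clone's simdlen).  */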
2635
2636 static bool
2637 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
2638 gimple **vec_stmt, slp_tree slp_node)
2639 {
2640 tree vec_dest;
2641 tree scalar_dest;
2642 tree op, type;
2643 tree vec_oprnd0 = NULL_TREE;
2644 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2645 tree vectype;
2646 unsigned int nunits;
2647 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2648 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2649 vec_info *vinfo = stmt_info->vinfo;
2650 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2651 tree fndecl, new_temp;
2652 gimple *def_stmt;
2653 gimple *new_stmt = NULL;
2654 int ncopies, j;
2655 vec<simd_call_arg_info> arginfo = vNULL;
2656 vec<tree> vargs = vNULL;
2657 size_t i, nargs;
2658 tree lhs, rtype, ratype;
2659 vec<constructor_elt, va_gc> *ret_ctor_elts;
2660
2661 /* Is STMT a vectorizable call? */
2662 if (!is_gimple_call (stmt))
2663 return false;
2664
2665 fndecl = gimple_call_fndecl (stmt);
2666 if (fndecl == NULL_TREE)
2667 return false;
2668
2669 struct cgraph_node *node = cgraph_node::get (fndecl);
2670 if (node == NULL || node->simd_clones == NULL)
2671 return false;
2672
2673 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2674 return false;
2675
2676 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2677 return false;
2678
2679 if (gimple_call_lhs (stmt)
2680 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2681 return false;
2682
2683 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2684
2685 vectype = STMT_VINFO_VECTYPE (stmt_info);
2686
2687 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2688 return false;
2689
2690 /* FORNOW */
2691 if (slp_node || PURE_SLP_STMT (stmt_info))
2692 return false;
2693
2694 /* Process function arguments. */
2695 nargs = gimple_call_num_args (stmt);
2696
2697 /* Bail out if the function has zero arguments. */
2698 if (nargs == 0)
2699 return false;
2700
2701 arginfo.create (nargs);
2702
2703 for (i = 0; i < nargs; i++)
2704 {
2705 simd_call_arg_info thisarginfo;
2706 affine_iv iv;
2707
2708 thisarginfo.linear_step = 0;
2709 thisarginfo.align = 0;
2710 thisarginfo.op = NULL_TREE;
2711 thisarginfo.simd_lane_linear = false;
2712
2713 op = gimple_call_arg (stmt, i);
2714 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
2715 &thisarginfo.vectype)
2716 || thisarginfo.dt == vect_uninitialized_def)
2717 {
2718 if (dump_enabled_p ())
2719 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2720 "use not simple.\n");
2721 arginfo.release ();
2722 return false;
2723 }
2724
2725 if (thisarginfo.dt == vect_constant_def
2726 || thisarginfo.dt == vect_external_def)
2727 gcc_assert (thisarginfo.vectype == NULL_TREE);
2728 else
2729 gcc_assert (thisarginfo.vectype != NULL_TREE);
2730
2731 /* For linear arguments, the analysis phase should have saved
2732 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
2733 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
2734 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
2735 {
2736 gcc_assert (vec_stmt);
2737 thisarginfo.linear_step
2738 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
2739 thisarginfo.op
2740 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
2741 thisarginfo.simd_lane_linear
2742 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
2743 == boolean_true_node);
2744 /* If the loop has been peeled for alignment, adjust the start value accordingly. */
2745 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
2746 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
2747 if (n1 != n2 && !thisarginfo.simd_lane_linear)
2748 {
2749 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
2750 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
2751 tree opt = TREE_TYPE (thisarginfo.op);
2752 bias = fold_convert (TREE_TYPE (step), bias);
2753 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
2754 thisarginfo.op
2755 = fold_build2 (POINTER_TYPE_P (opt)
2756 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
2757 thisarginfo.op, bias);
2758 }
2759 }
2760 else if (!vec_stmt
2761 && thisarginfo.dt != vect_constant_def
2762 && thisarginfo.dt != vect_external_def
2763 && loop_vinfo
2764 && TREE_CODE (op) == SSA_NAME
2765 && simple_iv (loop, loop_containing_stmt (stmt), op,
2766 &iv, false)
2767 && tree_fits_shwi_p (iv.step))
2768 {
2769 thisarginfo.linear_step = tree_to_shwi (iv.step);
2770 thisarginfo.op = iv.base;
2771 }
2772 else if ((thisarginfo.dt == vect_constant_def
2773 || thisarginfo.dt == vect_external_def)
2774 && POINTER_TYPE_P (TREE_TYPE (op)))
2775 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2776 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
2777 linear too. */
2778 if (POINTER_TYPE_P (TREE_TYPE (op))
2779 && !thisarginfo.linear_step
2780 && !vec_stmt
2781 && thisarginfo.dt != vect_constant_def
2782 && thisarginfo.dt != vect_external_def
2783 && loop_vinfo
2784 && !slp_node
2785 && TREE_CODE (op) == SSA_NAME)
2786 vect_simd_lane_linear (op, loop, &thisarginfo);
2787
2788 arginfo.quick_push (thisarginfo);
2789 }
2790
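/* Choose among the available simd clones using a simple badness score:
   clones whose simdlen exceeds the vectorization factor or whose argument
   kinds or linear steps do not match are rejected, simdlens smaller than
   the vectorization factor are penalised, inbranch (masked) clones are
   currently skipped altogether, and the target can veto or penalise a
   clone through targetm.simd_clone.usable.  */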
2791 unsigned int badness = 0;
2792 struct cgraph_node *bestn = NULL;
2793 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
2794 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
2795 else
2796 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2797 n = n->simdclone->next_clone)
2798 {
2799 unsigned int this_badness = 0;
2800 if (n->simdclone->simdlen
2801 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2802 || n->simdclone->nargs != nargs)
2803 continue;
2804 if (n->simdclone->simdlen
2805 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2806 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2807 - exact_log2 (n->simdclone->simdlen)) * 1024;
2808 if (n->simdclone->inbranch)
2809 this_badness += 2048;
2810 int target_badness = targetm.simd_clone.usable (n);
2811 if (target_badness < 0)
2812 continue;
2813 this_badness += target_badness * 512;
2814 /* FORNOW: Have to add code to add the mask argument. */
2815 if (n->simdclone->inbranch)
2816 continue;
2817 for (i = 0; i < nargs; i++)
2818 {
2819 switch (n->simdclone->args[i].arg_type)
2820 {
2821 case SIMD_CLONE_ARG_TYPE_VECTOR:
2822 if (!useless_type_conversion_p
2823 (n->simdclone->args[i].orig_type,
2824 TREE_TYPE (gimple_call_arg (stmt, i))))
2825 i = -1;
2826 else if (arginfo[i].dt == vect_constant_def
2827 || arginfo[i].dt == vect_external_def
2828 || arginfo[i].linear_step)
2829 this_badness += 64;
2830 break;
2831 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2832 if (arginfo[i].dt != vect_constant_def
2833 && arginfo[i].dt != vect_external_def)
2834 i = -1;
2835 break;
2836 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2837 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
2838 if (arginfo[i].dt == vect_constant_def
2839 || arginfo[i].dt == vect_external_def
2840 || (arginfo[i].linear_step
2841 != n->simdclone->args[i].linear_step))
2842 i = -1;
2843 break;
2844 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2845 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
2846 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
2847 /* FORNOW */
2848 i = -1;
2849 break;
2850 case SIMD_CLONE_ARG_TYPE_MASK:
2851 gcc_unreachable ();
2852 }
2853 if (i == (size_t) -1)
2854 break;
2855 if (n->simdclone->args[i].alignment > arginfo[i].align)
2856 {
2857 i = -1;
2858 break;
2859 }
2860 if (arginfo[i].align)
2861 this_badness += (exact_log2 (arginfo[i].align)
2862 - exact_log2 (n->simdclone->args[i].alignment));
2863 }
2864 if (i == (size_t) -1)
2865 continue;
2866 if (bestn == NULL || this_badness < badness)
2867 {
2868 bestn = n;
2869 badness = this_badness;
2870 }
2871 }
2872
2873 if (bestn == NULL)
2874 {
2875 arginfo.release ();
2876 return false;
2877 }
2878
2879 for (i = 0; i < nargs; i++)
2880 if ((arginfo[i].dt == vect_constant_def
2881 || arginfo[i].dt == vect_external_def)
2882 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2883 {
2884 arginfo[i].vectype
2885 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2886 i)));
2887 if (arginfo[i].vectype == NULL
2888 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2889 > bestn->simdclone->simdlen))
2890 {
2891 arginfo.release ();
2892 return false;
2893 }
2894 }
2895
2896 fndecl = bestn->decl;
2897 nunits = bestn->simdclone->simdlen;
2898 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2899
2900 /* If the function isn't const, only allow it in simd loops where the user
2901 has asserted that at least nunits consecutive iterations can be
2902 performed using SIMD instructions. */
2903 if ((loop == NULL || (unsigned) loop->safelen < nunits)
2904 && gimple_vuse (stmt))
2905 {
2906 arginfo.release ();
2907 return false;
2908 }
2909
2910 /* Sanity check: make sure that at least one copy of the vectorized stmt
2911 needs to be generated. */
2912 gcc_assert (ncopies >= 1);
2913
2914 if (!vec_stmt) /* transformation not required. */
2915 {
2916 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
2917 for (i = 0; i < nargs; i++)
2918 if (bestn->simdclone->args[i].arg_type
2919 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
2920 {
2921 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
2922 + 1);
2923 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
2924 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
2925 ? size_type_node : TREE_TYPE (arginfo[i].op);
2926 tree ls = build_int_cst (lst, arginfo[i].linear_step);
2927 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
2928 tree sll = arginfo[i].simd_lane_linear
2929 ? boolean_true_node : boolean_false_node;
2930 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
2931 }
2932 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
2933 if (dump_enabled_p ())
2934 dump_printf_loc (MSG_NOTE, vect_location,
2935 "=== vectorizable_simd_clone_call ===\n");
2936 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2937 arginfo.release ();
2938 return true;
2939 }
2940
2941 /** Transform. **/
2942
2943 if (dump_enabled_p ())
2944 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2945
2946 /* Handle def. */
2947 scalar_dest = gimple_call_lhs (stmt);
2948 vec_dest = NULL_TREE;
2949 rtype = NULL_TREE;
2950 ratype = NULL_TREE;
2951 if (scalar_dest)
2952 {
2953 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2954 rtype = TREE_TYPE (TREE_TYPE (fndecl));
2955 if (TREE_CODE (rtype) == ARRAY_TYPE)
2956 {
2957 ratype = rtype;
2958 rtype = TREE_TYPE (ratype);
2959 }
2960 }
2961
2962 prev_stmt_info = NULL;
2963 for (j = 0; j < ncopies; ++j)
2964 {
2965 /* Build argument list for the vectorized call. */
2966 if (j == 0)
2967 vargs.create (nargs);
2968 else
2969 vargs.truncate (0);
2970
2971 for (i = 0; i < nargs; i++)
2972 {
2973 unsigned int k, l, m, o;
2974 tree atype;
2975 op = gimple_call_arg (stmt, i);
2976 switch (bestn->simdclone->args[i].arg_type)
2977 {
2978 case SIMD_CLONE_ARG_TYPE_VECTOR:
2979 atype = bestn->simdclone->args[i].vector_type;
2980 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
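/* The clone may expect its vector arguments in a different width than
   the loop's vector type: pieces of the operand are extracted with
   BIT_FIELD_REFs when the clone's vectors are narrower, and several
   operands are glued together with a CONSTRUCTOR when they are wider.  */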
2981 for (m = j * o; m < (j + 1) * o; m++)
2982 {
2983 if (TYPE_VECTOR_SUBPARTS (atype)
2984 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
2985 {
2986 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
2987 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2988 / TYPE_VECTOR_SUBPARTS (atype));
2989 gcc_assert ((k & (k - 1)) == 0);
2990 if (m == 0)
2991 vec_oprnd0
2992 = vect_get_vec_def_for_operand (op, stmt);
2993 else
2994 {
2995 vec_oprnd0 = arginfo[i].op;
2996 if ((m & (k - 1)) == 0)
2997 vec_oprnd0
2998 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2999 vec_oprnd0);
3000 }
3001 arginfo[i].op = vec_oprnd0;
3002 vec_oprnd0
3003 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3004 size_int (prec),
3005 bitsize_int ((m & (k - 1)) * prec));
3006 new_stmt
3007 = gimple_build_assign (make_ssa_name (atype),
3008 vec_oprnd0);
3009 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3010 vargs.safe_push (gimple_assign_lhs (new_stmt));
3011 }
3012 else
3013 {
3014 k = (TYPE_VECTOR_SUBPARTS (atype)
3015 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3016 gcc_assert ((k & (k - 1)) == 0);
3017 vec<constructor_elt, va_gc> *ctor_elts;
3018 if (k != 1)
3019 vec_alloc (ctor_elts, k);
3020 else
3021 ctor_elts = NULL;
3022 for (l = 0; l < k; l++)
3023 {
3024 if (m == 0 && l == 0)
3025 vec_oprnd0
3026 = vect_get_vec_def_for_operand (op, stmt);
3027 else
3028 vec_oprnd0
3029 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3030 arginfo[i].op);
3031 arginfo[i].op = vec_oprnd0;
3032 if (k == 1)
3033 break;
3034 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3035 vec_oprnd0);
3036 }
3037 if (k == 1)
3038 vargs.safe_push (vec_oprnd0);
3039 else
3040 {
3041 vec_oprnd0 = build_constructor (atype, ctor_elts);
3042 new_stmt
3043 = gimple_build_assign (make_ssa_name (atype),
3044 vec_oprnd0);
3045 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3046 vargs.safe_push (gimple_assign_lhs (new_stmt));
3047 }
3048 }
3049 }
3050 break;
3051 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3052 vargs.safe_push (op);
3053 break;
3054 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
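/* A linear argument is passed to the clone as a scalar start value that
   the clone advances internally.  For the first copy build a PHI in the
   loop header that steps by linear_step * ncopies * nunits per iteration
   of the vectorized loop; copy number J then passes that value advanced
   by linear_step * J * nunits.  */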
3055 if (j == 0)
3056 {
3057 gimple_seq stmts;
3058 arginfo[i].op
3059 = force_gimple_operand (arginfo[i].op, &stmts, true,
3060 NULL_TREE);
3061 if (stmts != NULL)
3062 {
3063 basic_block new_bb;
3064 edge pe = loop_preheader_edge (loop);
3065 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3066 gcc_assert (!new_bb);
3067 }
3068 if (arginfo[i].simd_lane_linear)
3069 {
3070 vargs.safe_push (arginfo[i].op);
3071 break;
3072 }
3073 tree phi_res = copy_ssa_name (op);
3074 gphi *new_phi = create_phi_node (phi_res, loop->header);
3075 set_vinfo_for_stmt (new_phi,
3076 new_stmt_vec_info (new_phi, loop_vinfo));
3077 add_phi_arg (new_phi, arginfo[i].op,
3078 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3079 enum tree_code code
3080 = POINTER_TYPE_P (TREE_TYPE (op))
3081 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3082 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3083 ? sizetype : TREE_TYPE (op);
3084 widest_int cst
3085 = wi::mul (bestn->simdclone->args[i].linear_step,
3086 ncopies * nunits);
3087 tree tcst = wide_int_to_tree (type, cst);
3088 tree phi_arg = copy_ssa_name (op);
3089 new_stmt
3090 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3091 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3092 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3093 set_vinfo_for_stmt (new_stmt,
3094 new_stmt_vec_info (new_stmt, loop_vinfo));
3095 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3096 UNKNOWN_LOCATION);
3097 arginfo[i].op = phi_res;
3098 vargs.safe_push (phi_res);
3099 }
3100 else
3101 {
3102 enum tree_code code
3103 = POINTER_TYPE_P (TREE_TYPE (op))
3104 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3105 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3106 ? sizetype : TREE_TYPE (op);
3107 widest_int cst
3108 = wi::mul (bestn->simdclone->args[i].linear_step,
3109 j * nunits);
3110 tree tcst = wide_int_to_tree (type, cst);
3111 new_temp = make_ssa_name (TREE_TYPE (op));
3112 new_stmt = gimple_build_assign (new_temp, code,
3113 arginfo[i].op, tcst);
3114 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3115 vargs.safe_push (new_temp);
3116 }
3117 break;
3118 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3119 default:
3120 gcc_unreachable ();
3121 }
3122 }
3123
3124 new_stmt = gimple_build_call_vec (fndecl, vargs);
3125 if (vec_dest)
3126 {
3127 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3128 if (ratype)
3129 new_temp = create_tmp_var (ratype);
3130 else if (TYPE_VECTOR_SUBPARTS (vectype)
3131 == TYPE_VECTOR_SUBPARTS (rtype))
3132 new_temp = make_ssa_name (vec_dest, new_stmt);
3133 else
3134 new_temp = make_ssa_name (rtype, new_stmt);
3135 gimple_call_set_lhs (new_stmt, new_temp);
3136 }
3137 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3138
3139 if (vec_dest)
3140 {
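/* The clone's return value may likewise come back in a different shape
   than the loop's vector type: a wider return is split into several loop
   vectors, several narrower returns are collected into one loop vector
   with a CONSTRUCTOR, and an array return type is read back through
   MEM_REFs and then clobbered.  */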
3141 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3142 {
3143 unsigned int k, l;
3144 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3145 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3146 gcc_assert ((k & (k - 1)) == 0);
3147 for (l = 0; l < k; l++)
3148 {
3149 tree t;
3150 if (ratype)
3151 {
3152 t = build_fold_addr_expr (new_temp);
3153 t = build2 (MEM_REF, vectype, t,
3154 build_int_cst (TREE_TYPE (t),
3155 l * prec / BITS_PER_UNIT));
3156 }
3157 else
3158 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3159 size_int (prec), bitsize_int (l * prec));
3160 new_stmt
3161 = gimple_build_assign (make_ssa_name (vectype), t);
3162 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3163 if (j == 0 && l == 0)
3164 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3165 else
3166 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3167
3168 prev_stmt_info = vinfo_for_stmt (new_stmt);
3169 }
3170
3171 if (ratype)
3172 {
3173 tree clobber = build_constructor (ratype, NULL);
3174 TREE_THIS_VOLATILE (clobber) = 1;
3175 new_stmt = gimple_build_assign (new_temp, clobber);
3176 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3177 }
3178 continue;
3179 }
3180 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3181 {
3182 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3183 / TYPE_VECTOR_SUBPARTS (rtype));
3184 gcc_assert ((k & (k - 1)) == 0);
3185 if ((j & (k - 1)) == 0)
3186 vec_alloc (ret_ctor_elts, k);
3187 if (ratype)
3188 {
3189 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3190 for (m = 0; m < o; m++)
3191 {
3192 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3193 size_int (m), NULL_TREE, NULL_TREE);
3194 new_stmt
3195 = gimple_build_assign (make_ssa_name (rtype), tem);
3196 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3197 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3198 gimple_assign_lhs (new_stmt));
3199 }
3200 tree clobber = build_constructor (ratype, NULL);
3201 TREE_THIS_VOLATILE (clobber) = 1;
3202 new_stmt = gimple_build_assign (new_temp, clobber);
3203 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3204 }
3205 else
3206 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3207 if ((j & (k - 1)) != k - 1)
3208 continue;
3209 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3210 new_stmt
3211 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3212 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3213
3214 if ((unsigned) j == k - 1)
3215 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3216 else
3217 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3218
3219 prev_stmt_info = vinfo_for_stmt (new_stmt);
3220 continue;
3221 }
3222 else if (ratype)
3223 {
3224 tree t = build_fold_addr_expr (new_temp);
3225 t = build2 (MEM_REF, vectype, t,
3226 build_int_cst (TREE_TYPE (t), 0));
3227 new_stmt
3228 = gimple_build_assign (make_ssa_name (vec_dest), t);
3229 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3230 tree clobber = build_constructor (ratype, NULL);
3231 TREE_THIS_VOLATILE (clobber) = 1;
3232 vect_finish_stmt_generation (stmt,
3233 gimple_build_assign (new_temp,
3234 clobber), gsi);
3235 }
3236 }
3237
3238 if (j == 0)
3239 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3240 else
3241 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3242
3243 prev_stmt_info = vinfo_for_stmt (new_stmt);
3244 }
3245
3246 vargs.release ();
3247
3248 /* The call in STMT might prevent it from being removed in dce.
3249 We cannot remove it here, however, because of the way the ssa name
3250 it defines is mapped to the new definition. So just replace the
3251 rhs of the statement with something harmless. */
3252
3253 if (slp_node)
3254 return true;
3255
3256 if (scalar_dest)
3257 {
3258 type = TREE_TYPE (scalar_dest);
3259 if (is_pattern_stmt_p (stmt_info))
3260 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3261 else
3262 lhs = gimple_call_lhs (stmt);
3263 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3264 }
3265 else
3266 new_stmt = gimple_build_nop ();
3267 set_vinfo_for_stmt (new_stmt, stmt_info);
3268 set_vinfo_for_stmt (stmt, NULL);
3269 STMT_VINFO_STMT (stmt_info) = new_stmt;
3270 gsi_replace (gsi, new_stmt, true);
3271 unlink_stmt_vdef (stmt);
3272
3273 return true;
3274 }
3275
3276
3277 /* Function vect_gen_widened_results_half
3278
3279 Create a vector stmt whose code, number of operands, and result
3280 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
3281 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
3282 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3283 needs to be created (DECL is a function-decl of a target-builtin).
3284 STMT is the original scalar stmt that we are vectorizing. */
3285
3286 static gimple *
3287 vect_gen_widened_results_half (enum tree_code code,
3288 tree decl,
3289 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3290 tree vec_dest, gimple_stmt_iterator *gsi,
3291 gimple *stmt)
3292 {
3293 gimple *new_stmt;
3294 tree new_temp;
3295
3296 /* Generate half of the widened result: */
3297 if (code == CALL_EXPR)
3298 {
3299 /* Target specific support */
3300 if (op_type == binary_op)
3301 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3302 else
3303 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3304 new_temp = make_ssa_name (vec_dest, new_stmt);
3305 gimple_call_set_lhs (new_stmt, new_temp);
3306 }
3307 else
3308 {
3309 /* Generic support */
3310 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3311 if (op_type != binary_op)
3312 vec_oprnd1 = NULL;
3313 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3314 new_temp = make_ssa_name (vec_dest, new_stmt);
3315 gimple_assign_set_lhs (new_stmt, new_temp);
3316 }
3317 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3318
3319 return new_stmt;
3320 }
3321
3322
3323 /* Get vectorized definitions for loop-based vectorization. For the first
3324 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3325 scalar operand), and for the rest we get a copy with
3326 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3327 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3328 The vectors are collected into VEC_OPRNDS. */
3329
3330 static void
3331 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3332 vec<tree> *vec_oprnds, int multi_step_cvt)
3333 {
3334 tree vec_oprnd;
3335
3336 /* Get first vector operand. */
3337 /* All the vector operands except the very first one (that is scalar oprnd)
3338 are stmt copies. */
3339 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3340 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3341 else
3342 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3343
3344 vec_oprnds->quick_push (vec_oprnd);
3345
3346 /* Get second vector operand. */
3347 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3348 vec_oprnds->quick_push (vec_oprnd);
3349
3350 *oprnd = vec_oprnd;
3351
3352 /* For conversion in multiple steps, continue to get operands
3353 recursively. */
3354 if (multi_step_cvt)
3355 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3356 }
3357
3358
3359 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3360 For multi-step conversions store the resulting vectors and call the function
3361 recursively. */
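/* For instance (an illustrative sketch), narrowing V4SI operands to V8HI
   pairs the input vectors up two at a time and combines each pair with an
   operation such as VEC_PACK_TRUNC_EXPR.  */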
3362
3363 static void
3364 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3365 int multi_step_cvt, gimple *stmt,
3366 vec<tree> vec_dsts,
3367 gimple_stmt_iterator *gsi,
3368 slp_tree slp_node, enum tree_code code,
3369 stmt_vec_info *prev_stmt_info)
3370 {
3371 unsigned int i;
3372 tree vop0, vop1, new_tmp, vec_dest;
3373 gimple *new_stmt;
3374 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3375
3376 vec_dest = vec_dsts.pop ();
3377
3378 for (i = 0; i < vec_oprnds->length (); i += 2)
3379 {
3380 /* Create demotion operation. */
3381 vop0 = (*vec_oprnds)[i];
3382 vop1 = (*vec_oprnds)[i + 1];
3383 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3384 new_tmp = make_ssa_name (vec_dest, new_stmt);
3385 gimple_assign_set_lhs (new_stmt, new_tmp);
3386 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3387
3388 if (multi_step_cvt)
3389 /* Store the resulting vector for next recursive call. */
3390 (*vec_oprnds)[i/2] = new_tmp;
3391 else
3392 {
3393 /* This is the last step of the conversion sequence. Store the
3394 vectors in SLP_NODE or in vector info of the scalar statement
3395 (or in STMT_VINFO_RELATED_STMT chain). */
3396 if (slp_node)
3397 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3398 else
3399 {
3400 if (!*prev_stmt_info)
3401 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3402 else
3403 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3404
3405 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3406 }
3407 }
3408 }
3409
3410 /* For multi-step demotion operations we first generate demotion operations
3411 from the source type to the intermediate types, and then combine the
3412 results (stored in VEC_OPRNDS) in a demotion operation to the destination
3413 type. */
3414 if (multi_step_cvt)
3415 {
3416 /* At each level of recursion we have half of the operands we had at the
3417 previous level. */
3418 vec_oprnds->truncate ((i+1)/2);
3419 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3420 stmt, vec_dsts, gsi, slp_node,
3421 VEC_PACK_TRUNC_EXPR,
3422 prev_stmt_info);
3423 }
3424
3425 vec_dsts.quick_push (vec_dest);
3426 }
3427
3428
3429 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3430 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3431 the resulting vectors and call the function recursively. */
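/* CODE1/CODE2 (or DECL1/DECL2 when the code is CALL_EXPR) produce the low
   and high halves of the widened result, e.g. the _LO/_HI variants of a
   widening operation or the corresponding target builtins, so each input
   vector (or pair of input vectors for binary operations) yields two
   output vectors.  */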
3432
3433 static void
3434 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3435 vec<tree> *vec_oprnds1,
3436 gimple *stmt, tree vec_dest,
3437 gimple_stmt_iterator *gsi,
3438 enum tree_code code1,
3439 enum tree_code code2, tree decl1,
3440 tree decl2, int op_type)
3441 {
3442 int i;
3443 tree vop0, vop1, new_tmp1, new_tmp2;
3444 gimple *new_stmt1, *new_stmt2;
3445 vec<tree> vec_tmp = vNULL;
3446
3447 vec_tmp.create (vec_oprnds0->length () * 2);
3448 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3449 {
3450 if (op_type == binary_op)
3451 vop1 = (*vec_oprnds1)[i];
3452 else
3453 vop1 = NULL_TREE;
3454
3455 /* Generate the two halves of promotion operation. */
3456 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3457 op_type, vec_dest, gsi, stmt);
3458 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3459 op_type, vec_dest, gsi, stmt);
3460 if (is_gimple_call (new_stmt1))
3461 {
3462 new_tmp1 = gimple_call_lhs (new_stmt1);
3463 new_tmp2 = gimple_call_lhs (new_stmt2);
3464 }
3465 else
3466 {
3467 new_tmp1 = gimple_assign_lhs (new_stmt1);
3468 new_tmp2 = gimple_assign_lhs (new_stmt2);
3469 }
3470
3471 /* Store the results for the next step. */
3472 vec_tmp.quick_push (new_tmp1);
3473 vec_tmp.quick_push (new_tmp2);
3474 }
3475
3476 vec_oprnds0->release ();
3477 *vec_oprnds0 = vec_tmp;
3478 }
3479
3480
3481 /* Check if STMT performs a conversion operation that can be vectorized.
3482 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3483 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3484 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
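/* The conversions handled here include same-width int<->float conversions
   (NONE), widening conversions such as short to int, possibly through
   intermediate types (WIDEN, multi-step), and the corresponding narrowing
   conversions (NARROW).  */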
3485
3486 static bool
3487 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
3488 gimple **vec_stmt, slp_tree slp_node)
3489 {
3490 tree vec_dest;
3491 tree scalar_dest;
3492 tree op0, op1 = NULL_TREE;
3493 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3494 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3495 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3496 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3497 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3498 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3499 tree new_temp;
3500 gimple *def_stmt;
3501 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3502 gimple *new_stmt = NULL;
3503 stmt_vec_info prev_stmt_info;
3504 int nunits_in;
3505 int nunits_out;
3506 tree vectype_out, vectype_in;
3507 int ncopies, i, j;
3508 tree lhs_type, rhs_type;
3509 enum { NARROW, NONE, WIDEN } modifier;
3510 vec<tree> vec_oprnds0 = vNULL;
3511 vec<tree> vec_oprnds1 = vNULL;
3512 tree vop0;
3513 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3514 vec_info *vinfo = stmt_info->vinfo;
3515 int multi_step_cvt = 0;
3516 vec<tree> vec_dsts = vNULL;
3517 vec<tree> interm_types = vNULL;
3518 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3519 int op_type;
3520 machine_mode rhs_mode;
3521 unsigned short fltsz;
3522
3523 /* Is STMT a vectorizable conversion? */
3524
3525 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3526 return false;
3527
3528 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3529 return false;
3530
3531 if (!is_gimple_assign (stmt))
3532 return false;
3533
3534 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3535 return false;
3536
3537 code = gimple_assign_rhs_code (stmt);
3538 if (!CONVERT_EXPR_CODE_P (code)
3539 && code != FIX_TRUNC_EXPR
3540 && code != FLOAT_EXPR
3541 && code != WIDEN_MULT_EXPR
3542 && code != WIDEN_LSHIFT_EXPR)
3543 return false;
3544
3545 op_type = TREE_CODE_LENGTH (code);
3546
3547 /* Check types of lhs and rhs. */
3548 scalar_dest = gimple_assign_lhs (stmt);
3549 lhs_type = TREE_TYPE (scalar_dest);
3550 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3551
3552 op0 = gimple_assign_rhs1 (stmt);
3553 rhs_type = TREE_TYPE (op0);
3554
3555 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3556 && !((INTEGRAL_TYPE_P (lhs_type)
3557 && INTEGRAL_TYPE_P (rhs_type))
3558 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3559 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3560 return false;
3561
3562 if ((INTEGRAL_TYPE_P (lhs_type)
3563 && (TYPE_PRECISION (lhs_type)
3564 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3565 || (INTEGRAL_TYPE_P (rhs_type)
3566 && (TYPE_PRECISION (rhs_type)
3567 != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
3568 {
3569 if (dump_enabled_p ())
3570 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3571 "type conversion to/from bit-precision unsupported."
3572 "\n");
3573 return false;
3574 }
3575
3576 /* Check the operands of the operation. */
3577 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
3578 {
3579 if (dump_enabled_p ())
3580 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3581 "use not simple.\n");
3582 return false;
3583 }
3584 if (op_type == binary_op)
3585 {
3586 bool ok;
3587
3588 op1 = gimple_assign_rhs2 (stmt);
3589 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3590 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3591 OP1. */
3592 if (CONSTANT_CLASS_P (op0))
3593 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
3594 else
3595 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
3596
3597 if (!ok)
3598 {
3599 if (dump_enabled_p ())
3600 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3601 "use not simple.\n");
3602 return false;
3603 }
3604 }
3605
3606 /* If op0 is an external or constant def, use a vector type of
3607 the same size as the output vector type. */
3608 if (!vectype_in)
3609 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3610 if (vec_stmt)
3611 gcc_assert (vectype_in);
3612 if (!vectype_in)
3613 {
3614 if (dump_enabled_p ())
3615 {
3616 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3617 "no vectype for scalar type ");
3618 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3619 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3620 }
3621
3622 return false;
3623 }
3624
3625 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3626 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
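/* Classify the conversion by comparing the number of elements on each
   side: e.g. on a target with 128-bit vectors, an int -> double
   conversion typically has nunits_in == 4 and nunits_out == 2 and is
   handled as a widening (WIDEN) conversion, while double -> int is the
   narrowing (NARROW) case.  */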
3627 if (nunits_in < nunits_out)
3628 modifier = NARROW;
3629 else if (nunits_out == nunits_in)
3630 modifier = NONE;
3631 else
3632 modifier = WIDEN;
3633
3634 /* Multiple types in SLP are handled by creating the appropriate number of
3635 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3636 case of SLP. */
3637 if (slp_node || PURE_SLP_STMT (stmt_info))
3638 ncopies = 1;
3639 else if (modifier == NARROW)
3640 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3641 else
3642 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
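/* E.g. with a vectorization factor of 8 and 4 elements per input vector,
   the widening case needs VF / nunits_in == 2 copies.  */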
3643
3644 /* Sanity check: make sure that at least one copy of the vectorized stmt
3645 needs to be generated. */
3646 gcc_assert (ncopies >= 1);
3647
3648 /* Supportable by target? */
3649 switch (modifier)
3650 {
3651 case NONE:
3652 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3653 return false;
3654 if (supportable_convert_operation (code, vectype_out, vectype_in,
3655 &decl1, &code1))
3656 break;
3657 /* FALLTHRU */
3658 unsupported:
3659 if (dump_enabled_p ())
3660 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3661 "conversion not supported by target.\n");
3662 return false;
3663
3664 case WIDEN:
3665 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3666 &code1, &code2, &multi_step_cvt,
3667 &interm_types))
3668 {
3669 /* Binary widening operations can only be supported directly by the
3670 architecture. */
3671 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3672 break;
3673 }
3674
3675 if (code != FLOAT_EXPR
3676 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3677 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3678 goto unsupported;
3679
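/* Otherwise try to perform the widening FLOAT_EXPR in more than one
   step, e.g. short -> float as a short -> int promotion followed by an
   int -> float conversion, by searching for a suitable intermediate
   integer type below.  */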
3680 rhs_mode = TYPE_MODE (rhs_type);
3681 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3682 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3683 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3684 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3685 {
3686 cvt_type
3687 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3688 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3689 if (cvt_type == NULL_TREE)
3690 goto unsupported;
3691
3692 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3693 {
3694 if (!supportable_convert_operation (code, vectype_out,
3695 cvt_type, &decl1, &codecvt1))
3696 goto unsupported;
3697 }
3698 else if (!supportable_widening_operation (code, stmt, vectype_out,
3699 cvt_type, &codecvt1,
3700 &codecvt2, &multi_step_cvt,
3701 &interm_types))
3702 continue;
3703 else
3704 gcc_assert (multi_step_cvt == 0);
3705
3706 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3707 vectype_in, &code1, &code2,
3708 &multi_step_cvt, &interm_types))
3709 break;
3710 }
3711
3712 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3713 goto unsupported;
3714
3715 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3716 codecvt2 = ERROR_MARK;
3717 else
3718 {
3719 multi_step_cvt++;
3720 interm_types.safe_push (cvt_type);
3721 cvt_type = NULL_TREE;
3722 }
3723 break;
3724
3725 case NARROW:
3726 gcc_assert (op_type == unary_op);
3727 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3728 &code1, &multi_step_cvt,
3729 &interm_types))
3730 break;
3731
3732 if (code != FIX_TRUNC_EXPR
3733 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3734 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3735 goto unsupported;
3736
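/* Otherwise try a two-step narrowing, e.g. float -> short as a
   fix-trunc to a same-width integer type followed by a (possibly
   multi-step) integer demotion down to the narrower type.  */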
3737 rhs_mode = TYPE_MODE (rhs_type);
3738 cvt_type
3739 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3740 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3741 if (cvt_type == NULL_TREE)
3742 goto unsupported;
3743 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3744 &decl1, &codecvt1))
3745 goto unsupported;
3746 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3747 &code1, &multi_step_cvt,
3748 &interm_types))
3749 break;
3750 goto unsupported;
3751
3752 default:
3753 gcc_unreachable ();
3754 }
3755
3756 if (!vec_stmt) /* transformation not required. */
3757 {
3758 if (dump_enabled_p ())
3759 dump_printf_loc (MSG_NOTE, vect_location,
3760 "=== vectorizable_conversion ===\n");
3761 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3762 {
3763 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3764 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3765 }
3766 else if (modifier == NARROW)
3767 {
3768 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3769 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3770 }
3771 else
3772 {
3773 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3774 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3775 }
3776 interm_types.release ();
3777 return true;
3778 }
3779
3780 /** Transform. **/
3781 if (dump_enabled_p ())
3782 dump_printf_loc (MSG_NOTE, vect_location,
3783 "transform conversion. ncopies = %d.\n", ncopies);
3784
3785 if (op_type == binary_op)
3786 {
3787 if (CONSTANT_CLASS_P (op0))
3788 op0 = fold_convert (TREE_TYPE (op1), op0);
3789 else if (CONSTANT_CLASS_P (op1))
3790 op1 = fold_convert (TREE_TYPE (op0), op1);
3791 }
3792
3793 /* In case of multi-step conversion, we first generate conversion operations
3794 to the intermediate types, and then from those types to the final one.
3795 We create vector destinations for the intermediate type (TYPES) received
3796 from supportable_*_operation, and store them in the correct order
3797 for future use in vect_create_vectorized_*_stmts (). */
3798 vec_dsts.create (multi_step_cvt + 1);
3799 vec_dest = vect_create_destination_var (scalar_dest,
3800 (cvt_type && modifier == WIDEN)
3801 ? cvt_type : vectype_out);
3802 vec_dsts.quick_push (vec_dest);
3803
3804 if (multi_step_cvt)
3805 {
3806 for (i = interm_types.length () - 1;
3807 interm_types.iterate (i, &intermediate_type); i--)
3808 {
3809 vec_dest = vect_create_destination_var (scalar_dest,
3810 intermediate_type);
3811 vec_dsts.quick_push (vec_dest);
3812 }
3813 }
3814
3815 if (cvt_type)
3816 vec_dest = vect_create_destination_var (scalar_dest,
3817 modifier == WIDEN
3818 ? vectype_out : cvt_type);
3819
3820 if (!slp_node)
3821 {
3822 if (modifier == WIDEN)
3823 {
3824 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3825 if (op_type == binary_op)
3826 vec_oprnds1.create (1);
3827 }
3828 else if (modifier == NARROW)
3829 vec_oprnds0.create (
3830 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3831 }
3832 else if (code == WIDEN_LSHIFT_EXPR)
3833 vec_oprnds1.create (slp_node->vec_stmts_size);
3834
3835 last_oprnd = op0;
3836 prev_stmt_info = NULL;
3837 switch (modifier)
3838 {
3839 case NONE:
3840 for (j = 0; j < ncopies; j++)
3841 {
3842 if (j == 0)
3843 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3844 -1);
3845 else
3846 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3847
3848 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3849 {
3850 /* Arguments are ready, create the new vector stmt. */
3851 if (code1 == CALL_EXPR)
3852 {
3853 new_stmt = gimple_build_call (decl1, 1, vop0);
3854 new_temp = make_ssa_name (vec_dest, new_stmt);
3855 gimple_call_set_lhs (new_stmt, new_temp);
3856 }
3857 else
3858 {
3859 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3860 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
3861 new_temp = make_ssa_name (vec_dest, new_stmt);
3862 gimple_assign_set_lhs (new_stmt, new_temp);
3863 }
3864
3865 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3866 if (slp_node)
3867 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3868 else
3869 {
3870 if (!prev_stmt_info)
3871 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3872 else
3873 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3874 prev_stmt_info = vinfo_for_stmt (new_stmt);
3875 }
3876 }
3877 }
3878 break;
3879
3880 case WIDEN:
3881 /* In case the vectorization factor (VF) is bigger than the number
3882 of elements that we can fit in a vectype (nunits), we have to
3883 generate more than one vector stmt, i.e., we need to "unroll"
3884 the vector stmt by a factor VF/nunits. */
3885 for (j = 0; j < ncopies; j++)
3886 {
3887 /* Handle uses. */
3888 if (j == 0)
3889 {
3890 if (slp_node)
3891 {
3892 if (code == WIDEN_LSHIFT_EXPR)
3893 {
3894 unsigned int k;
3895
3896 vec_oprnd1 = op1;
3897 /* Store vec_oprnd1 for every vector stmt to be created
3898 for SLP_NODE. We check during the analysis that all
3899 the shift arguments are the same. */
3900 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3901 vec_oprnds1.quick_push (vec_oprnd1);
3902
3903 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3904 slp_node, -1);
3905 }
3906 else
3907 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3908 &vec_oprnds1, slp_node, -1);
3909 }
3910 else
3911 {
3912 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
3913 vec_oprnds0.quick_push (vec_oprnd0);
3914 if (op_type == binary_op)
3915 {
3916 if (code == WIDEN_LSHIFT_EXPR)
3917 vec_oprnd1 = op1;
3918 else
3919 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
3920 vec_oprnds1.quick_push (vec_oprnd1);
3921 }
3922 }
3923 }
3924 else
3925 {
3926 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3927 vec_oprnds0.truncate (0);
3928 vec_oprnds0.quick_push (vec_oprnd0);
3929 if (op_type == binary_op)
3930 {
3931 if (code == WIDEN_LSHIFT_EXPR)
3932 vec_oprnd1 = op1;
3933 else
3934 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
3935 vec_oprnd1);
3936 vec_oprnds1.truncate (0);
3937 vec_oprnds1.quick_push (vec_oprnd1);
3938 }
3939 }
3940
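/* Each call below consumes the vectors in VEC_OPRNDS0 and replaces
   them with twice as many promoted vectors, so after the final
   (i == 0) step VEC_OPRNDS0 holds vectors of the output type, or of
   CVT_TYPE when the extra conversion just below is still needed.  */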
3941 /* Arguments are ready. Create the new vector stmts. */
3942 for (i = multi_step_cvt; i >= 0; i--)
3943 {
3944 tree this_dest = vec_dsts[i];
3945 enum tree_code c1 = code1, c2 = code2;
3946 if (i == 0 && codecvt2 != ERROR_MARK)
3947 {
3948 c1 = codecvt1;
3949 c2 = codecvt2;
3950 }
3951 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
3952 &vec_oprnds1,
3953 stmt, this_dest, gsi,
3954 c1, c2, decl1, decl2,
3955 op_type);
3956 }
3957
3958 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3959 {
3960 if (cvt_type)
3961 {
3962 if (codecvt1 == CALL_EXPR)
3963 {
3964 new_stmt = gimple_build_call (decl1, 1, vop0);
3965 new_temp = make_ssa_name (vec_dest, new_stmt);
3966 gimple_call_set_lhs (new_stmt, new_temp);
3967 }
3968 else
3969 {
3970 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3971 new_temp = make_ssa_name (vec_dest);
3972 new_stmt = gimple_build_assign (new_temp, codecvt1,
3973 vop0);
3974 }
3975
3976 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3977 }
3978 else
3979 new_stmt = SSA_NAME_DEF_STMT (vop0);
3980
3981 if (slp_node)
3982 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3983 else
3984 {
3985 if (!prev_stmt_info)
3986 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3987 else
3988 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3989 prev_stmt_info = vinfo_for_stmt (new_stmt);
3990 }
3991 }
3992 }
3993
3994 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3995 break;
3996
3997 case NARROW:
3998 /* In case the vectorization factor (VF) is bigger than the number
3999 of elements that we can fit in a vectype (nunits), we have to
4000 generate more than one vector stmt, i.e., we need to "unroll"
4001 the vector stmt by a factor VF/nunits. */
4002 for (j = 0; j < ncopies; j++)
4003 {
4004 /* Handle uses. */
4005 if (slp_node)
4006 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4007 slp_node, -1);
4008 else
4009 {
4010 vec_oprnds0.truncate (0);
4011 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4012 vect_pow2 (multi_step_cvt) - 1);
4013 }
4014
4015 /* Arguments are ready. Create the new vector stmts. */
4016 if (cvt_type)
4017 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4018 {
4019 if (codecvt1 == CALL_EXPR)
4020 {
4021 new_stmt = gimple_build_call (decl1, 1, vop0);
4022 new_temp = make_ssa_name (vec_dest, new_stmt);
4023 gimple_call_set_lhs (new_stmt, new_temp);
4024 }
4025 else
4026 {
4027 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4028 new_temp = make_ssa_name (vec_dest);
4029 new_stmt = gimple_build_assign (new_temp, codecvt1,
4030 vop0);
4031 }
4032
4033 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4034 vec_oprnds0[i] = new_temp;
4035 }
4036
4037 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4038 stmt, vec_dsts, gsi,
4039 slp_node, code1,
4040 &prev_stmt_info);
4041 }
4042
4043 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4044 break;
4045 }
4046
4047 vec_oprnds0.release ();
4048 vec_oprnds1.release ();
4049 vec_dsts.release ();
4050 interm_types.release ();
4051
4052 return true;
4053 }
4054
4055
4056 /* Function vectorizable_assignment.
4057
4058 Check if STMT performs an assignment (copy) that can be vectorized.
4059 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4060 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4061 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4062
4063 static bool
4064 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4065 gimple **vec_stmt, slp_tree slp_node)
4066 {
4067 tree vec_dest;
4068 tree scalar_dest;
4069 tree op;
4070 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4071 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4072 tree new_temp;
4073 gimple *def_stmt;
4074 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4075 int ncopies;
4076 int i, j;
4077 vec<tree> vec_oprnds = vNULL;
4078 tree vop;
4079 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4080 vec_info *vinfo = stmt_info->vinfo;
4081 gimple *new_stmt = NULL;
4082 stmt_vec_info prev_stmt_info = NULL;
4083 enum tree_code code;
4084 tree vectype_in;
4085
4086 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4087 return false;
4088
4089 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4090 return false;
4091
4092 /* Is vectorizable assignment? */
4093 if (!is_gimple_assign (stmt))
4094 return false;
4095
4096 scalar_dest = gimple_assign_lhs (stmt);
4097 if (TREE_CODE (scalar_dest) != SSA_NAME)
4098 return false;
4099
4100 code = gimple_assign_rhs_code (stmt);
4101 if (gimple_assign_single_p (stmt)
4102 || code == PAREN_EXPR
4103 || CONVERT_EXPR_CODE_P (code))
4104 op = gimple_assign_rhs1 (stmt);
4105 else
4106 return false;
4107
4108 if (code == VIEW_CONVERT_EXPR)
4109 op = TREE_OPERAND (op, 0);
4110
4111 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4112 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4113
4114 /* Multiple types in SLP are handled by creating the appropriate number of
4115 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4116 case of SLP. */
4117 if (slp_node || PURE_SLP_STMT (stmt_info))
4118 ncopies = 1;
4119 else
4120 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4121
4122 gcc_assert (ncopies >= 1);
4123
4124 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4125 {
4126 if (dump_enabled_p ())
4127 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4128 "use not simple.\n");
4129 return false;
4130 }
4131
4132 /* We can handle NOP_EXPR conversions that do not change the number
4133 of elements or the vector size. */
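/* For example, a V4SI <-> V4SF VIEW_CONVERT_EXPR qualifies, whereas an
   int -> short conversion changes the number of elements per vector and
   is handled by vectorizable_conversion instead.  */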
4134 if ((CONVERT_EXPR_CODE_P (code)
4135 || code == VIEW_CONVERT_EXPR)
4136 && (!vectype_in
4137 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4138 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4139 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4140 return false;
4141
4142 /* We do not handle bit-precision changes. */
4143 if ((CONVERT_EXPR_CODE_P (code)
4144 || code == VIEW_CONVERT_EXPR)
4145 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4146 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4147 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4148 || ((TYPE_PRECISION (TREE_TYPE (op))
4149 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4150 /* But a conversion that does not change the bit-pattern is ok. */
4151 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4152 > TYPE_PRECISION (TREE_TYPE (op)))
4153 && TYPE_UNSIGNED (TREE_TYPE (op))))
4154 {
4155 if (dump_enabled_p ())
4156 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4157 "type conversion to/from bit-precision "
4158 "unsupported.\n");
4159 return false;
4160 }
4161
4162 if (!vec_stmt) /* transformation not required. */
4163 {
4164 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4165 if (dump_enabled_p ())
4166 dump_printf_loc (MSG_NOTE, vect_location,
4167 "=== vectorizable_assignment ===\n");
4168 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4169 return true;
4170 }
4171
4172 /** Transform. **/
4173 if (dump_enabled_p ())
4174 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4175
4176 /* Handle def. */
4177 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4178
4179 /* Handle use. */
4180 for (j = 0; j < ncopies; j++)
4181 {
4182 /* Handle uses. */
4183 if (j == 0)
4184 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4185 else
4186 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4187
4188 /* Arguments are ready. Create the new vector stmt. */
4189 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4190 {
4191 if (CONVERT_EXPR_CODE_P (code)
4192 || code == VIEW_CONVERT_EXPR)
4193 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4194 new_stmt = gimple_build_assign (vec_dest, vop);
4195 new_temp = make_ssa_name (vec_dest, new_stmt);
4196 gimple_assign_set_lhs (new_stmt, new_temp);
4197 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4198 if (slp_node)
4199 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4200 }
4201
4202 if (slp_node)
4203 continue;
4204
4205 if (j == 0)
4206 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4207 else
4208 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4209
4210 prev_stmt_info = vinfo_for_stmt (new_stmt);
4211 }
4212
4213 vec_oprnds.release ();
4214 return true;
4215 }
4216
4217
4218 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4219 either as shift by a scalar or by a vector. */
4220
4221 bool
4222 vect_supportable_shift (enum tree_code code, tree scalar_type)
4223 {
4225 machine_mode vec_mode;
4226 optab optab;
4227 int icode;
4228 tree vectype;
4229
4230 vectype = get_vectype_for_scalar_type (scalar_type);
4231 if (!vectype)
4232 return false;
4233
4234 optab = optab_for_tree_code (code, vectype, optab_scalar);
4235 if (!optab
4236 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4237 {
4238 optab = optab_for_tree_code (code, vectype, optab_vector);
4239 if (!optab
4240 || (optab_handler (optab, TYPE_MODE (vectype))
4241 == CODE_FOR_nothing))
4242 return false;
4243 }
4244
4245 vec_mode = TYPE_MODE (vectype);
4246 icode = (int) optab_handler (optab, vec_mode);
4247 if (icode == CODE_FOR_nothing)
4248 return false;
4249
4250 return true;
4251 }
4252
4253
4254 /* Function vectorizable_shift.
4255
4256 Check if STMT performs a shift operation that can be vectorized.
4257 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4258 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4259 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4260
4261 static bool
4262 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4263 gimple **vec_stmt, slp_tree slp_node)
4264 {
4265 tree vec_dest;
4266 tree scalar_dest;
4267 tree op0, op1 = NULL;
4268 tree vec_oprnd1 = NULL_TREE;
4269 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4270 tree vectype;
4271 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4272 enum tree_code code;
4273 machine_mode vec_mode;
4274 tree new_temp;
4275 optab optab;
4276 int icode;
4277 machine_mode optab_op2_mode;
4278 gimple *def_stmt;
4279 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4280 gimple *new_stmt = NULL;
4281 stmt_vec_info prev_stmt_info;
4282 int nunits_in;
4283 int nunits_out;
4284 tree vectype_out;
4285 tree op1_vectype;
4286 int ncopies;
4287 int j, i;
4288 vec<tree> vec_oprnds0 = vNULL;
4289 vec<tree> vec_oprnds1 = vNULL;
4290 tree vop0, vop1;
4291 unsigned int k;
4292 bool scalar_shift_arg = true;
4293 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4294 vec_info *vinfo = stmt_info->vinfo;
4295 int vf;
4296
4297 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4298 return false;
4299
4300 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4301 return false;
4302
4303 /* Is STMT a vectorizable binary/unary operation? */
4304 if (!is_gimple_assign (stmt))
4305 return false;
4306
4307 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4308 return false;
4309
4310 code = gimple_assign_rhs_code (stmt);
4311
4312 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4313 || code == RROTATE_EXPR))
4314 return false;
4315
4316 scalar_dest = gimple_assign_lhs (stmt);
4317 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4318 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4319 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4320 {
4321 if (dump_enabled_p ())
4322 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4323 "bit-precision shifts not supported.\n");
4324 return false;
4325 }
4326
4327 op0 = gimple_assign_rhs1 (stmt);
4328 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4329 {
4330 if (dump_enabled_p ())
4331 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4332 "use not simple.\n");
4333 return false;
4334 }
4335 /* If op0 is an external or constant def, use a vector type with
4336 the same size as the output vector type. */
4337 if (!vectype)
4338 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4339 if (vec_stmt)
4340 gcc_assert (vectype);
4341 if (!vectype)
4342 {
4343 if (dump_enabled_p ())
4344 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4345 "no vectype for scalar type\n");
4346 return false;
4347 }
4348
4349 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4350 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4351 if (nunits_out != nunits_in)
4352 return false;
4353
4354 op1 = gimple_assign_rhs2 (stmt);
4355 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4356 {
4357 if (dump_enabled_p ())
4358 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4359 "use not simple.\n");
4360 return false;
4361 }
4362
4363 if (loop_vinfo)
4364 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4365 else
4366 vf = 1;
4367
4368 /* Multiple types in SLP are handled by creating the appropriate number of
4369 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4370 case of SLP. */
4371 if (slp_node || PURE_SLP_STMT (stmt_info))
4372 ncopies = 1;
4373 else
4374 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4375
4376 gcc_assert (ncopies >= 1);
4377
4378 /* Determine whether the shift amount is a vector or a scalar. If the
4379 shift/rotate amount is a vector, use the vector/vector shift optabs. */
4380
4381 if ((dt[1] == vect_internal_def
4382 || dt[1] == vect_induction_def)
4383 && !slp_node)
4384 scalar_shift_arg = false;
4385 else if (dt[1] == vect_constant_def
4386 || dt[1] == vect_external_def
4387 || dt[1] == vect_internal_def)
4388 {
4389 /* In SLP, we need to check whether the shift count is the same in
4390 all the stmts; in loops, if it is a constant or invariant, it is
4391 always a scalar shift. */
4392 if (slp_node)
4393 {
4394 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4395 gimple *slpstmt;
4396
4397 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4398 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4399 scalar_shift_arg = false;
4400 }
4401 }
4402 else
4403 {
4404 if (dump_enabled_p ())
4405 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4406 "operand mode requires invariant argument.\n");
4407 return false;
4408 }
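/* For example, in  a[i] = b[i] << c[i]  the shift amount is a
   vect_internal_def and the vector/vector form is used, whereas in
   a[i] = b[i] << 3  the constant amount stays a scalar shift argument
   unless the target only provides the vector/vector patterns (handled
   below).  */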
4409
4410 /* Vector shifted by vector. */
4411 if (!scalar_shift_arg)
4412 {
4413 optab = optab_for_tree_code (code, vectype, optab_vector);
4414 if (dump_enabled_p ())
4415 dump_printf_loc (MSG_NOTE, vect_location,
4416 "vector/vector shift/rotate found.\n");
4417
4418 if (!op1_vectype)
4419 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4420 if (op1_vectype == NULL_TREE
4421 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4422 {
4423 if (dump_enabled_p ())
4424 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4425 "unusable type for last operand in"
4426 " vector/vector shift/rotate.\n");
4427 return false;
4428 }
4429 }
4430 /* See if the machine has a vector shifted by scalar insn and if not
4431 then see if it has a vector shifted by vector insn. */
4432 else
4433 {
4434 optab = optab_for_tree_code (code, vectype, optab_scalar);
4435 if (optab
4436 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4437 {
4438 if (dump_enabled_p ())
4439 dump_printf_loc (MSG_NOTE, vect_location,
4440 "vector/scalar shift/rotate found.\n");
4441 }
4442 else
4443 {
4444 optab = optab_for_tree_code (code, vectype, optab_vector);
4445 if (optab
4446 && (optab_handler (optab, TYPE_MODE (vectype))
4447 != CODE_FOR_nothing))
4448 {
4449 scalar_shift_arg = false;
4450
4451 if (dump_enabled_p ())
4452 dump_printf_loc (MSG_NOTE, vect_location,
4453 "vector/vector shift/rotate found.\n");
4454
4455 /* Unlike the other binary operators, shifts/rotates have
4456 an int rhs instead of one of the same type as the lhs,
4457 so make sure the scalar is the right type if we are
4458 dealing with vectors of long long/long/short/char. */
4459 if (dt[1] == vect_constant_def)
4460 op1 = fold_convert (TREE_TYPE (vectype), op1);
4461 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4462 TREE_TYPE (op1)))
4463 {
4464 if (slp_node
4465 && TYPE_MODE (TREE_TYPE (vectype))
4466 != TYPE_MODE (TREE_TYPE (op1)))
4467 {
4468 if (dump_enabled_p ())
4469 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4470 "unusable type for last operand in"
4471 " vector/vector shift/rotate.\n");
4472 return false;
4473 }
4474 if (vec_stmt && !slp_node)
4475 {
4476 op1 = fold_convert (TREE_TYPE (vectype), op1);
4477 op1 = vect_init_vector (stmt, op1,
4478 TREE_TYPE (vectype), NULL);
4479 }
4480 }
4481 }
4482 }
4483 }
4484
4485 /* Supportable by target? */
4486 if (!optab)
4487 {
4488 if (dump_enabled_p ())
4489 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4490 "no optab.\n");
4491 return false;
4492 }
4493 vec_mode = TYPE_MODE (vectype);
4494 icode = (int) optab_handler (optab, vec_mode);
4495 if (icode == CODE_FOR_nothing)
4496 {
4497 if (dump_enabled_p ())
4498 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4499 "op not supported by target.\n");
4500 /* Check only during analysis. */
4501 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4502 || (vf < vect_min_worthwhile_factor (code)
4503 && !vec_stmt))
4504 return false;
4505 if (dump_enabled_p ())
4506 dump_printf_loc (MSG_NOTE, vect_location,
4507 "proceeding using word mode.\n");
4508 }
4509
4510 /* Worthwhile without SIMD support? Check only during analysis. */
4511 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4512 && vf < vect_min_worthwhile_factor (code)
4513 && !vec_stmt)
4514 {
4515 if (dump_enabled_p ())
4516 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4517 "not worthwhile without SIMD support.\n");
4518 return false;
4519 }
4520
4521 if (!vec_stmt) /* transformation not required. */
4522 {
4523 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4524 if (dump_enabled_p ())
4525 dump_printf_loc (MSG_NOTE, vect_location,
4526 "=== vectorizable_shift ===\n");
4527 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4528 return true;
4529 }
4530
4531 /** Transform. **/
4532
4533 if (dump_enabled_p ())
4534 dump_printf_loc (MSG_NOTE, vect_location,
4535 "transform binary/unary operation.\n");
4536
4537 /* Handle def. */
4538 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4539
4540 prev_stmt_info = NULL;
4541 for (j = 0; j < ncopies; j++)
4542 {
4543 /* Handle uses. */
4544 if (j == 0)
4545 {
4546 if (scalar_shift_arg)
4547 {
4548 /* Vector shl and shr insn patterns can be defined with scalar
4549 operand 2 (shift operand). In this case, use constant or loop
4550 invariant op1 directly, without extending it to vector mode
4551 first. */
4552 optab_op2_mode = insn_data[icode].operand[2].mode;
4553 if (!VECTOR_MODE_P (optab_op2_mode))
4554 {
4555 if (dump_enabled_p ())
4556 dump_printf_loc (MSG_NOTE, vect_location,
4557 "operand 1 using scalar mode.\n");
4558 vec_oprnd1 = op1;
4559 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4560 vec_oprnds1.quick_push (vec_oprnd1);
4561 if (slp_node)
4562 {
4563 /* Store vec_oprnd1 for every vector stmt to be created
4564 for SLP_NODE. We check during the analysis that all
4565 the shift arguments are the same.
4566 TODO: Allow different constants for different vector
4567 stmts generated for an SLP instance. */
4568 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4569 vec_oprnds1.quick_push (vec_oprnd1);
4570 }
4571 }
4572 }
4573
4574 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4575 (a special case for certain kinds of vector shifts); otherwise,
4576 operand 1 should be of a vector type (the usual case). */
4577 if (vec_oprnd1)
4578 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4579 slp_node, -1);
4580 else
4581 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4582 slp_node, -1);
4583 }
4584 else
4585 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4586
4587 /* Arguments are ready. Create the new vector stmt. */
4588 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4589 {
4590 vop1 = vec_oprnds1[i];
4591 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4592 new_temp = make_ssa_name (vec_dest, new_stmt);
4593 gimple_assign_set_lhs (new_stmt, new_temp);
4594 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4595 if (slp_node)
4596 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4597 }
4598
4599 if (slp_node)
4600 continue;
4601
4602 if (j == 0)
4603 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4604 else
4605 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4606 prev_stmt_info = vinfo_for_stmt (new_stmt);
4607 }
4608
4609 vec_oprnds0.release ();
4610 vec_oprnds1.release ();
4611
4612 return true;
4613 }
4614
4615
4616 /* Function vectorizable_operation.
4617
4618 Check if STMT performs a binary, unary or ternary operation that can
4619 be vectorized.
4620 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4621 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4622 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4623
4624 static bool
4625 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
4626 gimple **vec_stmt, slp_tree slp_node)
4627 {
4628 tree vec_dest;
4629 tree scalar_dest;
4630 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4631 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4632 tree vectype;
4633 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4634 enum tree_code code;
4635 machine_mode vec_mode;
4636 tree new_temp;
4637 int op_type;
4638 optab optab;
4639 bool target_support_p;
4640 gimple *def_stmt;
4641 enum vect_def_type dt[3]
4642 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4643 gimple *new_stmt = NULL;
4644 stmt_vec_info prev_stmt_info;
4645 int nunits_in;
4646 int nunits_out;
4647 tree vectype_out;
4648 int ncopies;
4649 int j, i;
4650 vec<tree> vec_oprnds0 = vNULL;
4651 vec<tree> vec_oprnds1 = vNULL;
4652 vec<tree> vec_oprnds2 = vNULL;
4653 tree vop0, vop1, vop2;
4654 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4655 vec_info *vinfo = stmt_info->vinfo;
4656 int vf;
4657
4658 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4659 return false;
4660
4661 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4662 return false;
4663
4664 /* Is STMT a vectorizable binary/unary operation? */
4665 if (!is_gimple_assign (stmt))
4666 return false;
4667
4668 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4669 return false;
4670
4671 code = gimple_assign_rhs_code (stmt);
4672
4673 /* For pointer addition, we should use the normal plus for
4674 the vector addition. */
4675 if (code == POINTER_PLUS_EXPR)
4676 code = PLUS_EXPR;
4677
4678 /* Support only unary, binary and ternary operations. */
4679 op_type = TREE_CODE_LENGTH (code);
4680 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
4681 {
4682 if (dump_enabled_p ())
4683 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4684 "num. args = %d (not unary/binary/ternary op).\n",
4685 op_type);
4686 return false;
4687 }
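/* E.g. NEGATE_EXPR is unary, PLUS_EXPR is binary and FMA_EXPR is
   ternary; codes with any other number of operands are rejected
   above.  */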
4688
4689 scalar_dest = gimple_assign_lhs (stmt);
4690 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4691
4692 /* Most operations cannot handle bit-precision types without extra
4693 truncations. */
4694 if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4695 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4696 /* Exceptions are bitwise binary operations. */
4697 && code != BIT_IOR_EXPR
4698 && code != BIT_XOR_EXPR
4699 && code != BIT_AND_EXPR)
4700 {
4701 if (dump_enabled_p ())
4702 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4703 "bit-precision arithmetic not supported.\n");
4704 return false;
4705 }
4706
4707 op0 = gimple_assign_rhs1 (stmt);
4708 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4709 {
4710 if (dump_enabled_p ())
4711 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4712 "use not simple.\n");
4713 return false;
4714 }
4715 /* If op0 is an external or constant def, use a vector type with
4716 the same size as the output vector type. */
4717 if (!vectype)
4718 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4719 if (vec_stmt)
4720 gcc_assert (vectype);
4721 if (!vectype)
4722 {
4723 if (dump_enabled_p ())
4724 {
4725 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4726 "no vectype for scalar type ");
4727 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4728 TREE_TYPE (op0));
4729 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4730 }
4731
4732 return false;
4733 }
4734
4735 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4736 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4737 if (nunits_out != nunits_in)
4738 return false;
4739
4740 if (op_type == binary_op || op_type == ternary_op)
4741 {
4742 op1 = gimple_assign_rhs2 (stmt);
4743 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
4744 {
4745 if (dump_enabled_p ())
4746 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4747 "use not simple.\n");
4748 return false;
4749 }
4750 }
4751 if (op_type == ternary_op)
4752 {
4753 op2 = gimple_assign_rhs3 (stmt);
4754 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
4755 {
4756 if (dump_enabled_p ())
4757 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4758 "use not simple.\n");
4759 return false;
4760 }
4761 }
4762
4763 if (loop_vinfo)
4764 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4765 else
4766 vf = 1;
4767
4768 /* Multiple types in SLP are handled by creating the appropriate number of
4769 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4770 case of SLP. */
4771 if (slp_node || PURE_SLP_STMT (stmt_info))
4772 ncopies = 1;
4773 else
4774 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4775
4776 gcc_assert (ncopies >= 1);
4777
4778 /* Shifts are handled in vectorizable_shift (). */
4779 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4780 || code == RROTATE_EXPR)
4781 return false;
4782
4783 /* Supportable by target? */
4784
4785 vec_mode = TYPE_MODE (vectype);
4786 if (code == MULT_HIGHPART_EXPR)
4787 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
4788 else
4789 {
4790 optab = optab_for_tree_code (code, vectype, optab_default);
4791 if (!optab)
4792 {
4793 if (dump_enabled_p ())
4794 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4795 "no optab.\n");
4796 return false;
4797 }
4798 target_support_p = (optab_handler (optab, vec_mode)
4799 != CODE_FOR_nothing);
4800 }
4801
4802 if (!target_support_p)
4803 {
4804 if (dump_enabled_p ())
4805 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4806 "op not supported by target.\n");
4807 /* Check only during analysis. */
4808 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4809 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
4810 return false;
4811 if (dump_enabled_p ())
4812 dump_printf_loc (MSG_NOTE, vect_location,
4813 "proceeding using word mode.\n");
4814 }
4815
4816 /* Worthwhile without SIMD support? Check only during analysis. */
4817 if (!VECTOR_MODE_P (vec_mode)
4818 && !vec_stmt
4819 && vf < vect_min_worthwhile_factor (code))
4820 {
4821 if (dump_enabled_p ())
4822 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4823 "not worthwhile without SIMD support.\n");
4824 return false;
4825 }
4826
4827 if (!vec_stmt) /* transformation not required. */
4828 {
4829 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
4830 if (dump_enabled_p ())
4831 dump_printf_loc (MSG_NOTE, vect_location,
4832 "=== vectorizable_operation ===\n");
4833 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4834 return true;
4835 }
4836
4837 /** Transform. **/
4838
4839 if (dump_enabled_p ())
4840 dump_printf_loc (MSG_NOTE, vect_location,
4841 "transform binary/unary operation.\n");
4842
4843 /* Handle def. */
4844 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4845
4846 /* In case the vectorization factor (VF) is bigger than the number
4847 of elements that we can fit in a vectype (nunits), we have to generate
4848 more than one vector stmt, i.e., we need to "unroll" the
4849 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4850 from one copy of the vector stmt to the next, in the field
4851 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4852 stages to find the correct vector defs to be used when vectorizing
4853 stmts that use the defs of the current stmt. The example below
4854 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4855 we need to create 4 vectorized stmts):
4856
4857 before vectorization:
4858 RELATED_STMT VEC_STMT
4859 S1: x = memref - -
4860 S2: z = x + 1 - -
4861
4862 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4863 there):
4864 RELATED_STMT VEC_STMT
4865 VS1_0: vx0 = memref0 VS1_1 -
4866 VS1_1: vx1 = memref1 VS1_2 -
4867 VS1_2: vx2 = memref2 VS1_3 -
4868 VS1_3: vx3 = memref3 - -
4869 S1: x = load - VS1_0
4870 S2: z = x + 1 - -
4871
4872 step2: vectorize stmt S2 (done here):
4873 To vectorize stmt S2 we first need to find the relevant vector
4874 def for the first operand 'x'. This is, as usual, obtained from
4875 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4876 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4877 relevant vector def 'vx0'. Having found 'vx0' we can generate
4878 the vector stmt VS2_0, and as usual, record it in the
4879 STMT_VINFO_VEC_STMT of stmt S2.
4880 When creating the second copy (VS2_1), we obtain the relevant vector
4881 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4882 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4883 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4884 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4885 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4886 chain of stmts and pointers:
4887 RELATED_STMT VEC_STMT
4888 VS1_0: vx0 = memref0 VS1_1 -
4889 VS1_1: vx1 = memref1 VS1_2 -
4890 VS1_2: vx2 = memref2 VS1_3 -
4891 VS1_3: vx3 = memref3 - -
4892 S1: x = load - VS1_0
4893 VS2_0: vz0 = vx0 + v1 VS2_1 -
4894 VS2_1: vz1 = vx1 + v1 VS2_2 -
4895 VS2_2: vz2 = vx2 + v1 VS2_3 -
4896 VS2_3: vz3 = vx3 + v1 - -
4897 S2: z = x + 1 - VS2_0 */
4898
4899 prev_stmt_info = NULL;
4900 for (j = 0; j < ncopies; j++)
4901 {
4902 /* Handle uses. */
4903 if (j == 0)
4904 {
4905 if (op_type == binary_op || op_type == ternary_op)
4906 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4907 slp_node, -1);
4908 else
4909 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4910 slp_node, -1);
4911 if (op_type == ternary_op)
4912 {
4913 vec_oprnds2.create (1);
4914 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
4915 stmt));
4916 }
4917 }
4918 else
4919 {
4920 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4921 if (op_type == ternary_op)
4922 {
4923 tree vec_oprnd = vec_oprnds2.pop ();
4924 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
4925 vec_oprnd));
4926 }
4927 }
4928
4929 /* Arguments are ready. Create the new vector stmt. */
4930 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4931 {
4932 vop1 = ((op_type == binary_op || op_type == ternary_op)
4933 ? vec_oprnds1[i] : NULL_TREE);
4934 vop2 = ((op_type == ternary_op)
4935 ? vec_oprnds2[i] : NULL_TREE);
4936 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
4937 new_temp = make_ssa_name (vec_dest, new_stmt);
4938 gimple_assign_set_lhs (new_stmt, new_temp);
4939 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4940 if (slp_node)
4941 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4942 }
4943
4944 if (slp_node)
4945 continue;
4946
4947 if (j == 0)
4948 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4949 else
4950 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4951 prev_stmt_info = vinfo_for_stmt (new_stmt);
4952 }
4953
4954 vec_oprnds0.release ();
4955 vec_oprnds1.release ();
4956 vec_oprnds2.release ();
4957
4958 return true;
4959 }
4960
4961 /* A helper function to ensure data reference DR's base alignment
4962 for STMT_INFO. */
4963
4964 static void
4965 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
4966 {
4967 if (!dr->aux)
4968 return;
4969
4970 if (DR_VECT_AUX (dr)->base_misaligned)
4971 {
4972 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4973 tree base_decl = DR_VECT_AUX (dr)->base_decl;
4974
4975 if (decl_in_symtab_p (base_decl))
4976 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
4977 else
4978 {
4979 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
4980 DECL_USER_ALIGN (base_decl) = 1;
4981 }
4982 DR_VECT_AUX (dr)->base_misaligned = false;
4983 }
4984 }
4985
4986
4987 /* Given a vector type VECTYPE, returns the VECTOR_CST mask that implements
4988 reversal of the vector elements. If that is impossible to do,
4989 returns NULL. */
4990
4991 static tree
4992 perm_mask_for_reverse (tree vectype)
4993 {
4994 int i, nunits;
4995 unsigned char *sel;
4996
4997 nunits = TYPE_VECTOR_SUBPARTS (vectype);
4998 sel = XALLOCAVEC (unsigned char, nunits);
4999
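/* E.g. for V4SI this builds the selector { 3, 2, 1, 0 }, with which a
   VEC_PERM_EXPR produces the element-reversed vector.  */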
5000 for (i = 0; i < nunits; ++i)
5001 sel[i] = nunits - 1 - i;
5002
5003 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5004 return NULL_TREE;
5005 return vect_gen_perm_mask_checked (vectype, sel);
5006 }
5007
5008 /* Function vectorizable_store.
5009
5010 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5011 can be vectorized.
5012 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5013 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5014 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5015
5016 static bool
5017 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5018 slp_tree slp_node)
5019 {
5020 tree scalar_dest;
5021 tree data_ref;
5022 tree op;
5023 tree vec_oprnd = NULL_TREE;
5024 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5025 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5026 tree elem_type;
5027 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5028 struct loop *loop = NULL;
5029 machine_mode vec_mode;
5030 tree dummy;
5031 enum dr_alignment_support alignment_support_scheme;
5032 gimple *def_stmt;
5033 enum vect_def_type dt;
5034 stmt_vec_info prev_stmt_info = NULL;
5035 tree dataref_ptr = NULL_TREE;
5036 tree dataref_offset = NULL_TREE;
5037 gimple *ptr_incr = NULL;
5038 int ncopies;
5039 int j;
5040 gimple *next_stmt, *first_stmt = NULL;
5041 bool grouped_store = false;
5042 bool store_lanes_p = false;
5043 unsigned int group_size, i;
5044 vec<tree> dr_chain = vNULL;
5045 vec<tree> oprnds = vNULL;
5046 vec<tree> result_chain = vNULL;
5047 bool inv_p;
5048 bool negative = false;
5049 tree offset = NULL_TREE;
5050 vec<tree> vec_oprnds = vNULL;
5051 bool slp = (slp_node != NULL);
5052 unsigned int vec_num;
5053 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5054 vec_info *vinfo = stmt_info->vinfo;
5055 tree aggr_type;
5056 tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
5057 tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
5058 int scatter_scale = 1;
5059 enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
5060 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5061 gimple *new_stmt;
5062
5063 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5064 return false;
5065
5066 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5067 return false;
5068
5069 /* Is vectorizable store? */
5070
5071 if (!is_gimple_assign (stmt))
5072 return false;
5073
5074 scalar_dest = gimple_assign_lhs (stmt);
5075 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5076 && is_pattern_stmt_p (stmt_info))
5077 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5078 if (TREE_CODE (scalar_dest) != ARRAY_REF
5079 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5080 && TREE_CODE (scalar_dest) != INDIRECT_REF
5081 && TREE_CODE (scalar_dest) != COMPONENT_REF
5082 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5083 && TREE_CODE (scalar_dest) != REALPART_EXPR
5084 && TREE_CODE (scalar_dest) != MEM_REF)
5085 return false;
5086
5087 gcc_assert (gimple_assign_single_p (stmt));
5088
5089 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5090 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5091
5092 if (loop_vinfo)
5093 loop = LOOP_VINFO_LOOP (loop_vinfo);
5094
5095 /* Multiple types in SLP are handled by creating the appropriate number of
5096 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5097 case of SLP. */
5098 if (slp || PURE_SLP_STMT (stmt_info))
5099 ncopies = 1;
5100 else
5101 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5102
5103 gcc_assert (ncopies >= 1);
5104
5105 /* FORNOW. This restriction should be relaxed. */
5106 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5107 {
5108 if (dump_enabled_p ())
5109 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5110 "multiple types in nested loop.\n");
5111 return false;
5112 }
5113
5114 op = gimple_assign_rhs1 (stmt);
5115 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
5116 {
5117 if (dump_enabled_p ())
5118 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5119 "use not simple.\n");
5120 return false;
5121 }
5122
5123 elem_type = TREE_TYPE (vectype);
5124 vec_mode = TYPE_MODE (vectype);
5125
5126 /* FORNOW. In some cases we can vectorize even if the data-type is not
5127 supported (e.g. array initialization with 0). */
5128 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5129 return false;
5130
5131 if (!STMT_VINFO_DATA_REF (stmt_info))
5132 return false;
5133
5134 if (!STMT_VINFO_STRIDED_P (stmt_info))
5135 {
5136 negative =
5137 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5138 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5139 size_zero_node) < 0;
5140 if (negative && ncopies > 1)
5141 {
5142 if (dump_enabled_p ())
5143 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5144 "multiple types with negative step.\n");
5145 return false;
5146 }
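/* A negative step arises e.g. when storing through a decreasing index:
   for (i = n - 1; i >= 0; i--) a[i] = x;.  The generated vector store
   then has its elements reversed, so a reverse permutation must be
   available unless the stored value is externally defined or
   constant.  */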
5147 if (negative)
5148 {
5149 gcc_assert (!grouped_store);
5150 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5151 if (alignment_support_scheme != dr_aligned
5152 && alignment_support_scheme != dr_unaligned_supported)
5153 {
5154 if (dump_enabled_p ())
5155 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5156 "negative step but alignment required.\n");
5157 return false;
5158 }
5159 if (dt != vect_constant_def
5160 && dt != vect_external_def
5161 && !perm_mask_for_reverse (vectype))
5162 {
5163 if (dump_enabled_p ())
5164 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5165 "negative step and reversing not supported.\n");
5166 return false;
5167 }
5168 }
5169 }
5170
5171 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5172 {
5173 grouped_store = true;
5174 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5175 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5176 if (!slp
5177 && !PURE_SLP_STMT (stmt_info)
5178 && !STMT_VINFO_STRIDED_P (stmt_info))
5179 {
5180 if (vect_store_lanes_supported (vectype, group_size))
5181 store_lanes_p = true;
5182 else if (!vect_grouped_store_supported (vectype, group_size))
5183 return false;
5184 }
5185
5186 if (STMT_VINFO_STRIDED_P (stmt_info)
5187 && (slp || PURE_SLP_STMT (stmt_info))
5188 && (group_size > nunits
5189 || nunits % group_size != 0))
5190 {
5191 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5192 "unhandled strided group store\n");
5193 return false;
5194 }
5195
5196 if (first_stmt == stmt)
5197 {
5198 /* STMT is the leader of the group. Check the operands of all the
5199 stmts of the group. */
5200 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5201 while (next_stmt)
5202 {
5203 gcc_assert (gimple_assign_single_p (next_stmt));
5204 op = gimple_assign_rhs1 (next_stmt);
5205 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
5206 {
5207 if (dump_enabled_p ())
5208 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5209 "use not simple.\n");
5210 return false;
5211 }
5212 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5213 }
5214 }
5215 }
5216
5217 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5218 {
5219 gimple *def_stmt;
5220 scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
5221 &scatter_off, &scatter_scale);
5222 gcc_assert (scatter_decl);
5223 if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
5224 &scatter_off_vectype))
5225 {
5226 if (dump_enabled_p ())
5227 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5228 "scatter index use not simple.");
5229 return false;
5230 }
5231 }
5232
5233 if (!vec_stmt) /* transformation not required. */
5234 {
5235 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5236 /* The SLP costs are calculated during SLP analysis. */
5237 if (!PURE_SLP_STMT (stmt_info))
5238 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5239 NULL, NULL, NULL);
5240 return true;
5241 }
5242
5243 /** Transform. **/
5244
5245 ensure_base_align (stmt_info, dr);
5246
5247 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5248 {
5249 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5250 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
5251 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5252 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5253 edge pe = loop_preheader_edge (loop);
5254 gimple_seq seq;
5255 basic_block new_bb;
5256 enum { NARROW, NONE, WIDEN } modifier;
5257 int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);
5258
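/* The data vector and the offset vector may have different numbers of
   elements.  If the offsets have twice as many (WIDEN), one offset
   vector feeds two consecutive copies, the second through PERM_MASK;
   if the data vector has twice as many (NARROW), the data is split the
   same way and NCOPIES is doubled.  */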
5259 if (nunits == (unsigned int) scatter_off_nunits)
5260 modifier = NONE;
5261 else if (nunits == (unsigned int) scatter_off_nunits / 2)
5262 {
5263 unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
5264 modifier = WIDEN;
5265
5266 for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
5267 sel[i] = i | nunits;
5268
5269 perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
5270 gcc_assert (perm_mask != NULL_TREE);
5271 }
5272 else if (nunits == (unsigned int) scatter_off_nunits * 2)
5273 {
5274 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
5275 modifier = NARROW;
5276
5277 for (i = 0; i < (unsigned int) nunits; ++i)
5278 sel[i] = i | scatter_off_nunits;
5279
5280 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
5281 gcc_assert (perm_mask != NULL_TREE);
5282 ncopies *= 2;
5283 }
5284 else
5285 gcc_unreachable ();
5286
5287 rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
5288 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5289 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5290 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5291 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5292 scaletype = TREE_VALUE (arglist);
5293
5294 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5295 && TREE_CODE (rettype) == VOID_TYPE);
5296
5297 ptr = fold_convert (ptrtype, scatter_base);
5298 if (!is_gimple_min_invariant (ptr))
5299 {
5300 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5301 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5302 gcc_assert (!new_bb);
5303 }
5304
5305 /* Currently we support only unconditional scatter stores,
5306 so mask should be all ones. */
5307 mask = build_int_cst (masktype, -1);
5308 mask = vect_init_vector (stmt, mask, masktype, NULL);
5309
5310 scale = build_int_cst (scaletype, scatter_scale);
5311
5312 prev_stmt_info = NULL;
5313 for (j = 0; j < ncopies; ++j)
5314 {
5315 if (j == 0)
5316 {
5317 src = vec_oprnd1
5318 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5319 op = vec_oprnd0
5320 = vect_get_vec_def_for_operand (scatter_off, stmt);
5321 }
5322 else if (modifier != NONE && (j & 1))
5323 {
5324 if (modifier == WIDEN)
5325 {
5326 src = vec_oprnd1
5327 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5328 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5329 stmt, gsi);
5330 }
5331 else if (modifier == NARROW)
5332 {
5333 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5334 stmt, gsi);
5335 op = vec_oprnd0
5336 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5337 }
5338 else
5339 gcc_unreachable ();
5340 }
5341 else
5342 {
5343 src = vec_oprnd1
5344 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5345 op = vec_oprnd0
5346 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5347 }
5348
5349 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5350 {
5351 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
5352 == TYPE_VECTOR_SUBPARTS (srctype));
5353 var = vect_get_new_vect_var (srctype, vect_simple_var, NULL);
5354 var = make_ssa_name (var);
5355 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5356 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5357 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5358 src = var;
5359 }
5360
5361 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5362 {
5363 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5364 == TYPE_VECTOR_SUBPARTS (idxtype));
5365 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
5366 var = make_ssa_name (var);
5367 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5368 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5369 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5370 op = var;
5371 }
5372
5373 new_stmt
5374 = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);
5375
5376 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5377
5378 if (prev_stmt_info == NULL)
5379 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5380 else
5381 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5382 prev_stmt_info = vinfo_for_stmt (new_stmt);
5383 }
5384 return true;
5385 }
5386
5387 if (grouped_store)
5388 {
5389 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5390 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5391
5392 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5393
5394 /* FORNOW */
5395 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5396
5397 /* We vectorize all the stmts of the interleaving group when we
5398 reach the last stmt in the group. */
5399 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5400 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5401 && !slp)
5402 {
5403 *vec_stmt = NULL;
5404 return true;
5405 }
5406
5407 if (slp)
5408 {
5409 grouped_store = false;
5410 /* VEC_NUM is the number of vect stmts to be created for this
5411 group. */
5412 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5413 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5414 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5415 op = gimple_assign_rhs1 (first_stmt);
5416 }
5417 else
5418 /* VEC_NUM is the number of vect stmts to be created for this
5419 group. */
5420 vec_num = group_size;
5421 }
5422 else
5423 {
5424 first_stmt = stmt;
5425 first_dr = dr;
5426 group_size = vec_num = 1;
5427 }
5428
5429 if (dump_enabled_p ())
5430 dump_printf_loc (MSG_NOTE, vect_location,
5431 "transform store. ncopies = %d\n", ncopies);
5432
5433 if (STMT_VINFO_STRIDED_P (stmt_info))
5434 {
5435 gimple_stmt_iterator incr_gsi;
5436 bool insert_after;
5437 gimple *incr;
5438 tree offvar;
5439 tree ivstep;
5440 tree running_off;
5441 gimple_seq stmts = NULL;
5442 tree stride_base, stride_step, alias_off;
5443 tree vec_oprnd;
5444 unsigned int g;
5445
5446 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5447
5448 stride_base
5449 = fold_build_pointer_plus
5450 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
5451 size_binop (PLUS_EXPR,
5452 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
5453 			      convert_to_ptrofftype (DR_INIT (first_dr))));
5454 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
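       /* STRIDE_BASE is the address of the first scalar element accessed
	  (base + offset + init); STRIDE_STEP is the byte step of the scalar
	  access per original loop iteration.  */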
5455
5456 /* For a store with loop-invariant (but other than power-of-2)
5457 stride (i.e. not a grouped access) like so:
5458
5459 for (i = 0; i < n; i += stride)
5460 array[i] = ...;
5461
5462 we generate a new induction variable and new stores from
5463 the components of the (vectorized) rhs:
5464
5465 for (j = 0; ; j += VF*stride)
5466 vectemp = ...;
5467 tmp1 = vectemp[0];
5468 array[j] = tmp1;
5469 tmp2 = vectemp[1];
5470 array[j + stride] = tmp2;
5471 ...
5472 */
5473
5474 unsigned nstores = nunits;
5475 tree ltype = elem_type;
5476 if (slp)
5477 {
5478 nstores = nunits / group_size;
5479 if (group_size < nunits)
5480 ltype = build_vector_type (elem_type, group_size);
5481 else
5482 ltype = vectype;
5483 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
5484 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5485 group_size = 1;
5486 }
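       /* Illustrative sizes only: with a 4-lane vectype and an SLP group of
	  size 2, NSTORES becomes 2 and LTYPE a 2-element vector, so each
	  vector def is split into two group-sized pieces stored at
	  consecutive strides.  */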
5487
5488 ivstep = stride_step;
5489 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
5490 build_int_cst (TREE_TYPE (ivstep),
5491 ncopies * nstores));
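       /* Advance the IV by NCOPIES * NSTORES scalar strides per vector loop
	  iteration, one stride for each scalar(-group) store emitted below.  */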
5492
5493 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5494
5495 create_iv (stride_base, ivstep, NULL,
5496 loop, &incr_gsi, insert_after,
5497 &offvar, NULL);
5498 incr = gsi_stmt (incr_gsi);
5499 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
5500
5501 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
5502 if (stmts)
5503 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
5504
5505 prev_stmt_info = NULL;
5506 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
5507 next_stmt = first_stmt;
5508 for (g = 0; g < group_size; g++)
5509 {
5510 running_off = offvar;
5511 if (g)
5512 {
5513 tree size = TYPE_SIZE_UNIT (ltype);
5514 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
5515 size);
5516 tree newoff = copy_ssa_name (running_off, NULL);
5517 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5518 running_off, pos);
5519 vect_finish_stmt_generation (stmt, incr, gsi);
5520 running_off = newoff;
5521 }
5522 for (j = 0; j < ncopies; j++)
5523 {
5524 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
5525 and first_stmt == stmt. */
5526 if (j == 0)
5527 {
5528 if (slp)
5529 {
5530 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
5531 slp_node, -1);
5532 vec_oprnd = vec_oprnds[0];
5533 }
5534 else
5535 {
5536 gcc_assert (gimple_assign_single_p (next_stmt));
5537 op = gimple_assign_rhs1 (next_stmt);
5538 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5539 }
5540 }
5541 else
5542 {
5543 if (slp)
5544 vec_oprnd = vec_oprnds[j];
5545 else
5546 {
5547 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
5548 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
5549 }
5550 }
5551
5552 for (i = 0; i < nstores; i++)
5553 {
5554 tree newref, newoff;
5555 gimple *incr, *assign;
5556 tree size = TYPE_SIZE (ltype);
5557 /* Extract the i'th component. */
5558 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
5559 bitsize_int (i), size);
5560 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
5561 size, pos);
5562
5563 elem = force_gimple_operand_gsi (gsi, elem, true,
5564 NULL_TREE, true,
5565 GSI_SAME_STMT);
5566
5567 newref = build2 (MEM_REF, ltype,
5568 running_off, alias_off);
5569
5570 /* And store it to *running_off. */
5571 assign = gimple_build_assign (newref, elem);
5572 vect_finish_stmt_generation (stmt, assign, gsi);
5573
5574 newoff = copy_ssa_name (running_off, NULL);
5575 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5576 running_off, stride_step);
5577 vect_finish_stmt_generation (stmt, incr, gsi);
5578
5579 running_off = newoff;
5580 if (g == group_size - 1
5581 && !slp)
5582 {
5583 if (j == 0 && i == 0)
5584 STMT_VINFO_VEC_STMT (stmt_info)
5585 = *vec_stmt = assign;
5586 else
5587 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
5588 prev_stmt_info = vinfo_for_stmt (assign);
5589 }
5590 }
5591 }
5592 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5593 }
5594 return true;
5595 }
5596
5597 dr_chain.create (group_size);
5598 oprnds.create (group_size);
5599
5600 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
5601 gcc_assert (alignment_support_scheme);
5602 /* Targets with store-lane instructions must not require explicit
5603 realignment. */
5604 gcc_assert (!store_lanes_p
5605 || alignment_support_scheme == dr_aligned
5606 || alignment_support_scheme == dr_unaligned_supported);
5607
5608 if (negative)
5609 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
5610
5611 if (store_lanes_p)
5612 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5613 else
5614 aggr_type = vectype;
5615
5616 /* In case the vectorization factor (VF) is bigger than the number
5617 of elements that we can fit in a vectype (nunits), we have to generate
5618      more than one vector stmt, i.e. we need to "unroll" the
5619 vector stmt by a factor VF/nunits. For more details see documentation in
5620      vect_get_vec_def_for_stmt_copy.  */
5621
5622 /* In case of interleaving (non-unit grouped access):
5623
5624 S1: &base + 2 = x2
5625 S2: &base = x0
5626 S3: &base + 1 = x1
5627 S4: &base + 3 = x3
5628
5629      We create vectorized stores starting from the base address (the access of
5630      the first stmt in the chain, S2 in the above example) when the last store stmt
5631 of the chain (S4) is reached:
5632
5633 VS1: &base = vx2
5634 VS2: &base + vec_size*1 = vx0
5635 VS3: &base + vec_size*2 = vx1
5636 VS4: &base + vec_size*3 = vx3
5637
5638 Then permutation statements are generated:
5639
5640 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5641 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5642 ...
5643
5644 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5645 (the order of the data-refs in the output of vect_permute_store_chain
5646 corresponds to the order of scalar stmts in the interleaving chain - see
5647 the documentation of vect_permute_store_chain()).
5648
5649 In case of both multiple types and interleaving, above vector stores and
5650 permutation stmts are created for every copy. The result vector stmts are
5651 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5652 STMT_VINFO_RELATED_STMT for the next copies.
5653 */
5654
5655 prev_stmt_info = NULL;
5656 for (j = 0; j < ncopies; j++)
5657 {
5658
5659 if (j == 0)
5660 {
5661 if (slp)
5662 {
5663 /* Get vectorized arguments for SLP_NODE. */
5664 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5665 NULL, slp_node, -1);
5666
5667 vec_oprnd = vec_oprnds[0];
5668 }
5669 else
5670 {
5671 /* For interleaved stores we collect vectorized defs for all the
5672 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5673 used as an input to vect_permute_store_chain(), and OPRNDS as
5674 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5675
5676 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5677 OPRNDS are of size 1. */
5678 next_stmt = first_stmt;
5679 for (i = 0; i < group_size; i++)
5680 {
5681 /* Since gaps are not supported for interleaved stores,
5682 GROUP_SIZE is the exact number of stmts in the chain.
5683 Therefore, NEXT_STMT can't be NULL_TREE. In case that
5684 there is no interleaving, GROUP_SIZE is 1, and only one
5685 iteration of the loop will be executed. */
5686 gcc_assert (next_stmt
5687 && gimple_assign_single_p (next_stmt));
5688 op = gimple_assign_rhs1 (next_stmt);
5689
5690 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5691 dr_chain.quick_push (vec_oprnd);
5692 oprnds.quick_push (vec_oprnd);
5693 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5694 }
5695 }
5696
5697 	  /* We should have caught mismatched types earlier.  */
5698 gcc_assert (useless_type_conversion_p (vectype,
5699 TREE_TYPE (vec_oprnd)));
5700 bool simd_lane_access_p
5701 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5702 if (simd_lane_access_p
5703 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5704 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5705 && integer_zerop (DR_OFFSET (first_dr))
5706 && integer_zerop (DR_INIT (first_dr))
5707 && alias_sets_conflict_p (get_alias_set (aggr_type),
5708 get_alias_set (DR_REF (first_dr))))
5709 {
5710 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5711 dataref_offset = build_int_cst (reference_alias_ptr_type
5712 (DR_REF (first_dr)), 0);
5713 inv_p = false;
5714 }
5715 else
5716 dataref_ptr
5717 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5718 simd_lane_access_p ? loop : NULL,
5719 offset, &dummy, gsi, &ptr_incr,
5720 simd_lane_access_p, &inv_p);
5721 gcc_assert (bb_vinfo || !inv_p);
5722 }
5723 else
5724 {
5725 /* For interleaved stores we created vectorized defs for all the
5726 defs stored in OPRNDS in the previous iteration (previous copy).
5727 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5728 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5729 next copy.
5730 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5731 OPRNDS are of size 1. */
5732 for (i = 0; i < group_size; i++)
5733 {
5734 op = oprnds[i];
5735 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
5736 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5737 dr_chain[i] = vec_oprnd;
5738 oprnds[i] = vec_oprnd;
5739 }
5740 if (dataref_offset)
5741 dataref_offset
5742 = int_const_binop (PLUS_EXPR, dataref_offset,
5743 TYPE_SIZE_UNIT (aggr_type));
5744 else
5745 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5746 TYPE_SIZE_UNIT (aggr_type));
5747 }
5748
5749 if (store_lanes_p)
5750 {
5751 tree vec_array;
5752
5753 /* Combine all the vectors into an array. */
5754 vec_array = create_vector_array (vectype, vec_num);
5755 for (i = 0; i < vec_num; i++)
5756 {
5757 vec_oprnd = dr_chain[i];
5758 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
5759 }
5760
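	  /* A single store-lanes call replaces the explicit
	     vect_permute_store_chain permutation used on the generic path;
	     the target instruction interleaves the lanes itself.  */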
5761 /* Emit:
5762 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5763 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5764 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5765 gimple_call_set_lhs (new_stmt, data_ref);
5766 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5767 }
5768 else
5769 {
5770 new_stmt = NULL;
5771 if (grouped_store)
5772 {
5773 if (j == 0)
5774 result_chain.create (group_size);
5775 /* Permute. */
5776 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5777 &result_chain);
5778 }
5779
5780 next_stmt = first_stmt;
5781 for (i = 0; i < vec_num; i++)
5782 {
5783 unsigned align, misalign;
5784
5785 if (i > 0)
5786 /* Bump the vector pointer. */
5787 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5788 stmt, NULL_TREE);
5789
5790 if (slp)
5791 vec_oprnd = vec_oprnds[i];
5792 else if (grouped_store)
5793 /* For grouped stores vectorized defs are interleaved in
5794 vect_permute_store_chain(). */
5795 vec_oprnd = result_chain[i];
5796
5797 data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
5798 dataref_ptr,
5799 dataref_offset
5800 ? dataref_offset
5801 : build_int_cst (reference_alias_ptr_type
5802 (DR_REF (first_dr)), 0));
5803 align = TYPE_ALIGN_UNIT (vectype);
5804 if (aligned_access_p (first_dr))
5805 misalign = 0;
5806 else if (DR_MISALIGNMENT (first_dr) == -1)
5807 {
5808 if (DR_VECT_AUX (first_dr)->base_element_aligned)
5809 align = TYPE_ALIGN_UNIT (elem_type);
5810 else
5811 align = get_object_alignment (DR_REF (first_dr))
5812 / BITS_PER_UNIT;
5813 misalign = 0;
5814 TREE_TYPE (data_ref)
5815 = build_aligned_type (TREE_TYPE (data_ref),
5816 align * BITS_PER_UNIT);
5817 }
5818 else
5819 {
5820 TREE_TYPE (data_ref)
5821 = build_aligned_type (TREE_TYPE (data_ref),
5822 TYPE_ALIGN (elem_type));
5823 misalign = DR_MISALIGNMENT (first_dr);
5824 }
5825 if (dataref_offset == NULL_TREE
5826 && TREE_CODE (dataref_ptr) == SSA_NAME)
5827 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
5828 misalign);
5829
5830 if (negative
5831 && dt != vect_constant_def
5832 && dt != vect_external_def)
5833 {
5834 tree perm_mask = perm_mask_for_reverse (vectype);
5835 tree perm_dest
5836 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
5837 vectype);
5838 tree new_temp = make_ssa_name (perm_dest);
5839
5840 /* Generate the permute statement. */
5841 gimple *perm_stmt
5842 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
5843 vec_oprnd, perm_mask);
5844 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5845
5846 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
5847 vec_oprnd = new_temp;
5848 }
5849
5850 /* Arguments are ready. Create the new vector stmt. */
5851 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5852 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5853
5854 if (slp)
5855 continue;
5856
5857 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5858 if (!next_stmt)
5859 break;
5860 }
5861 }
5862 if (!slp)
5863 {
5864 if (j == 0)
5865 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5866 else
5867 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5868 prev_stmt_info = vinfo_for_stmt (new_stmt);
5869 }
5870 }
5871
5872 dr_chain.release ();
5873 oprnds.release ();
5874 result_chain.release ();
5875 vec_oprnds.release ();
5876
5877 return true;
5878 }
5879
5880 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
5881 VECTOR_CST mask. No checks are made that the target platform supports the
5882 mask, so callers may wish to test can_vec_perm_p separately, or use
5883 vect_gen_perm_mask_checked. */
5884
5885 tree
5886 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
5887 {
5888 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
5889 int i, nunits;
5890
5891 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5892
5893 mask_elt_type = lang_hooks.types.type_for_mode
5894 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
5895 mask_type = get_vectype_for_scalar_type (mask_elt_type);
5896
5897 mask_elts = XALLOCAVEC (tree, nunits);
5898 for (i = nunits - 1; i >= 0; i--)
5899 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5900 mask_vec = build_vector (mask_type, mask_elts);
5901
5902 return mask_vec;
5903 }
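/* Usage sketch (illustrative): to reverse a 4-lane vector one would pass
   SEL = { 3, 2, 1, 0 }, yielding the VECTOR_CST mask { 3, 2, 1, 0 } of the
   corresponding integer vector type; compare perm_mask_for_reverse.  */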
5904
5905 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
5906 i.e. that the target supports the pattern _for arbitrary input vectors_. */
5907
5908 tree
5909 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
5910 {
5911 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
5912 return vect_gen_perm_mask_any (vectype, sel);
5913 }
5914
5915 /* Given vector variables X and Y that were generated for the scalar
5916 STMT, generate instructions to permute the vector elements of X and Y
5917 using permutation mask MASK_VEC, insert them at *GSI and return the
5918 permuted vector variable. */
5919
5920 static tree
5921 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
5922 gimple_stmt_iterator *gsi)
5923 {
5924 tree vectype = TREE_TYPE (x);
5925 tree perm_dest, data_ref;
5926 gimple *perm_stmt;
5927
5928 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
5929 data_ref = make_ssa_name (perm_dest);
5930
5931 /* Generate the permute statement. */
5932 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
5933 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5934
5935 return data_ref;
5936 }
5937
5938 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5939    inserting them on the loop's preheader edge.  Returns true if we
5940    were successful in doing so (and thus STMT can then be moved),
5941 otherwise returns false. */
5942
5943 static bool
5944 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
5945 {
5946 ssa_op_iter i;
5947 tree op;
5948 bool any = false;
5949
5950 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5951 {
5952 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
5953 if (!gimple_nop_p (def_stmt)
5954 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5955 {
5956 /* Make sure we don't need to recurse. While we could do
5957 	     so in simple cases, when there are more complex use webs
5958 we don't have an easy way to preserve stmt order to fulfil
5959 dependencies within them. */
5960 tree op2;
5961 ssa_op_iter i2;
5962 if (gimple_code (def_stmt) == GIMPLE_PHI)
5963 return false;
5964 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
5965 {
5966 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
5967 if (!gimple_nop_p (def_stmt2)
5968 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
5969 return false;
5970 }
5971 any = true;
5972 }
5973 }
5974
5975 if (!any)
5976 return true;
5977
5978 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5979 {
5980 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
5981 if (!gimple_nop_p (def_stmt)
5982 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5983 {
5984 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5985 gsi_remove (&gsi, false);
5986 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
5987 }
5988 }
5989
5990 return true;
5991 }
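/* For instance (illustrative): if STMT is  _2 = *p_3  and p_3 is defined
   inside LOOP by  p_3 = &a + 16  whose operands are all defined outside the
   loop, the definition of p_3 is moved to the preheader so that the caller
   can then hoist STMT itself.  */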
5992
5993 /* vectorizable_load.
5994
5995    Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
5996 can be vectorized.
5997 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5998    stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5999 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6000
6001 static bool
6002 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6003 slp_tree slp_node, slp_instance slp_node_instance)
6004 {
6005 tree scalar_dest;
6006 tree vec_dest = NULL;
6007 tree data_ref = NULL;
6008 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6009 stmt_vec_info prev_stmt_info;
6010 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6011 struct loop *loop = NULL;
6012 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
6013 bool nested_in_vect_loop = false;
6014 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6015 tree elem_type;
6016 tree new_temp;
6017 machine_mode mode;
6018 gimple *new_stmt = NULL;
6019 tree dummy;
6020 enum dr_alignment_support alignment_support_scheme;
6021 tree dataref_ptr = NULL_TREE;
6022 tree dataref_offset = NULL_TREE;
6023 gimple *ptr_incr = NULL;
6024 int ncopies;
6025 int i, j, group_size = -1, group_gap_adj;
6026 tree msq = NULL_TREE, lsq;
6027 tree offset = NULL_TREE;
6028 tree byte_offset = NULL_TREE;
6029 tree realignment_token = NULL_TREE;
6030 gphi *phi = NULL;
6031 vec<tree> dr_chain = vNULL;
6032 bool grouped_load = false;
6033 bool load_lanes_p = false;
6034 gimple *first_stmt;
6035 bool inv_p;
6036 bool negative = false;
6037 bool compute_in_loop = false;
6038 struct loop *at_loop;
6039 int vec_num;
6040 bool slp = (slp_node != NULL);
6041 bool slp_perm = false;
6042 enum tree_code code;
6043 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6044 int vf;
6045 tree aggr_type;
6046 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
6047 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
6048 int gather_scale = 1;
6049 enum vect_def_type gather_dt = vect_unknown_def_type;
6050 vec_info *vinfo = stmt_info->vinfo;
6051
6052 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6053 return false;
6054
6055 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
6056 return false;
6057
6058 /* Is vectorizable load? */
6059 if (!is_gimple_assign (stmt))
6060 return false;
6061
6062 scalar_dest = gimple_assign_lhs (stmt);
6063 if (TREE_CODE (scalar_dest) != SSA_NAME)
6064 return false;
6065
6066 code = gimple_assign_rhs_code (stmt);
6067 if (code != ARRAY_REF
6068 && code != BIT_FIELD_REF
6069 && code != INDIRECT_REF
6070 && code != COMPONENT_REF
6071 && code != IMAGPART_EXPR
6072 && code != REALPART_EXPR
6073 && code != MEM_REF
6074 && TREE_CODE_CLASS (code) != tcc_declaration)
6075 return false;
6076
6077 if (!STMT_VINFO_DATA_REF (stmt_info))
6078 return false;
6079
6080 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6081 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6082
6083 if (loop_vinfo)
6084 {
6085 loop = LOOP_VINFO_LOOP (loop_vinfo);
6086 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6087 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6088 }
6089 else
6090 vf = 1;
6091
6092 /* Multiple types in SLP are handled by creating the appropriate number of
6093 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6094 case of SLP. */
6095 if (slp || PURE_SLP_STMT (stmt_info))
6096 ncopies = 1;
6097 else
6098 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6099
6100 gcc_assert (ncopies >= 1);
6101
6102 /* FORNOW. This restriction should be relaxed. */
6103 if (nested_in_vect_loop && ncopies > 1)
6104 {
6105 if (dump_enabled_p ())
6106 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6107 "multiple types in nested loop.\n");
6108 return false;
6109 }
6110
6111 /* Invalidate assumptions made by dependence analysis when vectorization
6112 on the unrolled body effectively re-orders stmts. */
6113 if (ncopies > 1
6114 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6115 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6116 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6117 {
6118 if (dump_enabled_p ())
6119 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6120 "cannot perform implicit CSE when unrolling "
6121 "with negative dependence distance\n");
6122 return false;
6123 }
6124
6125 elem_type = TREE_TYPE (vectype);
6126 mode = TYPE_MODE (vectype);
6127
6128   /* FORNOW. In some cases we can vectorize even if the data type is not
6129      supported (e.g. data copies).  */
6130 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6131 {
6132 if (dump_enabled_p ())
6133 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6134 "Aligned load, but unsupported type.\n");
6135 return false;
6136 }
6137
6138 /* Check if the load is a part of an interleaving chain. */
6139 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6140 {
6141 grouped_load = true;
6142 /* FORNOW */
6143 gcc_assert (!nested_in_vect_loop && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6144
6145 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6146
6147 /* If this is single-element interleaving with an element distance
6148 	 that leaves unused vector loads around, punt; we at least create
6149 very sub-optimal code in that case (and blow up memory,
6150 see PR65518). */
6151 if (first_stmt == stmt
6152 && !GROUP_NEXT_ELEMENT (stmt_info)
6153 && GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
6154 {
6155 if (dump_enabled_p ())
6156 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6157 "single-element interleaving not supported "
6158 "for not adjacent vector loads\n");
6159 return false;
6160 }
6161
6162 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6163 slp_perm = true;
6164
6165 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6166 if (!slp
6167 && !PURE_SLP_STMT (stmt_info)
6168 && !STMT_VINFO_STRIDED_P (stmt_info))
6169 {
6170 if (vect_load_lanes_supported (vectype, group_size))
6171 load_lanes_p = true;
6172 else if (!vect_grouped_load_supported (vectype, group_size))
6173 return false;
6174 }
6175
6176 /* Invalidate assumptions made by dependence analysis when vectorization
6177 on the unrolled body effectively re-orders stmts. */
6178 if (!PURE_SLP_STMT (stmt_info)
6179 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6180 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6181 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6182 {
6183 if (dump_enabled_p ())
6184 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6185 "cannot perform implicit CSE when performing "
6186 "group loads with negative dependence distance\n");
6187 return false;
6188 }
6189
6190       /* Similarly, when the stmt is a load that is both part of an SLP
6191 	 instance and a loop-vectorized stmt via the same-dr mechanism,
6192 	 we have to give up.  */
6193 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6194 && (STMT_SLP_TYPE (stmt_info)
6195 != STMT_SLP_TYPE (vinfo_for_stmt
6196 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6197 {
6198 if (dump_enabled_p ())
6199 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6200 "conflicting SLP types for CSEd load\n");
6201 return false;
6202 }
6203 }
6204
6205
6206 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6207 {
6208 gimple *def_stmt;
6209 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
6210 &gather_off, &gather_scale);
6211 gcc_assert (gather_decl);
6212 if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
6213 &gather_off_vectype))
6214 {
6215 if (dump_enabled_p ())
6216 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6217 "gather index use not simple.\n");
6218 return false;
6219 }
6220 }
6221 else if (STMT_VINFO_STRIDED_P (stmt_info))
6222 {
6223 if ((grouped_load
6224 && (slp || PURE_SLP_STMT (stmt_info)))
6225 && (group_size > nunits
6226 || nunits % group_size != 0))
6227 {
6228 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6229 "unhandled strided group load\n");
6230 return false;
6231 }
6232 }
6233 else
6234 {
6235 negative = tree_int_cst_compare (nested_in_vect_loop
6236 ? STMT_VINFO_DR_STEP (stmt_info)
6237 : DR_STEP (dr),
6238 size_zero_node) < 0;
6239 if (negative && ncopies > 1)
6240 {
6241 if (dump_enabled_p ())
6242 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6243 "multiple types with negative step.\n");
6244 return false;
6245 }
6246
6247 if (negative)
6248 {
6249 if (grouped_load)
6250 {
6251 if (dump_enabled_p ())
6252 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6253 "negative step for group load not supported"
6254 "\n");
6255 return false;
6256 }
6257 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
6258 if (alignment_support_scheme != dr_aligned
6259 && alignment_support_scheme != dr_unaligned_supported)
6260 {
6261 if (dump_enabled_p ())
6262 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6263 "negative step but alignment required.\n");
6264 return false;
6265 }
6266 if (!perm_mask_for_reverse (vectype))
6267 {
6268 if (dump_enabled_p ())
6269 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6270 "negative step and reversing not supported."
6271 "\n");
6272 return false;
6273 }
6274 }
6275 }
6276
6277 if (!vec_stmt) /* transformation not required. */
6278 {
6279 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6280 /* The SLP costs are calculated during SLP analysis. */
6281 if (!PURE_SLP_STMT (stmt_info))
6282 vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
6283 NULL, NULL, NULL);
6284 return true;
6285 }
6286
6287 if (dump_enabled_p ())
6288 dump_printf_loc (MSG_NOTE, vect_location,
6289 "transform load. ncopies = %d\n", ncopies);
6290
6291 /** Transform. **/
6292
6293 ensure_base_align (stmt_info, dr);
6294
6295 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6296 {
6297 tree vec_oprnd0 = NULL_TREE, op;
6298 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
6299 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6300 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6301 edge pe = loop_preheader_edge (loop);
6302 gimple_seq seq;
6303 basic_block new_bb;
6304 enum { NARROW, NONE, WIDEN } modifier;
6305 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
6306
6307 if (nunits == gather_off_nunits)
6308 modifier = NONE;
6309 else if (nunits == gather_off_nunits / 2)
6310 {
6311 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
6312 modifier = WIDEN;
6313
6314 for (i = 0; i < gather_off_nunits; ++i)
6315 sel[i] = i | nunits;
6316
6317 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
6318 }
6319 else if (nunits == gather_off_nunits * 2)
6320 {
6321 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
6322 modifier = NARROW;
6323
6324 for (i = 0; i < nunits; ++i)
6325 sel[i] = i < gather_off_nunits
6326 ? i : i + nunits - gather_off_nunits;
6327
6328 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
6329 ncopies *= 2;
6330 }
6331 else
6332 gcc_unreachable ();
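      /* For example, with an 8-lane data vector and a 4-lane offset vector
	 the NARROW selector built above is { 0, 1, 2, 3, 8, 9, 10, 11 }, which
	 concatenates the low halves of two successive gather results into one
	 full data vector (see the NARROW handling further down).  The sizes
	 are purely illustrative.  */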
6333
6334 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
6335 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6336 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6337 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6338 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6339 scaletype = TREE_VALUE (arglist);
6340 gcc_checking_assert (types_compatible_p (srctype, rettype));
6341
6342 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6343
6344 ptr = fold_convert (ptrtype, gather_base);
6345 if (!is_gimple_min_invariant (ptr))
6346 {
6347 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6348 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6349 gcc_assert (!new_bb);
6350 }
6351
6352 /* Currently we support only unconditional gather loads,
6353 so mask should be all ones. */
6354 if (TREE_CODE (masktype) == INTEGER_TYPE)
6355 mask = build_int_cst (masktype, -1);
6356 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6357 {
6358 mask = build_int_cst (TREE_TYPE (masktype), -1);
6359 mask = build_vector_from_val (masktype, mask);
6360 mask = vect_init_vector (stmt, mask, masktype, NULL);
6361 }
6362 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6363 {
6364 REAL_VALUE_TYPE r;
6365 long tmp[6];
6366 for (j = 0; j < 6; ++j)
6367 tmp[j] = -1;
6368 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6369 mask = build_real (TREE_TYPE (masktype), r);
6370 mask = build_vector_from_val (masktype, mask);
6371 mask = vect_init_vector (stmt, mask, masktype, NULL);
6372 }
6373 else
6374 gcc_unreachable ();
6375
6376 scale = build_int_cst (scaletype, gather_scale);
6377
6378 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6379 merge = build_int_cst (TREE_TYPE (rettype), 0);
6380 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6381 {
6382 REAL_VALUE_TYPE r;
6383 long tmp[6];
6384 for (j = 0; j < 6; ++j)
6385 tmp[j] = 0;
6386 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6387 merge = build_real (TREE_TYPE (rettype), r);
6388 }
6389 else
6390 gcc_unreachable ();
6391 merge = build_vector_from_val (rettype, merge);
6392 merge = vect_init_vector (stmt, merge, rettype, NULL);
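      /* MERGE provides the values of masked-off lanes; since the mask built
	 above is all ones it is never observed, but the builtin still takes
	 the operand, so an all-zero vector is used.  */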
6393
6394 prev_stmt_info = NULL;
6395 for (j = 0; j < ncopies; ++j)
6396 {
6397 if (modifier == WIDEN && (j & 1))
6398 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6399 perm_mask, stmt, gsi);
6400 else if (j == 0)
6401 op = vec_oprnd0
6402 = vect_get_vec_def_for_operand (gather_off, stmt);
6403 else
6404 op = vec_oprnd0
6405 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
6406
6407 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6408 {
6409 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6410 == TYPE_VECTOR_SUBPARTS (idxtype));
6411 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
6412 var = make_ssa_name (var);
6413 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6414 new_stmt
6415 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6416 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6417 op = var;
6418 }
6419
6420 new_stmt
6421 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
6422
6423 if (!useless_type_conversion_p (vectype, rettype))
6424 {
6425 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6426 == TYPE_VECTOR_SUBPARTS (rettype));
6427 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
6428 op = make_ssa_name (var, new_stmt);
6429 gimple_call_set_lhs (new_stmt, op);
6430 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6431 var = make_ssa_name (vec_dest);
6432 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6433 new_stmt
6434 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6435 }
6436 else
6437 {
6438 var = make_ssa_name (vec_dest, new_stmt);
6439 gimple_call_set_lhs (new_stmt, var);
6440 }
6441
6442 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6443
6444 if (modifier == NARROW)
6445 {
6446 if ((j & 1) == 0)
6447 {
6448 prev_res = var;
6449 continue;
6450 }
6451 var = permute_vec_elements (prev_res, var,
6452 perm_mask, stmt, gsi);
6453 new_stmt = SSA_NAME_DEF_STMT (var);
6454 }
6455
6456 if (prev_stmt_info == NULL)
6457 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6458 else
6459 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6460 prev_stmt_info = vinfo_for_stmt (new_stmt);
6461 }
6462 return true;
6463 }
6464 else if (STMT_VINFO_STRIDED_P (stmt_info))
6465 {
6466 gimple_stmt_iterator incr_gsi;
6467 bool insert_after;
6468 gimple *incr;
6469 tree offvar;
6470 tree ivstep;
6471 tree running_off;
6472 vec<constructor_elt, va_gc> *v = NULL;
6473 gimple_seq stmts = NULL;
6474 tree stride_base, stride_step, alias_off;
6475
6476 gcc_assert (!nested_in_vect_loop);
6477
6478 if (slp && grouped_load)
6479 first_dr = STMT_VINFO_DATA_REF
6480 (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
6481 else
6482 first_dr = dr;
6483
6484 stride_base
6485 = fold_build_pointer_plus
6486 (DR_BASE_ADDRESS (first_dr),
6487 size_binop (PLUS_EXPR,
6488 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6489 convert_to_ptrofftype (DR_INIT (first_dr))));
6490 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
6491
6492 /* For a load with loop-invariant (but other than power-of-2)
6493 stride (i.e. not a grouped access) like so:
6494
6495 for (i = 0; i < n; i += stride)
6496 ... = array[i];
6497
6498 we generate a new induction variable and new accesses to
6499 form a new vector (or vectors, depending on ncopies):
6500
6501 for (j = 0; ; j += VF*stride)
6502 tmp1 = array[j];
6503 tmp2 = array[j + stride];
6504 ...
6505 vectemp = {tmp1, tmp2, ...}
6506 */
6507
6508 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
6509 build_int_cst (TREE_TYPE (stride_step), vf));
6510
6511 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6512
6513 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
6514 loop, &incr_gsi, insert_after,
6515 &offvar, NULL);
6516 incr = gsi_stmt (incr_gsi);
6517 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6518
6519 stride_step = force_gimple_operand (unshare_expr (stride_step),
6520 &stmts, true, NULL_TREE);
6521 if (stmts)
6522 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6523
6524 prev_stmt_info = NULL;
6525 running_off = offvar;
6526 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
6527 int nloads = nunits;
6528 tree ltype = TREE_TYPE (vectype);
6529 auto_vec<tree> dr_chain;
6530 if (slp)
6531 {
6532 nloads = nunits / group_size;
6533 if (group_size < nunits)
6534 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
6535 else
6536 ltype = vectype;
6537 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
6538 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6539 if (slp_perm)
6540 dr_chain.create (ncopies);
6541 }
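      /* Illustrative sizes only: with a 4-lane vectype and an SLP group of
	 size 2, NLOADS is 2 and LTYPE a 2-element vector, so each vector def
	 is assembled below from two group-sized loads via a CONSTRUCTOR.  */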
6542 for (j = 0; j < ncopies; j++)
6543 {
6544 tree vec_inv;
6545
6546 if (nloads > 1)
6547 {
6548 vec_alloc (v, nloads);
6549 for (i = 0; i < nloads; i++)
6550 {
6551 tree newref, newoff;
6552 gimple *incr;
6553 newref = build2 (MEM_REF, ltype, running_off, alias_off);
6554
6555 newref = force_gimple_operand_gsi (gsi, newref, true,
6556 NULL_TREE, true,
6557 GSI_SAME_STMT);
6558 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6559 newoff = copy_ssa_name (running_off);
6560 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6561 running_off, stride_step);
6562 vect_finish_stmt_generation (stmt, incr, gsi);
6563
6564 running_off = newoff;
6565 }
6566
6567 vec_inv = build_constructor (vectype, v);
6568 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6569 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6570 }
6571 else
6572 {
6573 new_stmt = gimple_build_assign (make_ssa_name (ltype),
6574 build2 (MEM_REF, ltype,
6575 running_off, alias_off));
6576 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6577
6578 tree newoff = copy_ssa_name (running_off);
6579 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6580 running_off, stride_step);
6581 vect_finish_stmt_generation (stmt, incr, gsi);
6582
6583 running_off = newoff;
6584 }
6585
6586 if (slp)
6587 {
6588 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6589 if (slp_perm)
6590 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
6591 }
6592 else
6593 {
6594 if (j == 0)
6595 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6596 else
6597 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6598 prev_stmt_info = vinfo_for_stmt (new_stmt);
6599 }
6600 }
6601 if (slp_perm)
6602 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6603 slp_node_instance, false);
6604 return true;
6605 }
6606
6607 if (grouped_load)
6608 {
6609 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6610 if (slp
6611 && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
6612 && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
6613 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6614
6615 /* Check if the chain of loads is already vectorized. */
6616 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6617 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6618 ??? But we can only do so if there is exactly one
6619 as we have no way to get at the rest. Leave the CSE
6620 opportunity alone.
6621 ??? With the group load eventually participating
6622 in multiple different permutations (having multiple
6623 slp nodes which refer to the same group) the CSE
6624 is even wrong code. See PR56270. */
6625 && !slp)
6626 {
6627 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6628 return true;
6629 }
6630 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6631 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6632 group_gap_adj = 0;
6633
6634 /* VEC_NUM is the number of vect stmts to be created for this group. */
6635 if (slp)
6636 {
6637 grouped_load = false;
6638 /* For SLP permutation support we need to load the whole group,
6639 not only the number of vector stmts the permutation result
6640 fits in. */
6641 if (slp_perm)
6642 vec_num = (group_size * vf + nunits - 1) / nunits;
6643 else
6644 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6645 group_gap_adj = vf * group_size - nunits * vec_num;
6646 }
6647 else
6648 vec_num = group_size;
6649 }
6650 else
6651 {
6652 first_stmt = stmt;
6653 first_dr = dr;
6654 group_size = vec_num = 1;
6655 group_gap_adj = 0;
6656 }
6657
6658 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6659 gcc_assert (alignment_support_scheme);
6660 /* Targets with load-lane instructions must not require explicit
6661 realignment. */
6662 gcc_assert (!load_lanes_p
6663 || alignment_support_scheme == dr_aligned
6664 || alignment_support_scheme == dr_unaligned_supported);
6665
6666 /* In case the vectorization factor (VF) is bigger than the number
6667 of elements that we can fit in a vectype (nunits), we have to generate
6668      more than one vector stmt, i.e. we need to "unroll" the
6669 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6670 from one copy of the vector stmt to the next, in the field
6671 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6672 stages to find the correct vector defs to be used when vectorizing
6673 stmts that use the defs of the current stmt. The example below
6674 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6675 need to create 4 vectorized stmts):
6676
6677 before vectorization:
6678 RELATED_STMT VEC_STMT
6679 S1: x = memref - -
6680 S2: z = x + 1 - -
6681
6682 step 1: vectorize stmt S1:
6683 We first create the vector stmt VS1_0, and, as usual, record a
6684 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6685 Next, we create the vector stmt VS1_1, and record a pointer to
6686 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6687 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6688 stmts and pointers:
6689 RELATED_STMT VEC_STMT
6690 VS1_0: vx0 = memref0 VS1_1 -
6691 VS1_1: vx1 = memref1 VS1_2 -
6692 VS1_2: vx2 = memref2 VS1_3 -
6693 VS1_3: vx3 = memref3 - -
6694 S1: x = load - VS1_0
6695 S2: z = x + 1 - -
6696
6697 See in documentation in vect_get_vec_def_for_stmt_copy for how the
6698 information we recorded in RELATED_STMT field is used to vectorize
6699 stmt S2. */
6700
6701 /* In case of interleaving (non-unit grouped access):
6702
6703 S1: x2 = &base + 2
6704 S2: x0 = &base
6705 S3: x1 = &base + 1
6706 S4: x3 = &base + 3
6707
6708 Vectorized loads are created in the order of memory accesses
6709 starting from the access of the first stmt of the chain:
6710
6711 VS1: vx0 = &base
6712 VS2: vx1 = &base + vec_size*1
6713    VS3: vx2 = &base + vec_size*2
6714    VS4: vx3 = &base + vec_size*3
6715
6716 Then permutation statements are generated:
6717
6718 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6719 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6720 ...
6721
6722 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6723 (the order of the data-refs in the output of vect_permute_load_chain
6724 corresponds to the order of scalar stmts in the interleaving chain - see
6725 the documentation of vect_permute_load_chain()).
6726 The generation of permutation stmts and recording them in
6727 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6728
6729 In case of both multiple types and interleaving, the vector loads and
6730 permutation stmts above are created for every copy. The result vector
6731 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6732 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
6733
6734 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6735 on a target that supports unaligned accesses (dr_unaligned_supported)
6736 we generate the following code:
6737 p = initial_addr;
6738 indx = 0;
6739 loop {
6740 p = p + indx * vectype_size;
6741 vec_dest = *(p);
6742 indx = indx + 1;
6743 }
6744
6745 Otherwise, the data reference is potentially unaligned on a target that
6746 does not support unaligned accesses (dr_explicit_realign_optimized) -
6747 then generate the following code, in which the data in each iteration is
6748 obtained by two vector loads, one from the previous iteration, and one
6749 from the current iteration:
6750 p1 = initial_addr;
6751 msq_init = *(floor(p1))
6752 p2 = initial_addr + VS - 1;
6753 realignment_token = call target_builtin;
6754 indx = 0;
6755 loop {
6756 p2 = p2 + indx * vectype_size
6757 lsq = *(floor(p2))
6758 vec_dest = realign_load (msq, lsq, realignment_token)
6759 indx = indx + 1;
6760 msq = lsq;
6761 } */
6762
6763 /* If the misalignment remains the same throughout the execution of the
6764 loop, we can create the init_addr and permutation mask at the loop
6765 preheader. Otherwise, it needs to be created inside the loop.
6766 This can only occur when vectorizing memory accesses in the inner-loop
6767 nested within an outer-loop that is being vectorized. */
6768
6769 if (nested_in_vect_loop
6770 && (TREE_INT_CST_LOW (DR_STEP (dr))
6771 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6772 {
6773 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6774 compute_in_loop = true;
6775 }
6776
6777 if ((alignment_support_scheme == dr_explicit_realign_optimized
6778 || alignment_support_scheme == dr_explicit_realign)
6779 && !compute_in_loop)
6780 {
6781 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6782 alignment_support_scheme, NULL_TREE,
6783 &at_loop);
6784 if (alignment_support_scheme == dr_explicit_realign_optimized)
6785 {
6786 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
6787 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6788 size_one_node);
6789 }
6790 }
6791 else
6792 at_loop = loop;
6793
6794 if (negative)
6795 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6796
6797 if (load_lanes_p)
6798 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6799 else
6800 aggr_type = vectype;
6801
6802 prev_stmt_info = NULL;
6803 for (j = 0; j < ncopies; j++)
6804 {
6805 /* 1. Create the vector or array pointer update chain. */
6806 if (j == 0)
6807 {
6808 bool simd_lane_access_p
6809 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6810 if (simd_lane_access_p
6811 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6812 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6813 && integer_zerop (DR_OFFSET (first_dr))
6814 && integer_zerop (DR_INIT (first_dr))
6815 && alias_sets_conflict_p (get_alias_set (aggr_type),
6816 get_alias_set (DR_REF (first_dr)))
6817 && (alignment_support_scheme == dr_aligned
6818 || alignment_support_scheme == dr_unaligned_supported))
6819 {
6820 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6821 dataref_offset = build_int_cst (reference_alias_ptr_type
6822 (DR_REF (first_dr)), 0);
6823 inv_p = false;
6824 }
6825 else
6826 dataref_ptr
6827 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6828 offset, &dummy, gsi, &ptr_incr,
6829 simd_lane_access_p, &inv_p,
6830 byte_offset);
6831 }
6832 else if (dataref_offset)
6833 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6834 TYPE_SIZE_UNIT (aggr_type));
6835 else
6836 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6837 TYPE_SIZE_UNIT (aggr_type));
6838
6839 if (grouped_load || slp_perm)
6840 dr_chain.create (vec_num);
6841
6842 if (load_lanes_p)
6843 {
6844 tree vec_array;
6845
6846 vec_array = create_vector_array (vectype, vec_num);
6847
6848 /* Emit:
6849 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6850 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6851 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6852 gimple_call_set_lhs (new_stmt, vec_array);
6853 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6854
6855 /* Extract each vector into an SSA_NAME. */
6856 for (i = 0; i < vec_num; i++)
6857 {
6858 new_temp = read_vector_array (stmt, gsi, scalar_dest,
6859 vec_array, i);
6860 dr_chain.quick_push (new_temp);
6861 }
6862
6863 /* Record the mapping between SSA_NAMEs and statements. */
6864 vect_record_grouped_load_vectors (stmt, dr_chain);
6865 }
6866 else
6867 {
6868 for (i = 0; i < vec_num; i++)
6869 {
6870 if (i > 0)
6871 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6872 stmt, NULL_TREE);
6873
6874 /* 2. Create the vector-load in the loop. */
6875 switch (alignment_support_scheme)
6876 {
6877 case dr_aligned:
6878 case dr_unaligned_supported:
6879 {
6880 unsigned int align, misalign;
6881
6882 data_ref
6883 = fold_build2 (MEM_REF, vectype, dataref_ptr,
6884 dataref_offset
6885 ? dataref_offset
6886 : build_int_cst (reference_alias_ptr_type
6887 (DR_REF (first_dr)), 0));
6888 align = TYPE_ALIGN_UNIT (vectype);
6889 if (alignment_support_scheme == dr_aligned)
6890 {
6891 gcc_assert (aligned_access_p (first_dr));
6892 misalign = 0;
6893 }
6894 else if (DR_MISALIGNMENT (first_dr) == -1)
6895 {
6896 if (DR_VECT_AUX (first_dr)->base_element_aligned)
6897 align = TYPE_ALIGN_UNIT (elem_type);
6898 else
6899 align = (get_object_alignment (DR_REF (first_dr))
6900 / BITS_PER_UNIT);
6901 misalign = 0;
6902 TREE_TYPE (data_ref)
6903 = build_aligned_type (TREE_TYPE (data_ref),
6904 align * BITS_PER_UNIT);
6905 }
6906 else
6907 {
6908 TREE_TYPE (data_ref)
6909 = build_aligned_type (TREE_TYPE (data_ref),
6910 TYPE_ALIGN (elem_type));
6911 misalign = DR_MISALIGNMENT (first_dr);
6912 }
6913 if (dataref_offset == NULL_TREE
6914 && TREE_CODE (dataref_ptr) == SSA_NAME)
6915 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6916 align, misalign);
6917 break;
6918 }
6919 case dr_explicit_realign:
6920 {
6921 tree ptr, bump;
6922
6923 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
6924
6925 if (compute_in_loop)
6926 msq = vect_setup_realignment (first_stmt, gsi,
6927 &realignment_token,
6928 dr_explicit_realign,
6929 dataref_ptr, NULL);
6930
6931 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6932 ptr = copy_ssa_name (dataref_ptr);
6933 else
6934 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
6935 new_stmt = gimple_build_assign
6936 (ptr, BIT_AND_EXPR, dataref_ptr,
6937 build_int_cst
6938 (TREE_TYPE (dataref_ptr),
6939 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6940 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6941 data_ref
6942 = build2 (MEM_REF, vectype, ptr,
6943 build_int_cst (reference_alias_ptr_type
6944 (DR_REF (first_dr)), 0));
6945 vec_dest = vect_create_destination_var (scalar_dest,
6946 vectype);
6947 new_stmt = gimple_build_assign (vec_dest, data_ref);
6948 new_temp = make_ssa_name (vec_dest, new_stmt);
6949 gimple_assign_set_lhs (new_stmt, new_temp);
6950 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
6951 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
6952 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6953 msq = new_temp;
6954
6955 bump = size_binop (MULT_EXPR, vs,
6956 TYPE_SIZE_UNIT (elem_type));
6957 bump = size_binop (MINUS_EXPR, bump, size_one_node);
6958 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
6959 new_stmt = gimple_build_assign
6960 (NULL_TREE, BIT_AND_EXPR, ptr,
6961 build_int_cst
6962 (TREE_TYPE (ptr),
6963 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6964 ptr = copy_ssa_name (ptr, new_stmt);
6965 gimple_assign_set_lhs (new_stmt, ptr);
6966 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6967 data_ref
6968 = build2 (MEM_REF, vectype, ptr,
6969 build_int_cst (reference_alias_ptr_type
6970 (DR_REF (first_dr)), 0));
6971 break;
6972 }
6973 case dr_explicit_realign_optimized:
6974 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6975 new_temp = copy_ssa_name (dataref_ptr);
6976 else
6977 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
6978 new_stmt = gimple_build_assign
6979 (new_temp, BIT_AND_EXPR, dataref_ptr,
6980 build_int_cst
6981 (TREE_TYPE (dataref_ptr),
6982 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6983 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6984 data_ref
6985 = build2 (MEM_REF, vectype, new_temp,
6986 build_int_cst (reference_alias_ptr_type
6987 (DR_REF (first_dr)), 0));
6988 break;
6989 default:
6990 gcc_unreachable ();
6991 }
6992 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6993 new_stmt = gimple_build_assign (vec_dest, data_ref);
6994 new_temp = make_ssa_name (vec_dest, new_stmt);
6995 gimple_assign_set_lhs (new_stmt, new_temp);
6996 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6997
6998 /* 3. Handle explicit realignment if necessary/supported.
6999 Create in loop:
7000 vec_dest = realign_load (msq, lsq, realignment_token) */
7001 if (alignment_support_scheme == dr_explicit_realign_optimized
7002 || alignment_support_scheme == dr_explicit_realign)
7003 {
7004 lsq = gimple_assign_lhs (new_stmt);
7005 if (!realignment_token)
7006 realignment_token = dataref_ptr;
7007 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7008 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7009 msq, lsq, realignment_token);
7010 new_temp = make_ssa_name (vec_dest, new_stmt);
7011 gimple_assign_set_lhs (new_stmt, new_temp);
7012 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7013
7014 if (alignment_support_scheme == dr_explicit_realign_optimized)
7015 {
7016 gcc_assert (phi);
7017 if (i == vec_num - 1 && j == ncopies - 1)
7018 add_phi_arg (phi, lsq,
7019 loop_latch_edge (containing_loop),
7020 UNKNOWN_LOCATION);
7021 msq = lsq;
7022 }
7023 }
7024
7025 /* 4. Handle invariant-load. */
7026 if (inv_p && !bb_vinfo)
7027 {
7028 gcc_assert (!grouped_load);
7029 /* If we have versioned for aliasing or the loop doesn't
7030 have any data dependencies that would preclude this,
7031 then we are sure this is a loop invariant load and
7032 thus we can insert it on the preheader edge. */
7033 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7034 && !nested_in_vect_loop
7035 && hoist_defs_of_uses (stmt, loop))
7036 {
7037 if (dump_enabled_p ())
7038 {
7039 dump_printf_loc (MSG_NOTE, vect_location,
7040 "hoisting out of the vectorized "
7041 "loop: ");
7042 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7043 }
7044 tree tem = copy_ssa_name (scalar_dest);
7045 gsi_insert_on_edge_immediate
7046 (loop_preheader_edge (loop),
7047 gimple_build_assign (tem,
7048 unshare_expr
7049 (gimple_assign_rhs1 (stmt))));
7050 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7051 }
7052 else
7053 {
7054 gimple_stmt_iterator gsi2 = *gsi;
7055 gsi_next (&gsi2);
7056 new_temp = vect_init_vector (stmt, scalar_dest,
7057 vectype, &gsi2);
7058 }
7059 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7060 set_vinfo_for_stmt (new_stmt,
7061 new_stmt_vec_info (new_stmt, vinfo));
7062 }
7063
7064 if (negative)
7065 {
7066 tree perm_mask = perm_mask_for_reverse (vectype);
7067 new_temp = permute_vec_elements (new_temp, new_temp,
7068 perm_mask, stmt, gsi);
7069 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7070 }
7071
7072 /* Collect vector loads and later create their permutation in
7073 vect_transform_grouped_load (). */
7074 if (grouped_load || slp_perm)
7075 dr_chain.quick_push (new_temp);
7076
7077 /* Store vector loads in the corresponding SLP_NODE. */
7078 if (slp && !slp_perm)
7079 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7080 }
7081 /* Bump the vector pointer to account for a gap or for excess
7082 elements loaded for a permuted SLP load. */
7083 if (group_gap_adj != 0)
7084 {
7085 bool ovf;
7086 tree bump
7087 = wide_int_to_tree (sizetype,
7088 wi::smul (TYPE_SIZE_UNIT (elem_type),
7089 group_gap_adj, &ovf));
7090 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7091 stmt, bump);
7092 }
7093 }
7094
7095 if (slp && !slp_perm)
7096 continue;
7097
7098 if (slp_perm)
7099 {
7100 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7101 slp_node_instance, false))
7102 {
7103 dr_chain.release ();
7104 return false;
7105 }
7106 }
7107 else
7108 {
7109 if (grouped_load)
7110 {
7111 if (!load_lanes_p)
7112 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7113 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7114 }
7115 else
7116 {
7117 if (j == 0)
7118 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7119 else
7120 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7121 prev_stmt_info = vinfo_for_stmt (new_stmt);
7122 }
7123 }
7124 dr_chain.release ();
7125 }
7126
7127 return true;
7128 }
7129
7130 /* Function vect_is_simple_cond.
7131
7132 Input:
7133    VINFO - the vect info of the loop or basic block that is being vectorized.
7134 COND - Condition that is checked for simple use.
7135
7136 Output:
7137 *COMP_VECTYPE - the vector type for the comparison.
7138
7139 Returns whether a COND can be vectorized. Checks whether
7140 condition operands are supportable using vect_is_simple_use. */
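/* For illustration (hypothetical SSA names, not taken from any testcase):
   a condition such as

       a_1 < b_2        or        a_1 <= 42

   is "simple" here, since each operand is either an SSA name whose
   definition vect_is_simple_use accepts, or an INTEGER_CST, REAL_CST or
   FIXED_CST; a comparison with any other kind of operand is rejected.  */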
7141
7142 static bool
7143 vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
7144 {
7145 tree lhs, rhs;
7146 enum vect_def_type dt;
7147 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7148
7149 if (!COMPARISON_CLASS_P (cond))
7150 return false;
7151
7152 lhs = TREE_OPERAND (cond, 0);
7153 rhs = TREE_OPERAND (cond, 1);
7154
7155 if (TREE_CODE (lhs) == SSA_NAME)
7156 {
7157 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7158 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
7159 return false;
7160 }
7161 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
7162 && TREE_CODE (lhs) != FIXED_CST)
7163 return false;
7164
7165 if (TREE_CODE (rhs) == SSA_NAME)
7166 {
7167 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7168 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
7169 return false;
7170 }
7171 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
7172 && TREE_CODE (rhs) != FIXED_CST)
7173 return false;
7174
7175 *comp_vectype = vectype1 ? vectype1 : vectype2;
7176 return true;
7177 }
7178
7179 /* vectorizable_condition.
7180
7181 Check if STMT is a conditional modify expression that can be vectorized.
7182 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7183 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7184 at GSI.
7185
7186 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
7187 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
7188 the else clause if it is 2).
7189
7190 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
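/* A sketch of the transformation, for illustration only (hypothetical
   SSA names; a 4-element vector type is assumed):

     scalar:  x_5 = a_1 < b_2 ? c_3 : d_4;
     vector:  vect_x_9 = VEC_COND_EXPR <vect_a_6 < vect_b_7,
                                        vect_c_8, vect_d_10>;

   The comparison is rebuilt with VEC_CMP_TYPE (a signed integer vector
   of the same element width) and wrapped in a VEC_COND_EXPR that selects
   between the vectorized then/else clauses.  */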
7191
7192 bool
7193 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7194 gimple **vec_stmt, tree reduc_def, int reduc_index,
7195 slp_tree slp_node)
7196 {
7197 tree scalar_dest = NULL_TREE;
7198 tree vec_dest = NULL_TREE;
7199 tree cond_expr, then_clause, else_clause;
7200 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7201 tree comp_vectype = NULL_TREE;
7202 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7203 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7204 tree vec_compare, vec_cond_expr;
7205 tree new_temp;
7206 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7207 enum vect_def_type dt, dts[4];
7208 int ncopies;
7209 enum tree_code code;
7210 stmt_vec_info prev_stmt_info = NULL;
7211 int i, j;
7212 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7213 vec<tree> vec_oprnds0 = vNULL;
7214 vec<tree> vec_oprnds1 = vNULL;
7215 vec<tree> vec_oprnds2 = vNULL;
7216 vec<tree> vec_oprnds3 = vNULL;
7217 tree vec_cmp_type;
7218
7219 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7220 return false;
7221
7222 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7223 return false;
7224
7225 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7226 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7227 && reduc_def))
7228 return false;
7229
7230 /* FORNOW: not yet supported. */
7231 if (STMT_VINFO_LIVE_P (stmt_info))
7232 {
7233 if (dump_enabled_p ())
7234 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7235 "value used after loop.\n");
7236 return false;
7237 }
7238
7239 /* Is this a vectorizable conditional operation? */
7240 if (!is_gimple_assign (stmt))
7241 return false;
7242
7243 code = gimple_assign_rhs_code (stmt);
7244
7245 if (code != COND_EXPR)
7246 return false;
7247
7248 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7249 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
7250
7251 if (slp_node || PURE_SLP_STMT (stmt_info))
7252 ncopies = 1;
7253 else
7254 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7255
7256 gcc_assert (ncopies >= 1);
7257 if (reduc_index && ncopies > 1)
7258 return false; /* FORNOW */
7259
7260 cond_expr = gimple_assign_rhs1 (stmt);
7261 then_clause = gimple_assign_rhs2 (stmt);
7262 else_clause = gimple_assign_rhs3 (stmt);
7263
7264 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
7265 || !comp_vectype)
7266 return false;
7267
7268 gimple *def_stmt;
7269 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt))
7270 return false;
7271 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt))
7272 return false;
7273
7274 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype)));
7275 /* The result of a vector comparison should be of signed type. */
7276 tree cmp_type = build_nonstandard_integer_type (prec, 0);
7277 vec_cmp_type = get_same_sized_vectype (cmp_type, vectype);
7278 if (vec_cmp_type == NULL_TREE)
7279 return false;
7280
7281 if (!vec_stmt)
7282 {
7283 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
7284 return expand_vec_cond_expr_p (vectype, comp_vectype);
7285 }
7286
7287 /* Transform. */
7288
7289 if (!slp_node)
7290 {
7291 vec_oprnds0.create (1);
7292 vec_oprnds1.create (1);
7293 vec_oprnds2.create (1);
7294 vec_oprnds3.create (1);
7295 }
7296
7297 /* Handle def. */
7298 scalar_dest = gimple_assign_lhs (stmt);
7299 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7300
7301 /* Handle cond expr. */
7302 for (j = 0; j < ncopies; j++)
7303 {
7304 gassign *new_stmt = NULL;
7305 if (j == 0)
7306 {
7307 if (slp_node)
7308 {
7309 auto_vec<tree, 4> ops;
7310 auto_vec<vec<tree>, 4> vec_defs;
7311
7312 ops.safe_push (TREE_OPERAND (cond_expr, 0));
7313 ops.safe_push (TREE_OPERAND (cond_expr, 1));
7314 ops.safe_push (then_clause);
7315 ops.safe_push (else_clause);
7316 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7317 vec_oprnds3 = vec_defs.pop ();
7318 vec_oprnds2 = vec_defs.pop ();
7319 vec_oprnds1 = vec_defs.pop ();
7320 vec_oprnds0 = vec_defs.pop ();
7321
7322 ops.release ();
7323 vec_defs.release ();
7324 }
7325 else
7326 {
7327 gimple *gtemp;
7328 vec_cond_lhs =
7329 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt);
7330 vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
7331 loop_vinfo, &gtemp, &dts[0]);
7332
7333 vec_cond_rhs =
7334 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
7335 stmt);
7336 vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
7337 loop_vinfo, &gtemp, &dts[1]);
7338 if (reduc_index == 1)
7339 vec_then_clause = reduc_def;
7340 else
7341 {
7342 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
7343 stmt);
7344 vect_is_simple_use (then_clause, loop_vinfo,
7345 &gtemp, &dts[2]);
7346 }
7347 if (reduc_index == 2)
7348 vec_else_clause = reduc_def;
7349 else
7350 {
7351 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
7352 stmt);
7353 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
7354 }
7355 }
7356 }
7357 else
7358 {
7359 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
7360 vec_oprnds0.pop ());
7361 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
7362 vec_oprnds1.pop ());
7363 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
7364 vec_oprnds2.pop ());
7365 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
7366 vec_oprnds3.pop ());
7367 }
7368
7369 if (!slp_node)
7370 {
7371 vec_oprnds0.quick_push (vec_cond_lhs);
7372 vec_oprnds1.quick_push (vec_cond_rhs);
7373 vec_oprnds2.quick_push (vec_then_clause);
7374 vec_oprnds3.quick_push (vec_else_clause);
7375 }
7376
7377 /* Arguments are ready. Create the new vector stmt. */
7378 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
7379 {
7380 vec_cond_rhs = vec_oprnds1[i];
7381 vec_then_clause = vec_oprnds2[i];
7382 vec_else_clause = vec_oprnds3[i];
7383
7384 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
7385 vec_cond_lhs, vec_cond_rhs);
7386 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
7387 vec_compare, vec_then_clause, vec_else_clause);
7388
7389 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
7390 new_temp = make_ssa_name (vec_dest, new_stmt);
7391 gimple_assign_set_lhs (new_stmt, new_temp);
7392 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7393 if (slp_node)
7394 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7395 }
7396
7397 if (slp_node)
7398 continue;
7399
7400 if (j == 0)
7401 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7402 else
7403 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7404
7405 prev_stmt_info = vinfo_for_stmt (new_stmt);
7406 }
7407
7408 vec_oprnds0.release ();
7409 vec_oprnds1.release ();
7410 vec_oprnds2.release ();
7411 vec_oprnds3.release ();
7412
7413 return true;
7414 }
7415
7416
7417 /* Make sure the statement is vectorizable. */
7418
7419 bool
7420 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
7421 {
7422 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7423 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7424 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
7425 bool ok;
7426 tree scalar_type, vectype;
7427 gimple *pattern_stmt;
7428 gimple_seq pattern_def_seq;
7429
7430 if (dump_enabled_p ())
7431 {
7432 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
7433 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7434 }
7435
7436 if (gimple_has_volatile_ops (stmt))
7437 {
7438 if (dump_enabled_p ())
7439 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7440 "not vectorized: stmt has volatile operands\n");
7441
7442 return false;
7443 }
7444
7445 /* Skip stmts that do not need to be vectorized. In loops this is expected
7446 to include:
7447 - the COND_EXPR which is the loop exit condition
7448 - any LABEL_EXPRs in the loop
7449 - computations that are used only for array indexing or loop control.
7450 In basic blocks we only analyze statements that are a part of some SLP
7451 instance; therefore, all the statements are relevant.
7452
7453 The pattern statement needs to be analyzed instead of the original statement
7454 if the original statement is not relevant. Otherwise, we analyze both
7455 statements. In basic blocks we are called from some SLP instance
7456 traversal; there we don't analyze pattern stmts separately, as the
7457 pattern stmts will already be part of the SLP instance. */
7458
7459 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7460 if (!STMT_VINFO_RELEVANT_P (stmt_info)
7461 && !STMT_VINFO_LIVE_P (stmt_info))
7462 {
7463 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7464 && pattern_stmt
7465 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7466 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7467 {
7468 /* Analyze PATTERN_STMT instead of the original stmt. */
7469 stmt = pattern_stmt;
7470 stmt_info = vinfo_for_stmt (pattern_stmt);
7471 if (dump_enabled_p ())
7472 {
7473 dump_printf_loc (MSG_NOTE, vect_location,
7474 "==> examining pattern statement: ");
7475 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7476 }
7477 }
7478 else
7479 {
7480 if (dump_enabled_p ())
7481 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
7482
7483 return true;
7484 }
7485 }
7486 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7487 && node == NULL
7488 && pattern_stmt
7489 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7490 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7491 {
7492 /* Analyze PATTERN_STMT too. */
7493 if (dump_enabled_p ())
7494 {
7495 dump_printf_loc (MSG_NOTE, vect_location,
7496 "==> examining pattern statement: ");
7497 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7498 }
7499
7500 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7501 return false;
7502 }
7503
7504 if (is_pattern_stmt_p (stmt_info)
7505 && node == NULL
7506 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
7507 {
7508 gimple_stmt_iterator si;
7509
7510 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
7511 {
7512 gimple *pattern_def_stmt = gsi_stmt (si);
7513 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
7514 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
7515 {
7516 /* Analyze def stmt of STMT if it's a pattern stmt. */
7517 if (dump_enabled_p ())
7518 {
7519 dump_printf_loc (MSG_NOTE, vect_location,
7520 "==> examining pattern def statement: ");
7521 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
7522 }
7523
7524 if (!vect_analyze_stmt (pattern_def_stmt,
7525 need_to_vectorize, node))
7526 return false;
7527 }
7528 }
7529 }
7530
7531 switch (STMT_VINFO_DEF_TYPE (stmt_info))
7532 {
7533 case vect_internal_def:
7534 break;
7535
7536 case vect_reduction_def:
7537 case vect_nested_cycle:
7538 gcc_assert (!bb_vinfo
7539 && (relevance == vect_used_in_outer
7540 || relevance == vect_used_in_outer_by_reduction
7541 || relevance == vect_used_by_reduction
7542 || relevance == vect_unused_in_scope));
7543 break;
7544
7545 case vect_induction_def:
7546 case vect_constant_def:
7547 case vect_external_def:
7548 case vect_unknown_def_type:
7549 default:
7550 gcc_unreachable ();
7551 }
7552
7553 if (bb_vinfo)
7554 {
7555 gcc_assert (PURE_SLP_STMT (stmt_info));
7556
7557 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
7558 if (dump_enabled_p ())
7559 {
7560 dump_printf_loc (MSG_NOTE, vect_location,
7561 "get vectype for scalar type: ");
7562 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
7563 dump_printf (MSG_NOTE, "\n");
7564 }
7565
7566 vectype = get_vectype_for_scalar_type (scalar_type);
7567 if (!vectype)
7568 {
7569 if (dump_enabled_p ())
7570 {
7571 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7572 "not SLPed: unsupported data-type ");
7573 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
7574 scalar_type);
7575 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7576 }
7577 return false;
7578 }
7579
7580 if (dump_enabled_p ())
7581 {
7582 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
7583 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
7584 dump_printf (MSG_NOTE, "\n");
7585 }
7586
7587 STMT_VINFO_VECTYPE (stmt_info) = vectype;
7588 }
7589
7590 if (STMT_VINFO_RELEVANT_P (stmt_info))
7591 {
7592 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
7593 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
7594 || (is_gimple_call (stmt)
7595 && gimple_call_lhs (stmt) == NULL_TREE));
7596 *need_to_vectorize = true;
7597 }
7598
7599 if (PURE_SLP_STMT (stmt_info) && !node)
7600 {
7601 dump_printf_loc (MSG_NOTE, vect_location,
7602 "handled only by SLP analysis\n");
7603 return true;
7604 }
7605
7606 ok = true;
7607 if (!bb_vinfo
7608 && (STMT_VINFO_RELEVANT_P (stmt_info)
7609 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
7610 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7611 || vectorizable_conversion (stmt, NULL, NULL, node)
7612 || vectorizable_shift (stmt, NULL, NULL, node)
7613 || vectorizable_operation (stmt, NULL, NULL, node)
7614 || vectorizable_assignment (stmt, NULL, NULL, node)
7615 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7616 || vectorizable_call (stmt, NULL, NULL, node)
7617 || vectorizable_store (stmt, NULL, NULL, node)
7618 || vectorizable_reduction (stmt, NULL, NULL, node)
7619 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7620 else
7621 {
7622 if (bb_vinfo)
7623 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7624 || vectorizable_conversion (stmt, NULL, NULL, node)
7625 || vectorizable_shift (stmt, NULL, NULL, node)
7626 || vectorizable_operation (stmt, NULL, NULL, node)
7627 || vectorizable_assignment (stmt, NULL, NULL, node)
7628 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7629 || vectorizable_call (stmt, NULL, NULL, node)
7630 || vectorizable_store (stmt, NULL, NULL, node)
7631 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7632 }
7633
7634 if (!ok)
7635 {
7636 if (dump_enabled_p ())
7637 {
7638 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7639 "not vectorized: relevant stmt not ");
7640 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7641 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7642 }
7643
7644 return false;
7645 }
7646
7647 if (bb_vinfo)
7648 return true;
7649
7650 /* Stmts that are (also) "live" (i.e. that are used outside of the loop)
7651 need extra handling, except for vectorizable reductions. */
7652 if (STMT_VINFO_LIVE_P (stmt_info)
7653 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7654 ok = vectorizable_live_operation (stmt, NULL, NULL);
7655
7656 if (!ok)
7657 {
7658 if (dump_enabled_p ())
7659 {
7660 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7661 "not vectorized: live stmt not ");
7662 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7663 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7664 }
7665
7666 return false;
7667 }
7668
7669 return true;
7670 }
7671
7672
7673 /* Function vect_transform_stmt.
7674
7675 Create a vectorized stmt to replace STMT, and insert it at GSI. */
7676
7677 bool
7678 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
7679 bool *grouped_store, slp_tree slp_node,
7680 slp_instance slp_node_instance)
7681 {
7682 bool is_store = false;
7683 gimple *vec_stmt = NULL;
7684 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7685 bool done;
7686
7687 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7688
7689 switch (STMT_VINFO_TYPE (stmt_info))
7690 {
7691 case type_demotion_vec_info_type:
7692 case type_promotion_vec_info_type:
7693 case type_conversion_vec_info_type:
7694 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7695 gcc_assert (done);
7696 break;
7697
7698 case induc_vec_info_type:
7699 gcc_assert (!slp_node);
7700 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7701 gcc_assert (done);
7702 break;
7703
7704 case shift_vec_info_type:
7705 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
7706 gcc_assert (done);
7707 break;
7708
7709 case op_vec_info_type:
7710 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7711 gcc_assert (done);
7712 break;
7713
7714 case assignment_vec_info_type:
7715 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7716 gcc_assert (done);
7717 break;
7718
7719 case load_vec_info_type:
7720 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
7721 slp_node_instance);
7722 gcc_assert (done);
7723 break;
7724
7725 case store_vec_info_type:
7726 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
7727 gcc_assert (done);
7728 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
7729 {
7730 /* In case of interleaving, the whole chain is vectorized when the
7731 last store in the chain is reached. Store stmts before the last
7732 one are skipped, and their vec_stmt_info shouldn't be freed
7733 meanwhile. */
7734 *grouped_store = true;
7735 if (STMT_VINFO_VEC_STMT (stmt_info))
7736 is_store = true;
7737 }
7738 else
7739 is_store = true;
7740 break;
7741
7742 case condition_vec_info_type:
7743 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
7744 gcc_assert (done);
7745 break;
7746
7747 case call_vec_info_type:
7748 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
7749 stmt = gsi_stmt (*gsi);
7750 if (is_gimple_call (stmt)
7751 && gimple_call_internal_p (stmt)
7752 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
7753 is_store = true;
7754 break;
7755
7756 case call_simd_clone_vec_info_type:
7757 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
7758 stmt = gsi_stmt (*gsi);
7759 break;
7760
7761 case reduc_vec_info_type:
7762 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
7763 gcc_assert (done);
7764 break;
7765
7766 default:
7767 if (!STMT_VINFO_LIVE_P (stmt_info))
7768 {
7769 if (dump_enabled_p ())
7770 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7771 "stmt not supported.\n");
7772 gcc_unreachable ();
7773 }
7774 }
7775
7776 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
7777 This would break hybrid SLP vectorization. */
7778 if (slp_node)
7779 gcc_assert (!vec_stmt
7780 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
7781
7782 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
7783 is being vectorized, but outside the immediately enclosing loop. */
7784 if (vec_stmt
7785 && STMT_VINFO_LOOP_VINFO (stmt_info)
7786 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
7787 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
7788 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
7789 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
7790 || STMT_VINFO_RELEVANT (stmt_info) ==
7791 vect_used_in_outer_by_reduction))
7792 {
7793 struct loop *innerloop = LOOP_VINFO_LOOP (
7794 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
7795 imm_use_iterator imm_iter;
7796 use_operand_p use_p;
7797 tree scalar_dest;
7798 gimple *exit_phi;
7799
7800 if (dump_enabled_p ())
7801 dump_printf_loc (MSG_NOTE, vect_location,
7802 "Record the vdef for outer-loop vectorization.\n");
7803
7804 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
7805 (to be used when vectorizing outer-loop stmts that use the DEF of
7806 STMT). */
7807 if (gimple_code (stmt) == GIMPLE_PHI)
7808 scalar_dest = PHI_RESULT (stmt);
7809 else
7810 scalar_dest = gimple_assign_lhs (stmt);
7811
7812 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
7813 {
7814 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
7815 {
7816 exit_phi = USE_STMT (use_p);
7817 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
7818 }
7819 }
7820 }
7821
7822 /* Handle stmts whose DEF is used outside the loop-nest that is
7823 being vectorized. */
7824 if (STMT_VINFO_LIVE_P (stmt_info)
7825 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7826 {
7827 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
7828 gcc_assert (done);
7829 }
7830
7831 if (vec_stmt)
7832 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
7833
7834 return is_store;
7835 }
7836
7837
7838 /* Remove a group of stores (for SLP or interleaving) and free their
7839 stmt_vec_info. */
7840
7841 void
7842 vect_remove_stores (gimple *first_stmt)
7843 {
7844 gimple *next = first_stmt;
7845 gimple *tmp;
7846 gimple_stmt_iterator next_si;
7847
7848 while (next)
7849 {
7850 stmt_vec_info stmt_info = vinfo_for_stmt (next);
7851
7852 tmp = GROUP_NEXT_ELEMENT (stmt_info);
7853 if (is_pattern_stmt_p (stmt_info))
7854 next = STMT_VINFO_RELATED_STMT (stmt_info);
7855 /* Free the attached stmt_vec_info and remove the stmt. */
7856 next_si = gsi_for_stmt (next);
7857 unlink_stmt_vdef (next);
7858 gsi_remove (&next_si, true);
7859 release_defs (next);
7860 free_stmt_vec_info (next);
7861 next = tmp;
7862 }
7863 }
7864
7865
7866 /* Function new_stmt_vec_info.
7867
7868 Create and initialize a new stmt_vec_info struct for STMT. */
7869
7870 stmt_vec_info
7871 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
7872 {
7873 stmt_vec_info res;
7874 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
7875
7876 STMT_VINFO_TYPE (res) = undef_vec_info_type;
7877 STMT_VINFO_STMT (res) = stmt;
7878 res->vinfo = vinfo;
7879 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
7880 STMT_VINFO_LIVE_P (res) = false;
7881 STMT_VINFO_VECTYPE (res) = NULL;
7882 STMT_VINFO_VEC_STMT (res) = NULL;
7883 STMT_VINFO_VECTORIZABLE (res) = true;
7884 STMT_VINFO_IN_PATTERN_P (res) = false;
7885 STMT_VINFO_RELATED_STMT (res) = NULL;
7886 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
7887 STMT_VINFO_DATA_REF (res) = NULL;
7888
7889 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
7890 STMT_VINFO_DR_OFFSET (res) = NULL;
7891 STMT_VINFO_DR_INIT (res) = NULL;
7892 STMT_VINFO_DR_STEP (res) = NULL;
7893 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
7894
7895 if (gimple_code (stmt) == GIMPLE_PHI
7896 && is_loop_header_bb_p (gimple_bb (stmt)))
7897 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
7898 else
7899 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
7900
7901 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
7902 STMT_SLP_TYPE (res) = loop_vect;
7903 GROUP_FIRST_ELEMENT (res) = NULL;
7904 GROUP_NEXT_ELEMENT (res) = NULL;
7905 GROUP_SIZE (res) = 0;
7906 GROUP_STORE_COUNT (res) = 0;
7907 GROUP_GAP (res) = 0;
7908 GROUP_SAME_DR_STMT (res) = NULL;
7909
7910 return res;
7911 }
7912
7913
7914 /* Create the vector that maps stmt UIDs to stmt_vec_info. */
7915
7916 void
7917 init_stmt_vec_info_vec (void)
7918 {
7919 gcc_assert (!stmt_vec_info_vec.exists ());
7920 stmt_vec_info_vec.create (50);
7921 }
7922
7923
7924 /* Free the vector that maps stmt UIDs to stmt_vec_info. */
7925
7926 void
7927 free_stmt_vec_info_vec (void)
7928 {
7929 unsigned int i;
7930 stmt_vec_info info;
7931 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
7932 if (info != NULL)
7933 free_stmt_vec_info (STMT_VINFO_STMT (info));
7934 gcc_assert (stmt_vec_info_vec.exists ());
7935 stmt_vec_info_vec.release ();
7936 }
7937
7938
7939 /* Free stmt vectorization related info. */
7940
7941 void
7942 free_stmt_vec_info (gimple *stmt)
7943 {
7944 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7945
7946 if (!stmt_info)
7947 return;
7948
7949 /* Check if this statement has a related "pattern stmt"
7950 (introduced by the vectorizer during the pattern recognition
7951 pass). Free the pattern's stmt_vec_info and the def stmt's stmt_vec_info
7952 too. */
7953 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
7954 {
7955 stmt_vec_info patt_info
7956 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7957 if (patt_info)
7958 {
7959 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
7960 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
7961 gimple_set_bb (patt_stmt, NULL);
7962 tree lhs = gimple_get_lhs (patt_stmt);
7963 if (TREE_CODE (lhs) == SSA_NAME)
7964 release_ssa_name (lhs);
7965 if (seq)
7966 {
7967 gimple_stmt_iterator si;
7968 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
7969 {
7970 gimple *seq_stmt = gsi_stmt (si);
7971 gimple_set_bb (seq_stmt, NULL);
7972 lhs = gimple_get_lhs (seq_stmt);
7973 if (TREE_CODE (lhs) == SSA_NAME)
7974 release_ssa_name (lhs);
7975 free_stmt_vec_info (seq_stmt);
7976 }
7977 }
7978 free_stmt_vec_info (patt_stmt);
7979 }
7980 }
7981
7982 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
7983 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
7984 set_vinfo_for_stmt (stmt, NULL);
7985 free (stmt_info);
7986 }
7987
7988
7989 /* Function get_vectype_for_scalar_type_and_size.
7990
7991 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
7992 by the target. */
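/* For example (assuming a target with 16-byte vectors): for SCALAR_TYPE
   int (SImode, 4 bytes) and SIZE 16 this builds the 4-element vector
   type V4SI; with SIZE 0 the number of elements is instead derived from
   the target's preferred SIMD mode for SImode.  */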
7993
7994 static tree
7995 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
7996 {
7997 machine_mode inner_mode = TYPE_MODE (scalar_type);
7998 machine_mode simd_mode;
7999 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
8000 int nunits;
8001 tree vectype;
8002
8003 if (nbytes == 0)
8004 return NULL_TREE;
8005
8006 if (GET_MODE_CLASS (inner_mode) != MODE_INT
8007 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
8008 return NULL_TREE;
8009
8010 /* For vector types of elements whose mode precision doesn't
8011 match their type's precision we use an element type of mode
8012 precision. The vectorization routines will have to make sure
8013 they support the proper result truncation/extension.
8014 We also make sure to build vector types with INTEGER_TYPE
8015 component type only. */
8016 if (INTEGRAL_TYPE_P (scalar_type)
8017 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
8018 || TREE_CODE (scalar_type) != INTEGER_TYPE))
8019 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
8020 TYPE_UNSIGNED (scalar_type));
8021
8022 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
8023 When the component mode passes the above test simply use a type
8024 corresponding to that mode. The theory is that any use that
8025 would cause problems with this will disable vectorization anyway. */
8026 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
8027 && !INTEGRAL_TYPE_P (scalar_type))
8028 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
8029
8030 /* We can't build a vector type of elements with alignment bigger than
8031 their size. */
8032 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
8033 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
8034 TYPE_UNSIGNED (scalar_type));
8035
8036 /* If we fell back to using the mode, fail if there was
8037 no scalar type for it. */
8038 if (scalar_type == NULL_TREE)
8039 return NULL_TREE;
8040
8041 /* If no size was supplied, use the mode the target prefers. Otherwise
8042 look up a vector mode of the specified size. */
8043 if (size == 0)
8044 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
8045 else
8046 simd_mode = mode_for_vector (inner_mode, size / nbytes);
8047 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
8048 if (nunits <= 1)
8049 return NULL_TREE;
8050
8051 vectype = build_vector_type (scalar_type, nunits);
8052
8053 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
8054 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
8055 return NULL_TREE;
8056
8057 return vectype;
8058 }
8059
8060 unsigned int current_vector_size;
8061
8062 /* Function get_vectype_for_scalar_type.
8063
8064 Returns the vector type corresponding to SCALAR_TYPE as supported
8065 by the target. */
8066
8067 tree
8068 get_vectype_for_scalar_type (tree scalar_type)
8069 {
8070 tree vectype;
8071 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
8072 current_vector_size);
8073 if (vectype
8074 && current_vector_size == 0)
8075 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
8076 return vectype;
8077 }
8078
8079 /* Function get_same_sized_vectype
8080
8081 Returns a vector type corresponding to SCALAR_TYPE of size
8082 VECTOR_TYPE if supported by the target. */
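/* For example, if VECTOR_TYPE is a 16-byte vector of int (V4SI) and
   SCALAR_TYPE is short, the result is the 16-byte vector of short
   (V8HI), provided the target supports such a mode.  */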
8083
8084 tree
8085 get_same_sized_vectype (tree scalar_type, tree vector_type)
8086 {
8087 return get_vectype_for_scalar_type_and_size
8088 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
8089 }
8090
8091 /* Function vect_is_simple_use.
8092
8093 Input:
8094 VINFO - the vect info of the loop or basic block that is being vectorized.
8095 OPERAND - operand in the loop or bb.
8096 Output:
8097 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
8098 DT - the type of definition
8099
8100 Returns whether a stmt with OPERAND can be vectorized.
8101 For loops, supportable operands are constants, loop invariants, and operands
8102 that are defined by the current iteration of the loop. Unsupportable
8103 operands are those that are defined by a previous iteration of the loop (as
8104 is the case in reduction/induction computations).
8105 For basic blocks, supportable operands are constants and bb invariants.
8106 For now, operands defined outside the basic block are not supported. */
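/* For illustration (hypothetical names), when vectorizing

       for (i = 0; i < n; i++)
         a[i] = b[i] * k_3 + 4;

   the constant 4 is classified as vect_constant_def, the loop invariant
   k_3 as vect_external_def, and the SSA name holding the loaded value
   of b[i] as vect_internal_def.  */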
8107
8108 bool
8109 vect_is_simple_use (tree operand, vec_info *vinfo,
8110 gimple **def_stmt, enum vect_def_type *dt)
8111 {
8112 *def_stmt = NULL;
8113 *dt = vect_unknown_def_type;
8114
8115 if (dump_enabled_p ())
8116 {
8117 dump_printf_loc (MSG_NOTE, vect_location,
8118 "vect_is_simple_use: operand ");
8119 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
8120 dump_printf (MSG_NOTE, "\n");
8121 }
8122
8123 if (CONSTANT_CLASS_P (operand))
8124 {
8125 *dt = vect_constant_def;
8126 return true;
8127 }
8128
8129 if (is_gimple_min_invariant (operand))
8130 {
8131 *dt = vect_external_def;
8132 return true;
8133 }
8134
8135 if (TREE_CODE (operand) != SSA_NAME)
8136 {
8137 if (dump_enabled_p ())
8138 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8139 "not ssa-name.\n");
8140 return false;
8141 }
8142
8143 if (SSA_NAME_IS_DEFAULT_DEF (operand))
8144 {
8145 *dt = vect_external_def;
8146 return true;
8147 }
8148
8149 *def_stmt = SSA_NAME_DEF_STMT (operand);
8150 if (dump_enabled_p ())
8151 {
8152 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
8153 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
8154 }
8155
8156 basic_block bb = gimple_bb (*def_stmt);
8157 if ((is_a <loop_vec_info> (vinfo)
8158 && !flow_bb_inside_loop_p (as_a <loop_vec_info> (vinfo)->loop, bb))
8159 || (is_a <bb_vec_info> (vinfo)
8160 && (bb != as_a <bb_vec_info> (vinfo)->bb
8161 || gimple_code (*def_stmt) == GIMPLE_PHI)))
8162 *dt = vect_external_def;
8163 else
8164 {
8165 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
8166 if (is_a <bb_vec_info> (vinfo) && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
8167 *dt = vect_external_def;
8168 else
8169 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
8170 }
8171
8172 if (dump_enabled_p ())
8173 {
8174 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
8175 switch (*dt)
8176 {
8177 case vect_uninitialized_def:
8178 dump_printf (MSG_NOTE, "uninitialized\n");
8179 break;
8180 case vect_constant_def:
8181 dump_printf (MSG_NOTE, "constant\n");
8182 break;
8183 case vect_external_def:
8184 dump_printf (MSG_NOTE, "external\n");
8185 break;
8186 case vect_internal_def:
8187 dump_printf (MSG_NOTE, "internal\n");
8188 break;
8189 case vect_induction_def:
8190 dump_printf (MSG_NOTE, "induction\n");
8191 break;
8192 case vect_reduction_def:
8193 dump_printf (MSG_NOTE, "reduction\n");
8194 break;
8195 case vect_double_reduction_def:
8196 dump_printf (MSG_NOTE, "double reduction\n");
8197 break;
8198 case vect_nested_cycle:
8199 dump_printf (MSG_NOTE, "nested cycle\n");
8200 break;
8201 case vect_unknown_def_type:
8202 dump_printf (MSG_NOTE, "unknown\n");
8203 break;
8204 }
8205 }
8206
8207 if (*dt == vect_unknown_def_type)
8208 {
8209 if (dump_enabled_p ())
8210 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8211 "Unsupported pattern.\n");
8212 return false;
8213 }
8214
8215 switch (gimple_code (*def_stmt))
8216 {
8217 case GIMPLE_PHI:
8218 case GIMPLE_ASSIGN:
8219 case GIMPLE_CALL:
8220 break;
8221 default:
8222 if (dump_enabled_p ())
8223 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8224 "unsupported defining stmt:\n");
8225 return false;
8226 }
8227
8228 return true;
8229 }
8230
8231 /* Function vect_is_simple_use.
8232
8233 Same as vect_is_simple_use but also determines the vector operand
8234 type of OPERAND and stores it to *VECTYPE. If the definition of
8235 OPERAND is vect_uninitialized_def, vect_constant_def or
8236 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
8237 is responsible for computing the best suited vector type for the
8238 scalar operand. */
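/* Typical use in the vectorizable_* routines is a sketch like the
   following (hypothetical operand OP):

       gimple *def_stmt;
       enum vect_def_type dt;
       tree op_vectype;
       if (!vect_is_simple_use (op, stmt_info->vinfo, &def_stmt, &dt,
                                &op_vectype))
         return false;

   with OP_VECTYPE left NULL_TREE for constant/external defs.  */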
8239
8240 bool
8241 vect_is_simple_use (tree operand, vec_info *vinfo,
8242 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
8243 {
8244 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
8245 return false;
8246
8247 /* Now get a vector type if the def is internal, otherwise supply
8248 NULL_TREE and leave it up to the caller to figure out a proper
8249 type for the use stmt. */
8250 if (*dt == vect_internal_def
8251 || *dt == vect_induction_def
8252 || *dt == vect_reduction_def
8253 || *dt == vect_double_reduction_def
8254 || *dt == vect_nested_cycle)
8255 {
8256 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
8257
8258 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8259 && !STMT_VINFO_RELEVANT (stmt_info)
8260 && !STMT_VINFO_LIVE_P (stmt_info))
8261 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
8262
8263 *vectype = STMT_VINFO_VECTYPE (stmt_info);
8264 gcc_assert (*vectype != NULL_TREE);
8265 }
8266 else if (*dt == vect_uninitialized_def
8267 || *dt == vect_constant_def
8268 || *dt == vect_external_def)
8269 *vectype = NULL_TREE;
8270 else
8271 gcc_unreachable ();
8272
8273 return true;
8274 }
8275
8276
8277 /* Function supportable_widening_operation
8278
8279 Check whether an operation represented by the code CODE is a
8280 widening operation that is supported by the target platform in
8281 vector form (i.e., when operating on arguments of type VECTYPE_IN
8282 producing a result of type VECTYPE_OUT).
8283
8284 Widening operations we currently support are NOP (CONVERT), FLOAT
8285 and WIDEN_MULT. This function checks if these operations are supported
8286 by the target platform either directly (via vector tree-codes), or via
8287 target builtins.
8288
8289 Output:
8290 - CODE1 and CODE2 are codes of vector operations to be used when
8291 vectorizing the operation, if available.
8292 - MULTI_STEP_CVT determines the number of required intermediate steps in
8293 case of multi-step conversion (like char->short->int - in that case
8294 MULTI_STEP_CVT will be 1).
8295 - INTERM_TYPES contains the intermediate type required to perform the
8296 widening operation (short in the above example). */
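/* For illustration (hypothetical types, 16-byte vectors assumed):
   a widening multiplication

       int_prod = (int) short_a * (int) short_b

   is supported via CODE1/CODE2 = VEC_WIDEN_MULT_LO/HI_EXPR (or the
   EVEN/ODD variants when the result feeds only a reduction), one pair
   of V8HI inputs producing two V4SI results.  A char -> int conversion
   instead needs one intermediate step through a vector of short, so
   MULTI_STEP_CVT is 1 and INTERM_TYPES holds that intermediate vector
   type.  */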
8297
8298 bool
8299 supportable_widening_operation (enum tree_code code, gimple *stmt,
8300 tree vectype_out, tree vectype_in,
8301 enum tree_code *code1, enum tree_code *code2,
8302 int *multi_step_cvt,
8303 vec<tree> *interm_types)
8304 {
8305 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8306 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
8307 struct loop *vect_loop = NULL;
8308 machine_mode vec_mode;
8309 enum insn_code icode1, icode2;
8310 optab optab1, optab2;
8311 tree vectype = vectype_in;
8312 tree wide_vectype = vectype_out;
8313 enum tree_code c1, c2;
8314 int i;
8315 tree prev_type, intermediate_type;
8316 machine_mode intermediate_mode, prev_mode;
8317 optab optab3, optab4;
8318
8319 *multi_step_cvt = 0;
8320 if (loop_info)
8321 vect_loop = LOOP_VINFO_LOOP (loop_info);
8322
8323 switch (code)
8324 {
8325 case WIDEN_MULT_EXPR:
8326 /* The result of a vectorized widening operation usually requires
8327 two vectors (because the widened results do not fit into one vector).
8328 The generated vector results would normally be expected to be
8329 generated in the same order as in the original scalar computation,
8330 i.e. if 8 results are generated in each vector iteration, they are
8331 to be organized as follows:
8332 vect1: [res1,res2,res3,res4],
8333 vect2: [res5,res6,res7,res8].
8334
8335 However, in the special case that the result of the widening
8336 operation is used in a reduction computation only, the order doesn't
8337 matter (because when vectorizing a reduction we change the order of
8338 the computation). Some targets can take advantage of this and
8339 generate more efficient code. For example, targets like Altivec,
8340 that support widen_mult using a sequence of {mult_even,mult_odd}
8341 generate the following vectors:
8342 vect1: [res1,res3,res5,res7],
8343 vect2: [res2,res4,res6,res8].
8344
8345 When vectorizing outer-loops, we execute the inner-loop sequentially
8346 (each vectorized inner-loop iteration contributes to VF outer-loop
8347 iterations in parallel). We therefore don't allow changing the
8348 order of the computation in the inner-loop during outer-loop
8349 vectorization. */
8350 /* TODO: Another case in which order doesn't *really* matter is when we
8351 widen and then contract again, e.g. (short)((int)x * y >> 8).
8352 Normally, pack_trunc performs an even/odd permute, whereas the
8353 repack from an even/odd expansion would be an interleave, which
8354 would be significantly simpler for e.g. AVX2. */
8355 /* In any case, in order to avoid duplicating the code below, recurse
8356 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
8357 are properly set up for the caller. If we fail, we'll continue with
8358 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
8359 if (vect_loop
8360 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
8361 && !nested_in_vect_loop_p (vect_loop, stmt)
8362 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
8363 stmt, vectype_out, vectype_in,
8364 code1, code2, multi_step_cvt,
8365 interm_types))
8366 {
8367 /* Elements in a vector with vect_used_by_reduction property cannot
8368 be reordered if the use chain with this property does not have the
8369 same operation. One such example is s += a * b, where elements
8370 in a and b cannot be reordered. Here we check if the vector defined
8371 by STMT is only directly used in the reduction statement. */
8372 tree lhs = gimple_assign_lhs (stmt);
8373 use_operand_p dummy;
8374 gimple *use_stmt;
8375 stmt_vec_info use_stmt_info = NULL;
8376 if (single_imm_use (lhs, &dummy, &use_stmt)
8377 && (use_stmt_info = vinfo_for_stmt (use_stmt))
8378 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
8379 return true;
8380 }
8381 c1 = VEC_WIDEN_MULT_LO_EXPR;
8382 c2 = VEC_WIDEN_MULT_HI_EXPR;
8383 break;
8384
8385 case DOT_PROD_EXPR:
8386 c1 = DOT_PROD_EXPR;
8387 c2 = DOT_PROD_EXPR;
8388 break;
8389
8390 case SAD_EXPR:
8391 c1 = SAD_EXPR;
8392 c2 = SAD_EXPR;
8393 break;
8394
8395 case VEC_WIDEN_MULT_EVEN_EXPR:
8396 /* Support the recursion induced just above. */
8397 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
8398 c2 = VEC_WIDEN_MULT_ODD_EXPR;
8399 break;
8400
8401 case WIDEN_LSHIFT_EXPR:
8402 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
8403 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
8404 break;
8405
8406 CASE_CONVERT:
8407 c1 = VEC_UNPACK_LO_EXPR;
8408 c2 = VEC_UNPACK_HI_EXPR;
8409 break;
8410
8411 case FLOAT_EXPR:
8412 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
8413 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
8414 break;
8415
8416 case FIX_TRUNC_EXPR:
8417 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
8418 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
8419 computing the operation. */
8420 return false;
8421
8422 default:
8423 gcc_unreachable ();
8424 }
8425
8426 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
8427 std::swap (c1, c2);
8428
8429 if (code == FIX_TRUNC_EXPR)
8430 {
8431 /* The signedness is determined from the output operand. */
8432 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8433 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
8434 }
8435 else
8436 {
8437 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8438 optab2 = optab_for_tree_code (c2, vectype, optab_default);
8439 }
8440
8441 if (!optab1 || !optab2)
8442 return false;
8443
8444 vec_mode = TYPE_MODE (vectype);
8445 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
8446 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
8447 return false;
8448
8449 *code1 = c1;
8450 *code2 = c2;
8451
8452 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8453 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8454 return true;
8455
8456 /* Check if it's a multi-step conversion that can be done using intermediate
8457 types. */
8458
8459 prev_type = vectype;
8460 prev_mode = vec_mode;
8461
8462 if (!CONVERT_EXPR_CODE_P (code))
8463 return false;
8464
8465 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8466 intermediate steps in the promotion sequence. We try
8467 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
8468 not. */
8469 interm_types->create (MAX_INTERM_CVT_STEPS);
8470 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8471 {
8472 intermediate_mode = insn_data[icode1].operand[0].mode;
8473 intermediate_type
8474 = lang_hooks.types.type_for_mode (intermediate_mode,
8475 TYPE_UNSIGNED (prev_type));
8476 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
8477 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
8478
8479 if (!optab3 || !optab4
8480 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
8481 || insn_data[icode1].operand[0].mode != intermediate_mode
8482 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
8483 || insn_data[icode2].operand[0].mode != intermediate_mode
8484 || ((icode1 = optab_handler (optab3, intermediate_mode))
8485 == CODE_FOR_nothing)
8486 || ((icode2 = optab_handler (optab4, intermediate_mode))
8487 == CODE_FOR_nothing))
8488 break;
8489
8490 interm_types->quick_push (intermediate_type);
8491 (*multi_step_cvt)++;
8492
8493 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8494 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8495 return true;
8496
8497 prev_type = intermediate_type;
8498 prev_mode = intermediate_mode;
8499 }
8500
8501 interm_types->release ();
8502 return false;
8503 }
8504
8505
8506 /* Function supportable_narrowing_operation
8507
8508 Check whether an operation represented by the code CODE is a
8509 narrowing operation that is supported by the target platform in
8510 vector form (i.e., when operating on arguments of type VECTYPE_IN
8511 and producing a result of type VECTYPE_OUT).
8512
8513 Narrowing operations we currently support are NOP (CONVERT) and
8514 FIX_TRUNC. This function checks if these operations are supported by
8515 the target platform directly via vector tree-codes.
8516
8517 Output:
8518 - CODE1 is the code of a vector operation to be used when
8519 vectorizing the operation, if available.
8520 - MULTI_STEP_CVT determines the number of required intermediate steps in
8521 case of multi-step conversion (like int->short->char - in that case
8522 MULTI_STEP_CVT will be 1).
8523 - INTERM_TYPES contains the intermediate type required to perform the
8524 narrowing operation (short in the above example). */
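/* For illustration (hypothetical types, 16-byte vectors assumed):
   an int -> char conversion packs V4SI down to V16QI through an
   intermediate V8HI step, using VEC_PACK_TRUNC_EXPR twice, so
   MULTI_STEP_CVT is 1 and INTERM_TYPES holds the vector-of-short
   type.  */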
8525
8526 bool
8527 supportable_narrowing_operation (enum tree_code code,
8528 tree vectype_out, tree vectype_in,
8529 enum tree_code *code1, int *multi_step_cvt,
8530 vec<tree> *interm_types)
8531 {
8532 machine_mode vec_mode;
8533 enum insn_code icode1;
8534 optab optab1, interm_optab;
8535 tree vectype = vectype_in;
8536 tree narrow_vectype = vectype_out;
8537 enum tree_code c1;
8538 tree intermediate_type;
8539 machine_mode intermediate_mode, prev_mode;
8540 int i;
8541 bool uns;
8542
8543 *multi_step_cvt = 0;
8544 switch (code)
8545 {
8546 CASE_CONVERT:
8547 c1 = VEC_PACK_TRUNC_EXPR;
8548 break;
8549
8550 case FIX_TRUNC_EXPR:
8551 c1 = VEC_PACK_FIX_TRUNC_EXPR;
8552 break;
8553
8554 case FLOAT_EXPR:
8555 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
8556 tree code and optabs used for computing the operation. */
8557 return false;
8558
8559 default:
8560 gcc_unreachable ();
8561 }
8562
8563 if (code == FIX_TRUNC_EXPR)
8564 /* The signedness is determined from the output operand. */
8565 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8566 else
8567 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8568
8569 if (!optab1)
8570 return false;
8571
8572 vec_mode = TYPE_MODE (vectype);
8573 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
8574 return false;
8575
8576 *code1 = c1;
8577
8578 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8579 return true;
8580
8581 /* Check if it's a multi-step conversion that can be done using intermediate
8582 types. */
8583 prev_mode = vec_mode;
8584 if (code == FIX_TRUNC_EXPR)
8585 uns = TYPE_UNSIGNED (vectype_out);
8586 else
8587 uns = TYPE_UNSIGNED (vectype);
8588
8589 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
8590 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
8591 costly than signed. */
8592 if (code == FIX_TRUNC_EXPR && uns)
8593 {
8594 enum insn_code icode2;
8595
8596 intermediate_type
8597 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
8598 interm_optab
8599 = optab_for_tree_code (c1, intermediate_type, optab_default);
8600 if (interm_optab != unknown_optab
8601 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
8602 && insn_data[icode1].operand[0].mode
8603 == insn_data[icode2].operand[0].mode)
8604 {
8605 uns = false;
8606 optab1 = interm_optab;
8607 icode1 = icode2;
8608 }
8609 }
8610
8611 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8612 intermediate steps in the narrowing sequence. We try
8613 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
8614 interm_types->create (MAX_INTERM_CVT_STEPS);
8615 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8616 {
8617 intermediate_mode = insn_data[icode1].operand[0].mode;
8618 intermediate_type
8619 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
8620 interm_optab
8621 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
8622 optab_default);
8623 if (!interm_optab
8624 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
8625 || insn_data[icode1].operand[0].mode != intermediate_mode
8626 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
8627 == CODE_FOR_nothing))
8628 break;
8629
8630 interm_types->quick_push (intermediate_type);
8631 (*multi_step_cvt)++;
8632
8633 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8634 return true;
8635
8636 prev_mode = intermediate_mode;
8637 optab1 = interm_optab;
8638 }
8639
8640 interm_types->release ();
8641 return false;
8642 }