1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
50
51 /* For lang_hooks.types.type_for_mode. */
52 #include "langhooks.h"
53
54 /* Return the vectorized type for the given statement. */
55
56 tree
57 stmt_vectype (struct _stmt_vec_info *stmt_info)
58 {
59 return STMT_VINFO_VECTYPE (stmt_info);
60 }
61
62 /* Return TRUE iff the given statement is in an inner loop relative to
63 the loop being vectorized. */
64 bool
65 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
66 {
67 gimple *stmt = STMT_VINFO_STMT (stmt_info);
68 basic_block bb = gimple_bb (stmt);
69 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
70 struct loop* loop;
71
72 if (!loop_vinfo)
73 return false;
74
75 loop = LOOP_VINFO_LOOP (loop_vinfo);
76
77 return (bb->loop_father == loop->inner);
78 }
79
80 /* Record the cost of a statement, either by directly informing the
81 target model or by saving it in a vector for later processing.
82 Return a preliminary estimate of the statement's cost. */
83
84 unsigned
85 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
86 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
87 int misalign, enum vect_cost_model_location where)
88 {
89 if (body_cost_vec)
90 {
91 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
92 stmt_info_for_cost si = { count, kind,
93 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
94 misalign };
95 body_cost_vec->safe_push (si);
96 return (unsigned)
97 (builtin_vectorization_cost (kind, vectype, misalign) * count);
98 }
99 else
100 return add_stmt_cost (stmt_info->vinfo->target_cost_data,
101 count, kind, stmt_info, misalign, where);
102 }
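/* Illustrative sketch (not taken from the original sources): during
   analysis a caller typically collects costs in a local vector and only
   receives a rough estimate back, e.g.

     stmt_vector_for_cost body_costs;
     body_costs.create (0);
     unsigned est = record_stmt_cost (&body_costs, 1, vector_stmt,
                                      stmt_info, 0, vect_body);

   whereas passing a NULL vector feeds the cost straight into the target's
   accumulated cost data through add_stmt_cost.  Exact call sites differ;
   this only shows the two modes described above.  */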
103
104 /* Return a variable of type ELEM_TYPE[NELEMS]. */
105
106 static tree
107 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
108 {
109 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
110 "vect_array");
111 }
112
113 /* ARRAY is an array of vectors created by create_vector_array.
114    Return an SSA_NAME for the vector at index N.  The reference
115 is part of the vectorization of STMT and the vector is associated
116 with scalar destination SCALAR_DEST. */
117
118 static tree
119 read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
120 tree array, unsigned HOST_WIDE_INT n)
121 {
122 tree vect_type, vect, vect_name, array_ref;
123 gimple *new_stmt;
124
125 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
126 vect_type = TREE_TYPE (TREE_TYPE (array));
127 vect = vect_create_destination_var (scalar_dest, vect_type);
128 array_ref = build4 (ARRAY_REF, vect_type, array,
129 build_int_cst (size_type_node, n),
130 NULL_TREE, NULL_TREE);
131
132 new_stmt = gimple_build_assign (vect, array_ref);
133 vect_name = make_ssa_name (vect, new_stmt);
134 gimple_assign_set_lhs (new_stmt, vect_name);
135 vect_finish_stmt_generation (stmt, new_stmt, gsi);
136
137 return vect_name;
138 }
139
140 /* ARRAY is an array of vectors created by create_vector_array.
141 Emit code to store SSA_NAME VECT in index N of the array.
142 The store is part of the vectorization of STMT. */
143
144 static void
145 write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
146 tree array, unsigned HOST_WIDE_INT n)
147 {
148 tree array_ref;
149 gimple *new_stmt;
150
151 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
152 build_int_cst (size_type_node, n),
153 NULL_TREE, NULL_TREE);
154
155 new_stmt = gimple_build_assign (array_ref, vect);
156 vect_finish_stmt_generation (stmt, new_stmt, gsi);
157 }
158
159 /* PTR is a pointer to an array of type TYPE. Return a representation
160 of *PTR. The memory reference replaces those in FIRST_DR
161 (and its group). */
162
163 static tree
164 create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
165 {
166 tree mem_ref, alias_ptr_type;
167
168 alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
169 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
170 /* Arrays have the same alignment as their type. */
171 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
172 return mem_ref;
173 }
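/* Taken together, the helpers above support the array-of-vectors style of
   grouped access (e.g. load/store-lanes).  A rough sketch, not a verbatim
   excerpt from a caller:

     tree array = create_vector_array (vectype, vec_num);
     tree aref = create_array_ref (TREE_TYPE (array), dataref_ptr, first_dr);
     write_vector_array (stmt, gsi, vec_oprnd, array, i);
     tree vec = read_vector_array (stmt, gsi, scalar_dest, array, i);

   i.e. a temporary array variable holds the vectors, individual elements
   are accessed through ARRAY_REFs, and the array as a whole is read or
   written through the MEM_REF built by create_array_ref.  */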
174
175 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
176
177 /* Function vect_mark_relevant.
178
179 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
180
181 static void
182 vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
183 enum vect_relevant relevant, bool live_p,
184 bool used_in_pattern)
185 {
186 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
187 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
188 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
189 gimple *pattern_stmt;
190
191 if (dump_enabled_p ())
192 dump_printf_loc (MSG_NOTE, vect_location,
193 "mark relevant %d, live %d.\n", relevant, live_p);
194
195 /* If this stmt is an original stmt in a pattern, we might need to mark its
196 related pattern stmt instead of the original stmt. However, such stmts
197    may have their own uses that are not in any pattern; in such cases the
198 stmt itself should be marked. */
199 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
200 {
201 bool found = false;
202 if (!used_in_pattern)
203 {
204 imm_use_iterator imm_iter;
205 use_operand_p use_p;
206 gimple *use_stmt;
207 tree lhs;
208 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
209 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
210
211 if (is_gimple_assign (stmt))
212 lhs = gimple_assign_lhs (stmt);
213 else
214 lhs = gimple_call_lhs (stmt);
215
216           /* This use is not a pattern use.  If LHS has other uses that are
217 pattern uses, we should mark the stmt itself, and not the pattern
218 stmt. */
219 if (lhs && TREE_CODE (lhs) == SSA_NAME)
220 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
221 {
222 if (is_gimple_debug (USE_STMT (use_p)))
223 continue;
224 use_stmt = USE_STMT (use_p);
225
226 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
227 continue;
228
229 if (vinfo_for_stmt (use_stmt)
230 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
231 {
232 found = true;
233 break;
234 }
235 }
236 }
237
238 if (!found)
239 {
240 /* This is the last stmt in a sequence that was detected as a
241 pattern that can potentially be vectorized. Don't mark the stmt
242 as relevant/live because it's not going to be vectorized.
243 Instead mark the pattern-stmt that replaces it. */
244
245 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
246
247 if (dump_enabled_p ())
248 dump_printf_loc (MSG_NOTE, vect_location,
249 "last stmt in pattern. don't mark"
250 " relevant/live.\n");
251 stmt_info = vinfo_for_stmt (pattern_stmt);
252 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
253 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
254 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
255 stmt = pattern_stmt;
256 }
257 }
258
259 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
260 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
261 STMT_VINFO_RELEVANT (stmt_info) = relevant;
262
263 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
264 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
265 {
266 if (dump_enabled_p ())
267 dump_printf_loc (MSG_NOTE, vect_location,
268 "already marked relevant/live.\n");
269 return;
270 }
271
272 worklist->safe_push (stmt);
273 }
274
275
276 /* Function vect_stmt_relevant_p.
277
278    Return true if STMT, in the loop represented by LOOP_VINFO, is
279 "relevant for vectorization".
280
281 A stmt is considered "relevant for vectorization" if:
282 - it has uses outside the loop.
283 - it has vdefs (it alters memory).
284    - it is a control stmt in the loop (except for the exit condition).
285
286 CHECKME: what other side effects would the vectorizer allow? */
287
288 static bool
289 vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
290 enum vect_relevant *relevant, bool *live_p)
291 {
292 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
293 ssa_op_iter op_iter;
294 imm_use_iterator imm_iter;
295 use_operand_p use_p;
296 def_operand_p def_p;
297
298 *relevant = vect_unused_in_scope;
299 *live_p = false;
300
301 /* cond stmt other than loop exit cond. */
302 if (is_ctrl_stmt (stmt)
303 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
304 != loop_exit_ctrl_vec_info_type)
305 *relevant = vect_used_in_scope;
306
307 /* changing memory. */
308 if (gimple_code (stmt) != GIMPLE_PHI)
309 if (gimple_vdef (stmt)
310 && !gimple_clobber_p (stmt))
311 {
312 if (dump_enabled_p ())
313 dump_printf_loc (MSG_NOTE, vect_location,
314 "vec_stmt_relevant_p: stmt has vdefs.\n");
315 *relevant = vect_used_in_scope;
316 }
317
318 /* uses outside the loop. */
319 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
320 {
321 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
322 {
323 basic_block bb = gimple_bb (USE_STMT (use_p));
324 if (!flow_bb_inside_loop_p (loop, bb))
325 {
326 if (dump_enabled_p ())
327 dump_printf_loc (MSG_NOTE, vect_location,
328 "vec_stmt_relevant_p: used out of loop.\n");
329
330 if (is_gimple_debug (USE_STMT (use_p)))
331 continue;
332
333 /* We expect all such uses to be in the loop exit phis
334                  (because of loop-closed SSA form).  */
335 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
336 gcc_assert (bb == single_exit (loop)->dest);
337
338 *live_p = true;
339 }
340 }
341 }
342
343 return (*live_p || *relevant);
344 }
345
346
347 /* Function exist_non_indexing_operands_for_use_p
348
349 USE is one of the uses attached to STMT. Check if USE is
350 used in STMT for anything other than indexing an array. */
351
352 static bool
353 exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
354 {
355 tree operand;
356 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
357
358 /* USE corresponds to some operand in STMT. If there is no data
359 reference in STMT, then any operand that corresponds to USE
360 is not indexing an array. */
361 if (!STMT_VINFO_DATA_REF (stmt_info))
362 return true;
363
364   /* STMT has a data_ref.  FORNOW this means that it's one of
365 the following forms:
366 -1- ARRAY_REF = var
367 -2- var = ARRAY_REF
368 (This should have been verified in analyze_data_refs).
369
370 'var' in the second case corresponds to a def, not a use,
371 so USE cannot correspond to any operands that are not used
372 for array indexing.
373
374 Therefore, all we need to check is if STMT falls into the
375 first case, and whether var corresponds to USE. */
376
377 if (!gimple_assign_copy_p (stmt))
378 {
379 if (is_gimple_call (stmt)
380 && gimple_call_internal_p (stmt))
381 switch (gimple_call_internal_fn (stmt))
382 {
383 case IFN_MASK_STORE:
384 operand = gimple_call_arg (stmt, 3);
385 if (operand == use)
386 return true;
387 /* FALLTHRU */
388 case IFN_MASK_LOAD:
389 operand = gimple_call_arg (stmt, 2);
390 if (operand == use)
391 return true;
392 break;
393 default:
394 break;
395 }
396 return false;
397 }
398
399 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
400 return false;
401 operand = gimple_assign_rhs1 (stmt);
402 if (TREE_CODE (operand) != SSA_NAME)
403 return false;
404
405 if (operand == use)
406 return true;
407
408 return false;
409 }
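/* For example (illustrative GIMPLE): given the store "a[i_7] = x_3", this
   function returns true for USE == x_3, the stored value, and false for
   USE == i_7, which only feeds the address computation.  process_use below
   relies on this to leave the definitions of such index operands
   unmarked.  */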
410
411
412 /*
413 Function process_use.
414
415 Inputs:
416 - a USE in STMT in a loop represented by LOOP_VINFO
417 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
418 that defined USE. This is done by calling mark_relevant and passing it
419 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
420 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
421 be performed.
422
423 Outputs:
424 Generally, LIVE_P and RELEVANT are used to define the liveness and
425 relevance info of the DEF_STMT of this USE:
426 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
427 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
428 Exceptions:
429 - case 1: If USE is used only for address computations (e.g. array indexing),
430 which does not need to be directly vectorized, then the liveness/relevance
431 of the respective DEF_STMT is left unchanged.
432 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
433      skip DEF_STMT because it has already been processed.
434 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
435 be modified accordingly.
436
437 Return true if everything is as expected. Return false otherwise. */
438
439 static bool
440 process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
441 enum vect_relevant relevant, vec<gimple *> *worklist,
442 bool force)
443 {
444 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
445 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
446 stmt_vec_info dstmt_vinfo;
447 basic_block bb, def_bb;
448 gimple *def_stmt;
449 enum vect_def_type dt;
450
451 /* case 1: we are only interested in uses that need to be vectorized. Uses
452 that are used for address computation are not considered relevant. */
453 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
454 return true;
455
456 if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
457 {
458 if (dump_enabled_p ())
459 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
460 "not vectorized: unsupported use in stmt.\n");
461 return false;
462 }
463
464 if (!def_stmt || gimple_nop_p (def_stmt))
465 return true;
466
467 def_bb = gimple_bb (def_stmt);
468 if (!flow_bb_inside_loop_p (loop, def_bb))
469 {
470 if (dump_enabled_p ())
471 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
472 return true;
473 }
474
475 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
476 DEF_STMT must have already been processed, because this should be the
477 only way that STMT, which is a reduction-phi, was put in the worklist,
478 as there should be no other uses for DEF_STMT in the loop. So we just
479 check that everything is as expected, and we are done. */
480 dstmt_vinfo = vinfo_for_stmt (def_stmt);
481 bb = gimple_bb (stmt);
482 if (gimple_code (stmt) == GIMPLE_PHI
483 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
484 && gimple_code (def_stmt) != GIMPLE_PHI
485 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
486 && bb->loop_father == def_bb->loop_father)
487 {
488 if (dump_enabled_p ())
489 dump_printf_loc (MSG_NOTE, vect_location,
490 "reduc-stmt defining reduc-phi in the same nest.\n");
491 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
492 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
493 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
494 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
495 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
496 return true;
497 }
498
499 /* case 3a: outer-loop stmt defining an inner-loop stmt:
500 outer-loop-header-bb:
501 d = def_stmt
502 inner-loop:
503 stmt # use (d)
504 outer-loop-tail-bb:
505 ... */
506 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
507 {
508 if (dump_enabled_p ())
509 dump_printf_loc (MSG_NOTE, vect_location,
510 "outer-loop def-stmt defining inner-loop stmt.\n");
511
512 switch (relevant)
513 {
514 case vect_unused_in_scope:
515 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
516 vect_used_in_scope : vect_unused_in_scope;
517 break;
518
519 case vect_used_in_outer_by_reduction:
520 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
521 relevant = vect_used_by_reduction;
522 break;
523
524 case vect_used_in_outer:
525 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
526 relevant = vect_used_in_scope;
527 break;
528
529 case vect_used_in_scope:
530 break;
531
532 default:
533 gcc_unreachable ();
534 }
535 }
536
537 /* case 3b: inner-loop stmt defining an outer-loop stmt:
538 outer-loop-header-bb:
539 ...
540 inner-loop:
541 d = def_stmt
542 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
543 stmt # use (d) */
544 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
545 {
546 if (dump_enabled_p ())
547 dump_printf_loc (MSG_NOTE, vect_location,
548 "inner-loop def-stmt defining outer-loop stmt.\n");
549
550 switch (relevant)
551 {
552 case vect_unused_in_scope:
553 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
554 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
555 vect_used_in_outer_by_reduction : vect_unused_in_scope;
556 break;
557
558 case vect_used_by_reduction:
559 relevant = vect_used_in_outer_by_reduction;
560 break;
561
562 case vect_used_in_scope:
563 relevant = vect_used_in_outer;
564 break;
565
566 default:
567 gcc_unreachable ();
568 }
569 }
570
571 vect_mark_relevant (worklist, def_stmt, relevant, live_p,
572 is_pattern_stmt_p (stmt_vinfo));
573 return true;
574 }
575
576
577 /* Function vect_mark_stmts_to_be_vectorized.
578
579 Not all stmts in the loop need to be vectorized. For example:
580
581 for i...
582 for j...
583 1. T0 = i + j
584 2. T1 = a[T0]
585
586 3. j = j + 1
587
588    Stmts 1 and 3 do not need to be vectorized, because loop control and
589 addressing of vectorized data-refs are handled differently.
590
591 This pass detects such stmts. */
592
593 bool
594 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
595 {
596 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
597 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
598 unsigned int nbbs = loop->num_nodes;
599 gimple_stmt_iterator si;
600 gimple *stmt;
601 unsigned int i;
602 stmt_vec_info stmt_vinfo;
603 basic_block bb;
604 gimple *phi;
605 bool live_p;
606 enum vect_relevant relevant, tmp_relevant;
607 enum vect_def_type def_type;
608
609 if (dump_enabled_p ())
610 dump_printf_loc (MSG_NOTE, vect_location,
611 "=== vect_mark_stmts_to_be_vectorized ===\n");
612
613 auto_vec<gimple *, 64> worklist;
614
615 /* 1. Init worklist. */
616 for (i = 0; i < nbbs; i++)
617 {
618 bb = bbs[i];
619 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
620 {
621 phi = gsi_stmt (si);
622 if (dump_enabled_p ())
623 {
624 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
625 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
626 }
627
628 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
629 vect_mark_relevant (&worklist, phi, relevant, live_p, false);
630 }
631 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
632 {
633 stmt = gsi_stmt (si);
634 if (dump_enabled_p ())
635 {
636 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
637 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
638 }
639
640 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
641 vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
642 }
643 }
644
645 /* 2. Process_worklist */
646 while (worklist.length () > 0)
647 {
648 use_operand_p use_p;
649 ssa_op_iter iter;
650
651 stmt = worklist.pop ();
652 if (dump_enabled_p ())
653 {
654 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
655 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
656 }
657
658 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
659 (DEF_STMT) as relevant/irrelevant and live/dead according to the
660 liveness and relevance properties of STMT. */
661 stmt_vinfo = vinfo_for_stmt (stmt);
662 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
663 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
664
665 /* Generally, the liveness and relevance properties of STMT are
666 propagated as is to the DEF_STMTs of its USEs:
667 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
668 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
669
670 One exception is when STMT has been identified as defining a reduction
671 variable; in this case we set the liveness/relevance as follows:
672 live_p = false
673 relevant = vect_used_by_reduction
674 This is because we distinguish between two kinds of relevant stmts -
675 those that are used by a reduction computation, and those that are
676 (also) used by a regular computation. This allows us later on to
677 identify stmts that are used solely by a reduction, and therefore the
678 order of the results that they produce does not have to be kept. */
679
680 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
681 tmp_relevant = relevant;
682 switch (def_type)
683 {
684 case vect_reduction_def:
685 switch (tmp_relevant)
686 {
687 case vect_unused_in_scope:
688 relevant = vect_used_by_reduction;
689 break;
690
691 case vect_used_by_reduction:
692 if (gimple_code (stmt) == GIMPLE_PHI)
693 break;
694 /* fall through */
695
696 default:
697 if (dump_enabled_p ())
698 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
699 "unsupported use of reduction.\n");
700 return false;
701 }
702
703 live_p = false;
704 break;
705
706 case vect_nested_cycle:
707 if (tmp_relevant != vect_unused_in_scope
708 && tmp_relevant != vect_used_in_outer_by_reduction
709 && tmp_relevant != vect_used_in_outer)
710 {
711 if (dump_enabled_p ())
712 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
713 "unsupported use of nested cycle.\n");
714
715 return false;
716 }
717
718 live_p = false;
719 break;
720
721 case vect_double_reduction_def:
722 if (tmp_relevant != vect_unused_in_scope
723 && tmp_relevant != vect_used_by_reduction)
724 {
725 if (dump_enabled_p ())
726 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
727 "unsupported use of double reduction.\n");
728
729 return false;
730 }
731
732 live_p = false;
733 break;
734
735 default:
736 break;
737 }
738
739 if (is_pattern_stmt_p (stmt_vinfo))
740 {
741 /* Pattern statements are not inserted into the code, so
742 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
743 have to scan the RHS or function arguments instead. */
744 if (is_gimple_assign (stmt))
745 {
746 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
747 tree op = gimple_assign_rhs1 (stmt);
748
749 i = 1;
750 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
751 {
752 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
753 live_p, relevant, &worklist, false)
754 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
755 live_p, relevant, &worklist, false))
756 return false;
757 i = 2;
758 }
759 for (; i < gimple_num_ops (stmt); i++)
760 {
761 op = gimple_op (stmt, i);
762 if (TREE_CODE (op) == SSA_NAME
763 && !process_use (stmt, op, loop_vinfo, live_p, relevant,
764 &worklist, false))
765 return false;
766 }
767 }
768 else if (is_gimple_call (stmt))
769 {
770 for (i = 0; i < gimple_call_num_args (stmt); i++)
771 {
772 tree arg = gimple_call_arg (stmt, i);
773 if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
774 &worklist, false))
775 return false;
776 }
777 }
778 }
779 else
780 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
781 {
782 tree op = USE_FROM_PTR (use_p);
783 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
784 &worklist, false))
785 return false;
786 }
787
788 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
789 {
790 tree off;
791 tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
792 gcc_assert (decl);
793 if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
794 &worklist, true))
795 return false;
796 }
797 } /* while worklist */
798
799 return true;
800 }
801
802
803 /* Function vect_model_simple_cost.
804
805 Models cost for simple operations, i.e. those that only emit ncopies of a
806 single op. Right now, this does not account for multiple insns that could
807 be generated for the single vector op. We will handle that shortly. */
808
809 void
810 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
811 enum vect_def_type *dt,
812 stmt_vector_for_cost *prologue_cost_vec,
813 stmt_vector_for_cost *body_cost_vec)
814 {
815 int i;
816 int inside_cost = 0, prologue_cost = 0;
817
818 /* The SLP costs were already calculated during SLP tree build. */
819 if (PURE_SLP_STMT (stmt_info))
820 return;
821
822   /* FORNOW: Assuming maximum 2 args per stmt.  */
823 for (i = 0; i < 2; i++)
824 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
825 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
826 stmt_info, 0, vect_prologue);
827
828 /* Pass the inside-of-loop statements to the target-specific cost model. */
829 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
830 stmt_info, 0, vect_body);
831
832 if (dump_enabled_p ())
833 dump_printf_loc (MSG_NOTE, vect_location,
834 "vect_model_simple_cost: inside_cost = %d, "
835 "prologue_cost = %d .\n", inside_cost, prologue_cost);
836 }
837
838
839 /* Model cost for type demotion and promotion operations. PWR is normally
840 zero for single-step promotions and demotions. It will be one if
841 two-step promotion/demotion is required, and so on. Each additional
842 step doubles the number of instructions required. */
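/* For instance (illustrative arithmetic, assuming vect_pow2 (n) yields
   2**n): a two-step promotion (PWR = 1) is charged
   vect_pow2 (1) + vect_pow2 (2) = 2 + 4 = 6 vec_promote_demote operations
   per scalar stmt by the loop below, whereas the corresponding two-step
   demotion is charged vect_pow2 (0) + vect_pow2 (1) = 1 + 2 = 3.  */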
843
844 static void
845 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
846 enum vect_def_type *dt, int pwr)
847 {
848 int i, tmp;
849 int inside_cost = 0, prologue_cost = 0;
850 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
851 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
852 void *target_cost_data;
853
854 /* The SLP costs were already calculated during SLP tree build. */
855 if (PURE_SLP_STMT (stmt_info))
856 return;
857
858 if (loop_vinfo)
859 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
860 else
861 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
862
863 for (i = 0; i < pwr + 1; i++)
864 {
865 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
866 (i + 1) : i;
867 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
868 vec_promote_demote, stmt_info, 0,
869 vect_body);
870 }
871
872   /* FORNOW: Assuming maximum 2 args per stmt.  */
873 for (i = 0; i < 2; i++)
874 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
875 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
876 stmt_info, 0, vect_prologue);
877
878 if (dump_enabled_p ())
879 dump_printf_loc (MSG_NOTE, vect_location,
880 "vect_model_promotion_demotion_cost: inside_cost = %d, "
881 "prologue_cost = %d .\n", inside_cost, prologue_cost);
882 }
883
884 /* Function vect_cost_group_size
885
886 For grouped load or store, return the group_size only if it is the first
887 load or store of a group, else return 1. This ensures that group size is
888 only returned once per group. */
889
890 static int
891 vect_cost_group_size (stmt_vec_info stmt_info)
892 {
893 gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
894
895 if (first_stmt == STMT_VINFO_STMT (stmt_info))
896 return GROUP_SIZE (stmt_info);
897
898 return 1;
899 }
900
901
902 /* Function vect_model_store_cost
903
904 Models cost for stores. In the case of grouped accesses, one access
905 has the overhead of the grouped access attributed to it. */
906
907 void
908 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
909 bool store_lanes_p, enum vect_def_type dt,
910 slp_tree slp_node,
911 stmt_vector_for_cost *prologue_cost_vec,
912 stmt_vector_for_cost *body_cost_vec)
913 {
914 int group_size;
915 unsigned int inside_cost = 0, prologue_cost = 0;
916 struct data_reference *first_dr;
917 gimple *first_stmt;
918
919 if (dt == vect_constant_def || dt == vect_external_def)
920 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
921 stmt_info, 0, vect_prologue);
922
923 /* Grouped access? */
924 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
925 {
926 if (slp_node)
927 {
928 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
929 group_size = 1;
930 }
931 else
932 {
933 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
934 group_size = vect_cost_group_size (stmt_info);
935 }
936
937 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
938 }
939 /* Not a grouped access. */
940 else
941 {
942 group_size = 1;
943 first_dr = STMT_VINFO_DATA_REF (stmt_info);
944 }
945
946 /* We assume that the cost of a single store-lanes instruction is
947 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
948 access is instead being provided by a permute-and-store operation,
949 include the cost of the permutes. */
950 if (!store_lanes_p && group_size > 1
951 && !STMT_VINFO_STRIDED_P (stmt_info))
952 {
953       /* Uses high and low interleave or shuffle operations for each
954 needed permute. */
955 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
956 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
957 stmt_info, 0, vect_body);
958
959 if (dump_enabled_p ())
960 dump_printf_loc (MSG_NOTE, vect_location,
961 "vect_model_store_cost: strided group_size = %d .\n",
962 group_size);
963 }
964
965 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
966 /* Costs of the stores. */
967 if (STMT_VINFO_STRIDED_P (stmt_info)
968 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
969 {
970 /* N scalar stores plus extracting the elements. */
971 inside_cost += record_stmt_cost (body_cost_vec,
972 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
973 scalar_store, stmt_info, 0, vect_body);
974 }
975 else
976 vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
977
978 if (STMT_VINFO_STRIDED_P (stmt_info))
979 inside_cost += record_stmt_cost (body_cost_vec,
980 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
981 vec_to_scalar, stmt_info, 0, vect_body);
982
983 if (dump_enabled_p ())
984 dump_printf_loc (MSG_NOTE, vect_location,
985 "vect_model_store_cost: inside_cost = %d, "
986 "prologue_cost = %d .\n", inside_cost, prologue_cost);
987 }
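/* A worked example of the permute accounting above (numbers for
   illustration only): for a grouped store with GROUP_SIZE = 4 that does
   not use store-lanes, each copy needs ceil_log2 (4) * 4 = 8 vec_perm
   operations, so with NCOPIES = 2 the body cost includes 16 permutes in
   addition to the stores themselves.  */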
988
989
990 /* Calculate cost of DR's memory access. */
991 void
992 vect_get_store_cost (struct data_reference *dr, int ncopies,
993 unsigned int *inside_cost,
994 stmt_vector_for_cost *body_cost_vec)
995 {
996 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
997 gimple *stmt = DR_STMT (dr);
998 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
999
1000 switch (alignment_support_scheme)
1001 {
1002 case dr_aligned:
1003 {
1004 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1005 vector_store, stmt_info, 0,
1006 vect_body);
1007
1008 if (dump_enabled_p ())
1009 dump_printf_loc (MSG_NOTE, vect_location,
1010 "vect_model_store_cost: aligned.\n");
1011 break;
1012 }
1013
1014 case dr_unaligned_supported:
1015 {
1016 /* Here, we assign an additional cost for the unaligned store. */
1017 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1018 unaligned_store, stmt_info,
1019 DR_MISALIGNMENT (dr), vect_body);
1020 if (dump_enabled_p ())
1021 dump_printf_loc (MSG_NOTE, vect_location,
1022 "vect_model_store_cost: unaligned supported by "
1023 "hardware.\n");
1024 break;
1025 }
1026
1027 case dr_unaligned_unsupported:
1028 {
1029 *inside_cost = VECT_MAX_COST;
1030
1031 if (dump_enabled_p ())
1032 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1033 "vect_model_store_cost: unsupported access.\n");
1034 break;
1035 }
1036
1037 default:
1038 gcc_unreachable ();
1039 }
1040 }
1041
1042
1043 /* Function vect_model_load_cost
1044
1045 Models cost for loads. In the case of grouped accesses, the last access
1046 has the overhead of the grouped access attributed to it. Since unaligned
1047 accesses are supported for loads, we also account for the costs of the
1048 access scheme chosen. */
1049
1050 void
1051 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1052 bool load_lanes_p, slp_tree slp_node,
1053 stmt_vector_for_cost *prologue_cost_vec,
1054 stmt_vector_for_cost *body_cost_vec)
1055 {
1056 int group_size;
1057 gimple *first_stmt;
1058 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
1059 unsigned int inside_cost = 0, prologue_cost = 0;
1060
1061 /* Grouped accesses? */
1062 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1063 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
1064 {
1065 group_size = vect_cost_group_size (stmt_info);
1066 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1067 }
1068 /* Not a grouped access. */
1069 else
1070 {
1071 group_size = 1;
1072 first_dr = dr;
1073 }
1074
1075 /* We assume that the cost of a single load-lanes instruction is
1076 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1077 access is instead being provided by a load-and-permute operation,
1078 include the cost of the permutes. */
1079 if (!load_lanes_p && group_size > 1
1080 && !STMT_VINFO_STRIDED_P (stmt_info))
1081 {
1082       /* Uses even and odd extract operations or shuffle operations
1083 for each needed permute. */
1084 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1085 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1086 stmt_info, 0, vect_body);
1087
1088 if (dump_enabled_p ())
1089 dump_printf_loc (MSG_NOTE, vect_location,
1090 "vect_model_load_cost: strided group_size = %d .\n",
1091 group_size);
1092 }
1093
1094 /* The loads themselves. */
1095 if (STMT_VINFO_STRIDED_P (stmt_info)
1096 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1097 {
1098 /* N scalar loads plus gathering them into a vector. */
1099 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1100 inside_cost += record_stmt_cost (body_cost_vec,
1101 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1102 scalar_load, stmt_info, 0, vect_body);
1103 }
1104 else
1105 vect_get_load_cost (first_dr, ncopies,
1106 ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1107 || group_size > 1 || slp_node),
1108 &inside_cost, &prologue_cost,
1109 prologue_cost_vec, body_cost_vec, true);
1110 if (STMT_VINFO_STRIDED_P (stmt_info))
1111 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1112 stmt_info, 0, vect_body);
1113
1114 if (dump_enabled_p ())
1115 dump_printf_loc (MSG_NOTE, vect_location,
1116 "vect_model_load_cost: inside_cost = %d, "
1117 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1118 }
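/* A worked example of the strided, non-grouped path above (illustrative
   only): for a V4SF vectype, each copy is costed as
   TYPE_VECTOR_SUBPARTS = 4 scalar_load operations plus one vec_construct
   to assemble the vector, so NCOPIES = 2 yields 8 scalar loads and 2
   vector constructions.  */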
1119
1120
1121 /* Calculate cost of DR's memory access. */
1122 void
1123 vect_get_load_cost (struct data_reference *dr, int ncopies,
1124 bool add_realign_cost, unsigned int *inside_cost,
1125 unsigned int *prologue_cost,
1126 stmt_vector_for_cost *prologue_cost_vec,
1127 stmt_vector_for_cost *body_cost_vec,
1128 bool record_prologue_costs)
1129 {
1130 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1131 gimple *stmt = DR_STMT (dr);
1132 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1133
1134 switch (alignment_support_scheme)
1135 {
1136 case dr_aligned:
1137 {
1138 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1139 stmt_info, 0, vect_body);
1140
1141 if (dump_enabled_p ())
1142 dump_printf_loc (MSG_NOTE, vect_location,
1143 "vect_model_load_cost: aligned.\n");
1144
1145 break;
1146 }
1147 case dr_unaligned_supported:
1148 {
1149 /* Here, we assign an additional cost for the unaligned load. */
1150 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1151 unaligned_load, stmt_info,
1152 DR_MISALIGNMENT (dr), vect_body);
1153
1154 if (dump_enabled_p ())
1155 dump_printf_loc (MSG_NOTE, vect_location,
1156 "vect_model_load_cost: unaligned supported by "
1157 "hardware.\n");
1158
1159 break;
1160 }
1161 case dr_explicit_realign:
1162 {
1163 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1164 vector_load, stmt_info, 0, vect_body);
1165 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1166 vec_perm, stmt_info, 0, vect_body);
1167
1168 /* FIXME: If the misalignment remains fixed across the iterations of
1169 the containing loop, the following cost should be added to the
1170 prologue costs. */
1171 if (targetm.vectorize.builtin_mask_for_load)
1172 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1173 stmt_info, 0, vect_body);
1174
1175 if (dump_enabled_p ())
1176 dump_printf_loc (MSG_NOTE, vect_location,
1177 "vect_model_load_cost: explicit realign\n");
1178
1179 break;
1180 }
1181 case dr_explicit_realign_optimized:
1182 {
1183 if (dump_enabled_p ())
1184 dump_printf_loc (MSG_NOTE, vect_location,
1185 "vect_model_load_cost: unaligned software "
1186 "pipelined.\n");
1187
1188 /* Unaligned software pipeline has a load of an address, an initial
1189 load, and possibly a mask operation to "prime" the loop. However,
1190 if this is an access in a group of loads, which provide grouped
1191 access, then the above cost should only be considered for one
1192 access in the group. Inside the loop, there is a load op
1193 and a realignment op. */
1194
1195 if (add_realign_cost && record_prologue_costs)
1196 {
1197 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1198 vector_stmt, stmt_info,
1199 0, vect_prologue);
1200 if (targetm.vectorize.builtin_mask_for_load)
1201 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1202 vector_stmt, stmt_info,
1203 0, vect_prologue);
1204 }
1205
1206 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1207 stmt_info, 0, vect_body);
1208 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1209 stmt_info, 0, vect_body);
1210
1211 if (dump_enabled_p ())
1212 dump_printf_loc (MSG_NOTE, vect_location,
1213 "vect_model_load_cost: explicit realign optimized"
1214 "\n");
1215
1216 break;
1217 }
1218
1219 case dr_unaligned_unsupported:
1220 {
1221 *inside_cost = VECT_MAX_COST;
1222
1223 if (dump_enabled_p ())
1224 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1225 "vect_model_load_cost: unsupported access.\n");
1226 break;
1227 }
1228
1229 default:
1230 gcc_unreachable ();
1231 }
1232 }
1233
1234 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1235 the loop preheader for the vectorized stmt STMT. */
1236
1237 static void
1238 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1239 {
1240 if (gsi)
1241 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1242 else
1243 {
1244 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1245 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1246
1247 if (loop_vinfo)
1248 {
1249 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1250 basic_block new_bb;
1251 edge pe;
1252
1253 if (nested_in_vect_loop_p (loop, stmt))
1254 loop = loop->inner;
1255
1256 pe = loop_preheader_edge (loop);
1257 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1258 gcc_assert (!new_bb);
1259 }
1260 else
1261 {
1262 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1263 basic_block bb;
1264 gimple_stmt_iterator gsi_bb_start;
1265
1266 gcc_assert (bb_vinfo);
1267 bb = BB_VINFO_BB (bb_vinfo);
1268 gsi_bb_start = gsi_after_labels (bb);
1269 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1270 }
1271 }
1272
1273 if (dump_enabled_p ())
1274 {
1275 dump_printf_loc (MSG_NOTE, vect_location,
1276 "created new init_stmt: ");
1277 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1278 }
1279 }
1280
1281 /* Function vect_init_vector.
1282
1283 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1284 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1285    vector type, a vector with all elements equal to VAL is created first.
1286    Place the initialization at GSI if it is not NULL.  Otherwise, place the
1287 initialization at the loop preheader.
1288 Return the DEF of INIT_STMT.
1289 It will be used in the vectorization of STMT. */
1290
1291 tree
1292 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1293 {
1294 gimple *init_stmt;
1295 tree new_temp;
1296
1297 if (TREE_CODE (type) == VECTOR_TYPE
1298 && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
1299 {
1300 if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1301 {
1302 if (CONSTANT_CLASS_P (val))
1303 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
1304 else
1305 {
1306 new_temp = make_ssa_name (TREE_TYPE (type));
1307 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1308 vect_init_vector_1 (stmt, init_stmt, gsi);
1309 val = new_temp;
1310 }
1311 }
1312 val = build_vector_from_val (type, val);
1313 }
1314
1315 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1316 init_stmt = gimple_build_assign (new_temp, val);
1317 vect_init_vector_1 (stmt, init_stmt, gsi);
1318 return new_temp;
1319 }
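/* Example of the effect (a sketch, not a transcript of real compiler
   output): vect_init_vector (stmt, build_int_cst (integer_type_node, 5),
   V4SI_type, NULL) emits something like

     cst_1 = { 5, 5, 5, 5 };

   on the loop preheader edge (because GSI is NULL) and returns the new
   SSA name cst_1, where "V4SI_type" stands for whatever vector type the
   caller obtained from get_vectype_for_scalar_type.  */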
1320
1321
1322 /* Function vect_get_vec_def_for_operand.
1323
1324 OP is an operand in STMT. This function returns a (vector) def that will be
1325 used in the vectorized stmt for STMT.
1326
1327 In the case that OP is an SSA_NAME which is defined in the loop, then
1328 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1329
1330 In case OP is an invariant or constant, a new stmt that creates a vector def
1331 needs to be introduced. */
1332
1333 tree
1334 vect_get_vec_def_for_operand (tree op, gimple *stmt)
1335 {
1336 tree vec_oprnd;
1337 gimple *vec_stmt;
1338 gimple *def_stmt;
1339 stmt_vec_info def_stmt_info = NULL;
1340 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1341 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1342 enum vect_def_type dt;
1343 bool is_simple_use;
1344 tree vector_type;
1345
1346 if (dump_enabled_p ())
1347 {
1348 dump_printf_loc (MSG_NOTE, vect_location,
1349 "vect_get_vec_def_for_operand: ");
1350 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1351 dump_printf (MSG_NOTE, "\n");
1352 }
1353
1354 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1355 gcc_assert (is_simple_use);
1356 if (dump_enabled_p ())
1357 {
1358 int loc_printed = 0;
1359 if (def_stmt)
1360 {
1361 if (loc_printed)
1362 dump_printf (MSG_NOTE, " def_stmt = ");
1363 else
1364 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1365 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1366 }
1367 }
1368
1369 switch (dt)
1370 {
1371 /* operand is a constant or a loop invariant. */
1372 case vect_constant_def:
1373 case vect_external_def:
1374 {
1375 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1376 gcc_assert (vector_type);
1377 return vect_init_vector (stmt, op, vector_type, NULL);
1378 }
1379
1380 /* operand is defined inside the loop. */
1381 case vect_internal_def:
1382 {
1383 /* Get the def from the vectorized stmt. */
1384 def_stmt_info = vinfo_for_stmt (def_stmt);
1385
1386 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1387 /* Get vectorized pattern statement. */
1388 if (!vec_stmt
1389 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1390 && !STMT_VINFO_RELEVANT (def_stmt_info))
1391 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1392 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1393 gcc_assert (vec_stmt);
1394 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1395 vec_oprnd = PHI_RESULT (vec_stmt);
1396 else if (is_gimple_call (vec_stmt))
1397 vec_oprnd = gimple_call_lhs (vec_stmt);
1398 else
1399 vec_oprnd = gimple_assign_lhs (vec_stmt);
1400 return vec_oprnd;
1401 }
1402
1403 /* operand is defined by a loop header phi - reduction */
1404 case vect_reduction_def:
1405 case vect_double_reduction_def:
1406 case vect_nested_cycle:
1407 /* Code should use get_initial_def_for_reduction. */
1408 gcc_unreachable ();
1409
1410 /* operand is defined by loop-header phi - induction. */
1411 case vect_induction_def:
1412 {
1413 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1414
1415 /* Get the def from the vectorized stmt. */
1416 def_stmt_info = vinfo_for_stmt (def_stmt);
1417 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1418 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1419 vec_oprnd = PHI_RESULT (vec_stmt);
1420 else
1421 vec_oprnd = gimple_get_lhs (vec_stmt);
1422 return vec_oprnd;
1423 }
1424
1425 default:
1426 gcc_unreachable ();
1427 }
1428 }
1429
1430
1431 /* Function vect_get_vec_def_for_stmt_copy
1432
1433 Return a vector-def for an operand. This function is used when the
1434 vectorized stmt to be created (by the caller to this function) is a "copy"
1435 created in case the vectorized result cannot fit in one vector, and several
1436 copies of the vector-stmt are required. In this case the vector-def is
1437 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1438 of the stmt that defines VEC_OPRND.
1439 DT is the type of the vector def VEC_OPRND.
1440
1441 Context:
1442 In case the vectorization factor (VF) is bigger than the number
1443 of elements that can fit in a vectype (nunits), we have to generate
1444 more than one vector stmt to vectorize the scalar stmt. This situation
1445 arises when there are multiple data-types operated upon in the loop; the
1446 smallest data-type determines the VF, and as a result, when vectorizing
1447 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1448 vector stmt (each computing a vector of 'nunits' results, and together
1449 computing 'VF' results in each iteration). This function is called when
1450 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1451 which VF=16 and nunits=4, so the number of copies required is 4):
1452
1453 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1454
1455 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1456 VS1.1: vx.1 = memref1 VS1.2
1457 VS1.2: vx.2 = memref2 VS1.3
1458 VS1.3: vx.3 = memref3
1459
1460 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1461 VSnew.1: vz1 = vx.1 + ... VSnew.2
1462 VSnew.2: vz2 = vx.2 + ... VSnew.3
1463 VSnew.3: vz3 = vx.3 + ...
1464
1465 The vectorization of S1 is explained in vectorizable_load.
1466 The vectorization of S2:
1467 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1468 the function 'vect_get_vec_def_for_operand' is called to
1469 get the relevant vector-def for each operand of S2. For operand x it
1470 returns the vector-def 'vx.0'.
1471
1472 To create the remaining copies of the vector-stmt (VSnew.j), this
1473 function is called to get the relevant vector-def for each operand. It is
1474 obtained from the respective VS1.j stmt, which is recorded in the
1475 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1476
1477 For example, to obtain the vector-def 'vx.1' in order to create the
1478 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1479    Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
1480 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1481 and return its def ('vx.1').
1482 Overall, to create the above sequence this function will be called 3 times:
1483 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1484 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1485 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1486
1487 tree
1488 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1489 {
1490 gimple *vec_stmt_for_operand;
1491 stmt_vec_info def_stmt_info;
1492
1493 /* Do nothing; can reuse same def. */
1494 if (dt == vect_external_def || dt == vect_constant_def )
1495 return vec_oprnd;
1496
1497 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1498 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1499 gcc_assert (def_stmt_info);
1500 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1501 gcc_assert (vec_stmt_for_operand);
1502 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1503 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1504 else
1505 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1506 return vec_oprnd;
1507 }
1508
1509
1510 /* Get vectorized definitions for the operands to create a copy of an original
1511 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1512
1513 static void
1514 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1515 vec<tree> *vec_oprnds0,
1516 vec<tree> *vec_oprnds1)
1517 {
1518 tree vec_oprnd = vec_oprnds0->pop ();
1519
1520 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1521 vec_oprnds0->quick_push (vec_oprnd);
1522
1523 if (vec_oprnds1 && vec_oprnds1->length ())
1524 {
1525 vec_oprnd = vec_oprnds1->pop ();
1526 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1527 vec_oprnds1->quick_push (vec_oprnd);
1528 }
1529 }
1530
1531
1532 /* Get vectorized definitions for OP0 and OP1.
1533    REDUC_INDEX is the index of the reduction operand in case of reduction,
1534 and -1 otherwise. */
1535
1536 void
1537 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1538 vec<tree> *vec_oprnds0,
1539 vec<tree> *vec_oprnds1,
1540 slp_tree slp_node, int reduc_index)
1541 {
1542 if (slp_node)
1543 {
1544 int nops = (op1 == NULL_TREE) ? 1 : 2;
1545 auto_vec<tree> ops (nops);
1546 auto_vec<vec<tree> > vec_defs (nops);
1547
1548 ops.quick_push (op0);
1549 if (op1)
1550 ops.quick_push (op1);
1551
1552 vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);
1553
1554 *vec_oprnds0 = vec_defs[0];
1555 if (op1)
1556 *vec_oprnds1 = vec_defs[1];
1557 }
1558 else
1559 {
1560 tree vec_oprnd;
1561
1562 vec_oprnds0->create (1);
1563 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1564 vec_oprnds0->quick_push (vec_oprnd);
1565
1566 if (op1)
1567 {
1568 vec_oprnds1->create (1);
1569 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1570 vec_oprnds1->quick_push (vec_oprnd);
1571 }
1572 }
1573 }
1574
1575
1576 /* Function vect_finish_stmt_generation.
1577
1578 Insert a new stmt. */
1579
1580 void
1581 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1582 gimple_stmt_iterator *gsi)
1583 {
1584 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1585 vec_info *vinfo = stmt_info->vinfo;
1586
1587 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1588
1589 if (!gsi_end_p (*gsi)
1590 && gimple_has_mem_ops (vec_stmt))
1591 {
1592 gimple *at_stmt = gsi_stmt (*gsi);
1593 tree vuse = gimple_vuse (at_stmt);
1594 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1595 {
1596 tree vdef = gimple_vdef (at_stmt);
1597 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1598 /* If we have an SSA vuse and insert a store, update virtual
1599 SSA form to avoid triggering the renamer. Do so only
1600 if we can easily see all uses - which is what almost always
1601 happens with the way vectorized stmts are inserted. */
1602 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1603 && ((is_gimple_assign (vec_stmt)
1604 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1605 || (is_gimple_call (vec_stmt)
1606 && !(gimple_call_flags (vec_stmt)
1607 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1608 {
1609 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1610 gimple_set_vdef (vec_stmt, new_vdef);
1611 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1612 }
1613 }
1614 }
1615 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1616
1617 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1618
1619 if (dump_enabled_p ())
1620 {
1621 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1622 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1623 }
1624
1625 gimple_set_location (vec_stmt, gimple_location (stmt));
1626
1627 /* While EH edges will generally prevent vectorization, stmt might
1628 e.g. be in a must-not-throw region. Ensure newly created stmts
1629 that could throw are part of the same region. */
1630 int lp_nr = lookup_stmt_eh_lp (stmt);
1631 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1632 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1633 }
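/* A sketch of the virtual operand handling above (SSA names purely
   illustrative): if the stmt at *GSI is a scalar store such as

     # .MEM_5 = VDEF <.MEM_4>
     a[i_7] = x_3;

   and VEC_STMT is a vector store inserted before it, VEC_STMT is given
   .MEM_4 as its vuse and a freshly copied virtual definition, and the
   scalar store's vuse is rewired to that new name, so virtual SSA form
   stays valid without invoking the renamer.  */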
1634
1635 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1636 a function declaration if the target has a vectorized version
1637 of the function, or NULL_TREE if the function cannot be vectorized. */
1638
1639 tree
1640 vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
1641 {
1642 tree fndecl = gimple_call_fndecl (call);
1643
1644 /* We only handle functions that do not read or clobber memory -- i.e.
1645 const or novops ones. */
1646 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1647 return NULL_TREE;
1648
1649 if (!fndecl
1650 || TREE_CODE (fndecl) != FUNCTION_DECL
1651 || !DECL_BUILT_IN (fndecl))
1652 return NULL_TREE;
1653
1654 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1655 vectype_in);
1656 }
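/* Illustrative use (the concrete builtins depend on the target and on
   flags): for a const call such as "y_2 = __builtin_sqrtf (x_1)" --
   const e.g. under -fno-math-errno -- with VECTYPE_OUT and VECTYPE_IN
   both V4SF, the hook targetm.vectorize.builtin_vectorized_function is
   asked for a four-lane single-precision square root and its answer
   (possibly NULL_TREE) is returned.  */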
1657
1658
1659 static tree permute_vec_elements (tree, tree, tree, gimple *,
1660 gimple_stmt_iterator *);
1661
1662
1663 /* Function vectorizable_mask_load_store.
1664
1665 Check if STMT performs a conditional load or store that can be vectorized.
1666 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1667 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1668 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1669
1670 static bool
1671 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
1672 gimple **vec_stmt, slp_tree slp_node)
1673 {
1674 tree vec_dest = NULL;
1675 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1676 stmt_vec_info prev_stmt_info;
1677 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1678 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1679 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
1680 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1681 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1682 tree elem_type;
1683 gimple *new_stmt;
1684 tree dummy;
1685 tree dataref_ptr = NULL_TREE;
1686 gimple *ptr_incr;
1687 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1688 int ncopies;
1689 int i, j;
1690 bool inv_p;
1691 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
1692 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
1693 int gather_scale = 1;
1694 enum vect_def_type gather_dt = vect_unknown_def_type;
1695 bool is_store;
1696 tree mask;
1697 gimple *def_stmt;
1698 enum vect_def_type dt;
1699
1700 if (slp_node != NULL)
1701 return false;
1702
1703 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1704 gcc_assert (ncopies >= 1);
1705
1706 is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
1707 mask = gimple_call_arg (stmt, 2);
1708 if (TYPE_PRECISION (TREE_TYPE (mask))
1709 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))))
1710 return false;
1711
1712 /* FORNOW. This restriction should be relaxed. */
1713 if (nested_in_vect_loop && ncopies > 1)
1714 {
1715 if (dump_enabled_p ())
1716 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1717 "multiple types in nested loop.");
1718 return false;
1719 }
1720
1721 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1722 return false;
1723
1724 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1725 return false;
1726
1727 if (!STMT_VINFO_DATA_REF (stmt_info))
1728 return false;
1729
1730 elem_type = TREE_TYPE (vectype);
1731
1732 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1733 return false;
1734
1735 if (STMT_VINFO_STRIDED_P (stmt_info))
1736 return false;
1737
1738 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1739 {
1740 gimple *def_stmt;
1741 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
1742 &gather_off, &gather_scale);
1743 gcc_assert (gather_decl);
1744 if (!vect_is_simple_use (gather_off, loop_vinfo, &def_stmt, &gather_dt,
1745 &gather_off_vectype))
1746 {
1747 if (dump_enabled_p ())
1748 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1749 "gather index use not simple.");
1750 return false;
1751 }
1752
1753 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1754 tree masktype
1755 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1756 if (TREE_CODE (masktype) == INTEGER_TYPE)
1757 {
1758 if (dump_enabled_p ())
1759 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1760 "masked gather with integer mask not supported.");
1761 return false;
1762 }
1763 }
1764 else if (tree_int_cst_compare (nested_in_vect_loop
1765 ? STMT_VINFO_DR_STEP (stmt_info)
1766 : DR_STEP (dr), size_zero_node) <= 0)
1767 return false;
1768 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1769 || !can_vec_mask_load_store_p (TYPE_MODE (vectype), !is_store))
1770 return false;
1771
1772 if (TREE_CODE (mask) != SSA_NAME)
1773 return false;
1774
1775 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt))
1776 return false;
1777
1778 if (is_store)
1779 {
1780 tree rhs = gimple_call_arg (stmt, 3);
1781 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt))
1782 return false;
1783 }
1784
1785 if (!vec_stmt) /* transformation not required. */
1786 {
1787 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1788 if (is_store)
1789 vect_model_store_cost (stmt_info, ncopies, false, dt,
1790 NULL, NULL, NULL);
1791 else
1792 vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
1793 return true;
1794 }
1795
1796 /** Transform. **/
1797
1798 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1799 {
1800 tree vec_oprnd0 = NULL_TREE, op;
1801 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1802 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
1803 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
1804 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
1805 tree mask_perm_mask = NULL_TREE;
1806 edge pe = loop_preheader_edge (loop);
1807 gimple_seq seq;
1808 basic_block new_bb;
1809 enum { NARROW, NONE, WIDEN } modifier;
1810 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
1811
1812 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
1813 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1814 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1815 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1816 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1817 scaletype = TREE_VALUE (arglist);
1818 gcc_checking_assert (types_compatible_p (srctype, rettype)
1819 && types_compatible_p (srctype, masktype));
1820
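      /* The gather builtin's offset vector may hold a different number of
         elements than the data vector.  If it holds twice as many, one
         offset vector serves two data vectors and odd copies use its high
         half (WIDEN); if it holds half as many, two gather calls are needed
         per data vector and their results are combined (NARROW).  */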
1821 if (nunits == gather_off_nunits)
1822 modifier = NONE;
1823 else if (nunits == gather_off_nunits / 2)
1824 {
1825 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1826 modifier = WIDEN;
1827
1828 for (i = 0; i < gather_off_nunits; ++i)
1829 sel[i] = i | nunits;
1830
1831 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
1832 }
1833 else if (nunits == gather_off_nunits * 2)
1834 {
1835 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1836 modifier = NARROW;
1837
1838 for (i = 0; i < nunits; ++i)
1839 sel[i] = i < gather_off_nunits
1840 ? i : i + nunits - gather_off_nunits;
1841
1842 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
1843 ncopies *= 2;
1844 for (i = 0; i < nunits; ++i)
1845 sel[i] = i | gather_off_nunits;
1846 mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
1847 }
1848 else
1849 gcc_unreachable ();
1850
1851 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1852
1853 ptr = fold_convert (ptrtype, gather_base);
1854 if (!is_gimple_min_invariant (ptr))
1855 {
1856 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1857 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1858 gcc_assert (!new_bb);
1859 }
1860
1861 scale = build_int_cst (scaletype, gather_scale);
1862
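      /* Emit NCOPIES gather calls.  Each iteration builds the offset and
         mask operands (view-converting them to the builtin's types when
         necessary), issues the call, and chains the copies through
         STMT_VINFO_RELATED_STMT; for NARROW, pairs of results are merged
         with a permutation.  */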
1863 prev_stmt_info = NULL;
1864 for (j = 0; j < ncopies; ++j)
1865 {
1866 if (modifier == WIDEN && (j & 1))
1867 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1868 perm_mask, stmt, gsi);
1869 else if (j == 0)
1870 op = vec_oprnd0
1871 = vect_get_vec_def_for_operand (gather_off, stmt);
1872 else
1873 op = vec_oprnd0
1874 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1875
1876 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1877 {
1878 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1879 == TYPE_VECTOR_SUBPARTS (idxtype));
1880 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
1881 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1882 new_stmt
1883 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1884 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1885 op = var;
1886 }
1887
1888 if (mask_perm_mask && (j & 1))
1889 mask_op = permute_vec_elements (mask_op, mask_op,
1890 mask_perm_mask, stmt, gsi);
1891 else
1892 {
1893 if (j == 0)
1894 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
1895 else
1896 {
1897 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
1898 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1899 }
1900
1901 mask_op = vec_mask;
1902 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
1903 {
1904 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
1905 == TYPE_VECTOR_SUBPARTS (masktype));
1906 var = vect_get_new_ssa_name (masktype, vect_simple_var);
1907 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
1908 new_stmt
1909 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
1910 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1911 mask_op = var;
1912 }
1913 }
1914
1915 new_stmt
1916 = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
1917 scale);
1918
1919 if (!useless_type_conversion_p (vectype, rettype))
1920 {
1921 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
1922 == TYPE_VECTOR_SUBPARTS (rettype));
1923 op = vect_get_new_ssa_name (rettype, vect_simple_var);
1924 gimple_call_set_lhs (new_stmt, op);
1925 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1926 var = make_ssa_name (vec_dest);
1927 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
1928 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1929 }
1930 else
1931 {
1932 var = make_ssa_name (vec_dest, new_stmt);
1933 gimple_call_set_lhs (new_stmt, var);
1934 }
1935
1936 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1937
1938 if (modifier == NARROW)
1939 {
1940 if ((j & 1) == 0)
1941 {
1942 prev_res = var;
1943 continue;
1944 }
1945 var = permute_vec_elements (prev_res, var,
1946 perm_mask, stmt, gsi);
1947 new_stmt = SSA_NAME_DEF_STMT (var);
1948 }
1949
1950 if (prev_stmt_info == NULL)
1951 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1952 else
1953 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1954 prev_stmt_info = vinfo_for_stmt (new_stmt);
1955 }
1956
1957 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
1958 from the IL. */
1959 tree lhs = gimple_call_lhs (stmt);
1960 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
1961 set_vinfo_for_stmt (new_stmt, stmt_info);
1962 set_vinfo_for_stmt (stmt, NULL);
1963 STMT_VINFO_STMT (stmt_info) = new_stmt;
1964 gsi_replace (gsi, new_stmt, true);
1965 return true;
1966 }
1967 else if (is_store)
1968 {
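      /* Masked store: emit NCOPIES IFN_MASK_STORE calls, bumping the data
         pointer by the vector size between copies and recording the known
         alignment on it.  */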
1969 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
1970 prev_stmt_info = NULL;
1971 for (i = 0; i < ncopies; i++)
1972 {
1973 unsigned align, misalign;
1974
1975 if (i == 0)
1976 {
1977 tree rhs = gimple_call_arg (stmt, 3);
1978 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
1979 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
1980 /* We should have caught mismatched types earlier. */
1981 gcc_assert (useless_type_conversion_p (vectype,
1982 TREE_TYPE (vec_rhs)));
1983 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
1984 NULL_TREE, &dummy, gsi,
1985 &ptr_incr, false, &inv_p);
1986 gcc_assert (!inv_p);
1987 }
1988 else
1989 {
1990 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
1991 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
1992 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
1993 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1994 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
1995 TYPE_SIZE_UNIT (vectype));
1996 }
1997
1998 align = TYPE_ALIGN_UNIT (vectype);
1999 if (aligned_access_p (dr))
2000 misalign = 0;
2001 else if (DR_MISALIGNMENT (dr) == -1)
2002 {
2003 align = TYPE_ALIGN_UNIT (elem_type);
2004 misalign = 0;
2005 }
2006 else
2007 misalign = DR_MISALIGNMENT (dr);
2008 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2009 misalign);
2010 new_stmt
2011 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2012 gimple_call_arg (stmt, 1),
2013 vec_mask, vec_rhs);
2014 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2015 if (i == 0)
2016 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2017 else
2018 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2019 prev_stmt_info = vinfo_for_stmt (new_stmt);
2020 }
2021 }
2022 else
2023 {
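      /* Masked load: emit NCOPIES IFN_MASK_LOAD calls, likewise bumping the
         data pointer between copies and recording its alignment.  */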
2024 tree vec_mask = NULL_TREE;
2025 prev_stmt_info = NULL;
2026 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2027 for (i = 0; i < ncopies; i++)
2028 {
2029 unsigned align, misalign;
2030
2031 if (i == 0)
2032 {
2033 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2034 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2035 NULL_TREE, &dummy, gsi,
2036 &ptr_incr, false, &inv_p);
2037 gcc_assert (!inv_p);
2038 }
2039 else
2040 {
2041 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2042 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2043 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2044 TYPE_SIZE_UNIT (vectype));
2045 }
2046
2047 align = TYPE_ALIGN_UNIT (vectype);
2048 if (aligned_access_p (dr))
2049 misalign = 0;
2050 else if (DR_MISALIGNMENT (dr) == -1)
2051 {
2052 align = TYPE_ALIGN_UNIT (elem_type);
2053 misalign = 0;
2054 }
2055 else
2056 misalign = DR_MISALIGNMENT (dr);
2057 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2058 misalign);
2059 new_stmt
2060 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2061 gimple_call_arg (stmt, 1),
2062 vec_mask);
2063 gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
2064 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2065 if (i == 0)
2066 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2067 else
2068 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2069 prev_stmt_info = vinfo_for_stmt (new_stmt);
2070 }
2071 }
2072
2073 if (!is_store)
2074 {
2075 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2076 from the IL. */
2077 tree lhs = gimple_call_lhs (stmt);
2078 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2079 set_vinfo_for_stmt (new_stmt, stmt_info);
2080 set_vinfo_for_stmt (stmt, NULL);
2081 STMT_VINFO_STMT (stmt_info) = new_stmt;
2082 gsi_replace (gsi, new_stmt, true);
2083 }
2084
2085 return true;
2086 }
2087
2088
2089 /* Function vectorizable_call.
2090
2091 Check if GS performs a function call that can be vectorized.
2092 If VEC_STMT is also passed, vectorize GS: create a vectorized
2093 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2094 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2095
2096 static bool
2097 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2098 slp_tree slp_node)
2099 {
2100 gcall *stmt;
2101 tree vec_dest;
2102 tree scalar_dest;
2103 tree op, type;
2104 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2105 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2106 tree vectype_out, vectype_in;
2107 int nunits_in;
2108 int nunits_out;
2109 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2110 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2111 vec_info *vinfo = stmt_info->vinfo;
2112 tree fndecl, new_temp, rhs_type;
2113 gimple *def_stmt;
2114 enum vect_def_type dt[3]
2115 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2116 gimple *new_stmt = NULL;
2117 int ncopies, j;
2118 vec<tree> vargs = vNULL;
2119 enum { NARROW, NONE, WIDEN } modifier;
2120 size_t i, nargs;
2121 tree lhs;
2122
2123 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2124 return false;
2125
2126 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2127 return false;
2128
2129 /* Is GS a vectorizable call? */
2130 stmt = dyn_cast <gcall *> (gs);
2131 if (!stmt)
2132 return false;
2133
2134 if (gimple_call_internal_p (stmt)
2135 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2136 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2137 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2138 slp_node);
2139
2140 if (gimple_call_lhs (stmt) == NULL_TREE
2141 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2142 return false;
2143
2144 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2145
2146 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2147
2148 /* Process function arguments. */
2149 rhs_type = NULL_TREE;
2150 vectype_in = NULL_TREE;
2151 nargs = gimple_call_num_args (stmt);
2152
2153 /* Bail out if the function has more than three arguments; we do not have
2154 interesting builtin functions to vectorize with more than two arguments
2155 except for fma. Zero arguments is also not supported. */
2156 if (nargs == 0 || nargs > 3)
2157 return false;
2158
2159 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2160 if (gimple_call_internal_p (stmt)
2161 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2162 {
2163 nargs = 0;
2164 rhs_type = unsigned_type_node;
2165 }
2166
2167 for (i = 0; i < nargs; i++)
2168 {
2169 tree opvectype;
2170
2171 op = gimple_call_arg (stmt, i);
2172
2173 /* We can only handle calls with arguments of the same type. */
2174 if (rhs_type
2175 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2176 {
2177 if (dump_enabled_p ())
2178 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2179 "argument types differ.\n");
2180 return false;
2181 }
2182 if (!rhs_type)
2183 rhs_type = TREE_TYPE (op);
2184
2185 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2186 {
2187 if (dump_enabled_p ())
2188 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2189 "use not simple.\n");
2190 return false;
2191 }
2192
2193 if (!vectype_in)
2194 vectype_in = opvectype;
2195 else if (opvectype
2196 && opvectype != vectype_in)
2197 {
2198 if (dump_enabled_p ())
2199 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2200 "argument vector types differ.\n");
2201 return false;
2202 }
2203 }
2204 /* If all arguments are external or constant defs, use a vector type with
2205 the same size as the output vector type. */
2206 if (!vectype_in)
2207 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2208 if (vec_stmt)
2209 gcc_assert (vectype_in);
2210 if (!vectype_in)
2211 {
2212 if (dump_enabled_p ())
2213 {
2214 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2215 "no vectype for scalar type ");
2216 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2217 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2218 }
2219
2220 return false;
2221 }
2222
2223 /* FORNOW */
2224 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2225 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2226 if (nunits_in == nunits_out / 2)
2227 modifier = NARROW;
2228 else if (nunits_out == nunits_in)
2229 modifier = NONE;
2230 else if (nunits_out == nunits_in / 2)
2231 modifier = WIDEN;
2232 else
2233 return false;
2234
2235 /* For now, we only vectorize functions if a target-specific builtin
2236 is available. TODO -- in some cases, it might be profitable to
2237 insert the calls for pieces of the vector, in order to be able
2238 to vectorize other operations in the loop. */
2239 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
2240 if (fndecl == NULL_TREE)
2241 {
2242 if (gimple_call_internal_p (stmt)
2243 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
2244 && !slp_node
2245 && loop_vinfo
2246 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2247 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2248 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2249 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2250 {
2251 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2252 { 0, 1, 2, ... vf - 1 } vector. */
2253 gcc_assert (nargs == 0);
2254 }
2255 else
2256 {
2257 if (dump_enabled_p ())
2258 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2259 "function is not vectorizable.\n");
2260 return false;
2261 }
2262 }
2263
2264 gcc_assert (!gimple_vuse (stmt));
2265
2266 if (slp_node || PURE_SLP_STMT (stmt_info))
2267 ncopies = 1;
2268 else if (modifier == NARROW)
2269 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2270 else
2271 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2272
2273 /* Sanity check: make sure that at least one copy of the vectorized stmt
2274 needs to be generated. */
2275 gcc_assert (ncopies >= 1);
2276
2277 if (!vec_stmt) /* transformation not required. */
2278 {
2279 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2280 if (dump_enabled_p ())
2281 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2282 "\n");
2283 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2284 return true;
2285 }
2286
2287 /** Transform. **/
2288
2289 if (dump_enabled_p ())
2290 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2291
2292 /* Handle def. */
2293 scalar_dest = gimple_call_lhs (stmt);
2294 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2295
2296 prev_stmt_info = NULL;
2297 switch (modifier)
2298 {
2299 case NONE:
2300 for (j = 0; j < ncopies; ++j)
2301 {
2302 /* Build argument list for the vectorized call. */
2303 if (j == 0)
2304 vargs.create (nargs);
2305 else
2306 vargs.truncate (0);
2307
2308 if (slp_node)
2309 {
2310 auto_vec<vec<tree> > vec_defs (nargs);
2311 vec<tree> vec_oprnds0;
2312
2313 for (i = 0; i < nargs; i++)
2314 vargs.quick_push (gimple_call_arg (stmt, i));
2315 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2316 vec_oprnds0 = vec_defs[0];
2317
2318 /* Arguments are ready. Create the new vector stmt. */
2319 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2320 {
2321 size_t k;
2322 for (k = 0; k < nargs; k++)
2323 {
2324 vec<tree> vec_oprndsk = vec_defs[k];
2325 vargs[k] = vec_oprndsk[i];
2326 }
2327 new_stmt = gimple_build_call_vec (fndecl, vargs);
2328 new_temp = make_ssa_name (vec_dest, new_stmt);
2329 gimple_call_set_lhs (new_stmt, new_temp);
2330 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2331 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2332 }
2333
2334 for (i = 0; i < nargs; i++)
2335 {
2336 vec<tree> vec_oprndsi = vec_defs[i];
2337 vec_oprndsi.release ();
2338 }
2339 continue;
2340 }
2341
2342 for (i = 0; i < nargs; i++)
2343 {
2344 op = gimple_call_arg (stmt, i);
2345 if (j == 0)
2346 vec_oprnd0
2347 = vect_get_vec_def_for_operand (op, stmt);
2348 else
2349 {
2350 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2351 vec_oprnd0
2352 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2353 }
2354
2355 vargs.quick_push (vec_oprnd0);
2356 }
2357
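	  /* IFN_GOMP_SIMD_LANE expands to the constant lane-index vector
	     { j * nunits_out, ..., (j + 1) * nunits_out - 1 } for copy
	     number j.  */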
2358 if (gimple_call_internal_p (stmt)
2359 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2360 {
2361 tree *v = XALLOCAVEC (tree, nunits_out);
2362 int k;
2363 for (k = 0; k < nunits_out; ++k)
2364 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2365 tree cst = build_vector (vectype_out, v);
2366 tree new_var
2367 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
2368 gimple *init_stmt = gimple_build_assign (new_var, cst);
2369 vect_init_vector_1 (stmt, init_stmt, NULL);
2370 new_temp = make_ssa_name (vec_dest);
2371 new_stmt = gimple_build_assign (new_temp, new_var);
2372 }
2373 else
2374 {
2375 new_stmt = gimple_build_call_vec (fndecl, vargs);
2376 new_temp = make_ssa_name (vec_dest, new_stmt);
2377 gimple_call_set_lhs (new_stmt, new_temp);
2378 }
2379 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2380
2381 if (j == 0)
2382 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2383 else
2384 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2385
2386 prev_stmt_info = vinfo_for_stmt (new_stmt);
2387 }
2388
2389 break;
2390
2391 case NARROW:
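      /* Each narrowed call consumes two vector defs per scalar argument,
         so the operands are pushed in pairs.  */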
2392 for (j = 0; j < ncopies; ++j)
2393 {
2394 /* Build argument list for the vectorized call. */
2395 if (j == 0)
2396 vargs.create (nargs * 2);
2397 else
2398 vargs.truncate (0);
2399
2400 if (slp_node)
2401 {
2402 auto_vec<vec<tree> > vec_defs (nargs);
2403 vec<tree> vec_oprnds0;
2404
2405 for (i = 0; i < nargs; i++)
2406 vargs.quick_push (gimple_call_arg (stmt, i));
2407 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2408 vec_oprnds0 = vec_defs[0];
2409
2410 /* Arguments are ready. Create the new vector stmt. */
2411 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2412 {
2413 size_t k;
2414 vargs.truncate (0);
2415 for (k = 0; k < nargs; k++)
2416 {
2417 vec<tree> vec_oprndsk = vec_defs[k];
2418 vargs.quick_push (vec_oprndsk[i]);
2419 vargs.quick_push (vec_oprndsk[i + 1]);
2420 }
2421 new_stmt = gimple_build_call_vec (fndecl, vargs);
2422 new_temp = make_ssa_name (vec_dest, new_stmt);
2423 gimple_call_set_lhs (new_stmt, new_temp);
2424 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2425 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2426 }
2427
2428 for (i = 0; i < nargs; i++)
2429 {
2430 vec<tree> vec_oprndsi = vec_defs[i];
2431 vec_oprndsi.release ();
2432 }
2433 continue;
2434 }
2435
2436 for (i = 0; i < nargs; i++)
2437 {
2438 op = gimple_call_arg (stmt, i);
2439 if (j == 0)
2440 {
2441 vec_oprnd0
2442 = vect_get_vec_def_for_operand (op, stmt);
2443 vec_oprnd1
2444 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2445 }
2446 else
2447 {
2448 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2449 vec_oprnd0
2450 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2451 vec_oprnd1
2452 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2453 }
2454
2455 vargs.quick_push (vec_oprnd0);
2456 vargs.quick_push (vec_oprnd1);
2457 }
2458
2459 new_stmt = gimple_build_call_vec (fndecl, vargs);
2460 new_temp = make_ssa_name (vec_dest, new_stmt);
2461 gimple_call_set_lhs (new_stmt, new_temp);
2462 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2463
2464 if (j == 0)
2465 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2466 else
2467 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2468
2469 prev_stmt_info = vinfo_for_stmt (new_stmt);
2470 }
2471
2472 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2473
2474 break;
2475
2476 case WIDEN:
2477 /* No current target implements this case. */
2478 return false;
2479 }
2480
2481 vargs.release ();
2482
2483 /* The call in STMT might prevent it from being removed in dce.
2484 However, we cannot remove it here because of the way the SSA name
2485 it defines is mapped to the new definition. So just replace the
2486 rhs of the statement with something harmless. */
2487
2488 if (slp_node)
2489 return true;
2490
2491 type = TREE_TYPE (scalar_dest);
2492 if (is_pattern_stmt_p (stmt_info))
2493 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2494 else
2495 lhs = gimple_call_lhs (stmt);
2496
2497 if (gimple_call_internal_p (stmt)
2498 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2499 {
2500 /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
2501 with vf - 1 rather than 0, that is the last iteration of the
2502 vectorized loop. */
2503 imm_use_iterator iter;
2504 use_operand_p use_p;
2505 gimple *use_stmt;
2506 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2507 {
2508 basic_block use_bb = gimple_bb (use_stmt);
2509 if (use_bb
2510 && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
2511 {
2512 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2513 SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
2514 ncopies * nunits_out - 1));
2515 update_stmt (use_stmt);
2516 }
2517 }
2518 }
2519
2520 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2521 set_vinfo_for_stmt (new_stmt, stmt_info);
2522 set_vinfo_for_stmt (stmt, NULL);
2523 STMT_VINFO_STMT (stmt_info) = new_stmt;
2524 gsi_replace (gsi, new_stmt, false);
2525
2526 return true;
2527 }
2528
2529
2530 struct simd_call_arg_info
2531 {
2532 tree vectype;
2533 tree op;
2534 enum vect_def_type dt;
2535 HOST_WIDE_INT linear_step;
2536 unsigned int align;
2537 bool simd_lane_linear;
2538 };
2539
2540 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
2541 is linear within simd lane (but not within whole loop), note it in
2542 *ARGINFO. */
2543
2544 static void
2545 vect_simd_lane_linear (tree op, struct loop *loop,
2546 struct simd_call_arg_info *arginfo)
2547 {
2548 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
2549
2550 if (!is_gimple_assign (def_stmt)
2551 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
2552 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
2553 return;
2554
2555 tree base = gimple_assign_rhs1 (def_stmt);
2556 HOST_WIDE_INT linear_step = 0;
2557 tree v = gimple_assign_rhs2 (def_stmt);
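  /* Walk the definition chain of V, folding constant additions into BASE
     and remembering a single multiplicative step, until we either give up
     or reach the GOMP_SIMD_LANE call that shows OP is linear within the
     simd lane.  */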
2558 while (TREE_CODE (v) == SSA_NAME)
2559 {
2560 tree t;
2561 def_stmt = SSA_NAME_DEF_STMT (v);
2562 if (is_gimple_assign (def_stmt))
2563 switch (gimple_assign_rhs_code (def_stmt))
2564 {
2565 case PLUS_EXPR:
2566 t = gimple_assign_rhs2 (def_stmt);
2567 if (linear_step || TREE_CODE (t) != INTEGER_CST)
2568 return;
2569 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
2570 v = gimple_assign_rhs1 (def_stmt);
2571 continue;
2572 case MULT_EXPR:
2573 t = gimple_assign_rhs2 (def_stmt);
2574 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
2575 return;
2576 linear_step = tree_to_shwi (t);
2577 v = gimple_assign_rhs1 (def_stmt);
2578 continue;
2579 CASE_CONVERT:
2580 t = gimple_assign_rhs1 (def_stmt);
2581 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
2582 || (TYPE_PRECISION (TREE_TYPE (v))
2583 < TYPE_PRECISION (TREE_TYPE (t))))
2584 return;
2585 if (!linear_step)
2586 linear_step = 1;
2587 v = t;
2588 continue;
2589 default:
2590 return;
2591 }
2592 else if (is_gimple_call (def_stmt)
2593 && gimple_call_internal_p (def_stmt)
2594 && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
2595 && loop->simduid
2596 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
2597 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
2598 == loop->simduid))
2599 {
2600 if (!linear_step)
2601 linear_step = 1;
2602 arginfo->linear_step = linear_step;
2603 arginfo->op = base;
2604 arginfo->simd_lane_linear = true;
2605 return;
2606 }
2607 }
2608 }
2609
2610 /* Function vectorizable_simd_clone_call.
2611
2612 Check if STMT performs a function call that can be vectorized
2613 by calling a simd clone of the function.
2614 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2615 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2616 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2617
2618 static bool
2619 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
2620 gimple **vec_stmt, slp_tree slp_node)
2621 {
2622 tree vec_dest;
2623 tree scalar_dest;
2624 tree op, type;
2625 tree vec_oprnd0 = NULL_TREE;
2626 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2627 tree vectype;
2628 unsigned int nunits;
2629 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2630 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2631 vec_info *vinfo = stmt_info->vinfo;
2632 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2633 tree fndecl, new_temp;
2634 gimple *def_stmt;
2635 gimple *new_stmt = NULL;
2636 int ncopies, j;
2637 vec<simd_call_arg_info> arginfo = vNULL;
2638 vec<tree> vargs = vNULL;
2639 size_t i, nargs;
2640 tree lhs, rtype, ratype;
2641 vec<constructor_elt, va_gc> *ret_ctor_elts;
2642
2643 /* Is STMT a vectorizable call? */
2644 if (!is_gimple_call (stmt))
2645 return false;
2646
2647 fndecl = gimple_call_fndecl (stmt);
2648 if (fndecl == NULL_TREE)
2649 return false;
2650
2651 struct cgraph_node *node = cgraph_node::get (fndecl);
2652 if (node == NULL || node->simd_clones == NULL)
2653 return false;
2654
2655 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2656 return false;
2657
2658 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2659 return false;
2660
2661 if (gimple_call_lhs (stmt)
2662 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2663 return false;
2664
2665 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2666
2667 vectype = STMT_VINFO_VECTYPE (stmt_info);
2668
2669 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2670 return false;
2671
2672 /* FORNOW */
2673 if (slp_node || PURE_SLP_STMT (stmt_info))
2674 return false;
2675
2676 /* Process function arguments. */
2677 nargs = gimple_call_num_args (stmt);
2678
2679 /* Bail out if the function has zero arguments. */
2680 if (nargs == 0)
2681 return false;
2682
2683 arginfo.create (nargs);
2684
2685 for (i = 0; i < nargs; i++)
2686 {
2687 simd_call_arg_info thisarginfo;
2688 affine_iv iv;
2689
2690 thisarginfo.linear_step = 0;
2691 thisarginfo.align = 0;
2692 thisarginfo.op = NULL_TREE;
2693 thisarginfo.simd_lane_linear = false;
2694
2695 op = gimple_call_arg (stmt, i);
2696 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
2697 &thisarginfo.vectype)
2698 || thisarginfo.dt == vect_uninitialized_def)
2699 {
2700 if (dump_enabled_p ())
2701 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2702 "use not simple.\n");
2703 arginfo.release ();
2704 return false;
2705 }
2706
2707 if (thisarginfo.dt == vect_constant_def
2708 || thisarginfo.dt == vect_external_def)
2709 gcc_assert (thisarginfo.vectype == NULL_TREE);
2710 else
2711 gcc_assert (thisarginfo.vectype != NULL_TREE);
2712
2713 /* For linear arguments, the analyze phase should have saved
2714 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
2715 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
2716 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
2717 {
2718 gcc_assert (vec_stmt);
2719 thisarginfo.linear_step
2720 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
2721 thisarginfo.op
2722 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
2723 thisarginfo.simd_lane_linear
2724 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
2725 == boolean_true_node);
2726 /* If the loop has been peeled for alignment, adjust the linear argument's base accordingly. */
2727 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
2728 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
2729 if (n1 != n2 && !thisarginfo.simd_lane_linear)
2730 {
2731 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
2732 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
2733 tree opt = TREE_TYPE (thisarginfo.op);
2734 bias = fold_convert (TREE_TYPE (step), bias);
2735 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
2736 thisarginfo.op
2737 = fold_build2 (POINTER_TYPE_P (opt)
2738 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
2739 thisarginfo.op, bias);
2740 }
2741 }
2742 else if (!vec_stmt
2743 && thisarginfo.dt != vect_constant_def
2744 && thisarginfo.dt != vect_external_def
2745 && loop_vinfo
2746 && TREE_CODE (op) == SSA_NAME
2747 && simple_iv (loop, loop_containing_stmt (stmt), op,
2748 &iv, false)
2749 && tree_fits_shwi_p (iv.step))
2750 {
2751 thisarginfo.linear_step = tree_to_shwi (iv.step);
2752 thisarginfo.op = iv.base;
2753 }
2754 else if ((thisarginfo.dt == vect_constant_def
2755 || thisarginfo.dt == vect_external_def)
2756 && POINTER_TYPE_P (TREE_TYPE (op)))
2757 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2758 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
2759 linear too. */
2760 if (POINTER_TYPE_P (TREE_TYPE (op))
2761 && !thisarginfo.linear_step
2762 && !vec_stmt
2763 && thisarginfo.dt != vect_constant_def
2764 && thisarginfo.dt != vect_external_def
2765 && loop_vinfo
2766 && !slp_node
2767 && TREE_CODE (op) == SSA_NAME)
2768 vect_simd_lane_linear (op, loop, &thisarginfo);
2769
2770 arginfo.quick_push (thisarginfo);
2771 }
2772
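  /* Select the simd clone to use.  Clones whose simdlen exceeds the
     vectorization factor, whose argument count differs, or whose argument
     kinds or alignment requirements do not match are rejected; among the
     remaining ones a badness score (penalizing smaller simdlens and
     target-reported costs) picks the best candidate.  */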
2773 unsigned int badness = 0;
2774 struct cgraph_node *bestn = NULL;
2775 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
2776 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
2777 else
2778 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2779 n = n->simdclone->next_clone)
2780 {
2781 unsigned int this_badness = 0;
2782 if (n->simdclone->simdlen
2783 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2784 || n->simdclone->nargs != nargs)
2785 continue;
2786 if (n->simdclone->simdlen
2787 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2788 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2789 - exact_log2 (n->simdclone->simdlen)) * 1024;
2790 if (n->simdclone->inbranch)
2791 this_badness += 2048;
2792 int target_badness = targetm.simd_clone.usable (n);
2793 if (target_badness < 0)
2794 continue;
2795 this_badness += target_badness * 512;
2796 /* FORNOW: Have to add code to add the mask argument. */
2797 if (n->simdclone->inbranch)
2798 continue;
2799 for (i = 0; i < nargs; i++)
2800 {
2801 switch (n->simdclone->args[i].arg_type)
2802 {
2803 case SIMD_CLONE_ARG_TYPE_VECTOR:
2804 if (!useless_type_conversion_p
2805 (n->simdclone->args[i].orig_type,
2806 TREE_TYPE (gimple_call_arg (stmt, i))))
2807 i = -1;
2808 else if (arginfo[i].dt == vect_constant_def
2809 || arginfo[i].dt == vect_external_def
2810 || arginfo[i].linear_step)
2811 this_badness += 64;
2812 break;
2813 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2814 if (arginfo[i].dt != vect_constant_def
2815 && arginfo[i].dt != vect_external_def)
2816 i = -1;
2817 break;
2818 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2819 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
2820 if (arginfo[i].dt == vect_constant_def
2821 || arginfo[i].dt == vect_external_def
2822 || (arginfo[i].linear_step
2823 != n->simdclone->args[i].linear_step))
2824 i = -1;
2825 break;
2826 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2827 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
2828 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
2829 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
2830 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
2831 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
2832 /* FORNOW */
2833 i = -1;
2834 break;
2835 case SIMD_CLONE_ARG_TYPE_MASK:
2836 gcc_unreachable ();
2837 }
2838 if (i == (size_t) -1)
2839 break;
2840 if (n->simdclone->args[i].alignment > arginfo[i].align)
2841 {
2842 i = -1;
2843 break;
2844 }
2845 if (arginfo[i].align)
2846 this_badness += (exact_log2 (arginfo[i].align)
2847 - exact_log2 (n->simdclone->args[i].alignment));
2848 }
2849 if (i == (size_t) -1)
2850 continue;
2851 if (bestn == NULL || this_badness < badness)
2852 {
2853 bestn = n;
2854 badness = this_badness;
2855 }
2856 }
2857
2858 if (bestn == NULL)
2859 {
2860 arginfo.release ();
2861 return false;
2862 }
2863
2864 for (i = 0; i < nargs; i++)
2865 if ((arginfo[i].dt == vect_constant_def
2866 || arginfo[i].dt == vect_external_def)
2867 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2868 {
2869 arginfo[i].vectype
2870 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2871 i)));
2872 if (arginfo[i].vectype == NULL
2873 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2874 > bestn->simdclone->simdlen))
2875 {
2876 arginfo.release ();
2877 return false;
2878 }
2879 }
2880
2881 fndecl = bestn->decl;
2882 nunits = bestn->simdclone->simdlen;
2883 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2884
2885 /* If the function isn't const, only allow it in simd loops where the user
2886 has asserted that at least nunits consecutive iterations can be
2887 performed using SIMD instructions. */
2888 if ((loop == NULL || (unsigned) loop->safelen < nunits)
2889 && gimple_vuse (stmt))
2890 {
2891 arginfo.release ();
2892 return false;
2893 }
2894
2895 /* Sanity check: make sure that at least one copy of the vectorized stmt
2896 needs to be generated. */
2897 gcc_assert (ncopies >= 1);
2898
2899 if (!vec_stmt) /* transformation not required. */
2900 {
2901 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
2902 for (i = 0; i < nargs; i++)
2903 if (bestn->simdclone->args[i].arg_type
2904 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
2905 {
2906 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
2907 + 1);
2908 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
2909 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
2910 ? size_type_node : TREE_TYPE (arginfo[i].op);
2911 tree ls = build_int_cst (lst, arginfo[i].linear_step);
2912 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
2913 tree sll = arginfo[i].simd_lane_linear
2914 ? boolean_true_node : boolean_false_node;
2915 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
2916 }
2917 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
2918 if (dump_enabled_p ())
2919 dump_printf_loc (MSG_NOTE, vect_location,
2920 "=== vectorizable_simd_clone_call ===\n");
2921 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2922 arginfo.release ();
2923 return true;
2924 }
2925
2926 /** Transform. **/
2927
2928 if (dump_enabled_p ())
2929 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2930
2931 /* Handle def. */
2932 scalar_dest = gimple_call_lhs (stmt);
2933 vec_dest = NULL_TREE;
2934 rtype = NULL_TREE;
2935 ratype = NULL_TREE;
2936 if (scalar_dest)
2937 {
2938 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2939 rtype = TREE_TYPE (TREE_TYPE (fndecl));
2940 if (TREE_CODE (rtype) == ARRAY_TYPE)
2941 {
2942 ratype = rtype;
2943 rtype = TREE_TYPE (ratype);
2944 }
2945 }
2946
2947 prev_stmt_info = NULL;
2948 for (j = 0; j < ncopies; ++j)
2949 {
2950 /* Build argument list for the vectorized call. */
2951 if (j == 0)
2952 vargs.create (nargs);
2953 else
2954 vargs.truncate (0);
2955
2956 for (i = 0; i < nargs; i++)
2957 {
2958 unsigned int k, l, m, o;
2959 tree atype;
2960 op = gimple_call_arg (stmt, i);
2961 switch (bestn->simdclone->args[i].arg_type)
2962 {
2963 case SIMD_CLONE_ARG_TYPE_VECTOR:
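	    /* A vector argument may have to be split into several narrower
	       vectors (extracted with BIT_FIELD_REF) or assembled from
	       several vector defs into a wider one (via a CONSTRUCTOR),
	       depending on how the clone's vector type compares with
	       ARGINFO[i].vectype.  */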
2964 atype = bestn->simdclone->args[i].vector_type;
2965 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
2966 for (m = j * o; m < (j + 1) * o; m++)
2967 {
2968 if (TYPE_VECTOR_SUBPARTS (atype)
2969 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
2970 {
2971 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
2972 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2973 / TYPE_VECTOR_SUBPARTS (atype));
2974 gcc_assert ((k & (k - 1)) == 0);
2975 if (m == 0)
2976 vec_oprnd0
2977 = vect_get_vec_def_for_operand (op, stmt);
2978 else
2979 {
2980 vec_oprnd0 = arginfo[i].op;
2981 if ((m & (k - 1)) == 0)
2982 vec_oprnd0
2983 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2984 vec_oprnd0);
2985 }
2986 arginfo[i].op = vec_oprnd0;
2987 vec_oprnd0
2988 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
2989 size_int (prec),
2990 bitsize_int ((m & (k - 1)) * prec));
2991 new_stmt
2992 = gimple_build_assign (make_ssa_name (atype),
2993 vec_oprnd0);
2994 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2995 vargs.safe_push (gimple_assign_lhs (new_stmt));
2996 }
2997 else
2998 {
2999 k = (TYPE_VECTOR_SUBPARTS (atype)
3000 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3001 gcc_assert ((k & (k - 1)) == 0);
3002 vec<constructor_elt, va_gc> *ctor_elts;
3003 if (k != 1)
3004 vec_alloc (ctor_elts, k);
3005 else
3006 ctor_elts = NULL;
3007 for (l = 0; l < k; l++)
3008 {
3009 if (m == 0 && l == 0)
3010 vec_oprnd0
3011 = vect_get_vec_def_for_operand (op, stmt);
3012 else
3013 vec_oprnd0
3014 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3015 arginfo[i].op);
3016 arginfo[i].op = vec_oprnd0;
3017 if (k == 1)
3018 break;
3019 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3020 vec_oprnd0);
3021 }
3022 if (k == 1)
3023 vargs.safe_push (vec_oprnd0);
3024 else
3025 {
3026 vec_oprnd0 = build_constructor (atype, ctor_elts);
3027 new_stmt
3028 = gimple_build_assign (make_ssa_name (atype),
3029 vec_oprnd0);
3030 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3031 vargs.safe_push (gimple_assign_lhs (new_stmt));
3032 }
3033 }
3034 }
3035 break;
3036 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3037 vargs.safe_push (op);
3038 break;
3039 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
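	    /* For a linear argument create an induction variable in the
	       loop header: a PHI seeded with the argument's base value and
	       incremented by linear_step * vf per vectorized iteration;
	       copies after the first add j * nunits * linear_step to the
	       PHI result.  Values already linear within the simd lane just
	       pass their base directly.  */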
3040 if (j == 0)
3041 {
3042 gimple_seq stmts;
3043 arginfo[i].op
3044 = force_gimple_operand (arginfo[i].op, &stmts, true,
3045 NULL_TREE);
3046 if (stmts != NULL)
3047 {
3048 basic_block new_bb;
3049 edge pe = loop_preheader_edge (loop);
3050 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3051 gcc_assert (!new_bb);
3052 }
3053 if (arginfo[i].simd_lane_linear)
3054 {
3055 vargs.safe_push (arginfo[i].op);
3056 break;
3057 }
3058 tree phi_res = copy_ssa_name (op);
3059 gphi *new_phi = create_phi_node (phi_res, loop->header);
3060 set_vinfo_for_stmt (new_phi,
3061 new_stmt_vec_info (new_phi, loop_vinfo));
3062 add_phi_arg (new_phi, arginfo[i].op,
3063 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3064 enum tree_code code
3065 = POINTER_TYPE_P (TREE_TYPE (op))
3066 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3067 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3068 ? sizetype : TREE_TYPE (op);
3069 widest_int cst
3070 = wi::mul (bestn->simdclone->args[i].linear_step,
3071 ncopies * nunits);
3072 tree tcst = wide_int_to_tree (type, cst);
3073 tree phi_arg = copy_ssa_name (op);
3074 new_stmt
3075 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3076 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3077 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3078 set_vinfo_for_stmt (new_stmt,
3079 new_stmt_vec_info (new_stmt, loop_vinfo));
3080 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3081 UNKNOWN_LOCATION);
3082 arginfo[i].op = phi_res;
3083 vargs.safe_push (phi_res);
3084 }
3085 else
3086 {
3087 enum tree_code code
3088 = POINTER_TYPE_P (TREE_TYPE (op))
3089 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3090 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3091 ? sizetype : TREE_TYPE (op);
3092 widest_int cst
3093 = wi::mul (bestn->simdclone->args[i].linear_step,
3094 j * nunits);
3095 tree tcst = wide_int_to_tree (type, cst);
3096 new_temp = make_ssa_name (TREE_TYPE (op));
3097 new_stmt = gimple_build_assign (new_temp, code,
3098 arginfo[i].op, tcst);
3099 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3100 vargs.safe_push (new_temp);
3101 }
3102 break;
3103 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3104 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3105 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3106 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3107 default:
3108 gcc_unreachable ();
3109 }
3110 }
3111
3112 new_stmt = gimple_build_call_vec (fndecl, vargs);
3113 if (vec_dest)
3114 {
3115 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3116 if (ratype)
3117 new_temp = create_tmp_var (ratype);
3118 else if (TYPE_VECTOR_SUBPARTS (vectype)
3119 == TYPE_VECTOR_SUBPARTS (rtype))
3120 new_temp = make_ssa_name (vec_dest, new_stmt);
3121 else
3122 new_temp = make_ssa_name (rtype, new_stmt);
3123 gimple_call_set_lhs (new_stmt, new_temp);
3124 }
3125 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3126
3127 if (vec_dest)
3128 {
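	  /* The clone's return value need not match VECTYPE exactly: a
	     wider return vector (or an array return) is split into
	     VECTYPE-sized pieces, while narrower results from several
	     copies are collected into a CONSTRUCTOR before a single
	     vector def is emitted.  */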
3129 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3130 {
3131 unsigned int k, l;
3132 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3133 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3134 gcc_assert ((k & (k - 1)) == 0);
3135 for (l = 0; l < k; l++)
3136 {
3137 tree t;
3138 if (ratype)
3139 {
3140 t = build_fold_addr_expr (new_temp);
3141 t = build2 (MEM_REF, vectype, t,
3142 build_int_cst (TREE_TYPE (t),
3143 l * prec / BITS_PER_UNIT));
3144 }
3145 else
3146 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3147 size_int (prec), bitsize_int (l * prec));
3148 new_stmt
3149 = gimple_build_assign (make_ssa_name (vectype), t);
3150 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3151 if (j == 0 && l == 0)
3152 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3153 else
3154 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3155
3156 prev_stmt_info = vinfo_for_stmt (new_stmt);
3157 }
3158
3159 if (ratype)
3160 {
3161 tree clobber = build_constructor (ratype, NULL);
3162 TREE_THIS_VOLATILE (clobber) = 1;
3163 new_stmt = gimple_build_assign (new_temp, clobber);
3164 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3165 }
3166 continue;
3167 }
3168 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3169 {
3170 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3171 / TYPE_VECTOR_SUBPARTS (rtype));
3172 gcc_assert ((k & (k - 1)) == 0);
3173 if ((j & (k - 1)) == 0)
3174 vec_alloc (ret_ctor_elts, k);
3175 if (ratype)
3176 {
3177 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3178 for (m = 0; m < o; m++)
3179 {
3180 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3181 size_int (m), NULL_TREE, NULL_TREE);
3182 new_stmt
3183 = gimple_build_assign (make_ssa_name (rtype), tem);
3184 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3185 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3186 gimple_assign_lhs (new_stmt));
3187 }
3188 tree clobber = build_constructor (ratype, NULL);
3189 TREE_THIS_VOLATILE (clobber) = 1;
3190 new_stmt = gimple_build_assign (new_temp, clobber);
3191 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3192 }
3193 else
3194 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3195 if ((j & (k - 1)) != k - 1)
3196 continue;
3197 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3198 new_stmt
3199 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3200 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3201
3202 if ((unsigned) j == k - 1)
3203 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3204 else
3205 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3206
3207 prev_stmt_info = vinfo_for_stmt (new_stmt);
3208 continue;
3209 }
3210 else if (ratype)
3211 {
3212 tree t = build_fold_addr_expr (new_temp);
3213 t = build2 (MEM_REF, vectype, t,
3214 build_int_cst (TREE_TYPE (t), 0));
3215 new_stmt
3216 = gimple_build_assign (make_ssa_name (vec_dest), t);
3217 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3218 tree clobber = build_constructor (ratype, NULL);
3219 TREE_THIS_VOLATILE (clobber) = 1;
3220 vect_finish_stmt_generation (stmt,
3221 gimple_build_assign (new_temp,
3222 clobber), gsi);
3223 }
3224 }
3225
3226 if (j == 0)
3227 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3228 else
3229 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3230
3231 prev_stmt_info = vinfo_for_stmt (new_stmt);
3232 }
3233
3234 vargs.release ();
3235
3236 /* The call in STMT might prevent it from being removed in dce.
3237 However, we cannot remove it here because of the way the SSA name
3238 it defines is mapped to the new definition. So just replace the
3239 rhs of the statement with something harmless. */
3240
3241 if (slp_node)
3242 return true;
3243
3244 if (scalar_dest)
3245 {
3246 type = TREE_TYPE (scalar_dest);
3247 if (is_pattern_stmt_p (stmt_info))
3248 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3249 else
3250 lhs = gimple_call_lhs (stmt);
3251 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3252 }
3253 else
3254 new_stmt = gimple_build_nop ();
3255 set_vinfo_for_stmt (new_stmt, stmt_info);
3256 set_vinfo_for_stmt (stmt, NULL);
3257 STMT_VINFO_STMT (stmt_info) = new_stmt;
3258 gsi_replace (gsi, new_stmt, true);
3259 unlink_stmt_vdef (stmt);
3260
3261 return true;
3262 }
3263
3264
3265 /* Function vect_gen_widened_results_half
3266
3267 Create a vector stmt whose code, number of arguments, and result
3268 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
3269 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at GSI.
3270 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3271 needs to be created (DECL is a function-decl of a target-builtin).
3272 STMT is the original scalar stmt that we are vectorizing. */
3273
3274 static gimple *
3275 vect_gen_widened_results_half (enum tree_code code,
3276 tree decl,
3277 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3278 tree vec_dest, gimple_stmt_iterator *gsi,
3279 gimple *stmt)
3280 {
3281 gimple *new_stmt;
3282 tree new_temp;
3283
3284 /* Generate half of the widened result: */
3285 if (code == CALL_EXPR)
3286 {
3287 /* Target specific support */
3288 if (op_type == binary_op)
3289 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3290 else
3291 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3292 new_temp = make_ssa_name (vec_dest, new_stmt);
3293 gimple_call_set_lhs (new_stmt, new_temp);
3294 }
3295 else
3296 {
3297 /* Generic support */
3298 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3299 if (op_type != binary_op)
3300 vec_oprnd1 = NULL;
3301 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3302 new_temp = make_ssa_name (vec_dest, new_stmt);
3303 gimple_assign_set_lhs (new_stmt, new_temp);
3304 }
3305 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3306
3307 return new_stmt;
3308 }
3309
3310
3311 /* Get vectorized definitions for loop-based vectorization. For the first
3312 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3313 scalar operand), and for the rest we get a copy with
3314 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3315 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3316 The vectors are collected into VEC_OPRNDS. */
3317
3318 static void
3319 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3320 vec<tree> *vec_oprnds, int multi_step_cvt)
3321 {
3322 tree vec_oprnd;
3323
3324 /* Get first vector operand. */
3325 /* All the vector operands except the very first one (which is the scalar
3326 operand) are stmt copies. */
3327 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3328 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3329 else
3330 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3331
3332 vec_oprnds->quick_push (vec_oprnd);
3333
3334 /* Get second vector operand. */
3335 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3336 vec_oprnds->quick_push (vec_oprnd);
3337
3338 *oprnd = vec_oprnd;
3339
3340 /* For conversion in multiple steps, continue to get operands
3341 recursively. */
3342 if (multi_step_cvt)
3343 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3344 }
3345
3346
3347 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3348 For multi-step conversions store the resulting vectors and call the function
3349 recursively. */
3350
3351 static void
3352 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3353 int multi_step_cvt, gimple *stmt,
3354 vec<tree> vec_dsts,
3355 gimple_stmt_iterator *gsi,
3356 slp_tree slp_node, enum tree_code code,
3357 stmt_vec_info *prev_stmt_info)
3358 {
3359 unsigned int i;
3360 tree vop0, vop1, new_tmp, vec_dest;
3361 gimple *new_stmt;
3362 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3363
3364 vec_dest = vec_dsts.pop ();
3365
3366 for (i = 0; i < vec_oprnds->length (); i += 2)
3367 {
3368 /* Create demotion operation. */
3369 vop0 = (*vec_oprnds)[i];
3370 vop1 = (*vec_oprnds)[i + 1];
3371 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3372 new_tmp = make_ssa_name (vec_dest, new_stmt);
3373 gimple_assign_set_lhs (new_stmt, new_tmp);
3374 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3375
3376 if (multi_step_cvt)
3377 /* Store the resulting vector for next recursive call. */
3378 (*vec_oprnds)[i/2] = new_tmp;
3379 else
3380 {
3381 /* This is the last step of the conversion sequence. Store the
3382 vectors in SLP_NODE or in the vector info of the scalar statement
3383 (or in the STMT_VINFO_RELATED_STMT chain). */
3384 if (slp_node)
3385 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3386 else
3387 {
3388 if (!*prev_stmt_info)
3389 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3390 else
3391 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3392
3393 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3394 }
3395 }
3396 }
3397
3398 /* For multi-step demotion operations we first generate demotion operations
3399 from the source type to the intermediate types, and then combine the
3400 results (stored in VEC_OPRNDS) in a demotion operation to the destination
3401 type. */
3402 if (multi_step_cvt)
3403 {
3404 /* At each level of recursion we have half of the operands we had at the
3405 previous level. */
3406 vec_oprnds->truncate ((i+1)/2);
3407 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3408 stmt, vec_dsts, gsi, slp_node,
3409 VEC_PACK_TRUNC_EXPR,
3410 prev_stmt_info);
3411 }
3412
3413 vec_dsts.quick_push (vec_dest);
3414 }
3415
3416
3417 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3418 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3419 the resulting vectors and call the function recursively. */
3420
3421 static void
3422 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3423 vec<tree> *vec_oprnds1,
3424 gimple *stmt, tree vec_dest,
3425 gimple_stmt_iterator *gsi,
3426 enum tree_code code1,
3427 enum tree_code code2, tree decl1,
3428 tree decl2, int op_type)
3429 {
3430 int i;
3431 tree vop0, vop1, new_tmp1, new_tmp2;
3432 gimple *new_stmt1, *new_stmt2;
3433 vec<tree> vec_tmp = vNULL;
3434
3435 vec_tmp.create (vec_oprnds0->length () * 2);
3436 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3437 {
3438 if (op_type == binary_op)
3439 vop1 = (*vec_oprnds1)[i];
3440 else
3441 vop1 = NULL_TREE;
3442
3443 /* Generate the two halves of promotion operation. */
3444 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3445 op_type, vec_dest, gsi, stmt);
3446 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3447 op_type, vec_dest, gsi, stmt);
3448 if (is_gimple_call (new_stmt1))
3449 {
3450 new_tmp1 = gimple_call_lhs (new_stmt1);
3451 new_tmp2 = gimple_call_lhs (new_stmt2);
3452 }
3453 else
3454 {
3455 new_tmp1 = gimple_assign_lhs (new_stmt1);
3456 new_tmp2 = gimple_assign_lhs (new_stmt2);
3457 }
3458
3459 /* Store the results for the next step. */
3460 vec_tmp.quick_push (new_tmp1);
3461 vec_tmp.quick_push (new_tmp2);
3462 }
3463
3464 vec_oprnds0->release ();
3465 *vec_oprnds0 = vec_tmp;
3466 }
3467
3468
3469 /* Check if STMT performs a conversion operation that can be vectorized.
3470 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3471 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3472 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3473
3474 static bool
3475 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
3476 gimple **vec_stmt, slp_tree slp_node)
3477 {
3478 tree vec_dest;
3479 tree scalar_dest;
3480 tree op0, op1 = NULL_TREE;
3481 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3482 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3483 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3484 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3485 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3486 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3487 tree new_temp;
3488 gimple *def_stmt;
3489 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3490 gimple *new_stmt = NULL;
3491 stmt_vec_info prev_stmt_info;
3492 int nunits_in;
3493 int nunits_out;
3494 tree vectype_out, vectype_in;
3495 int ncopies, i, j;
3496 tree lhs_type, rhs_type;
3497 enum { NARROW, NONE, WIDEN } modifier;
3498 vec<tree> vec_oprnds0 = vNULL;
3499 vec<tree> vec_oprnds1 = vNULL;
3500 tree vop0;
3501 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3502 vec_info *vinfo = stmt_info->vinfo;
3503 int multi_step_cvt = 0;
3504 vec<tree> vec_dsts = vNULL;
3505 vec<tree> interm_types = vNULL;
3506 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3507 int op_type;
3508 machine_mode rhs_mode;
3509 unsigned short fltsz;
3510
3511 /* Is STMT a vectorizable conversion? */
3512
3513 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3514 return false;
3515
3516 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3517 return false;
3518
3519 if (!is_gimple_assign (stmt))
3520 return false;
3521
3522 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3523 return false;
3524
3525 code = gimple_assign_rhs_code (stmt);
3526 if (!CONVERT_EXPR_CODE_P (code)
3527 && code != FIX_TRUNC_EXPR
3528 && code != FLOAT_EXPR
3529 && code != WIDEN_MULT_EXPR
3530 && code != WIDEN_LSHIFT_EXPR)
3531 return false;
3532
3533 op_type = TREE_CODE_LENGTH (code);
3534
3535 /* Check types of lhs and rhs. */
3536 scalar_dest = gimple_assign_lhs (stmt);
3537 lhs_type = TREE_TYPE (scalar_dest);
3538 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3539
3540 op0 = gimple_assign_rhs1 (stmt);
3541 rhs_type = TREE_TYPE (op0);
3542
3543 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3544 && !((INTEGRAL_TYPE_P (lhs_type)
3545 && INTEGRAL_TYPE_P (rhs_type))
3546 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3547 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3548 return false;
3549
3550 if ((INTEGRAL_TYPE_P (lhs_type)
3551 && (TYPE_PRECISION (lhs_type)
3552 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3553 || (INTEGRAL_TYPE_P (rhs_type)
3554 && (TYPE_PRECISION (rhs_type)
3555 != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
3556 {
3557 if (dump_enabled_p ())
3558 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3559 "type conversion to/from bit-precision unsupported."
3560 "\n");
3561 return false;
3562 }
3563
3564 /* Check the operands of the operation. */
3565 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
3566 {
3567 if (dump_enabled_p ())
3568 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3569 "use not simple.\n");
3570 return false;
3571 }
3572 if (op_type == binary_op)
3573 {
3574 bool ok;
3575
3576 op1 = gimple_assign_rhs2 (stmt);
3577 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3578 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3579 OP1. */
3580 if (CONSTANT_CLASS_P (op0))
3581 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
3582 else
3583 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
3584
3585 if (!ok)
3586 {
3587 if (dump_enabled_p ())
3588 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3589 "use not simple.\n");
3590 return false;
3591 }
3592 }
3593
3594 /* If op0 is an external or constant def, use a vector type of
3595 the same size as the output vector type. */
3596 if (!vectype_in)
3597 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3598 if (vec_stmt)
3599 gcc_assert (vectype_in);
3600 if (!vectype_in)
3601 {
3602 if (dump_enabled_p ())
3603 {
3604 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3605 "no vectype for scalar type ");
3606 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3607 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3608 }
3609
3610 return false;
3611 }
3612
3613 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3614 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3615 if (nunits_in < nunits_out)
3616 modifier = NARROW;
3617 else if (nunits_out == nunits_in)
3618 modifier = NONE;
3619 else
3620 modifier = WIDEN;
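 /* For illustration, assuming 128-bit vectors: a short -> int conversion
 reads V8HI and produces V4SI, so nunits_in > nunits_out and the
 modifier is WIDEN; int -> short is the NARROW case; and int -> float
 (V4SI -> V4SF) keeps the element count and is handled as NONE.  */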
3621
3622 /* Multiple types in SLP are handled by creating the appropriate number of
3623 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3624 case of SLP. */
3625 if (slp_node || PURE_SLP_STMT (stmt_info))
3626 ncopies = 1;
3627 else if (modifier == NARROW)
3628 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3629 else
3630 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3631
3632 /* Sanity check: make sure that at least one copy of the vectorized stmt
3633 needs to be generated. */
3634 gcc_assert (ncopies >= 1);
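 /* E.g., with a vectorization factor of 8 and V4SI input vectors
 (nunits_in == 4), a NONE or WIDEN conversion needs ncopies == 2
 copies of the vector stmt; in the SLP case the unrolling is done
 per SLP node instead, hence ncopies == 1.  */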
3635
3636 /* Supportable by target? */
3637 switch (modifier)
3638 {
3639 case NONE:
3640 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3641 return false;
3642 if (supportable_convert_operation (code, vectype_out, vectype_in,
3643 &decl1, &code1))
3644 break;
3645 /* FALLTHRU */
3646 unsupported:
3647 if (dump_enabled_p ())
3648 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3649 "conversion not supported by target.\n");
3650 return false;
3651
3652 case WIDEN:
3653 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3654 &code1, &code2, &multi_step_cvt,
3655 &interm_types))
3656 {
3657 /* Binary widening operations can only be supported directly by the
3658 architecture. */
3659 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3660 break;
3661 }
3662
3663 if (code != FLOAT_EXPR
3664 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3665 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3666 goto unsupported;
3667
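 /* For example, a short -> double FLOAT_EXPR that the target cannot
 widen directly may still be vectorized by first widening the shorts
 to a wider integer type (CVT_TYPE, e.g. int or long long) and then
 converting that intermediate vector to double, depending on which
 operations the target supports.  */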
3668 rhs_mode = TYPE_MODE (rhs_type);
3669 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3670 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3671 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3672 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3673 {
3674 cvt_type
3675 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3676 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3677 if (cvt_type == NULL_TREE)
3678 goto unsupported;
3679
3680 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3681 {
3682 if (!supportable_convert_operation (code, vectype_out,
3683 cvt_type, &decl1, &codecvt1))
3684 goto unsupported;
3685 }
3686 else if (!supportable_widening_operation (code, stmt, vectype_out,
3687 cvt_type, &codecvt1,
3688 &codecvt2, &multi_step_cvt,
3689 &interm_types))
3690 continue;
3691 else
3692 gcc_assert (multi_step_cvt == 0);
3693
3694 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3695 vectype_in, &code1, &code2,
3696 &multi_step_cvt, &interm_types))
3697 break;
3698 }
3699
3700 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3701 goto unsupported;
3702
3703 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3704 codecvt2 = ERROR_MARK;
3705 else
3706 {
3707 multi_step_cvt++;
3708 interm_types.safe_push (cvt_type);
3709 cvt_type = NULL_TREE;
3710 }
3711 break;
3712
3713 case NARROW:
3714 gcc_assert (op_type == unary_op);
3715 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3716 &code1, &multi_step_cvt,
3717 &interm_types))
3718 break;
3719
3720 if (code != FIX_TRUNC_EXPR
3721 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3722 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3723 goto unsupported;
3724
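 /* For example, a double -> short FIX_TRUNC_EXPR may be vectorized by
 first truncating to a same-width integer type (here long long) and
 then narrowing that integer vector to short, possibly in several
 steps, depending on target support.  */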
3725 rhs_mode = TYPE_MODE (rhs_type);
3726 cvt_type
3727 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3728 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3729 if (cvt_type == NULL_TREE)
3730 goto unsupported;
3731 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3732 &decl1, &codecvt1))
3733 goto unsupported;
3734 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3735 &code1, &multi_step_cvt,
3736 &interm_types))
3737 break;
3738 goto unsupported;
3739
3740 default:
3741 gcc_unreachable ();
3742 }
3743
3744 if (!vec_stmt) /* transformation not required. */
3745 {
3746 if (dump_enabled_p ())
3747 dump_printf_loc (MSG_NOTE, vect_location,
3748 "=== vectorizable_conversion ===\n");
3749 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3750 {
3751 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3752 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3753 }
3754 else if (modifier == NARROW)
3755 {
3756 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3757 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3758 }
3759 else
3760 {
3761 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3762 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3763 }
3764 interm_types.release ();
3765 return true;
3766 }
3767
3768 /** Transform. **/
3769 if (dump_enabled_p ())
3770 dump_printf_loc (MSG_NOTE, vect_location,
3771 "transform conversion. ncopies = %d.\n", ncopies);
3772
3773 if (op_type == binary_op)
3774 {
3775 if (CONSTANT_CLASS_P (op0))
3776 op0 = fold_convert (TREE_TYPE (op1), op0);
3777 else if (CONSTANT_CLASS_P (op1))
3778 op1 = fold_convert (TREE_TYPE (op0), op1);
3779 }
3780
3781 /* In case of multi-step conversion, we first generate conversion operations
3782 to the intermediate types, and then from those types to the final one.
3783 We create vector destinations for the intermediate types (TYPES) received
3784 from supportable_*_operation, and store them in the correct order
3785 for future use in vect_create_vectorized_*_stmts (). */
3786 vec_dsts.create (multi_step_cvt + 1);
3787 vec_dest = vect_create_destination_var (scalar_dest,
3788 (cvt_type && modifier == WIDEN)
3789 ? cvt_type : vectype_out);
3790 vec_dsts.quick_push (vec_dest);
3791
3792 if (multi_step_cvt)
3793 {
3794 for (i = interm_types.length () - 1;
3795 interm_types.iterate (i, &intermediate_type); i--)
3796 {
3797 vec_dest = vect_create_destination_var (scalar_dest,
3798 intermediate_type);
3799 vec_dsts.quick_push (vec_dest);
3800 }
3801 }
3802
3803 if (cvt_type)
3804 vec_dest = vect_create_destination_var (scalar_dest,
3805 modifier == WIDEN
3806 ? vectype_out : cvt_type);
3807
3808 if (!slp_node)
3809 {
3810 if (modifier == WIDEN)
3811 {
3812 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3813 if (op_type == binary_op)
3814 vec_oprnds1.create (1);
3815 }
3816 else if (modifier == NARROW)
3817 vec_oprnds0.create (
3818 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3819 }
3820 else if (code == WIDEN_LSHIFT_EXPR)
3821 vec_oprnds1.create (slp_node->vec_stmts_size);
3822
3823 last_oprnd = op0;
3824 prev_stmt_info = NULL;
3825 switch (modifier)
3826 {
3827 case NONE:
3828 for (j = 0; j < ncopies; j++)
3829 {
3830 if (j == 0)
3831 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3832 -1);
3833 else
3834 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3835
3836 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3837 {
3838 /* Arguments are ready, create the new vector stmt. */
3839 if (code1 == CALL_EXPR)
3840 {
3841 new_stmt = gimple_build_call (decl1, 1, vop0);
3842 new_temp = make_ssa_name (vec_dest, new_stmt);
3843 gimple_call_set_lhs (new_stmt, new_temp);
3844 }
3845 else
3846 {
3847 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3848 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
3849 new_temp = make_ssa_name (vec_dest, new_stmt);
3850 gimple_assign_set_lhs (new_stmt, new_temp);
3851 }
3852
3853 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3854 if (slp_node)
3855 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3856 else
3857 {
3858 if (!prev_stmt_info)
3859 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3860 else
3861 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3862 prev_stmt_info = vinfo_for_stmt (new_stmt);
3863 }
3864 }
3865 }
3866 break;
3867
3868 case WIDEN:
3869 /* In case the vectorization factor (VF) is bigger than the number
3870 of elements that we can fit in a vectype (nunits), we have to
3871 generate more than one vector stmt - i.e. - we need to "unroll"
3872 the vector stmt by a factor VF/nunits. */
3873 for (j = 0; j < ncopies; j++)
3874 {
3875 /* Handle uses. */
3876 if (j == 0)
3877 {
3878 if (slp_node)
3879 {
3880 if (code == WIDEN_LSHIFT_EXPR)
3881 {
3882 unsigned int k;
3883
3884 vec_oprnd1 = op1;
3885 /* Store vec_oprnd1 for every vector stmt to be created
3886 for SLP_NODE. We check during the analysis that all
3887 the shift arguments are the same. */
3888 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3889 vec_oprnds1.quick_push (vec_oprnd1);
3890
3891 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3892 slp_node, -1);
3893 }
3894 else
3895 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3896 &vec_oprnds1, slp_node, -1);
3897 }
3898 else
3899 {
3900 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
3901 vec_oprnds0.quick_push (vec_oprnd0);
3902 if (op_type == binary_op)
3903 {
3904 if (code == WIDEN_LSHIFT_EXPR)
3905 vec_oprnd1 = op1;
3906 else
3907 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
3908 vec_oprnds1.quick_push (vec_oprnd1);
3909 }
3910 }
3911 }
3912 else
3913 {
3914 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3915 vec_oprnds0.truncate (0);
3916 vec_oprnds0.quick_push (vec_oprnd0);
3917 if (op_type == binary_op)
3918 {
3919 if (code == WIDEN_LSHIFT_EXPR)
3920 vec_oprnd1 = op1;
3921 else
3922 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
3923 vec_oprnd1);
3924 vec_oprnds1.truncate (0);
3925 vec_oprnds1.quick_push (vec_oprnd1);
3926 }
3927 }
3928
3929 /* Arguments are ready. Create the new vector stmts. */
3930 for (i = multi_step_cvt; i >= 0; i--)
3931 {
3932 tree this_dest = vec_dsts[i];
3933 enum tree_code c1 = code1, c2 = code2;
3934 if (i == 0 && codecvt2 != ERROR_MARK)
3935 {
3936 c1 = codecvt1;
3937 c2 = codecvt2;
3938 }
3939 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
3940 &vec_oprnds1,
3941 stmt, this_dest, gsi,
3942 c1, c2, decl1, decl2,
3943 op_type);
3944 }
3945
3946 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3947 {
3948 if (cvt_type)
3949 {
3950 if (codecvt1 == CALL_EXPR)
3951 {
3952 new_stmt = gimple_build_call (decl1, 1, vop0);
3953 new_temp = make_ssa_name (vec_dest, new_stmt);
3954 gimple_call_set_lhs (new_stmt, new_temp);
3955 }
3956 else
3957 {
3958 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3959 new_temp = make_ssa_name (vec_dest);
3960 new_stmt = gimple_build_assign (new_temp, codecvt1,
3961 vop0);
3962 }
3963
3964 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3965 }
3966 else
3967 new_stmt = SSA_NAME_DEF_STMT (vop0);
3968
3969 if (slp_node)
3970 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3971 else
3972 {
3973 if (!prev_stmt_info)
3974 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3975 else
3976 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3977 prev_stmt_info = vinfo_for_stmt (new_stmt);
3978 }
3979 }
3980 }
3981
3982 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3983 break;
3984
3985 case NARROW:
3986 /* In case the vectorization factor (VF) is bigger than the number
3987 of elements that we can fit in a vectype (nunits), we have to
3988 generate more than one vector stmt - i.e. - we need to "unroll"
3989 the vector stmt by a factor VF/nunits. */
3990 for (j = 0; j < ncopies; j++)
3991 {
3992 /* Handle uses. */
3993 if (slp_node)
3994 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3995 slp_node, -1);
3996 else
3997 {
3998 vec_oprnds0.truncate (0);
3999 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4000 vect_pow2 (multi_step_cvt) - 1);
4001 }
4002
4003 /* Arguments are ready. Create the new vector stmts. */
4004 if (cvt_type)
4005 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4006 {
4007 if (codecvt1 == CALL_EXPR)
4008 {
4009 new_stmt = gimple_build_call (decl1, 1, vop0);
4010 new_temp = make_ssa_name (vec_dest, new_stmt);
4011 gimple_call_set_lhs (new_stmt, new_temp);
4012 }
4013 else
4014 {
4015 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4016 new_temp = make_ssa_name (vec_dest);
4017 new_stmt = gimple_build_assign (new_temp, codecvt1,
4018 vop0);
4019 }
4020
4021 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4022 vec_oprnds0[i] = new_temp;
4023 }
4024
4025 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4026 stmt, vec_dsts, gsi,
4027 slp_node, code1,
4028 &prev_stmt_info);
4029 }
4030
4031 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4032 break;
4033 }
4034
4035 vec_oprnds0.release ();
4036 vec_oprnds1.release ();
4037 vec_dsts.release ();
4038 interm_types.release ();
4039
4040 return true;
4041 }
4042
4043
4044 /* Function vectorizable_assignment.
4045
4046 Check if STMT performs an assignment (copy) that can be vectorized.
4047 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4048 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4049 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4050
4051 static bool
4052 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4053 gimple **vec_stmt, slp_tree slp_node)
4054 {
4055 tree vec_dest;
4056 tree scalar_dest;
4057 tree op;
4058 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4059 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4060 tree new_temp;
4061 gimple *def_stmt;
4062 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4063 int ncopies;
4064 int i, j;
4065 vec<tree> vec_oprnds = vNULL;
4066 tree vop;
4067 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4068 vec_info *vinfo = stmt_info->vinfo;
4069 gimple *new_stmt = NULL;
4070 stmt_vec_info prev_stmt_info = NULL;
4071 enum tree_code code;
4072 tree vectype_in;
4073
4074 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4075 return false;
4076
4077 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4078 return false;
4079
4080 /* Is vectorizable assignment? */
4081 if (!is_gimple_assign (stmt))
4082 return false;
4083
4084 scalar_dest = gimple_assign_lhs (stmt);
4085 if (TREE_CODE (scalar_dest) != SSA_NAME)
4086 return false;
4087
4088 code = gimple_assign_rhs_code (stmt);
4089 if (gimple_assign_single_p (stmt)
4090 || code == PAREN_EXPR
4091 || CONVERT_EXPR_CODE_P (code))
4092 op = gimple_assign_rhs1 (stmt);
4093 else
4094 return false;
4095
4096 if (code == VIEW_CONVERT_EXPR)
4097 op = TREE_OPERAND (op, 0);
4098
4099 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4100 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4101
4102 /* Multiple types in SLP are handled by creating the appropriate number of
4103 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4104 case of SLP. */
4105 if (slp_node || PURE_SLP_STMT (stmt_info))
4106 ncopies = 1;
4107 else
4108 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4109
4110 gcc_assert (ncopies >= 1);
4111
4112 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4113 {
4114 if (dump_enabled_p ())
4115 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4116 "use not simple.\n");
4117 return false;
4118 }
4119
4120 /* We can handle NOP_EXPR conversions that do not change the number
4121 of elements or the vector size. */
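 /* For instance, with 128-bit vectors an int -> unsigned int conversion
 (V4SI to V4SI) or a VIEW_CONVERT_EXPR between same-sized vectors is
 treated as a plain copy here, whereas an int -> short conversion
 changes the number of elements and is left to vectorizable_conversion.  */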
4122 if ((CONVERT_EXPR_CODE_P (code)
4123 || code == VIEW_CONVERT_EXPR)
4124 && (!vectype_in
4125 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4126 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4127 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4128 return false;
4129
4130 /* We do not handle bit-precision changes. */
4131 if ((CONVERT_EXPR_CODE_P (code)
4132 || code == VIEW_CONVERT_EXPR)
4133 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4134 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4135 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4136 || ((TYPE_PRECISION (TREE_TYPE (op))
4137 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4138 /* But a conversion that does not change the bit-pattern is ok. */
4139 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4140 > TYPE_PRECISION (TREE_TYPE (op)))
4141 && TYPE_UNSIGNED (TREE_TYPE (op))))
4142 {
4143 if (dump_enabled_p ())
4144 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4145 "type conversion to/from bit-precision "
4146 "unsupported.\n");
4147 return false;
4148 }
4149
4150 if (!vec_stmt) /* transformation not required. */
4151 {
4152 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4153 if (dump_enabled_p ())
4154 dump_printf_loc (MSG_NOTE, vect_location,
4155 "=== vectorizable_assignment ===\n");
4156 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4157 return true;
4158 }
4159
4160 /** Transform. **/
4161 if (dump_enabled_p ())
4162 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4163
4164 /* Handle def. */
4165 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4166
4167 /* Handle use. */
4168 for (j = 0; j < ncopies; j++)
4169 {
4170 /* Handle uses. */
4171 if (j == 0)
4172 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4173 else
4174 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4175
4176 /* Arguments are ready. create the new vector stmt. */
4177 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4178 {
4179 if (CONVERT_EXPR_CODE_P (code)
4180 || code == VIEW_CONVERT_EXPR)
4181 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4182 new_stmt = gimple_build_assign (vec_dest, vop);
4183 new_temp = make_ssa_name (vec_dest, new_stmt);
4184 gimple_assign_set_lhs (new_stmt, new_temp);
4185 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4186 if (slp_node)
4187 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4188 }
4189
4190 if (slp_node)
4191 continue;
4192
4193 if (j == 0)
4194 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4195 else
4196 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4197
4198 prev_stmt_info = vinfo_for_stmt (new_stmt);
4199 }
4200
4201 vec_oprnds.release ();
4202 return true;
4203 }
4204
4205
4206 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4207 either as shift by a scalar or by a vector. */
4208
4209 bool
4210 vect_supportable_shift (enum tree_code code, tree scalar_type)
4211 {
4212
4213 machine_mode vec_mode;
4214 optab optab;
4215 int icode;
4216 tree vectype;
4217
4218 vectype = get_vectype_for_scalar_type (scalar_type);
4219 if (!vectype)
4220 return false;
4221
4222 optab = optab_for_tree_code (code, vectype, optab_scalar);
4223 if (!optab
4224 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4225 {
4226 optab = optab_for_tree_code (code, vectype, optab_vector);
4227 if (!optab
4228 || (optab_handler (optab, TYPE_MODE (vectype))
4229 == CODE_FOR_nothing))
4230 return false;
4231 }
4232
4233 vec_mode = TYPE_MODE (vectype);
4234 icode = (int) optab_handler (optab, vec_mode);
4235 if (icode == CODE_FOR_nothing)
4236 return false;
4237
4238 return true;
4239 }
4240
4241
4242 /* Function vectorizable_shift.
4243
4244 Check if STMT performs a shift operation that can be vectorized.
4245 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4246 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4247 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4248
4249 static bool
4250 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4251 gimple **vec_stmt, slp_tree slp_node)
4252 {
4253 tree vec_dest;
4254 tree scalar_dest;
4255 tree op0, op1 = NULL;
4256 tree vec_oprnd1 = NULL_TREE;
4257 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4258 tree vectype;
4259 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4260 enum tree_code code;
4261 machine_mode vec_mode;
4262 tree new_temp;
4263 optab optab;
4264 int icode;
4265 machine_mode optab_op2_mode;
4266 gimple *def_stmt;
4267 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4268 gimple *new_stmt = NULL;
4269 stmt_vec_info prev_stmt_info;
4270 int nunits_in;
4271 int nunits_out;
4272 tree vectype_out;
4273 tree op1_vectype;
4274 int ncopies;
4275 int j, i;
4276 vec<tree> vec_oprnds0 = vNULL;
4277 vec<tree> vec_oprnds1 = vNULL;
4278 tree vop0, vop1;
4279 unsigned int k;
4280 bool scalar_shift_arg = true;
4281 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4282 vec_info *vinfo = stmt_info->vinfo;
4283 int vf;
4284
4285 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4286 return false;
4287
4288 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4289 return false;
4290
4291 /* Is STMT a vectorizable binary/unary operation? */
4292 if (!is_gimple_assign (stmt))
4293 return false;
4294
4295 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4296 return false;
4297
4298 code = gimple_assign_rhs_code (stmt);
4299
4300 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4301 || code == RROTATE_EXPR))
4302 return false;
4303
4304 scalar_dest = gimple_assign_lhs (stmt);
4305 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4306 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4307 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4308 {
4309 if (dump_enabled_p ())
4310 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4311 "bit-precision shifts not supported.\n");
4312 return false;
4313 }
4314
4315 op0 = gimple_assign_rhs1 (stmt);
4316 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4317 {
4318 if (dump_enabled_p ())
4319 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4320 "use not simple.\n");
4321 return false;
4322 }
4323 /* If op0 is an external or constant def, use a vector type with
4324 the same size as the output vector type. */
4325 if (!vectype)
4326 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4327 if (vec_stmt)
4328 gcc_assert (vectype);
4329 if (!vectype)
4330 {
4331 if (dump_enabled_p ())
4332 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4333 "no vectype for scalar type\n");
4334 return false;
4335 }
4336
4337 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4338 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4339 if (nunits_out != nunits_in)
4340 return false;
4341
4342 op1 = gimple_assign_rhs2 (stmt);
4343 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4344 {
4345 if (dump_enabled_p ())
4346 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4347 "use not simple.\n");
4348 return false;
4349 }
4350
4351 if (loop_vinfo)
4352 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4353 else
4354 vf = 1;
4355
4356 /* Multiple types in SLP are handled by creating the appropriate number of
4357 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4358 case of SLP. */
4359 if (slp_node || PURE_SLP_STMT (stmt_info))
4360 ncopies = 1;
4361 else
4362 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4363
4364 gcc_assert (ncopies >= 1);
4365
4366 /* Determine whether the shift amount is a vector or a scalar. If the
4367 shift/rotate amount is a vector, use the vector/vector shift optabs. */
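 /* E.g., for "a[i] = b[i] << 3" or a loop-invariant shift count the
 amount is a scalar and the vector/scalar optab can be used, whereas
 for "a[i] = b[i] << c[i]" the count varies per element and a
 vector/vector shift is required.  */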
4368
4369 if ((dt[1] == vect_internal_def
4370 || dt[1] == vect_induction_def)
4371 && !slp_node)
4372 scalar_shift_arg = false;
4373 else if (dt[1] == vect_constant_def
4374 || dt[1] == vect_external_def
4375 || dt[1] == vect_internal_def)
4376 {
4377 /* In SLP, we need to check whether the shift count is the same;
4378 in loops, if it is a constant or invariant, it is always
4379 a scalar shift. */
4380 if (slp_node)
4381 {
4382 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4383 gimple *slpstmt;
4384
4385 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4386 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4387 scalar_shift_arg = false;
4388 }
4389 }
4390 else
4391 {
4392 if (dump_enabled_p ())
4393 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4394 "operand mode requires invariant argument.\n");
4395 return false;
4396 }
4397
4398 /* Vector shifted by vector. */
4399 if (!scalar_shift_arg)
4400 {
4401 optab = optab_for_tree_code (code, vectype, optab_vector);
4402 if (dump_enabled_p ())
4403 dump_printf_loc (MSG_NOTE, vect_location,
4404 "vector/vector shift/rotate found.\n");
4405
4406 if (!op1_vectype)
4407 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4408 if (op1_vectype == NULL_TREE
4409 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4410 {
4411 if (dump_enabled_p ())
4412 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4413 "unusable type for last operand in"
4414 " vector/vector shift/rotate.\n");
4415 return false;
4416 }
4417 }
4418 /* See if the machine has a vector shifted by scalar insn and if not
4419 then see if it has a vector shifted by vector insn. */
4420 else
4421 {
4422 optab = optab_for_tree_code (code, vectype, optab_scalar);
4423 if (optab
4424 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4425 {
4426 if (dump_enabled_p ())
4427 dump_printf_loc (MSG_NOTE, vect_location,
4428 "vector/scalar shift/rotate found.\n");
4429 }
4430 else
4431 {
4432 optab = optab_for_tree_code (code, vectype, optab_vector);
4433 if (optab
4434 && (optab_handler (optab, TYPE_MODE (vectype))
4435 != CODE_FOR_nothing))
4436 {
4437 scalar_shift_arg = false;
4438
4439 if (dump_enabled_p ())
4440 dump_printf_loc (MSG_NOTE, vect_location,
4441 "vector/vector shift/rotate found.\n");
4442
4443 /* Unlike the other binary operators, shifts/rotates have
4444 the rhs being int, instead of the same type as the lhs,
4445 so make sure the scalar is the right type if we are
4446 dealing with vectors of long long/long/short/char. */
4447 if (dt[1] == vect_constant_def)
4448 op1 = fold_convert (TREE_TYPE (vectype), op1);
4449 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4450 TREE_TYPE (op1)))
4451 {
4452 if (slp_node
4453 && TYPE_MODE (TREE_TYPE (vectype))
4454 != TYPE_MODE (TREE_TYPE (op1)))
4455 {
4456 if (dump_enabled_p ())
4457 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4458 "unusable type for last operand in"
4459 " vector/vector shift/rotate.\n");
4460 return false;
4461 }
4462 if (vec_stmt && !slp_node)
4463 {
4464 op1 = fold_convert (TREE_TYPE (vectype), op1);
4465 op1 = vect_init_vector (stmt, op1,
4466 TREE_TYPE (vectype), NULL);
4467 }
4468 }
4469 }
4470 }
4471 }
4472
4473 /* Supportable by target? */
4474 if (!optab)
4475 {
4476 if (dump_enabled_p ())
4477 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4478 "no optab.\n");
4479 return false;
4480 }
4481 vec_mode = TYPE_MODE (vectype);
4482 icode = (int) optab_handler (optab, vec_mode);
4483 if (icode == CODE_FOR_nothing)
4484 {
4485 if (dump_enabled_p ())
4486 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4487 "op not supported by target.\n");
4488 /* Check only during analysis. */
4489 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4490 || (vf < vect_min_worthwhile_factor (code)
4491 && !vec_stmt))
4492 return false;
4493 if (dump_enabled_p ())
4494 dump_printf_loc (MSG_NOTE, vect_location,
4495 "proceeding using word mode.\n");
4496 }
4497
4498 /* Worthwhile without SIMD support? Check only during analysis. */
4499 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4500 && vf < vect_min_worthwhile_factor (code)
4501 && !vec_stmt)
4502 {
4503 if (dump_enabled_p ())
4504 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4505 "not worthwhile without SIMD support.\n");
4506 return false;
4507 }
4508
4509 if (!vec_stmt) /* transformation not required. */
4510 {
4511 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4512 if (dump_enabled_p ())
4513 dump_printf_loc (MSG_NOTE, vect_location,
4514 "=== vectorizable_shift ===\n");
4515 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4516 return true;
4517 }
4518
4519 /** Transform. **/
4520
4521 if (dump_enabled_p ())
4522 dump_printf_loc (MSG_NOTE, vect_location,
4523 "transform binary/unary operation.\n");
4524
4525 /* Handle def. */
4526 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4527
4528 prev_stmt_info = NULL;
4529 for (j = 0; j < ncopies; j++)
4530 {
4531 /* Handle uses. */
4532 if (j == 0)
4533 {
4534 if (scalar_shift_arg)
4535 {
4536 /* Vector shl and shr insn patterns can be defined with scalar
4537 operand 2 (shift operand). In this case, use constant or loop
4538 invariant op1 directly, without extending it to vector mode
4539 first. */
4540 optab_op2_mode = insn_data[icode].operand[2].mode;
4541 if (!VECTOR_MODE_P (optab_op2_mode))
4542 {
4543 if (dump_enabled_p ())
4544 dump_printf_loc (MSG_NOTE, vect_location,
4545 "operand 1 using scalar mode.\n");
4546 vec_oprnd1 = op1;
4547 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4548 vec_oprnds1.quick_push (vec_oprnd1);
4549 if (slp_node)
4550 {
4551 /* Store vec_oprnd1 for every vector stmt to be created
4552 for SLP_NODE. We check during the analysis that all
4553 the shift arguments are the same.
4554 TODO: Allow different constants for different vector
4555 stmts generated for an SLP instance. */
4556 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4557 vec_oprnds1.quick_push (vec_oprnd1);
4558 }
4559 }
4560 }
4561
4562 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4563 (a special case for certain kinds of vector shifts); otherwise,
4564 operand 1 should be of a vector type (the usual case). */
4565 if (vec_oprnd1)
4566 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4567 slp_node, -1);
4568 else
4569 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4570 slp_node, -1);
4571 }
4572 else
4573 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4574
4575 /* Arguments are ready. Create the new vector stmt. */
4576 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4577 {
4578 vop1 = vec_oprnds1[i];
4579 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4580 new_temp = make_ssa_name (vec_dest, new_stmt);
4581 gimple_assign_set_lhs (new_stmt, new_temp);
4582 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4583 if (slp_node)
4584 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4585 }
4586
4587 if (slp_node)
4588 continue;
4589
4590 if (j == 0)
4591 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4592 else
4593 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4594 prev_stmt_info = vinfo_for_stmt (new_stmt);
4595 }
4596
4597 vec_oprnds0.release ();
4598 vec_oprnds1.release ();
4599
4600 return true;
4601 }
4602
4603
4604 /* Function vectorizable_operation.
4605
4606 Check if STMT performs a binary, unary or ternary operation that can
4607 be vectorized.
4608 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4609 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4610 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4611
4612 static bool
4613 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
4614 gimple **vec_stmt, slp_tree slp_node)
4615 {
4616 tree vec_dest;
4617 tree scalar_dest;
4618 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4619 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4620 tree vectype;
4621 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4622 enum tree_code code;
4623 machine_mode vec_mode;
4624 tree new_temp;
4625 int op_type;
4626 optab optab;
4627 bool target_support_p;
4628 gimple *def_stmt;
4629 enum vect_def_type dt[3]
4630 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4631 gimple *new_stmt = NULL;
4632 stmt_vec_info prev_stmt_info;
4633 int nunits_in;
4634 int nunits_out;
4635 tree vectype_out;
4636 int ncopies;
4637 int j, i;
4638 vec<tree> vec_oprnds0 = vNULL;
4639 vec<tree> vec_oprnds1 = vNULL;
4640 vec<tree> vec_oprnds2 = vNULL;
4641 tree vop0, vop1, vop2;
4642 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4643 vec_info *vinfo = stmt_info->vinfo;
4644 int vf;
4645
4646 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4647 return false;
4648
4649 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4650 return false;
4651
4652 /* Is STMT a vectorizable binary/unary operation? */
4653 if (!is_gimple_assign (stmt))
4654 return false;
4655
4656 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4657 return false;
4658
4659 code = gimple_assign_rhs_code (stmt);
4660
4661 /* For pointer addition, we should use the normal plus for
4662 the vector addition. */
4663 if (code == POINTER_PLUS_EXPR)
4664 code = PLUS_EXPR;
4665
4666 /* Support only unary, binary, or ternary operations. */
4667 op_type = TREE_CODE_LENGTH (code);
4668 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
4669 {
4670 if (dump_enabled_p ())
4671 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4672 "num. args = %d (not unary/binary/ternary op).\n",
4673 op_type);
4674 return false;
4675 }
4676
4677 scalar_dest = gimple_assign_lhs (stmt);
4678 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4679
4680 /* Most operations cannot handle bit-precision types without extra
4681 truncations. */
4682 if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4683 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4684 /* Exceptions are bitwise binary operations. */
4685 && code != BIT_IOR_EXPR
4686 && code != BIT_XOR_EXPR
4687 && code != BIT_AND_EXPR)
4688 {
4689 if (dump_enabled_p ())
4690 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4691 "bit-precision arithmetic not supported.\n");
4692 return false;
4693 }
4694
4695 op0 = gimple_assign_rhs1 (stmt);
4696 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4697 {
4698 if (dump_enabled_p ())
4699 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4700 "use not simple.\n");
4701 return false;
4702 }
4703 /* If op0 is an external or constant def, use a vector type with
4704 the same size as the output vector type. */
4705 if (!vectype)
4706 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4707 if (vec_stmt)
4708 gcc_assert (vectype);
4709 if (!vectype)
4710 {
4711 if (dump_enabled_p ())
4712 {
4713 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4714 "no vectype for scalar type ");
4715 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4716 TREE_TYPE (op0));
4717 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4718 }
4719
4720 return false;
4721 }
4722
4723 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4724 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4725 if (nunits_out != nunits_in)
4726 return false;
4727
4728 if (op_type == binary_op || op_type == ternary_op)
4729 {
4730 op1 = gimple_assign_rhs2 (stmt);
4731 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
4732 {
4733 if (dump_enabled_p ())
4734 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4735 "use not simple.\n");
4736 return false;
4737 }
4738 }
4739 if (op_type == ternary_op)
4740 {
4741 op2 = gimple_assign_rhs3 (stmt);
4742 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
4743 {
4744 if (dump_enabled_p ())
4745 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4746 "use not simple.\n");
4747 return false;
4748 }
4749 }
4750
4751 if (loop_vinfo)
4752 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4753 else
4754 vf = 1;
4755
4756 /* Multiple types in SLP are handled by creating the appropriate number of
4757 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4758 case of SLP. */
4759 if (slp_node || PURE_SLP_STMT (stmt_info))
4760 ncopies = 1;
4761 else
4762 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4763
4764 gcc_assert (ncopies >= 1);
4765
4766 /* Shifts are handled in vectorizable_shift (). */
4767 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4768 || code == RROTATE_EXPR)
4769 return false;
4770
4771 /* Supportable by target? */
4772
4773 vec_mode = TYPE_MODE (vectype);
4774 if (code == MULT_HIGHPART_EXPR)
4775 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
4776 else
4777 {
4778 optab = optab_for_tree_code (code, vectype, optab_default);
4779 if (!optab)
4780 {
4781 if (dump_enabled_p ())
4782 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4783 "no optab.\n");
4784 return false;
4785 }
4786 target_support_p = (optab_handler (optab, vec_mode)
4787 != CODE_FOR_nothing);
4788 }
4789
4790 if (!target_support_p)
4791 {
4792 if (dump_enabled_p ())
4793 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4794 "op not supported by target.\n");
4795 /* Check only during analysis. */
4796 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4797 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
4798 return false;
4799 if (dump_enabled_p ())
4800 dump_printf_loc (MSG_NOTE, vect_location,
4801 "proceeding using word mode.\n");
4802 }
4803
4804 /* Worthwhile without SIMD support? Check only during analysis. */
4805 if (!VECTOR_MODE_P (vec_mode)
4806 && !vec_stmt
4807 && vf < vect_min_worthwhile_factor (code))
4808 {
4809 if (dump_enabled_p ())
4810 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4811 "not worthwhile without SIMD support.\n");
4812 return false;
4813 }
4814
4815 if (!vec_stmt) /* transformation not required. */
4816 {
4817 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
4818 if (dump_enabled_p ())
4819 dump_printf_loc (MSG_NOTE, vect_location,
4820 "=== vectorizable_operation ===\n");
4821 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4822 return true;
4823 }
4824
4825 /** Transform. **/
4826
4827 if (dump_enabled_p ())
4828 dump_printf_loc (MSG_NOTE, vect_location,
4829 "transform binary/unary operation.\n");
4830
4831 /* Handle def. */
4832 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4833
4834 /* In case the vectorization factor (VF) is bigger than the number
4835 of elements that we can fit in a vectype (nunits), we have to generate
4836 more than one vector stmt - i.e. - we need to "unroll" the
4837 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4838 from one copy of the vector stmt to the next, in the field
4839 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4840 stages to find the correct vector defs to be used when vectorizing
4841 stmts that use the defs of the current stmt. The example below
4842 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4843 we need to create 4 vectorized stmts):
4844
4845 before vectorization:
4846 RELATED_STMT VEC_STMT
4847 S1: x = memref - -
4848 S2: z = x + 1 - -
4849
4850 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4851 there):
4852 RELATED_STMT VEC_STMT
4853 VS1_0: vx0 = memref0 VS1_1 -
4854 VS1_1: vx1 = memref1 VS1_2 -
4855 VS1_2: vx2 = memref2 VS1_3 -
4856 VS1_3: vx3 = memref3 - -
4857 S1: x = load - VS1_0
4858 S2: z = x + 1 - -
4859
4860 step2: vectorize stmt S2 (done here):
4861 To vectorize stmt S2 we first need to find the relevant vector
4862 def for the first operand 'x'. This is, as usual, obtained from
4863 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4864 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4865 relevant vector def 'vx0'. Having found 'vx0' we can generate
4866 the vector stmt VS2_0, and as usual, record it in the
4867 STMT_VINFO_VEC_STMT of stmt S2.
4868 When creating the second copy (VS2_1), we obtain the relevant vector
4869 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4870 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4871 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4872 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4873 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4874 chain of stmts and pointers:
4875 RELATED_STMT VEC_STMT
4876 VS1_0: vx0 = memref0 VS1_1 -
4877 VS1_1: vx1 = memref1 VS1_2 -
4878 VS1_2: vx2 = memref2 VS1_3 -
4879 VS1_3: vx3 = memref3 - -
4880 S1: x = load - VS1_0
4881 VS2_0: vz0 = vx0 + v1 VS2_1 -
4882 VS2_1: vz1 = vx1 + v1 VS2_2 -
4883 VS2_2: vz2 = vx2 + v1 VS2_3 -
4884 VS2_3: vz3 = vx3 + v1 - -
4885 S2: z = x + 1 - VS2_0 */
4886
4887 prev_stmt_info = NULL;
4888 for (j = 0; j < ncopies; j++)
4889 {
4890 /* Handle uses. */
4891 if (j == 0)
4892 {
4893 if (op_type == binary_op || op_type == ternary_op)
4894 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4895 slp_node, -1);
4896 else
4897 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4898 slp_node, -1);
4899 if (op_type == ternary_op)
4900 {
4901 vec_oprnds2.create (1);
4902 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
4903 stmt));
4904 }
4905 }
4906 else
4907 {
4908 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4909 if (op_type == ternary_op)
4910 {
4911 tree vec_oprnd = vec_oprnds2.pop ();
4912 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
4913 vec_oprnd));
4914 }
4915 }
4916
4917 /* Arguments are ready. Create the new vector stmt. */
4918 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4919 {
4920 vop1 = ((op_type == binary_op || op_type == ternary_op)
4921 ? vec_oprnds1[i] : NULL_TREE);
4922 vop2 = ((op_type == ternary_op)
4923 ? vec_oprnds2[i] : NULL_TREE);
4924 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
4925 new_temp = make_ssa_name (vec_dest, new_stmt);
4926 gimple_assign_set_lhs (new_stmt, new_temp);
4927 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4928 if (slp_node)
4929 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4930 }
4931
4932 if (slp_node)
4933 continue;
4934
4935 if (j == 0)
4936 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4937 else
4938 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4939 prev_stmt_info = vinfo_for_stmt (new_stmt);
4940 }
4941
4942 vec_oprnds0.release ();
4943 vec_oprnds1.release ();
4944 vec_oprnds2.release ();
4945
4946 return true;
4947 }
4948
4949 /* A helper function to ensure data reference DR's base alignment
4950 for STMT_INFO. */
4951
4952 static void
4953 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
4954 {
4955 if (!dr->aux)
4956 return;
4957
4958 if (DR_VECT_AUX (dr)->base_misaligned)
4959 {
4960 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4961 tree base_decl = DR_VECT_AUX (dr)->base_decl;
4962
4963 if (decl_in_symtab_p (base_decl))
4964 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
4965 else
4966 {
4967 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
4968 DECL_USER_ALIGN (base_decl) = 1;
4969 }
4970 DR_VECT_AUX (dr)->base_misaligned = false;
4971 }
4972 }
4973
4974
4975 /* Given a vector type VECTYPE, return the VECTOR_CST mask that implements
4976 reversal of the vector elements. If that is impossible to do,
4977 return NULL. */
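 /* For example, for a V4SI vector the selector built below is
 { 3, 2, 1, 0 }, i.e. the last element becomes the first.  */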
4978
4979 static tree
4980 perm_mask_for_reverse (tree vectype)
4981 {
4982 int i, nunits;
4983 unsigned char *sel;
4984
4985 nunits = TYPE_VECTOR_SUBPARTS (vectype);
4986 sel = XALLOCAVEC (unsigned char, nunits);
4987
4988 for (i = 0; i < nunits; ++i)
4989 sel[i] = nunits - 1 - i;
4990
4991 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
4992 return NULL_TREE;
4993 return vect_gen_perm_mask_checked (vectype, sel);
4994 }
4995
4996 /* Function vectorizable_store.
4997
4998 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
4999 can be vectorized.
5000 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5001 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5002 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5003
5004 static bool
5005 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5006 slp_tree slp_node)
5007 {
5008 tree scalar_dest;
5009 tree data_ref;
5010 tree op;
5011 tree vec_oprnd = NULL_TREE;
5012 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5013 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5014 tree elem_type;
5015 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5016 struct loop *loop = NULL;
5017 machine_mode vec_mode;
5018 tree dummy;
5019 enum dr_alignment_support alignment_support_scheme;
5020 gimple *def_stmt;
5021 enum vect_def_type dt;
5022 stmt_vec_info prev_stmt_info = NULL;
5023 tree dataref_ptr = NULL_TREE;
5024 tree dataref_offset = NULL_TREE;
5025 gimple *ptr_incr = NULL;
5026 int ncopies;
5027 int j;
5028 gimple *next_stmt, *first_stmt = NULL;
5029 bool grouped_store = false;
5030 bool store_lanes_p = false;
5031 unsigned int group_size, i;
5032 vec<tree> dr_chain = vNULL;
5033 vec<tree> oprnds = vNULL;
5034 vec<tree> result_chain = vNULL;
5035 bool inv_p;
5036 bool negative = false;
5037 tree offset = NULL_TREE;
5038 vec<tree> vec_oprnds = vNULL;
5039 bool slp = (slp_node != NULL);
5040 unsigned int vec_num;
5041 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5042 vec_info *vinfo = stmt_info->vinfo;
5043 tree aggr_type;
5044 tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
5045 tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
5046 int scatter_scale = 1;
5047 enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
5048 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5049 gimple *new_stmt;
5050
5051 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5052 return false;
5053
5054 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5055 return false;
5056
5057 /* Is vectorizable store? */
5058
5059 if (!is_gimple_assign (stmt))
5060 return false;
5061
5062 scalar_dest = gimple_assign_lhs (stmt);
5063 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5064 && is_pattern_stmt_p (stmt_info))
5065 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5066 if (TREE_CODE (scalar_dest) != ARRAY_REF
5067 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5068 && TREE_CODE (scalar_dest) != INDIRECT_REF
5069 && TREE_CODE (scalar_dest) != COMPONENT_REF
5070 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5071 && TREE_CODE (scalar_dest) != REALPART_EXPR
5072 && TREE_CODE (scalar_dest) != MEM_REF)
5073 return false;
5074
5075 gcc_assert (gimple_assign_single_p (stmt));
5076
5077 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5078 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5079
5080 if (loop_vinfo)
5081 loop = LOOP_VINFO_LOOP (loop_vinfo);
5082
5083 /* Multiple types in SLP are handled by creating the appropriate number of
5084 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5085 case of SLP. */
5086 if (slp || PURE_SLP_STMT (stmt_info))
5087 ncopies = 1;
5088 else
5089 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5090
5091 gcc_assert (ncopies >= 1);
5092
5093 /* FORNOW. This restriction should be relaxed. */
5094 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5095 {
5096 if (dump_enabled_p ())
5097 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5098 "multiple types in nested loop.\n");
5099 return false;
5100 }
5101
5102 op = gimple_assign_rhs1 (stmt);
5103 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
5104 {
5105 if (dump_enabled_p ())
5106 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5107 "use not simple.\n");
5108 return false;
5109 }
5110
5111 elem_type = TREE_TYPE (vectype);
5112 vec_mode = TYPE_MODE (vectype);
5113
5114 /* FORNOW. In some cases we can vectorize even if the data type is not
5115 supported (e.g. array initialization with 0). */
5116 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5117 return false;
5118
5119 if (!STMT_VINFO_DATA_REF (stmt_info))
5120 return false;
5121
5122 if (!STMT_VINFO_STRIDED_P (stmt_info))
5123 {
5124 negative =
5125 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5126 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5127 size_zero_node) < 0;
5128 if (negative && ncopies > 1)
5129 {
5130 if (dump_enabled_p ())
5131 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5132 "multiple types with negative step.\n");
5133 return false;
5134 }
5135 if (negative)
5136 {
5137 gcc_assert (!grouped_store);
5138 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5139 if (alignment_support_scheme != dr_aligned
5140 && alignment_support_scheme != dr_unaligned_supported)
5141 {
5142 if (dump_enabled_p ())
5143 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5144 "negative step but alignment required.\n");
5145 return false;
5146 }
5147 if (dt != vect_constant_def
5148 && dt != vect_external_def
5149 && !perm_mask_for_reverse (vectype))
5150 {
5151 if (dump_enabled_p ())
5152 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5153 "negative step and reversing not supported.\n");
5154 return false;
5155 }
5156 }
5157 }
5158
5159 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5160 {
5161 grouped_store = true;
5162 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5163 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5164 if (!slp
5165 && !PURE_SLP_STMT (stmt_info)
5166 && !STMT_VINFO_STRIDED_P (stmt_info))
5167 {
5168 if (vect_store_lanes_supported (vectype, group_size))
5169 store_lanes_p = true;
5170 else if (!vect_grouped_store_supported (vectype, group_size))
5171 return false;
5172 }
5173
5174 if (STMT_VINFO_STRIDED_P (stmt_info)
5175 && (slp || PURE_SLP_STMT (stmt_info))
5176 && (group_size > nunits
5177 || nunits % group_size != 0))
5178 {
5179 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5180 "unhandled strided group store\n");
5181 return false;
5182 }
5183
5184 if (first_stmt == stmt)
5185 {
5186 /* STMT is the leader of the group. Check the operands of all the
5187 stmts of the group. */
5188 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5189 while (next_stmt)
5190 {
5191 gcc_assert (gimple_assign_single_p (next_stmt));
5192 op = gimple_assign_rhs1 (next_stmt);
5193 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
5194 {
5195 if (dump_enabled_p ())
5196 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5197 "use not simple.\n");
5198 return false;
5199 }
5200 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5201 }
5202 }
5203 }
5204
5205 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5206 {
5207 gimple *def_stmt;
5208 scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
5209 &scatter_off, &scatter_scale);
5210 gcc_assert (scatter_decl);
5211 if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
5212 &scatter_off_vectype))
5213 {
5214 if (dump_enabled_p ())
5215 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5216 "scatter index use not simple.");
5217 return false;
5218 }
5219 }
5220
5221 if (!vec_stmt) /* transformation not required. */
5222 {
5223 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5224 /* The SLP costs are calculated during SLP analysis. */
5225 if (!PURE_SLP_STMT (stmt_info))
5226 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5227 NULL, NULL, NULL);
5228 return true;
5229 }
5230
5231 /** Transform. **/
5232
5233 ensure_base_align (stmt_info, dr);
5234
5235 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5236 {
5237 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5238 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
5239 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5240 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5241 edge pe = loop_preheader_edge (loop);
5242 gimple_seq seq;
5243 basic_block new_bb;
5244 enum { NARROW, NONE, WIDEN } modifier;
5245 int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);
5246
5247 if (nunits == (unsigned int) scatter_off_nunits)
5248 modifier = NONE;
5249 else if (nunits == (unsigned int) scatter_off_nunits / 2)
5250 {
5251 unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
5252 modifier = WIDEN;
5253
5254 for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
5255 sel[i] = i | nunits;
5256
5257 perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
5258 gcc_assert (perm_mask != NULL_TREE);
5259 }
5260 else if (nunits == (unsigned int) scatter_off_nunits * 2)
5261 {
5262 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
5263 modifier = NARROW;
5264
5265 for (i = 0; i < (unsigned int) nunits; ++i)
5266 sel[i] = i | scatter_off_nunits;
5267
5268 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
5269 gcc_assert (perm_mask != NULL_TREE);
5270 ncopies *= 2;
5271 }
5272 else
5273 gcc_unreachable ();
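 /* For example, if the stored data vector has twice as many elements as
 the offset vector, the data is split in two (NARROW) and the number of
 scatter calls is doubled; if it has half as many, each offset vector
 is reused for two data vectors (WIDEN).  */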
5274
5275 rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
5276 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5277 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5278 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5279 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5280 scaletype = TREE_VALUE (arglist);
5281
5282 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5283 && TREE_CODE (rettype) == VOID_TYPE);
5284
5285 ptr = fold_convert (ptrtype, scatter_base);
5286 if (!is_gimple_min_invariant (ptr))
5287 {
5288 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5289 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5290 gcc_assert (!new_bb);
5291 }
5292
5293 /* Currently we support only unconditional scatter stores,
5294 so mask should be all ones. */
5295 mask = build_int_cst (masktype, -1);
5296 mask = vect_init_vector (stmt, mask, masktype, NULL);
5297
5298 scale = build_int_cst (scaletype, scatter_scale);
5299
5300 prev_stmt_info = NULL;
5301 for (j = 0; j < ncopies; ++j)
5302 {
5303 if (j == 0)
5304 {
5305 src = vec_oprnd1
5306 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5307 op = vec_oprnd0
5308 = vect_get_vec_def_for_operand (scatter_off, stmt);
5309 }
5310 else if (modifier != NONE && (j & 1))
5311 {
5312 if (modifier == WIDEN)
5313 {
5314 src = vec_oprnd1
5315 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5316 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5317 stmt, gsi);
5318 }
5319 else if (modifier == NARROW)
5320 {
5321 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5322 stmt, gsi);
5323 op = vec_oprnd0
5324 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5325 }
5326 else
5327 gcc_unreachable ();
5328 }
5329 else
5330 {
5331 src = vec_oprnd1
5332 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5333 op = vec_oprnd0
5334 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5335 }
5336
5337 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5338 {
5339 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
5340 == TYPE_VECTOR_SUBPARTS (srctype));
5341 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5342 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5343 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5344 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5345 src = var;
5346 }
5347
5348 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5349 {
5350 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5351 == TYPE_VECTOR_SUBPARTS (idxtype));
5352 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5353 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5354 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5355 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5356 op = var;
5357 }
5358
5359 new_stmt
5360 = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);
5361
5362 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5363
5364 if (prev_stmt_info == NULL)
5365 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5366 else
5367 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5368 prev_stmt_info = vinfo_for_stmt (new_stmt);
5369 }
5370 return true;
5371 }
5372
5373 if (grouped_store)
5374 {
5375 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5376 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5377
5378 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5379
5380 /* FORNOW */
5381 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5382
5383 /* We vectorize all the stmts of the interleaving group when we
5384 reach the last stmt in the group. */
5385 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5386 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5387 && !slp)
5388 {
5389 *vec_stmt = NULL;
5390 return true;
5391 }
5392
5393 if (slp)
5394 {
5395 grouped_store = false;
5396 /* VEC_NUM is the number of vect stmts to be created for this
5397 group. */
5398 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5399 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5400 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5401 op = gimple_assign_rhs1 (first_stmt);
5402 }
5403 else
5404 /* VEC_NUM is the number of vect stmts to be created for this
5405 group. */
5406 vec_num = group_size;
5407 }
5408 else
5409 {
5410 first_stmt = stmt;
5411 first_dr = dr;
5412 group_size = vec_num = 1;
5413 }
5414
5415 if (dump_enabled_p ())
5416 dump_printf_loc (MSG_NOTE, vect_location,
5417 "transform store. ncopies = %d\n", ncopies);
5418
5419 if (STMT_VINFO_STRIDED_P (stmt_info))
5420 {
5421 gimple_stmt_iterator incr_gsi;
5422 bool insert_after;
5423 gimple *incr;
5424 tree offvar;
5425 tree ivstep;
5426 tree running_off;
5427 gimple_seq stmts = NULL;
5428 tree stride_base, stride_step, alias_off;
5429 tree vec_oprnd;
5430 unsigned int g;
5431
5432 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5433
5434 stride_base
5435 = fold_build_pointer_plus
5436 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
5437 size_binop (PLUS_EXPR,
5438 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
5439 convert_to_ptrofftype (DR_INIT(first_dr))));
5440 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
5441
5442 /* For a store with loop-invariant (but other than power-of-2)
5443 stride (i.e. not a grouped access) like so:
5444
5445 for (i = 0; i < n; i += stride)
5446 array[i] = ...;
5447
5448 we generate a new induction variable and new stores from
5449 the components of the (vectorized) rhs:
5450
5451 for (j = 0; ; j += VF*stride)
5452 vectemp = ...;
5453 tmp1 = vectemp[0];
5454 array[j] = tmp1;
5455 tmp2 = vectemp[1];
5456 array[j + stride] = tmp2;
5457 ...
5458 */
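
  /* A concrete sketch (assuming a four-element vector type, so nunits == 4,
     ncopies == 1 and thus nstores == 4 below, and a runtime stride of 5):
     the scalar loop

       for (i = 0; i < n; i += 5)
         array[i] = ...;

     is emitted, per vector iteration, roughly as

       for (j = 0; ; j += 4*5)
         vectemp = ...;
         array[j]      = vectemp[0];
         array[j + 5]  = vectemp[1];
         array[j + 10] = vectemp[2];
         array[j + 15] = vectemp[3];

     i.e. NSTORES element extracts (the BIT_FIELD_REFs generated below) and
     NSTORES scalar stores, with RUNNING_OFF bumped by STRIDE_STEP between
     the component stores.  */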
5459
5460 unsigned nstores = nunits;
5461 tree ltype = elem_type;
5462 if (slp)
5463 {
5464 nstores = nunits / group_size;
5465 if (group_size < nunits)
5466 ltype = build_vector_type (elem_type, group_size);
5467 else
5468 ltype = vectype;
5469 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
5470 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5471 group_size = 1;
5472 }
5473
5474 ivstep = stride_step;
5475 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
5476 build_int_cst (TREE_TYPE (ivstep),
5477 ncopies * nstores));
5478
5479 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5480
5481 create_iv (stride_base, ivstep, NULL,
5482 loop, &incr_gsi, insert_after,
5483 &offvar, NULL);
5484 incr = gsi_stmt (incr_gsi);
5485 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
5486
5487 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
5488 if (stmts)
5489 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
5490
5491 prev_stmt_info = NULL;
5492 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
5493 next_stmt = first_stmt;
5494 for (g = 0; g < group_size; g++)
5495 {
5496 running_off = offvar;
5497 if (g)
5498 {
5499 tree size = TYPE_SIZE_UNIT (ltype);
5500 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
5501 size);
5502 tree newoff = copy_ssa_name (running_off, NULL);
5503 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5504 running_off, pos);
5505 vect_finish_stmt_generation (stmt, incr, gsi);
5506 running_off = newoff;
5507 }
5508 for (j = 0; j < ncopies; j++)
5509 {
5510 /* We've set op and dt above, from gimple_assign_rhs1 (stmt),
5511 and first_stmt == stmt. */
5512 if (j == 0)
5513 {
5514 if (slp)
5515 {
5516 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
5517 slp_node, -1);
5518 vec_oprnd = vec_oprnds[0];
5519 }
5520 else
5521 {
5522 gcc_assert (gimple_assign_single_p (next_stmt));
5523 op = gimple_assign_rhs1 (next_stmt);
5524 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5525 }
5526 }
5527 else
5528 {
5529 if (slp)
5530 vec_oprnd = vec_oprnds[j];
5531 else
5532 {
5533 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
5534 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
5535 }
5536 }
5537
5538 for (i = 0; i < nstores; i++)
5539 {
5540 tree newref, newoff;
5541 gimple *incr, *assign;
5542 tree size = TYPE_SIZE (ltype);
5543 /* Extract the i'th component. */
5544 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
5545 bitsize_int (i), size);
5546 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
5547 size, pos);
5548
5549 elem = force_gimple_operand_gsi (gsi, elem, true,
5550 NULL_TREE, true,
5551 GSI_SAME_STMT);
5552
5553 newref = build2 (MEM_REF, ltype,
5554 running_off, alias_off);
5555
5556 /* And store it to *running_off. */
5557 assign = gimple_build_assign (newref, elem);
5558 vect_finish_stmt_generation (stmt, assign, gsi);
5559
5560 newoff = copy_ssa_name (running_off, NULL);
5561 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5562 running_off, stride_step);
5563 vect_finish_stmt_generation (stmt, incr, gsi);
5564
5565 running_off = newoff;
5566 if (g == group_size - 1
5567 && !slp)
5568 {
5569 if (j == 0 && i == 0)
5570 STMT_VINFO_VEC_STMT (stmt_info)
5571 = *vec_stmt = assign;
5572 else
5573 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
5574 prev_stmt_info = vinfo_for_stmt (assign);
5575 }
5576 }
5577 }
5578 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5579 }
5580 return true;
5581 }
5582
5583 dr_chain.create (group_size);
5584 oprnds.create (group_size);
5585
5586 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
5587 gcc_assert (alignment_support_scheme);
5588 /* Targets with store-lane instructions must not require explicit
5589 realignment. */
5590 gcc_assert (!store_lanes_p
5591 || alignment_support_scheme == dr_aligned
5592 || alignment_support_scheme == dr_unaligned_supported);
5593
5594 if (negative)
5595 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
5596
5597 if (store_lanes_p)
5598 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5599 else
5600 aggr_type = vectype;
5601
5602 /* In case the vectorization factor (VF) is bigger than the number
5603 of elements that we can fit in a vectype (nunits), we have to generate
5604 more than one vector stmt, i.e. we need to "unroll" the
5605 vector stmt by a factor VF/nunits. For more details see documentation in
5606 vect_get_vec_def_for_copy_stmt. */
5607
5608 /* In case of interleaving (non-unit grouped access):
5609
5610 S1: &base + 2 = x2
5611 S2: &base = x0
5612 S3: &base + 1 = x1
5613 S4: &base + 3 = x3
5614
5615 We create vectorized stores starting from the base address (the access of the
5616 first stmt in the chain, S2 in the above example) when the last store stmt
5617 of the chain (S4) is reached:
5618
5619 VS1: &base = vx2
5620 VS2: &base + vec_size*1 = vx0
5621 VS3: &base + vec_size*2 = vx1
5622 VS4: &base + vec_size*3 = vx3
5623
5624 Then permutation statements are generated:
5625
5626 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5627 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5628 ...
5629
5630 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5631 (the order of the data-refs in the output of vect_permute_store_chain
5632 corresponds to the order of scalar stmts in the interleaving chain - see
5633 the documentation of vect_permute_store_chain()).
5634
5635 In case of both multiple types and interleaving, the above vector stores and
5636 permutation stmts are created for every copy. The result vector stmts are
5637 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5638 STMT_VINFO_RELATED_STMT for the next copies.
5639 */
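
  /* A smaller worked example, assuming nunits == 4 and group_size == 2:
     with vx0 = {a0, a1, a2, a3} and vx1 = {b0, b1, b2, b3} collected in
     DR_CHAIN, vect_permute_store_chain produces

       vx5 = VEC_PERM_EXPR <vx0, vx1, {0, 4, 1, 5}>   (that is {a0, b0, a1, b1})
       vx6 = VEC_PERM_EXPR <vx0, vx1, {2, 6, 3, 7}>   (that is {a2, b2, a3, b3})

     and it is vx5 and vx6 that are stored to the two consecutive
     vector-sized memory locations.  */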
5640
5641 prev_stmt_info = NULL;
5642 for (j = 0; j < ncopies; j++)
5643 {
5644
5645 if (j == 0)
5646 {
5647 if (slp)
5648 {
5649 /* Get vectorized arguments for SLP_NODE. */
5650 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5651 NULL, slp_node, -1);
5652
5653 vec_oprnd = vec_oprnds[0];
5654 }
5655 else
5656 {
5657 /* For interleaved stores we collect vectorized defs for all the
5658 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5659 used as an input to vect_permute_store_chain(), and OPRNDS as
5660 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5661
5662 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5663 OPRNDS are of size 1. */
5664 next_stmt = first_stmt;
5665 for (i = 0; i < group_size; i++)
5666 {
5667 /* Since gaps are not supported for interleaved stores,
5668 GROUP_SIZE is the exact number of stmts in the chain.
5669 Therefore, NEXT_STMT can't be NULL_TREE. In case that
5670 there is no interleaving, GROUP_SIZE is 1, and only one
5671 iteration of the loop will be executed. */
5672 gcc_assert (next_stmt
5673 && gimple_assign_single_p (next_stmt));
5674 op = gimple_assign_rhs1 (next_stmt);
5675
5676 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5677 dr_chain.quick_push (vec_oprnd);
5678 oprnds.quick_push (vec_oprnd);
5679 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5680 }
5681 }
5682
5683 /* We should have caught mismatched types earlier. */
5684 gcc_assert (useless_type_conversion_p (vectype,
5685 TREE_TYPE (vec_oprnd)));
5686 bool simd_lane_access_p
5687 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5688 if (simd_lane_access_p
5689 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5690 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5691 && integer_zerop (DR_OFFSET (first_dr))
5692 && integer_zerop (DR_INIT (first_dr))
5693 && alias_sets_conflict_p (get_alias_set (aggr_type),
5694 get_alias_set (DR_REF (first_dr))))
5695 {
5696 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5697 dataref_offset = build_int_cst (reference_alias_ptr_type
5698 (DR_REF (first_dr)), 0);
5699 inv_p = false;
5700 }
5701 else
5702 dataref_ptr
5703 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5704 simd_lane_access_p ? loop : NULL,
5705 offset, &dummy, gsi, &ptr_incr,
5706 simd_lane_access_p, &inv_p);
5707 gcc_assert (bb_vinfo || !inv_p);
5708 }
5709 else
5710 {
5711 /* For interleaved stores we created vectorized defs for all the
5712 defs stored in OPRNDS in the previous iteration (previous copy).
5713 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5714 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5715 next copy.
5716 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5717 OPRNDS are of size 1. */
5718 for (i = 0; i < group_size; i++)
5719 {
5720 op = oprnds[i];
5721 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
5722 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5723 dr_chain[i] = vec_oprnd;
5724 oprnds[i] = vec_oprnd;
5725 }
5726 if (dataref_offset)
5727 dataref_offset
5728 = int_const_binop (PLUS_EXPR, dataref_offset,
5729 TYPE_SIZE_UNIT (aggr_type));
5730 else
5731 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5732 TYPE_SIZE_UNIT (aggr_type));
5733 }
5734
5735 if (store_lanes_p)
5736 {
5737 tree vec_array;
5738
5739 /* Combine all the vectors into an array. */
5740 vec_array = create_vector_array (vectype, vec_num);
5741 for (i = 0; i < vec_num; i++)
5742 {
5743 vec_oprnd = dr_chain[i];
5744 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
5745 }
5746
5747 /* Emit:
5748 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5749 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5750 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5751 gimple_call_set_lhs (new_stmt, data_ref);
5752 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5753 }
5754 else
5755 {
5756 new_stmt = NULL;
5757 if (grouped_store)
5758 {
5759 if (j == 0)
5760 result_chain.create (group_size);
5761 /* Permute. */
5762 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5763 &result_chain);
5764 }
5765
5766 next_stmt = first_stmt;
5767 for (i = 0; i < vec_num; i++)
5768 {
5769 unsigned align, misalign;
5770
5771 if (i > 0)
5772 /* Bump the vector pointer. */
5773 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5774 stmt, NULL_TREE);
5775
5776 if (slp)
5777 vec_oprnd = vec_oprnds[i];
5778 else if (grouped_store)
5779 /* For grouped stores vectorized defs are interleaved in
5780 vect_permute_store_chain(). */
5781 vec_oprnd = result_chain[i];
5782
5783 data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
5784 dataref_ptr,
5785 dataref_offset
5786 ? dataref_offset
5787 : build_int_cst (reference_alias_ptr_type
5788 (DR_REF (first_dr)), 0));
5789 align = TYPE_ALIGN_UNIT (vectype);
5790 if (aligned_access_p (first_dr))
5791 misalign = 0;
5792 else if (DR_MISALIGNMENT (first_dr) == -1)
5793 {
5794 if (DR_VECT_AUX (first_dr)->base_element_aligned)
5795 align = TYPE_ALIGN_UNIT (elem_type);
5796 else
5797 align = get_object_alignment (DR_REF (first_dr))
5798 / BITS_PER_UNIT;
5799 misalign = 0;
5800 TREE_TYPE (data_ref)
5801 = build_aligned_type (TREE_TYPE (data_ref),
5802 align * BITS_PER_UNIT);
5803 }
5804 else
5805 {
5806 TREE_TYPE (data_ref)
5807 = build_aligned_type (TREE_TYPE (data_ref),
5808 TYPE_ALIGN (elem_type));
5809 misalign = DR_MISALIGNMENT (first_dr);
5810 }
5811 if (dataref_offset == NULL_TREE
5812 && TREE_CODE (dataref_ptr) == SSA_NAME)
5813 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
5814 misalign);
5815
5816 if (negative
5817 && dt != vect_constant_def
5818 && dt != vect_external_def)
5819 {
5820 tree perm_mask = perm_mask_for_reverse (vectype);
5821 tree perm_dest
5822 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
5823 vectype);
5824 tree new_temp = make_ssa_name (perm_dest);
5825
5826 /* Generate the permute statement. */
5827 gimple *perm_stmt
5828 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
5829 vec_oprnd, perm_mask);
5830 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5831
5832 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
5833 vec_oprnd = new_temp;
5834 }
5835
5836 /* Arguments are ready. Create the new vector stmt. */
5837 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5838 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5839
5840 if (slp)
5841 continue;
5842
5843 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5844 if (!next_stmt)
5845 break;
5846 }
5847 }
5848 if (!slp)
5849 {
5850 if (j == 0)
5851 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5852 else
5853 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5854 prev_stmt_info = vinfo_for_stmt (new_stmt);
5855 }
5856 }
5857
5858 dr_chain.release ();
5859 oprnds.release ();
5860 result_chain.release ();
5861 vec_oprnds.release ();
5862
5863 return true;
5864 }
5865
5866 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
5867 VECTOR_CST mask. No checks are made that the target platform supports the
5868 mask, so callers may wish to test can_vec_perm_p separately, or use
5869 vect_gen_perm_mask_checked. */
5870
5871 tree
5872 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
5873 {
5874 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
5875 int i, nunits;
5876
5877 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5878
5879 mask_elt_type = lang_hooks.types.type_for_mode
5880 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
5881 mask_type = get_vectype_for_scalar_type (mask_elt_type);
5882
5883 mask_elts = XALLOCAVEC (tree, nunits);
5884 for (i = nunits - 1; i >= 0; i--)
5885 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5886 mask_vec = build_vector (mask_type, mask_elts);
5887
5888 return mask_vec;
5889 }
5890
5891 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
5892 i.e. that the target supports the pattern _for arbitrary input vectors_. */
5893
5894 tree
5895 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
5896 {
5897 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
5898 return vect_gen_perm_mask_any (vectype, sel);
5899 }
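
/* Usage sketch: to reverse the elements of a four-element VECTYPE one would
   fill SEL with { 3, 2, 1, 0 } and call the checked variant, e.g.

     unsigned char sel[4] = { 3, 2, 1, 0 };
     tree mask = vect_gen_perm_mask_checked (vectype, sel);

   The result is a VECTOR_CST of the corresponding integer vector type,
   suitable as the third operand of a VEC_PERM_EXPR.  */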
5900
5901 /* Given vector variables X and Y that were generated for the scalar
5902 STMT, generate instructions to permute the vector elements of X and Y
5903 using permutation mask MASK_VEC, insert them at *GSI and return the
5904 permuted vector variable. */
5905
5906 static tree
5907 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
5908 gimple_stmt_iterator *gsi)
5909 {
5910 tree vectype = TREE_TYPE (x);
5911 tree perm_dest, data_ref;
5912 gimple *perm_stmt;
5913
5914 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
5915 data_ref = make_ssa_name (perm_dest);
5916
5917 /* Generate the permute statement. */
5918 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
5919 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5920
5921 return data_ref;
5922 }
5923
5924 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5925 inserting them on the loop's preheader edge. Returns true if we
5926 were successful in doing so (and thus STMT can then be moved),
5927 otherwise returns false. */
5928
5929 static bool
5930 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
5931 {
5932 ssa_op_iter i;
5933 tree op;
5934 bool any = false;
5935
5936 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5937 {
5938 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
5939 if (!gimple_nop_p (def_stmt)
5940 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5941 {
5942 /* Make sure we don't need to recurse. While we could do
5943 so in simple cases, when there are more complex use webs
5944 we don't have an easy way to preserve stmt order to fulfil
5945 dependencies within them. */
5946 tree op2;
5947 ssa_op_iter i2;
5948 if (gimple_code (def_stmt) == GIMPLE_PHI)
5949 return false;
5950 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
5951 {
5952 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
5953 if (!gimple_nop_p (def_stmt2)
5954 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
5955 return false;
5956 }
5957 any = true;
5958 }
5959 }
5960
5961 if (!any)
5962 return true;
5963
5964 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5965 {
5966 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
5967 if (!gimple_nop_p (def_stmt)
5968 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5969 {
5970 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5971 gsi_remove (&gsi, false);
5972 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
5973 }
5974 }
5975
5976 return true;
5977 }
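
/* For instance, if STMT is the invariant load tmp_3 = *p_2 and p_2 is defined
   inside LOOP by p_2 = &a + off_1 with off_1 defined outside the loop, the
   definition of p_2 is moved to the preheader edge so that STMT itself can
   subsequently be hoisted (see the invariant-load handling in
   vectorizable_load below).  A PHI definition, or a definition whose own
   operands are again defined inside LOOP, makes us give up.  */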
5978
5979 /* vectorizable_load.
5980
5981 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
5982 can be vectorized.
5983 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5984 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5985 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5986
5987 static bool
5988 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5989 slp_tree slp_node, slp_instance slp_node_instance)
5990 {
5991 tree scalar_dest;
5992 tree vec_dest = NULL;
5993 tree data_ref = NULL;
5994 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5995 stmt_vec_info prev_stmt_info;
5996 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5997 struct loop *loop = NULL;
5998 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
5999 bool nested_in_vect_loop = false;
6000 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6001 tree elem_type;
6002 tree new_temp;
6003 machine_mode mode;
6004 gimple *new_stmt = NULL;
6005 tree dummy;
6006 enum dr_alignment_support alignment_support_scheme;
6007 tree dataref_ptr = NULL_TREE;
6008 tree dataref_offset = NULL_TREE;
6009 gimple *ptr_incr = NULL;
6010 int ncopies;
6011 int i, j, group_size = -1, group_gap_adj;
6012 tree msq = NULL_TREE, lsq;
6013 tree offset = NULL_TREE;
6014 tree byte_offset = NULL_TREE;
6015 tree realignment_token = NULL_TREE;
6016 gphi *phi = NULL;
6017 vec<tree> dr_chain = vNULL;
6018 bool grouped_load = false;
6019 bool load_lanes_p = false;
6020 gimple *first_stmt;
6021 bool inv_p;
6022 bool negative = false;
6023 bool compute_in_loop = false;
6024 struct loop *at_loop;
6025 int vec_num;
6026 bool slp = (slp_node != NULL);
6027 bool slp_perm = false;
6028 enum tree_code code;
6029 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6030 int vf;
6031 tree aggr_type;
6032 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
6033 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
6034 int gather_scale = 1;
6035 enum vect_def_type gather_dt = vect_unknown_def_type;
6036 vec_info *vinfo = stmt_info->vinfo;
6037
6038 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6039 return false;
6040
6041 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
6042 return false;
6043
6044 /* Is vectorizable load? */
6045 if (!is_gimple_assign (stmt))
6046 return false;
6047
6048 scalar_dest = gimple_assign_lhs (stmt);
6049 if (TREE_CODE (scalar_dest) != SSA_NAME)
6050 return false;
6051
6052 code = gimple_assign_rhs_code (stmt);
6053 if (code != ARRAY_REF
6054 && code != BIT_FIELD_REF
6055 && code != INDIRECT_REF
6056 && code != COMPONENT_REF
6057 && code != IMAGPART_EXPR
6058 && code != REALPART_EXPR
6059 && code != MEM_REF
6060 && TREE_CODE_CLASS (code) != tcc_declaration)
6061 return false;
6062
6063 if (!STMT_VINFO_DATA_REF (stmt_info))
6064 return false;
6065
6066 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6067 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6068
6069 if (loop_vinfo)
6070 {
6071 loop = LOOP_VINFO_LOOP (loop_vinfo);
6072 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6073 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6074 }
6075 else
6076 vf = 1;
6077
6078 /* Multiple types in SLP are handled by creating the appropriate number of
6079 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6080 case of SLP. */
6081 if (slp || PURE_SLP_STMT (stmt_info))
6082 ncopies = 1;
6083 else
6084 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6085
6086 gcc_assert (ncopies >= 1);
6087
6088 /* FORNOW. This restriction should be relaxed. */
6089 if (nested_in_vect_loop && ncopies > 1)
6090 {
6091 if (dump_enabled_p ())
6092 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6093 "multiple types in nested loop.\n");
6094 return false;
6095 }
6096
6097 /* Invalidate assumptions made by dependence analysis when vectorization
6098 on the unrolled body effectively re-orders stmts. */
6099 if (ncopies > 1
6100 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6101 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6102 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6103 {
6104 if (dump_enabled_p ())
6105 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6106 "cannot perform implicit CSE when unrolling "
6107 "with negative dependence distance\n");
6108 return false;
6109 }
6110
6111 elem_type = TREE_TYPE (vectype);
6112 mode = TYPE_MODE (vectype);
6113
6114 /* FORNOW. In some cases can vectorize even if data-type not supported
6115 (e.g. - data copies). */
6116 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6117 {
6118 if (dump_enabled_p ())
6119 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6120 "Aligned load, but unsupported type.\n");
6121 return false;
6122 }
6123
6124 /* Check if the load is a part of an interleaving chain. */
6125 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6126 {
6127 grouped_load = true;
6128 /* FORNOW */
6129 gcc_assert (!nested_in_vect_loop && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6130
6131 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6132
6133 /* If this is single-element interleaving with an element distance
6134 that leaves unused vector loads around, punt - we at least create
6135 very sub-optimal code in that case (and blow up memory,
6136 see PR65518). */
6137 if (first_stmt == stmt
6138 && !GROUP_NEXT_ELEMENT (stmt_info)
6139 && GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
6140 {
6141 if (dump_enabled_p ())
6142 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6143 "single-element interleaving not supported "
6144 "for not adjacent vector loads\n");
6145 return false;
6146 }
6147
6148 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6149 slp_perm = true;
6150
6151 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6152 if (!slp
6153 && !PURE_SLP_STMT (stmt_info)
6154 && !STMT_VINFO_STRIDED_P (stmt_info))
6155 {
6156 if (vect_load_lanes_supported (vectype, group_size))
6157 load_lanes_p = true;
6158 else if (!vect_grouped_load_supported (vectype, group_size))
6159 return false;
6160 }
6161
6162 /* Invalidate assumptions made by dependence analysis when vectorization
6163 on the unrolled body effectively re-orders stmts. */
6164 if (!PURE_SLP_STMT (stmt_info)
6165 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6166 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6167 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6168 {
6169 if (dump_enabled_p ())
6170 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6171 "cannot perform implicit CSE when performing "
6172 "group loads with negative dependence distance\n");
6173 return false;
6174 }
6175
6176 /* Similarly, when the stmt is a load that is both part of an SLP
6177 instance and a loop vectorized stmt via the same-dr mechanism,
6178 we have to give up. */
6179 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6180 && (STMT_SLP_TYPE (stmt_info)
6181 != STMT_SLP_TYPE (vinfo_for_stmt
6182 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6183 {
6184 if (dump_enabled_p ())
6185 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6186 "conflicting SLP types for CSEd load\n");
6187 return false;
6188 }
6189 }
6190
6191
6192 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6193 {
6194 gimple *def_stmt;
6195 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
6196 &gather_off, &gather_scale);
6197 gcc_assert (gather_decl);
6198 if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
6199 &gather_off_vectype))
6200 {
6201 if (dump_enabled_p ())
6202 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6203 "gather index use not simple.\n");
6204 return false;
6205 }
6206 }
6207 else if (STMT_VINFO_STRIDED_P (stmt_info))
6208 {
6209 if ((grouped_load
6210 && (slp || PURE_SLP_STMT (stmt_info)))
6211 && (group_size > nunits
6212 || nunits % group_size != 0))
6213 {
6214 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6215 "unhandled strided group load\n");
6216 return false;
6217 }
6218 }
6219 else
6220 {
6221 negative = tree_int_cst_compare (nested_in_vect_loop
6222 ? STMT_VINFO_DR_STEP (stmt_info)
6223 : DR_STEP (dr),
6224 size_zero_node) < 0;
6225 if (negative && ncopies > 1)
6226 {
6227 if (dump_enabled_p ())
6228 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6229 "multiple types with negative step.\n");
6230 return false;
6231 }
6232
6233 if (negative)
6234 {
6235 if (grouped_load)
6236 {
6237 if (dump_enabled_p ())
6238 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6239 "negative step for group load not supported"
6240 "\n");
6241 return false;
6242 }
6243 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
6244 if (alignment_support_scheme != dr_aligned
6245 && alignment_support_scheme != dr_unaligned_supported)
6246 {
6247 if (dump_enabled_p ())
6248 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6249 "negative step but alignment required.\n");
6250 return false;
6251 }
6252 if (!perm_mask_for_reverse (vectype))
6253 {
6254 if (dump_enabled_p ())
6255 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6256 "negative step and reversing not supported."
6257 "\n");
6258 return false;
6259 }
6260 }
6261 }
6262
6263 if (!vec_stmt) /* transformation not required. */
6264 {
6265 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6266 /* The SLP costs are calculated during SLP analysis. */
6267 if (!PURE_SLP_STMT (stmt_info))
6268 vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
6269 NULL, NULL, NULL);
6270 return true;
6271 }
6272
6273 if (dump_enabled_p ())
6274 dump_printf_loc (MSG_NOTE, vect_location,
6275 "transform load. ncopies = %d\n", ncopies);
6276
6277 /** Transform. **/
6278
6279 ensure_base_align (stmt_info, dr);
6280
6281 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6282 {
6283 tree vec_oprnd0 = NULL_TREE, op;
6284 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
6285 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6286 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6287 edge pe = loop_preheader_edge (loop);
6288 gimple_seq seq;
6289 basic_block new_bb;
6290 enum { NARROW, NONE, WIDEN } modifier;
6291 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
6292
6293 if (nunits == gather_off_nunits)
6294 modifier = NONE;
6295 else if (nunits == gather_off_nunits / 2)
6296 {
6297 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
6298 modifier = WIDEN;
6299
6300 for (i = 0; i < gather_off_nunits; ++i)
6301 sel[i] = i | nunits;
6302
6303 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
6304 }
6305 else if (nunits == gather_off_nunits * 2)
6306 {
6307 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
6308 modifier = NARROW;
6309
6310 for (i = 0; i < nunits; ++i)
6311 sel[i] = i < gather_off_nunits
6312 ? i : i + nunits - gather_off_nunits;
6313
6314 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
6315 ncopies *= 2;
6316 }
6317 else
6318 gcc_unreachable ();
6319
6320 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
6321 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6322 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6323 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6324 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6325 scaletype = TREE_VALUE (arglist);
6326 gcc_checking_assert (types_compatible_p (srctype, rettype));
6327
6328 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6329
6330 ptr = fold_convert (ptrtype, gather_base);
6331 if (!is_gimple_min_invariant (ptr))
6332 {
6333 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6334 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6335 gcc_assert (!new_bb);
6336 }
6337
6338 /* Currently we support only unconditional gather loads,
6339 so mask should be all ones. */
6340 if (TREE_CODE (masktype) == INTEGER_TYPE)
6341 mask = build_int_cst (masktype, -1);
6342 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6343 {
6344 mask = build_int_cst (TREE_TYPE (masktype), -1);
6345 mask = build_vector_from_val (masktype, mask);
6346 mask = vect_init_vector (stmt, mask, masktype, NULL);
6347 }
6348 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6349 {
6350 REAL_VALUE_TYPE r;
6351 long tmp[6];
6352 for (j = 0; j < 6; ++j)
6353 tmp[j] = -1;
6354 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6355 mask = build_real (TREE_TYPE (masktype), r);
6356 mask = build_vector_from_val (masktype, mask);
6357 mask = vect_init_vector (stmt, mask, masktype, NULL);
6358 }
6359 else
6360 gcc_unreachable ();
6361
6362 scale = build_int_cst (scaletype, gather_scale);
6363
6364 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6365 merge = build_int_cst (TREE_TYPE (rettype), 0);
6366 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6367 {
6368 REAL_VALUE_TYPE r;
6369 long tmp[6];
6370 for (j = 0; j < 6; ++j)
6371 tmp[j] = 0;
6372 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6373 merge = build_real (TREE_TYPE (rettype), r);
6374 }
6375 else
6376 gcc_unreachable ();
6377 merge = build_vector_from_val (rettype, merge);
6378 merge = vect_init_vector (stmt, merge, rettype, NULL);
6379
6380 prev_stmt_info = NULL;
6381 for (j = 0; j < ncopies; ++j)
6382 {
6383 if (modifier == WIDEN && (j & 1))
6384 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6385 perm_mask, stmt, gsi);
6386 else if (j == 0)
6387 op = vec_oprnd0
6388 = vect_get_vec_def_for_operand (gather_off, stmt);
6389 else
6390 op = vec_oprnd0
6391 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
6392
6393 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6394 {
6395 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6396 == TYPE_VECTOR_SUBPARTS (idxtype));
6397 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6398 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6399 new_stmt
6400 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6401 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6402 op = var;
6403 }
6404
6405 new_stmt
6406 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
6407
6408 if (!useless_type_conversion_p (vectype, rettype))
6409 {
6410 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6411 == TYPE_VECTOR_SUBPARTS (rettype));
6412 op = vect_get_new_ssa_name (rettype, vect_simple_var);
6413 gimple_call_set_lhs (new_stmt, op);
6414 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6415 var = make_ssa_name (vec_dest);
6416 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6417 new_stmt
6418 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6419 }
6420 else
6421 {
6422 var = make_ssa_name (vec_dest, new_stmt);
6423 gimple_call_set_lhs (new_stmt, var);
6424 }
6425
6426 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6427
6428 if (modifier == NARROW)
6429 {
6430 if ((j & 1) == 0)
6431 {
6432 prev_res = var;
6433 continue;
6434 }
6435 var = permute_vec_elements (prev_res, var,
6436 perm_mask, stmt, gsi);
6437 new_stmt = SSA_NAME_DEF_STMT (var);
6438 }
6439
6440 if (prev_stmt_info == NULL)
6441 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6442 else
6443 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6444 prev_stmt_info = vinfo_for_stmt (new_stmt);
6445 }
6446 return true;
6447 }
6448 else if (STMT_VINFO_STRIDED_P (stmt_info))
6449 {
6450 gimple_stmt_iterator incr_gsi;
6451 bool insert_after;
6452 gimple *incr;
6453 tree offvar;
6454 tree ivstep;
6455 tree running_off;
6456 vec<constructor_elt, va_gc> *v = NULL;
6457 gimple_seq stmts = NULL;
6458 tree stride_base, stride_step, alias_off;
6459
6460 gcc_assert (!nested_in_vect_loop);
6461
6462 if (slp && grouped_load)
6463 first_dr = STMT_VINFO_DATA_REF
6464 (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
6465 else
6466 first_dr = dr;
6467
6468 stride_base
6469 = fold_build_pointer_plus
6470 (DR_BASE_ADDRESS (first_dr),
6471 size_binop (PLUS_EXPR,
6472 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6473 convert_to_ptrofftype (DR_INIT (first_dr))));
6474 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
6475
6476 /* For a load with loop-invariant (but other than power-of-2)
6477 stride (i.e. not a grouped access) like so:
6478
6479 for (i = 0; i < n; i += stride)
6480 ... = array[i];
6481
6482 we generate a new induction variable and new accesses to
6483 form a new vector (or vectors, depending on ncopies):
6484
6485 for (j = 0; ; j += VF*stride)
6486 tmp1 = array[j];
6487 tmp2 = array[j + stride];
6488 ...
6489 vectemp = {tmp1, tmp2, ...}
6490 */
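
  /* A concrete sketch (assuming a four-element vector type, so nunits == 4
     and, outside of SLP, nloads == 4, with a runtime stride of 3):
     the scalar loop

       for (i = 0; i < n; i += 3)
         ... = array[i];

     is emitted, per vector iteration, roughly as

       for (j = 0; ; j += 4*3)
         tmp1 = array[j];
         tmp2 = array[j + 3];
         tmp3 = array[j + 6];
         tmp4 = array[j + 9];
         vectemp = {tmp1, tmp2, tmp3, tmp4};

     i.e. NLOADS scalar loads combined into a CONSTRUCTOR, with RUNNING_OFF
     bumped by STRIDE_STEP between the loads.  */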
6491
6492 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
6493 build_int_cst (TREE_TYPE (stride_step), vf));
6494
6495 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6496
6497 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
6498 loop, &incr_gsi, insert_after,
6499 &offvar, NULL);
6500 incr = gsi_stmt (incr_gsi);
6501 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6502
6503 stride_step = force_gimple_operand (unshare_expr (stride_step),
6504 &stmts, true, NULL_TREE);
6505 if (stmts)
6506 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6507
6508 prev_stmt_info = NULL;
6509 running_off = offvar;
6510 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
6511 int nloads = nunits;
6512 tree ltype = TREE_TYPE (vectype);
6513 auto_vec<tree> dr_chain;
6514 if (slp)
6515 {
6516 nloads = nunits / group_size;
6517 if (group_size < nunits)
6518 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
6519 else
6520 ltype = vectype;
6521 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
6522 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6523 if (slp_perm)
6524 dr_chain.create (ncopies);
6525 }
6526 for (j = 0; j < ncopies; j++)
6527 {
6528 tree vec_inv;
6529
6530 if (nloads > 1)
6531 {
6532 vec_alloc (v, nloads);
6533 for (i = 0; i < nloads; i++)
6534 {
6535 tree newref, newoff;
6536 gimple *incr;
6537 newref = build2 (MEM_REF, ltype, running_off, alias_off);
6538
6539 newref = force_gimple_operand_gsi (gsi, newref, true,
6540 NULL_TREE, true,
6541 GSI_SAME_STMT);
6542 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6543 newoff = copy_ssa_name (running_off);
6544 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6545 running_off, stride_step);
6546 vect_finish_stmt_generation (stmt, incr, gsi);
6547
6548 running_off = newoff;
6549 }
6550
6551 vec_inv = build_constructor (vectype, v);
6552 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6553 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6554 }
6555 else
6556 {
6557 new_stmt = gimple_build_assign (make_ssa_name (ltype),
6558 build2 (MEM_REF, ltype,
6559 running_off, alias_off));
6560 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6561
6562 tree newoff = copy_ssa_name (running_off);
6563 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6564 running_off, stride_step);
6565 vect_finish_stmt_generation (stmt, incr, gsi);
6566
6567 running_off = newoff;
6568 }
6569
6570 if (slp)
6571 {
6572 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6573 if (slp_perm)
6574 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
6575 }
6576 else
6577 {
6578 if (j == 0)
6579 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6580 else
6581 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6582 prev_stmt_info = vinfo_for_stmt (new_stmt);
6583 }
6584 }
6585 if (slp_perm)
6586 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6587 slp_node_instance, false);
6588 return true;
6589 }
6590
6591 if (grouped_load)
6592 {
6593 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6594 if (slp
6595 && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
6596 && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
6597 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6598
6599 /* Check if the chain of loads is already vectorized. */
6600 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6601 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6602 ??? But we can only do so if there is exactly one, as we have
6603 as we have no way to get at the rest. Leave the CSE
6604 opportunity alone.
6605 ??? With the group load eventually participating
6606 in multiple different permutations (having multiple
6607 slp nodes which refer to the same group) the CSE
6608 is even wrong code. See PR56270. */
6609 && !slp)
6610 {
6611 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6612 return true;
6613 }
6614 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6615 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6616 group_gap_adj = 0;
6617
6618 /* VEC_NUM is the number of vect stmts to be created for this group. */
6619 if (slp)
6620 {
6621 grouped_load = false;
6622 /* For SLP permutation support we need to load the whole group,
6623 not only the number of vector stmts the permutation result
6624 fits in. */
6625 if (slp_perm)
6626 vec_num = (group_size * vf + nunits - 1) / nunits;
6627 else
6628 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6629 group_gap_adj = vf * group_size - nunits * vec_num;
6630 }
6631 else
6632 vec_num = group_size;
6633 }
6634 else
6635 {
6636 first_stmt = stmt;
6637 first_dr = dr;
6638 group_size = vec_num = 1;
6639 group_gap_adj = 0;
6640 }
6641
6642 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6643 gcc_assert (alignment_support_scheme);
6644 /* Targets with load-lane instructions must not require explicit
6645 realignment. */
6646 gcc_assert (!load_lanes_p
6647 || alignment_support_scheme == dr_aligned
6648 || alignment_support_scheme == dr_unaligned_supported);
6649
6650 /* In case the vectorization factor (VF) is bigger than the number
6651 of elements that we can fit in a vectype (nunits), we have to generate
6652 more than one vector stmt, i.e. we need to "unroll" the
6653 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6654 from one copy of the vector stmt to the next, in the field
6655 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6656 stages to find the correct vector defs to be used when vectorizing
6657 stmts that use the defs of the current stmt. The example below
6658 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6659 need to create 4 vectorized stmts):
6660
6661 before vectorization:
6662 RELATED_STMT VEC_STMT
6663 S1: x = memref - -
6664 S2: z = x + 1 - -
6665
6666 step 1: vectorize stmt S1:
6667 We first create the vector stmt VS1_0, and, as usual, record a
6668 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6669 Next, we create the vector stmt VS1_1, and record a pointer to
6670 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6671 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6672 stmts and pointers:
6673 RELATED_STMT VEC_STMT
6674 VS1_0: vx0 = memref0 VS1_1 -
6675 VS1_1: vx1 = memref1 VS1_2 -
6676 VS1_2: vx2 = memref2 VS1_3 -
6677 VS1_3: vx3 = memref3 - -
6678 S1: x = load - VS1_0
6679 S2: z = x + 1 - -
6680
6681 See in documentation in vect_get_vec_def_for_stmt_copy for how the
6682 information we recorded in RELATED_STMT field is used to vectorize
6683 stmt S2. */
6684
6685 /* In case of interleaving (non-unit grouped access):
6686
6687 S1: x2 = &base + 2
6688 S2: x0 = &base
6689 S3: x1 = &base + 1
6690 S4: x3 = &base + 3
6691
6692 Vectorized loads are created in the order of memory accesses
6693 starting from the access of the first stmt of the chain:
6694
6695 VS1: vx0 = &base
6696 VS2: vx1 = &base + vec_size*1
6697 VS3: vx3 = &base + vec_size*2
6698 VS4: vx4 = &base + vec_size*3
6699
6700 Then permutation statements are generated:
6701
6702 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6703 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6704 ...
6705
6706 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6707 (the order of the data-refs in the output of vect_permute_load_chain
6708 corresponds to the order of scalar stmts in the interleaving chain - see
6709 the documentation of vect_permute_load_chain()).
6710 The generation of permutation stmts and recording them in
6711 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6712
6713 In case of both multiple types and interleaving, the vector loads and
6714 permutation stmts above are created for every copy. The result vector
6715 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6716 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
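
  /* A smaller worked example, assuming nunits == 4 and group_size == 2:
     memory holds x0, y0, x1, y1, x2, y2, x3, y3, so the two vector loads
     give vx0 = {x0, y0, x1, y1} and vx1 = {x2, y2, x3, y3}, and the permutes

       VEC_PERM_EXPR <vx0, vx1, {0, 2, 4, 6}>   (that is {x0, x1, x2, x3})
       VEC_PERM_EXPR <vx0, vx1, {1, 3, 5, 7}>   (that is {y0, y1, y2, y3})

     recover one vector per scalar stmt of the interleaving chain.  */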
6717
6718 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6719 on a target that supports unaligned accesses (dr_unaligned_supported)
6720 we generate the following code:
6721 p = initial_addr;
6722 indx = 0;
6723 loop {
6724 p = p + indx * vectype_size;
6725 vec_dest = *(p);
6726 indx = indx + 1;
6727 }
6728
6729 Otherwise, the data reference is potentially unaligned on a target that
6730 does not support unaligned accesses (dr_explicit_realign_optimized) -
6731 then generate the following code, in which the data in each iteration is
6732 obtained by two vector loads, one from the previous iteration, and one
6733 from the current iteration:
6734 p1 = initial_addr;
6735 msq_init = *(floor(p1))
6736 p2 = initial_addr + VS - 1;
6737 realignment_token = call target_builtin;
6738 indx = 0;
6739 loop {
6740 p2 = p2 + indx * vectype_size
6741 lsq = *(floor(p2))
6742 vec_dest = realign_load (msq, lsq, realignment_token)
6743 indx = indx + 1;
6744 msq = lsq;
6745 } */
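
  /* A numeric illustration of the realignment scheme (assuming 16-byte
     vectors and an initial_addr that is 4 bytes past a 16-byte boundary):
     msq = *(floor(p)) reads the aligned bytes [p-4, p+12), lsq =
     *(floor(p + 15)) reads [p+12, p+28), and realign_load (msq, lsq,
     realignment_token) selects from their concatenation the unaligned bytes
     [p, p+16) that were actually requested.  In the optimized variant the
     MSQ of the next iteration simply reuses this LSQ.  */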
6746
6747 /* If the misalignment remains the same throughout the execution of the
6748 loop, we can create the init_addr and permutation mask at the loop
6749 preheader. Otherwise, it needs to be created inside the loop.
6750 This can only occur when vectorizing memory accesses in the inner-loop
6751 nested within an outer-loop that is being vectorized. */
6752
6753 if (nested_in_vect_loop
6754 && (TREE_INT_CST_LOW (DR_STEP (dr))
6755 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6756 {
6757 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6758 compute_in_loop = true;
6759 }
6760
6761 if ((alignment_support_scheme == dr_explicit_realign_optimized
6762 || alignment_support_scheme == dr_explicit_realign)
6763 && !compute_in_loop)
6764 {
6765 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6766 alignment_support_scheme, NULL_TREE,
6767 &at_loop);
6768 if (alignment_support_scheme == dr_explicit_realign_optimized)
6769 {
6770 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
6771 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6772 size_one_node);
6773 }
6774 }
6775 else
6776 at_loop = loop;
6777
6778 if (negative)
6779 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6780
6781 if (load_lanes_p)
6782 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6783 else
6784 aggr_type = vectype;
6785
6786 prev_stmt_info = NULL;
6787 for (j = 0; j < ncopies; j++)
6788 {
6789 /* 1. Create the vector or array pointer update chain. */
6790 if (j == 0)
6791 {
6792 bool simd_lane_access_p
6793 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6794 if (simd_lane_access_p
6795 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6796 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6797 && integer_zerop (DR_OFFSET (first_dr))
6798 && integer_zerop (DR_INIT (first_dr))
6799 && alias_sets_conflict_p (get_alias_set (aggr_type),
6800 get_alias_set (DR_REF (first_dr)))
6801 && (alignment_support_scheme == dr_aligned
6802 || alignment_support_scheme == dr_unaligned_supported))
6803 {
6804 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6805 dataref_offset = build_int_cst (reference_alias_ptr_type
6806 (DR_REF (first_dr)), 0);
6807 inv_p = false;
6808 }
6809 else
6810 dataref_ptr
6811 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6812 offset, &dummy, gsi, &ptr_incr,
6813 simd_lane_access_p, &inv_p,
6814 byte_offset);
6815 }
6816 else if (dataref_offset)
6817 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6818 TYPE_SIZE_UNIT (aggr_type));
6819 else
6820 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6821 TYPE_SIZE_UNIT (aggr_type));
6822
6823 if (grouped_load || slp_perm)
6824 dr_chain.create (vec_num);
6825
6826 if (load_lanes_p)
6827 {
6828 tree vec_array;
6829
6830 vec_array = create_vector_array (vectype, vec_num);
6831
6832 /* Emit:
6833 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6834 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6835 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6836 gimple_call_set_lhs (new_stmt, vec_array);
6837 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6838
6839 /* Extract each vector into an SSA_NAME. */
6840 for (i = 0; i < vec_num; i++)
6841 {
6842 new_temp = read_vector_array (stmt, gsi, scalar_dest,
6843 vec_array, i);
6844 dr_chain.quick_push (new_temp);
6845 }
6846
6847 /* Record the mapping between SSA_NAMEs and statements. */
6848 vect_record_grouped_load_vectors (stmt, dr_chain);
6849 }
6850 else
6851 {
6852 for (i = 0; i < vec_num; i++)
6853 {
6854 if (i > 0)
6855 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6856 stmt, NULL_TREE);
6857
6858 /* 2. Create the vector-load in the loop. */
6859 switch (alignment_support_scheme)
6860 {
6861 case dr_aligned:
6862 case dr_unaligned_supported:
6863 {
6864 unsigned int align, misalign;
6865
6866 data_ref
6867 = fold_build2 (MEM_REF, vectype, dataref_ptr,
6868 dataref_offset
6869 ? dataref_offset
6870 : build_int_cst (reference_alias_ptr_type
6871 (DR_REF (first_dr)), 0));
6872 align = TYPE_ALIGN_UNIT (vectype);
6873 if (alignment_support_scheme == dr_aligned)
6874 {
6875 gcc_assert (aligned_access_p (first_dr));
6876 misalign = 0;
6877 }
6878 else if (DR_MISALIGNMENT (first_dr) == -1)
6879 {
6880 if (DR_VECT_AUX (first_dr)->base_element_aligned)
6881 align = TYPE_ALIGN_UNIT (elem_type);
6882 else
6883 align = (get_object_alignment (DR_REF (first_dr))
6884 / BITS_PER_UNIT);
6885 misalign = 0;
6886 TREE_TYPE (data_ref)
6887 = build_aligned_type (TREE_TYPE (data_ref),
6888 align * BITS_PER_UNIT);
6889 }
6890 else
6891 {
6892 TREE_TYPE (data_ref)
6893 = build_aligned_type (TREE_TYPE (data_ref),
6894 TYPE_ALIGN (elem_type));
6895 misalign = DR_MISALIGNMENT (first_dr);
6896 }
6897 if (dataref_offset == NULL_TREE
6898 && TREE_CODE (dataref_ptr) == SSA_NAME)
6899 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6900 align, misalign);
6901 break;
6902 }
6903 case dr_explicit_realign:
6904 {
6905 tree ptr, bump;
6906
6907 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
6908
6909 if (compute_in_loop)
6910 msq = vect_setup_realignment (first_stmt, gsi,
6911 &realignment_token,
6912 dr_explicit_realign,
6913 dataref_ptr, NULL);
6914
6915 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6916 ptr = copy_ssa_name (dataref_ptr);
6917 else
6918 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
6919 new_stmt = gimple_build_assign
6920 (ptr, BIT_AND_EXPR, dataref_ptr,
6921 build_int_cst
6922 (TREE_TYPE (dataref_ptr),
6923 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6924 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6925 data_ref
6926 = build2 (MEM_REF, vectype, ptr,
6927 build_int_cst (reference_alias_ptr_type
6928 (DR_REF (first_dr)), 0));
6929 vec_dest = vect_create_destination_var (scalar_dest,
6930 vectype);
6931 new_stmt = gimple_build_assign (vec_dest, data_ref);
6932 new_temp = make_ssa_name (vec_dest, new_stmt);
6933 gimple_assign_set_lhs (new_stmt, new_temp);
6934 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
6935 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
6936 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6937 msq = new_temp;
6938
6939 bump = size_binop (MULT_EXPR, vs,
6940 TYPE_SIZE_UNIT (elem_type));
6941 bump = size_binop (MINUS_EXPR, bump, size_one_node);
6942 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
6943 new_stmt = gimple_build_assign
6944 (NULL_TREE, BIT_AND_EXPR, ptr,
6945 build_int_cst
6946 (TREE_TYPE (ptr),
6947 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6948 ptr = copy_ssa_name (ptr, new_stmt);
6949 gimple_assign_set_lhs (new_stmt, ptr);
6950 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6951 data_ref
6952 = build2 (MEM_REF, vectype, ptr,
6953 build_int_cst (reference_alias_ptr_type
6954 (DR_REF (first_dr)), 0));
6955 break;
6956 }
6957 case dr_explicit_realign_optimized:
6958 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6959 new_temp = copy_ssa_name (dataref_ptr);
6960 else
6961 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
6962 new_stmt = gimple_build_assign
6963 (new_temp, BIT_AND_EXPR, dataref_ptr,
6964 build_int_cst
6965 (TREE_TYPE (dataref_ptr),
6966 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6967 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6968 data_ref
6969 = build2 (MEM_REF, vectype, new_temp,
6970 build_int_cst (reference_alias_ptr_type
6971 (DR_REF (first_dr)), 0));
6972 break;
6973 default:
6974 gcc_unreachable ();
6975 }
6976 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6977 new_stmt = gimple_build_assign (vec_dest, data_ref);
6978 new_temp = make_ssa_name (vec_dest, new_stmt);
6979 gimple_assign_set_lhs (new_stmt, new_temp);
6980 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6981
6982 /* 3. Handle explicit realignment if necessary/supported.
6983 Create in loop:
6984 vec_dest = realign_load (msq, lsq, realignment_token) */
6985 if (alignment_support_scheme == dr_explicit_realign_optimized
6986 || alignment_support_scheme == dr_explicit_realign)
6987 {
6988 lsq = gimple_assign_lhs (new_stmt);
6989 if (!realignment_token)
6990 realignment_token = dataref_ptr;
6991 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6992 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
6993 msq, lsq, realignment_token);
6994 new_temp = make_ssa_name (vec_dest, new_stmt);
6995 gimple_assign_set_lhs (new_stmt, new_temp);
6996 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6997
6998 if (alignment_support_scheme == dr_explicit_realign_optimized)
6999 {
7000 gcc_assert (phi);
7001 if (i == vec_num - 1 && j == ncopies - 1)
7002 add_phi_arg (phi, lsq,
7003 loop_latch_edge (containing_loop),
7004 UNKNOWN_LOCATION);
7005 msq = lsq;
7006 }
7007 }
7008
7009 /* 4. Handle invariant-load. */
7010 if (inv_p && !bb_vinfo)
7011 {
7012 gcc_assert (!grouped_load);
7013 /* If we have versioned for aliasing or the loop doesn't
7014 have any data dependencies that would preclude this,
7015 then we are sure this is a loop invariant load and
7016 thus we can insert it on the preheader edge. */
7017 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7018 && !nested_in_vect_loop
7019 && hoist_defs_of_uses (stmt, loop))
7020 {
7021 if (dump_enabled_p ())
7022 {
7023 dump_printf_loc (MSG_NOTE, vect_location,
7024 "hoisting out of the vectorized "
7025 "loop: ");
7026 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7027 }
7028 tree tem = copy_ssa_name (scalar_dest);
7029 gsi_insert_on_edge_immediate
7030 (loop_preheader_edge (loop),
7031 gimple_build_assign (tem,
7032 unshare_expr
7033 (gimple_assign_rhs1 (stmt))));
7034 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7035 }
7036 else
7037 {
7038 gimple_stmt_iterator gsi2 = *gsi;
7039 gsi_next (&gsi2);
7040 new_temp = vect_init_vector (stmt, scalar_dest,
7041 vectype, &gsi2);
7042 }
7043 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7044 set_vinfo_for_stmt (new_stmt,
7045 new_stmt_vec_info (new_stmt, vinfo));
7046 }
7047
7048 if (negative)
7049 {
7050 tree perm_mask = perm_mask_for_reverse (vectype);
7051 new_temp = permute_vec_elements (new_temp, new_temp,
7052 perm_mask, stmt, gsi);
7053 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7054 }
7055
7056 /* Collect vector loads and later create their permutation in
7057 vect_transform_grouped_load (). */
7058 if (grouped_load || slp_perm)
7059 dr_chain.quick_push (new_temp);
7060
7061 /* Store vector loads in the corresponding SLP_NODE. */
7062 if (slp && !slp_perm)
7063 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7064 }
7065 /* Bump the vector pointer to account for a gap or for excess
7066 elements loaded for a permuted SLP load. */
7067 if (group_gap_adj != 0)
7068 {
7069 bool ovf;
7070 tree bump
7071 = wide_int_to_tree (sizetype,
7072 wi::smul (TYPE_SIZE_UNIT (elem_type),
7073 group_gap_adj, &ovf));
7074 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7075 stmt, bump);
7076 }
7077 }
7078
7079 if (slp && !slp_perm)
7080 continue;
7081
7082 if (slp_perm)
7083 {
7084 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7085 slp_node_instance, false))
7086 {
7087 dr_chain.release ();
7088 return false;
7089 }
7090 }
7091 else
7092 {
7093 if (grouped_load)
7094 {
7095 if (!load_lanes_p)
7096 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7097 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7098 }
7099 else
7100 {
7101 if (j == 0)
7102 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7103 else
7104 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7105 prev_stmt_info = vinfo_for_stmt (new_stmt);
7106 }
7107 }
7108 dr_chain.release ();
7109 }
7110
7111 return true;
7112 }
7113
7114 /* Function vect_is_simple_cond.
7115
7116 Input:
7117 VINFO - the vect info of the loop or basic block being vectorized.
7118 COND - Condition that is checked for simple use.
7119
7120 Output:
7121 *COMP_VECTYPE - the vector type for the comparison.
7122
7123 Returns whether a COND can be vectorized. Checks whether
7124 condition operands are supportable using vect_is_simple_use. */
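
/* For example, for COND a_5 > b_7 both operands are SSA names; if their
   definitions are simple uses whose vector type is, say, V4SI, then
   *COMP_VECTYPE is set to that V4SI type.  */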
7125
7126 static bool
7127 vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
7128 {
7129 tree lhs, rhs;
7130 enum vect_def_type dt;
7131 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7132
7133 if (!COMPARISON_CLASS_P (cond))
7134 return false;
7135
7136 lhs = TREE_OPERAND (cond, 0);
7137 rhs = TREE_OPERAND (cond, 1);
7138
7139 if (TREE_CODE (lhs) == SSA_NAME)
7140 {
7141 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7142 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
7143 return false;
7144 }
7145 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
7146 && TREE_CODE (lhs) != FIXED_CST)
7147 return false;
7148
7149 if (TREE_CODE (rhs) == SSA_NAME)
7150 {
7151 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7152 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
7153 return false;
7154 }
7155 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
7156 && TREE_CODE (rhs) != FIXED_CST)
7157 return false;
7158
7159 *comp_vectype = vectype1 ? vectype1 : vectype2;
7160 return true;
7161 }
7162
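/* A minimal standalone sketch (hypothetical, for illustration only; the
   function name below is not part of GCC) of the kind of condition
   vect_is_simple_cond accepts: the predicate of the COND_EXPR is itself a
   comparison, and its operands are SSA names or integer/real constants.
   A predicate that is a precomputed boolean variable rather than a
   comparison fails the COMPARISON_CLASS_P check above.  */

void
vect_doc_simple_cond_example (int *a, const int *b, int n)
{
  for (int i = 0; i < n; i++)
    a[i] = b[i] < 10 ? a[i] : 0;  /* "b[i] < 10" is a simple condition.  */
}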
7163 /* vectorizable_condition.
7164
7165 Check if STMT is a conditional modify expression that can be vectorized.
7166 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7167 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7168 at GSI.
7169
7170 When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
7171 to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
7172 else clause if it is 2).
7173
7174 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7175
7176 bool
7177 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7178 gimple **vec_stmt, tree reduc_def, int reduc_index,
7179 slp_tree slp_node)
7180 {
7181 tree scalar_dest = NULL_TREE;
7182 tree vec_dest = NULL_TREE;
7183 tree cond_expr, then_clause, else_clause;
7184 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7185 tree comp_vectype = NULL_TREE;
7186 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7187 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7188 tree vec_compare, vec_cond_expr;
7189 tree new_temp;
7190 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7191 enum vect_def_type dt, dts[4];
7192 int ncopies;
7193 enum tree_code code;
7194 stmt_vec_info prev_stmt_info = NULL;
7195 int i, j;
7196 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7197 vec<tree> vec_oprnds0 = vNULL;
7198 vec<tree> vec_oprnds1 = vNULL;
7199 vec<tree> vec_oprnds2 = vNULL;
7200 vec<tree> vec_oprnds3 = vNULL;
7201 tree vec_cmp_type;
7202
7203 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7204 return false;
7205
7206 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
7207 {
7208 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7209 return false;
7210
7211 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7212 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7213 && reduc_def))
7214 return false;
7215
7216 /* FORNOW: not yet supported. */
7217 if (STMT_VINFO_LIVE_P (stmt_info))
7218 {
7219 if (dump_enabled_p ())
7220 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7221 "value used after loop.\n");
7222 return false;
7223 }
7224 }
7225
7226 /* Is vectorizable conditional operation? */
7227 if (!is_gimple_assign (stmt))
7228 return false;
7229
7230 code = gimple_assign_rhs_code (stmt);
7231
7232 if (code != COND_EXPR)
7233 return false;
7234
7235 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7236 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
7237
7238 if (slp_node || PURE_SLP_STMT (stmt_info))
7239 ncopies = 1;
7240 else
7241 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7242
7243 gcc_assert (ncopies >= 1);
7244 if (reduc_index && ncopies > 1)
7245 return false; /* FORNOW */
7246
7247 cond_expr = gimple_assign_rhs1 (stmt);
7248 then_clause = gimple_assign_rhs2 (stmt);
7249 else_clause = gimple_assign_rhs3 (stmt);
7250
7251 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
7252 || !comp_vectype)
7253 return false;
7254
7255 gimple *def_stmt;
7256 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt))
7257 return false;
7258 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt))
7259 return false;
7260
7261 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
7262 if (vec_cmp_type == NULL_TREE)
7263 return false;
7264
7265 if (!vec_stmt)
7266 {
7267 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
7268 return expand_vec_cond_expr_p (vectype, comp_vectype);
7269 }
7270
7271 /* Transform. */
7272
7273 if (!slp_node)
7274 {
7275 vec_oprnds0.create (1);
7276 vec_oprnds1.create (1);
7277 vec_oprnds2.create (1);
7278 vec_oprnds3.create (1);
7279 }
7280
7281 /* Handle def. */
7282 scalar_dest = gimple_assign_lhs (stmt);
7283 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7284
7285 /* Handle cond expr. */
7286 for (j = 0; j < ncopies; j++)
7287 {
7288 gassign *new_stmt = NULL;
7289 if (j == 0)
7290 {
7291 if (slp_node)
7292 {
7293 auto_vec<tree, 4> ops;
7294 auto_vec<vec<tree>, 4> vec_defs;
7295
7296 ops.safe_push (TREE_OPERAND (cond_expr, 0));
7297 ops.safe_push (TREE_OPERAND (cond_expr, 1));
7298 ops.safe_push (then_clause);
7299 ops.safe_push (else_clause);
7300 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7301 vec_oprnds3 = vec_defs.pop ();
7302 vec_oprnds2 = vec_defs.pop ();
7303 vec_oprnds1 = vec_defs.pop ();
7304 vec_oprnds0 = vec_defs.pop ();
7305
7306 ops.release ();
7307 vec_defs.release ();
7308 }
7309 else
7310 {
7311 gimple *gtemp;
7312 vec_cond_lhs =
7313 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt);
7314 vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
7315 loop_vinfo, &gtemp, &dts[0]);
7316
7317 vec_cond_rhs =
7318 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
7319 stmt);
7320 vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
7321 loop_vinfo, &gtemp, &dts[1]);
7322 if (reduc_index == 1)
7323 vec_then_clause = reduc_def;
7324 else
7325 {
7326 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
7327 stmt);
7328 vect_is_simple_use (then_clause, loop_vinfo,
7329 &gtemp, &dts[2]);
7330 }
7331 if (reduc_index == 2)
7332 vec_else_clause = reduc_def;
7333 else
7334 {
7335 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
7336 stmt);
7337 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
7338 }
7339 }
7340 }
7341 else
7342 {
7343 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
7344 vec_oprnds0.pop ());
7345 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
7346 vec_oprnds1.pop ());
7347 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
7348 vec_oprnds2.pop ());
7349 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
7350 vec_oprnds3.pop ());
7351 }
7352
7353 if (!slp_node)
7354 {
7355 vec_oprnds0.quick_push (vec_cond_lhs);
7356 vec_oprnds1.quick_push (vec_cond_rhs);
7357 vec_oprnds2.quick_push (vec_then_clause);
7358 vec_oprnds3.quick_push (vec_else_clause);
7359 }
7360
7361 /* Arguments are ready. Create the new vector stmt. */
7362 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
7363 {
7364 vec_cond_rhs = vec_oprnds1[i];
7365 vec_then_clause = vec_oprnds2[i];
7366 vec_else_clause = vec_oprnds3[i];
7367
7368 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
7369 vec_cond_lhs, vec_cond_rhs);
7370 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
7371 vec_compare, vec_then_clause, vec_else_clause);
7372
7373 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
7374 new_temp = make_ssa_name (vec_dest, new_stmt);
7375 gimple_assign_set_lhs (new_stmt, new_temp);
7376 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7377 if (slp_node)
7378 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7379 }
7380
7381 if (slp_node)
7382 continue;
7383
7384 if (j == 0)
7385 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7386 else
7387 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7388
7389 prev_stmt_info = vinfo_for_stmt (new_stmt);
7390 }
7391
7392 vec_oprnds0.release ();
7393 vec_oprnds1.release ();
7394 vec_oprnds2.release ();
7395 vec_oprnds3.release ();
7396
7397 return true;
7398 }
7399
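/* A hypothetical scalar loop of the shape handled by vectorizable_condition
   above (illustration only; not part of GCC).  Conceptually, each vector's
   worth of the ?: assignment is replaced by a single
   VEC_COND_EXPR <vb < vc, vx, vy> built from vector defs of the comparison
   operands and of the then/else clauses.  */

void
vect_doc_cond_expr_example (int *a, const int *b, const int *c,
                            const int *x, const int *y, int n)
{
  for (int i = 0; i < n; i++)
    a[i] = b[i] < c[i] ? x[i] : y[i];
}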
7400
7401 /* Make sure the statement is vectorizable. */
7402
7403 bool
7404 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
7405 {
7406 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7407 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7408 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
7409 bool ok;
7410 tree scalar_type, vectype;
7411 gimple *pattern_stmt;
7412 gimple_seq pattern_def_seq;
7413
7414 if (dump_enabled_p ())
7415 {
7416 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
7417 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7418 }
7419
7420 if (gimple_has_volatile_ops (stmt))
7421 {
7422 if (dump_enabled_p ())
7423 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7424 "not vectorized: stmt has volatile operands\n");
7425
7426 return false;
7427 }
7428
7429 /* Skip stmts that do not need to be vectorized. In loops this is expected
7430 to include:
7431 - the COND_EXPR which is the loop exit condition
7432 - any LABEL_EXPRs in the loop
7433 - computations that are used only for array indexing or loop control.
7434 In basic blocks we only analyze statements that are a part of some SLP
7435 instance; therefore, all the statements are relevant.
7436
7437 The pattern statement needs to be analyzed instead of the original statement
7438 if the original statement is not relevant. Otherwise, we analyze both
7439 statements. In basic blocks we are called from some SLP instance
7440 traversal and do not analyze the pattern stmt instead; the pattern
7441 stmts will already be part of an SLP instance. */
7442
7443 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7444 if (!STMT_VINFO_RELEVANT_P (stmt_info)
7445 && !STMT_VINFO_LIVE_P (stmt_info))
7446 {
7447 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7448 && pattern_stmt
7449 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7450 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7451 {
7452 /* Analyze PATTERN_STMT instead of the original stmt. */
7453 stmt = pattern_stmt;
7454 stmt_info = vinfo_for_stmt (pattern_stmt);
7455 if (dump_enabled_p ())
7456 {
7457 dump_printf_loc (MSG_NOTE, vect_location,
7458 "==> examining pattern statement: ");
7459 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7460 }
7461 }
7462 else
7463 {
7464 if (dump_enabled_p ())
7465 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
7466
7467 return true;
7468 }
7469 }
7470 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7471 && node == NULL
7472 && pattern_stmt
7473 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7474 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7475 {
7476 /* Analyze PATTERN_STMT too. */
7477 if (dump_enabled_p ())
7478 {
7479 dump_printf_loc (MSG_NOTE, vect_location,
7480 "==> examining pattern statement: ");
7481 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7482 }
7483
7484 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7485 return false;
7486 }
7487
7488 if (is_pattern_stmt_p (stmt_info)
7489 && node == NULL
7490 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
7491 {
7492 gimple_stmt_iterator si;
7493
7494 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
7495 {
7496 gimple *pattern_def_stmt = gsi_stmt (si);
7497 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
7498 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
7499 {
7500 /* Analyze def stmt of STMT if it's a pattern stmt. */
7501 if (dump_enabled_p ())
7502 {
7503 dump_printf_loc (MSG_NOTE, vect_location,
7504 "==> examining pattern def statement: ");
7505 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
7506 }
7507
7508 if (!vect_analyze_stmt (pattern_def_stmt,
7509 need_to_vectorize, node))
7510 return false;
7511 }
7512 }
7513 }
7514
7515 switch (STMT_VINFO_DEF_TYPE (stmt_info))
7516 {
7517 case vect_internal_def:
7518 break;
7519
7520 case vect_reduction_def:
7521 case vect_nested_cycle:
7522 gcc_assert (!bb_vinfo
7523 && (relevance == vect_used_in_outer
7524 || relevance == vect_used_in_outer_by_reduction
7525 || relevance == vect_used_by_reduction
7526 || relevance == vect_unused_in_scope));
7527 break;
7528
7529 case vect_induction_def:
7530 case vect_constant_def:
7531 case vect_external_def:
7532 case vect_unknown_def_type:
7533 default:
7534 gcc_unreachable ();
7535 }
7536
7537 if (bb_vinfo)
7538 {
7539 gcc_assert (PURE_SLP_STMT (stmt_info));
7540
7541 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
7542 if (dump_enabled_p ())
7543 {
7544 dump_printf_loc (MSG_NOTE, vect_location,
7545 "get vectype for scalar type: ");
7546 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
7547 dump_printf (MSG_NOTE, "\n");
7548 }
7549
7550 vectype = get_vectype_for_scalar_type (scalar_type);
7551 if (!vectype)
7552 {
7553 if (dump_enabled_p ())
7554 {
7555 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7556 "not SLPed: unsupported data-type ");
7557 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
7558 scalar_type);
7559 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7560 }
7561 return false;
7562 }
7563
7564 if (dump_enabled_p ())
7565 {
7566 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
7567 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
7568 dump_printf (MSG_NOTE, "\n");
7569 }
7570
7571 STMT_VINFO_VECTYPE (stmt_info) = vectype;
7572 }
7573
7574 if (STMT_VINFO_RELEVANT_P (stmt_info))
7575 {
7576 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
7577 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
7578 || (is_gimple_call (stmt)
7579 && gimple_call_lhs (stmt) == NULL_TREE));
7580 *need_to_vectorize = true;
7581 }
7582
7583 if (PURE_SLP_STMT (stmt_info) && !node)
7584 {
7585 dump_printf_loc (MSG_NOTE, vect_location,
7586 "handled only by SLP analysis\n");
7587 return true;
7588 }
7589
7590 ok = true;
7591 if (!bb_vinfo
7592 && (STMT_VINFO_RELEVANT_P (stmt_info)
7593 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
7594 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7595 || vectorizable_conversion (stmt, NULL, NULL, node)
7596 || vectorizable_shift (stmt, NULL, NULL, node)
7597 || vectorizable_operation (stmt, NULL, NULL, node)
7598 || vectorizable_assignment (stmt, NULL, NULL, node)
7599 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7600 || vectorizable_call (stmt, NULL, NULL, node)
7601 || vectorizable_store (stmt, NULL, NULL, node)
7602 || vectorizable_reduction (stmt, NULL, NULL, node)
7603 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7604 else
7605 {
7606 if (bb_vinfo)
7607 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7608 || vectorizable_conversion (stmt, NULL, NULL, node)
7609 || vectorizable_shift (stmt, NULL, NULL, node)
7610 || vectorizable_operation (stmt, NULL, NULL, node)
7611 || vectorizable_assignment (stmt, NULL, NULL, node)
7612 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7613 || vectorizable_call (stmt, NULL, NULL, node)
7614 || vectorizable_store (stmt, NULL, NULL, node)
7615 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7616 }
7617
7618 if (!ok)
7619 {
7620 if (dump_enabled_p ())
7621 {
7622 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7623 "not vectorized: relevant stmt not ");
7624 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7625 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7626 }
7627
7628 return false;
7629 }
7630
7631 if (bb_vinfo)
7632 return true;
7633
7634 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
7635 need extra handling, except for vectorizable reductions. */
7636 if (STMT_VINFO_LIVE_P (stmt_info)
7637 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7638 ok = vectorizable_live_operation (stmt, NULL, NULL);
7639
7640 if (!ok)
7641 {
7642 if (dump_enabled_p ())
7643 {
7644 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7645 "not vectorized: live stmt not ");
7646 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7647 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7648 }
7649
7650 return false;
7651 }
7652
7653 return true;
7654 }
7655
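/* An illustrative scalar loop (hypothetical, not part of GCC) annotated with
   the way the analysis above classifies its statements: the load, add and
   store are relevant and must be supported by one of the vectorizable_*
   routines, whereas the induction variable increment and the exit test are
   used only for loop control and are skipped as irrelevant.  */

void
vect_doc_relevance_example (int *a, const int *b, int n)
{
  for (int i = 0; i < n; i++)  /* i increment and i < n: loop control only.  */
    a[i] = b[i] + 1;           /* load, add, store: relevant stmts.  */
}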
7656
7657 /* Function vect_transform_stmt.
7658
7659 Create a vectorized stmt to replace STMT, and insert it at BSI. */
7660
7661 bool
7662 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
7663 bool *grouped_store, slp_tree slp_node,
7664 slp_instance slp_node_instance)
7665 {
7666 bool is_store = false;
7667 gimple *vec_stmt = NULL;
7668 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7669 bool done;
7670
7671 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7672
7673 switch (STMT_VINFO_TYPE (stmt_info))
7674 {
7675 case type_demotion_vec_info_type:
7676 case type_promotion_vec_info_type:
7677 case type_conversion_vec_info_type:
7678 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7679 gcc_assert (done);
7680 break;
7681
7682 case induc_vec_info_type:
7683 gcc_assert (!slp_node);
7684 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7685 gcc_assert (done);
7686 break;
7687
7688 case shift_vec_info_type:
7689 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
7690 gcc_assert (done);
7691 break;
7692
7693 case op_vec_info_type:
7694 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7695 gcc_assert (done);
7696 break;
7697
7698 case assignment_vec_info_type:
7699 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7700 gcc_assert (done);
7701 break;
7702
7703 case load_vec_info_type:
7704 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
7705 slp_node_instance);
7706 gcc_assert (done);
7707 break;
7708
7709 case store_vec_info_type:
7710 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
7711 gcc_assert (done);
7712 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
7713 {
7714 /* In case of interleaving, the whole chain is vectorized when the
7715 last store in the chain is reached. Store stmts before the last
7716 one are skipped, and their stmt_vec_info shouldn't be freed
7717 meanwhile. */
7718 *grouped_store = true;
7719 if (STMT_VINFO_VEC_STMT (stmt_info))
7720 is_store = true;
7721 }
7722 else
7723 is_store = true;
7724 break;
7725
7726 case condition_vec_info_type:
7727 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
7728 gcc_assert (done);
7729 break;
7730
7731 case call_vec_info_type:
7732 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
7733 stmt = gsi_stmt (*gsi);
7734 if (is_gimple_call (stmt)
7735 && gimple_call_internal_p (stmt)
7736 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
7737 is_store = true;
7738 break;
7739
7740 case call_simd_clone_vec_info_type:
7741 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
7742 stmt = gsi_stmt (*gsi);
7743 break;
7744
7745 case reduc_vec_info_type:
7746 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
7747 gcc_assert (done);
7748 break;
7749
7750 default:
7751 if (!STMT_VINFO_LIVE_P (stmt_info))
7752 {
7753 if (dump_enabled_p ())
7754 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7755 "stmt not supported.\n");
7756 gcc_unreachable ();
7757 }
7758 }
7759
7760 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
7761 This would break hybrid SLP vectorization. */
7762 if (slp_node)
7763 gcc_assert (!vec_stmt
7764 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
7765
7766 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
7767 is being vectorized, but outside the immediately enclosing loop. */
7768 if (vec_stmt
7769 && STMT_VINFO_LOOP_VINFO (stmt_info)
7770 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
7771 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
7772 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
7773 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
7774 || STMT_VINFO_RELEVANT (stmt_info) ==
7775 vect_used_in_outer_by_reduction))
7776 {
7777 struct loop *innerloop = LOOP_VINFO_LOOP (
7778 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
7779 imm_use_iterator imm_iter;
7780 use_operand_p use_p;
7781 tree scalar_dest;
7782 gimple *exit_phi;
7783
7784 if (dump_enabled_p ())
7785 dump_printf_loc (MSG_NOTE, vect_location,
7786 "Record the vdef for outer-loop vectorization.\n");
7787
7788 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
7789 (to be used when vectorizing outer-loop stmts that use the DEF of
7790 STMT). */
7791 if (gimple_code (stmt) == GIMPLE_PHI)
7792 scalar_dest = PHI_RESULT (stmt);
7793 else
7794 scalar_dest = gimple_assign_lhs (stmt);
7795
7796 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
7797 {
7798 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
7799 {
7800 exit_phi = USE_STMT (use_p);
7801 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
7802 }
7803 }
7804 }
7805
7806 /* Handle stmts whose DEF is used outside the loop-nest that is
7807 being vectorized. */
7808 if (STMT_VINFO_LIVE_P (stmt_info)
7809 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7810 {
7811 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
7812 gcc_assert (done);
7813 }
7814
7815 if (vec_stmt)
7816 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
7817
7818 return is_store;
7819 }
7820
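/* A hypothetical example (not part of GCC) of the interleaved-store case
   handled in vect_transform_stmt above: the two stores below form a single
   group, and the whole chain is vectorized only when the last store in the
   group is transformed; the earlier group members are skipped at that
   point and their stmt_vec_info is kept alive until then.  */

void
vect_doc_grouped_store_example (int *a, const int *x, const int *y, int n)
{
  for (int i = 0; i < n; i++)
    {
      a[2 * i] = x[i];      /* First element of the store group.  */
      a[2 * i + 1] = y[i];  /* Last element; triggers the group transform.  */
    }
}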
7821
7822 /* Remove a group of stores (for SLP or interleaving), free their
7823 stmt_vec_info. */
7824
7825 void
7826 vect_remove_stores (gimple *first_stmt)
7827 {
7828 gimple *next = first_stmt;
7829 gimple *tmp;
7830 gimple_stmt_iterator next_si;
7831
7832 while (next)
7833 {
7834 stmt_vec_info stmt_info = vinfo_for_stmt (next);
7835
7836 tmp = GROUP_NEXT_ELEMENT (stmt_info);
7837 if (is_pattern_stmt_p (stmt_info))
7838 next = STMT_VINFO_RELATED_STMT (stmt_info);
7839 /* Free the attached stmt_vec_info and remove the stmt. */
7840 next_si = gsi_for_stmt (next);
7841 unlink_stmt_vdef (next);
7842 gsi_remove (&next_si, true);
7843 release_defs (next);
7844 free_stmt_vec_info (next);
7845 next = tmp;
7846 }
7847 }
7848
7849
7850 /* Function new_stmt_vec_info.
7851
7852 Create and initialize a new stmt_vec_info struct for STMT. */
7853
7854 stmt_vec_info
7855 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
7856 {
7857 stmt_vec_info res;
7858 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
7859
7860 STMT_VINFO_TYPE (res) = undef_vec_info_type;
7861 STMT_VINFO_STMT (res) = stmt;
7862 res->vinfo = vinfo;
7863 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
7864 STMT_VINFO_LIVE_P (res) = false;
7865 STMT_VINFO_VECTYPE (res) = NULL;
7866 STMT_VINFO_VEC_STMT (res) = NULL;
7867 STMT_VINFO_VECTORIZABLE (res) = true;
7868 STMT_VINFO_IN_PATTERN_P (res) = false;
7869 STMT_VINFO_RELATED_STMT (res) = NULL;
7870 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
7871 STMT_VINFO_DATA_REF (res) = NULL;
7872 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
7873
7874 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
7875 STMT_VINFO_DR_OFFSET (res) = NULL;
7876 STMT_VINFO_DR_INIT (res) = NULL;
7877 STMT_VINFO_DR_STEP (res) = NULL;
7878 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
7879
7880 if (gimple_code (stmt) == GIMPLE_PHI
7881 && is_loop_header_bb_p (gimple_bb (stmt)))
7882 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
7883 else
7884 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
7885
7886 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
7887 STMT_SLP_TYPE (res) = loop_vect;
7888 GROUP_FIRST_ELEMENT (res) = NULL;
7889 GROUP_NEXT_ELEMENT (res) = NULL;
7890 GROUP_SIZE (res) = 0;
7891 GROUP_STORE_COUNT (res) = 0;
7892 GROUP_GAP (res) = 0;
7893 GROUP_SAME_DR_STMT (res) = NULL;
7894
7895 return res;
7896 }
7897
7898
7899 /* Create a vector to hold stmt_vec_info structs. */
7900
7901 void
7902 init_stmt_vec_info_vec (void)
7903 {
7904 gcc_assert (!stmt_vec_info_vec.exists ());
7905 stmt_vec_info_vec.create (50);
7906 }
7907
7908
7909 /* Free the vector of stmt_vec_info structs. */
7910
7911 void
7912 free_stmt_vec_info_vec (void)
7913 {
7914 unsigned int i;
7915 stmt_vec_info info;
7916 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
7917 if (info != NULL)
7918 free_stmt_vec_info (STMT_VINFO_STMT (info));
7919 gcc_assert (stmt_vec_info_vec.exists ());
7920 stmt_vec_info_vec.release ();
7921 }
7922
7923
7924 /* Free stmt vectorization related info. */
7925
7926 void
7927 free_stmt_vec_info (gimple *stmt)
7928 {
7929 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7930
7931 if (!stmt_info)
7932 return;
7933
7934 /* Check if this statement has a related "pattern stmt"
7935 (introduced by the vectorizer during the pattern recognition
7936 pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
7937 too. */
7938 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
7939 {
7940 stmt_vec_info patt_info
7941 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7942 if (patt_info)
7943 {
7944 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
7945 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
7946 gimple_set_bb (patt_stmt, NULL);
7947 tree lhs = gimple_get_lhs (patt_stmt);
7948 if (TREE_CODE (lhs) == SSA_NAME)
7949 release_ssa_name (lhs);
7950 if (seq)
7951 {
7952 gimple_stmt_iterator si;
7953 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
7954 {
7955 gimple *seq_stmt = gsi_stmt (si);
7956 gimple_set_bb (seq_stmt, NULL);
7957 lhs = gimple_get_lhs (seq_stmt);
7958 if (TREE_CODE (lhs) == SSA_NAME)
7959 release_ssa_name (lhs);
7960 free_stmt_vec_info (seq_stmt);
7961 }
7962 }
7963 free_stmt_vec_info (patt_stmt);
7964 }
7965 }
7966
7967 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
7968 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
7969 set_vinfo_for_stmt (stmt, NULL);
7970 free (stmt_info);
7971 }
7972
7973
7974 /* Function get_vectype_for_scalar_type_and_size.
7975
7976 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
7977 by the target. */
7978
7979 static tree
7980 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
7981 {
7982 machine_mode inner_mode = TYPE_MODE (scalar_type);
7983 machine_mode simd_mode;
7984 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
7985 int nunits;
7986 tree vectype;
7987
7988 if (nbytes == 0)
7989 return NULL_TREE;
7990
7991 if (GET_MODE_CLASS (inner_mode) != MODE_INT
7992 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
7993 return NULL_TREE;
7994
7995 /* For vector types of elements whose mode precision doesn't
7996 match their type's precision we use an element type of mode
7997 precision. The vectorization routines will have to make sure
7998 they support the proper result truncation/extension.
7999 We also make sure to build vector types with INTEGER_TYPE
8000 component type only. */
8001 if (INTEGRAL_TYPE_P (scalar_type)
8002 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
8003 || TREE_CODE (scalar_type) != INTEGER_TYPE))
8004 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
8005 TYPE_UNSIGNED (scalar_type));
8006
8007 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
8008 When the component mode passes the above test simply use a type
8009 corresponding to that mode. The theory is that any use that
8010 would cause problems with this will disable vectorization anyway. */
8011 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
8012 && !INTEGRAL_TYPE_P (scalar_type))
8013 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
8014
8015 /* We can't build a vector type of elements with alignment bigger than
8016 their size. */
8017 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
8018 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
8019 TYPE_UNSIGNED (scalar_type));
8020
8021 /* If we fell back to using the mode, fail if there was
8022 no scalar type for it. */
8023 if (scalar_type == NULL_TREE)
8024 return NULL_TREE;
8025
8026 /* If no size was supplied use the mode the target prefers. Otherwise
8027 lookup a vector mode of the specified size. */
8028 if (size == 0)
8029 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
8030 else
8031 simd_mode = mode_for_vector (inner_mode, size / nbytes);
8032 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
8033 if (nunits <= 1)
8034 return NULL_TREE;
8035
8036 vectype = build_vector_type (scalar_type, nunits);
8037
8038 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
8039 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
8040 return NULL_TREE;
8041
8042 return vectype;
8043 }
8044
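/* A worked example (illustrative only) of the size arithmetic above: for
   4-byte 'int' elements and a 16-byte vector size the routine computes
   nunits = 16 / 4 = 4 and builds a 4-element integer vector type (V4SImode
   on targets that provide it).  The helper below is hypothetical and merely
   restates the division; it is not used anywhere in GCC.  */

static inline unsigned
vect_doc_nunits_example (unsigned vector_size, unsigned elem_size)
{
  /* Mirrors "nunits = GET_MODE_SIZE (simd_mode) / nbytes" above.  */
  return vector_size / elem_size;  /* e.g. 16 / 4 == 4.  */
}
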
8045 unsigned int current_vector_size;
8046
8047 /* Function get_vectype_for_scalar_type.
8048
8049 Returns the vector type corresponding to SCALAR_TYPE as supported
8050 by the target. */
8051
8052 tree
8053 get_vectype_for_scalar_type (tree scalar_type)
8054 {
8055 tree vectype;
8056 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
8057 current_vector_size);
8058 if (vectype
8059 && current_vector_size == 0)
8060 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
8061 return vectype;
8062 }
8063
8064 /* Function get_same_sized_vectype
8065
8066 Returns a vector type corresponding to SCALAR_TYPE of size
8067 VECTOR_TYPE if supported by the target. */
8068
8069 tree
8070 get_same_sized_vectype (tree scalar_type, tree vector_type)
8071 {
8072 if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
8073 return build_same_sized_truth_vector_type (vector_type);
8074
8075 return get_vectype_for_scalar_type_and_size
8076 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
8077 }
8078
8079 /* Function vect_is_simple_use.
8080
8081 Input:
8082 VINFO - the vect info of the loop or basic block that is being vectorized.
8083 OPERAND - operand in the loop or bb.
8084 Output:
8085 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
8086 DT - the type of definition
8087
8088 Returns whether a stmt with OPERAND can be vectorized.
8089 For loops, supportable operands are constants, loop invariants, and operands
8090 that are defined by the current iteration of the loop. Unsupportable
8091 operands are those that are defined by a previous iteration of the loop (as
8092 is the case in reduction/induction computations).
8093 For basic blocks, supportable operands are constants and bb invariants.
8094 For now, operands defined outside the basic block are not supported. */
8095
8096 bool
8097 vect_is_simple_use (tree operand, vec_info *vinfo,
8098 gimple **def_stmt, enum vect_def_type *dt)
8099 {
8100 *def_stmt = NULL;
8101 *dt = vect_unknown_def_type;
8102
8103 if (dump_enabled_p ())
8104 {
8105 dump_printf_loc (MSG_NOTE, vect_location,
8106 "vect_is_simple_use: operand ");
8107 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
8108 dump_printf (MSG_NOTE, "\n");
8109 }
8110
8111 if (CONSTANT_CLASS_P (operand))
8112 {
8113 *dt = vect_constant_def;
8114 return true;
8115 }
8116
8117 if (is_gimple_min_invariant (operand))
8118 {
8119 *dt = vect_external_def;
8120 return true;
8121 }
8122
8123 if (TREE_CODE (operand) != SSA_NAME)
8124 {
8125 if (dump_enabled_p ())
8126 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8127 "not ssa-name.\n");
8128 return false;
8129 }
8130
8131 if (SSA_NAME_IS_DEFAULT_DEF (operand))
8132 {
8133 *dt = vect_external_def;
8134 return true;
8135 }
8136
8137 *def_stmt = SSA_NAME_DEF_STMT (operand);
8138 if (dump_enabled_p ())
8139 {
8140 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
8141 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
8142 }
8143
8144 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
8145 *dt = vect_external_def;
8146 else
8147 {
8148 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
8149 if (is_a <bb_vec_info> (vinfo) && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
8150 *dt = vect_external_def;
8151 else
8152 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
8153 }
8154
8155 if (dump_enabled_p ())
8156 {
8157 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
8158 switch (*dt)
8159 {
8160 case vect_uninitialized_def:
8161 dump_printf (MSG_NOTE, "uninitialized\n");
8162 break;
8163 case vect_constant_def:
8164 dump_printf (MSG_NOTE, "constant\n");
8165 break;
8166 case vect_external_def:
8167 dump_printf (MSG_NOTE, "external\n");
8168 break;
8169 case vect_internal_def:
8170 dump_printf (MSG_NOTE, "internal\n");
8171 break;
8172 case vect_induction_def:
8173 dump_printf (MSG_NOTE, "induction\n");
8174 break;
8175 case vect_reduction_def:
8176 dump_printf (MSG_NOTE, "reduction\n");
8177 break;
8178 case vect_double_reduction_def:
8179 dump_printf (MSG_NOTE, "double reduction\n");
8180 break;
8181 case vect_nested_cycle:
8182 dump_printf (MSG_NOTE, "nested cycle\n");
8183 break;
8184 case vect_unknown_def_type:
8185 dump_printf (MSG_NOTE, "unknown\n");
8186 break;
8187 }
8188 }
8189
8190 if (*dt == vect_unknown_def_type)
8191 {
8192 if (dump_enabled_p ())
8193 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8194 "Unsupported pattern.\n");
8195 return false;
8196 }
8197
8198 switch (gimple_code (*def_stmt))
8199 {
8200 case GIMPLE_PHI:
8201 case GIMPLE_ASSIGN:
8202 case GIMPLE_CALL:
8203 break;
8204 default:
8205 if (dump_enabled_p ())
8206 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8207 "unsupported defining stmt:\n");
8208 return false;
8209 }
8210
8211 return true;
8212 }
8213
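/* An illustrative classification (hypothetical example, not part of GCC) of
   what vect_is_simple_use above returns for the operands in the loop below:
   the literal 5 is vect_constant_def, the parameter 'k' is defined outside
   the vectorized region and is vect_external_def, and the SSA name holding
   the loaded b[i] is defined inside the region and is vect_internal_def.  */

void
vect_doc_simple_use_example (int *a, const int *b, int k, int n)
{
  for (int i = 0; i < n; i++)
    a[i] = b[i] * k + 5;
}
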
8214 /* Function vect_is_simple_use.
8215
8216 Same as vect_is_simple_use but also determines the vector operand
8217 type of OPERAND and stores it to *VECTYPE. If the definition of
8218 OPERAND is vect_uninitialized_def, vect_constant_def or
8219 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
8220 is responsible to compute the best suited vector type for the
8221 scalar operand. */
8222
8223 bool
8224 vect_is_simple_use (tree operand, vec_info *vinfo,
8225 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
8226 {
8227 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
8228 return false;
8229
8230 /* Now get a vector type if the def is internal, otherwise supply
8231 NULL_TREE and leave it up to the caller to figure out a proper
8232 type for the use stmt. */
8233 if (*dt == vect_internal_def
8234 || *dt == vect_induction_def
8235 || *dt == vect_reduction_def
8236 || *dt == vect_double_reduction_def
8237 || *dt == vect_nested_cycle)
8238 {
8239 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
8240
8241 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8242 && !STMT_VINFO_RELEVANT (stmt_info)
8243 && !STMT_VINFO_LIVE_P (stmt_info))
8244 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
8245
8246 *vectype = STMT_VINFO_VECTYPE (stmt_info);
8247 gcc_assert (*vectype != NULL_TREE);
8248 }
8249 else if (*dt == vect_uninitialized_def
8250 || *dt == vect_constant_def
8251 || *dt == vect_external_def)
8252 *vectype = NULL_TREE;
8253 else
8254 gcc_unreachable ();
8255
8256 return true;
8257 }
8258
8259
8260 /* Function supportable_widening_operation
8261
8262 Check whether an operation represented by the code CODE is a
8263 widening operation that is supported by the target platform in
8264 vector form (i.e., when operating on arguments of type VECTYPE_IN
8265 producing a result of type VECTYPE_OUT).
8266
8267 Widening operations we currently support are NOP (CONVERT), FLOAT
8268 and WIDEN_MULT. This function checks if these operations are supported
8269 by the target platform either directly (via vector tree-codes), or via
8270 target builtins.
8271
8272 Output:
8273 - CODE1 and CODE2 are codes of vector operations to be used when
8274 vectorizing the operation, if available.
8275 - MULTI_STEP_CVT determines the number of required intermediate steps in
8276 case of multi-step conversion (like char->short->int - in that case
8277 MULTI_STEP_CVT will be 1).
8278 - INTERM_TYPES contains the intermediate type required to perform the
8279 widening operation (short in the above example). */
8280
8281 bool
8282 supportable_widening_operation (enum tree_code code, gimple *stmt,
8283 tree vectype_out, tree vectype_in,
8284 enum tree_code *code1, enum tree_code *code2,
8285 int *multi_step_cvt,
8286 vec<tree> *interm_types)
8287 {
8288 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8289 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
8290 struct loop *vect_loop = NULL;
8291 machine_mode vec_mode;
8292 enum insn_code icode1, icode2;
8293 optab optab1, optab2;
8294 tree vectype = vectype_in;
8295 tree wide_vectype = vectype_out;
8296 enum tree_code c1, c2;
8297 int i;
8298 tree prev_type, intermediate_type;
8299 machine_mode intermediate_mode, prev_mode;
8300 optab optab3, optab4;
8301
8302 *multi_step_cvt = 0;
8303 if (loop_info)
8304 vect_loop = LOOP_VINFO_LOOP (loop_info);
8305
8306 switch (code)
8307 {
8308 case WIDEN_MULT_EXPR:
8309 /* The result of a vectorized widening operation usually requires
8310 two vectors (because the widened results do not fit into one vector).
8311 The generated vector results would normally be expected to be
8312 generated in the same order as in the original scalar computation,
8313 i.e. if 8 results are generated in each vector iteration, they are
8314 to be organized as follows:
8315 vect1: [res1,res2,res3,res4],
8316 vect2: [res5,res6,res7,res8].
8317
8318 However, in the special case that the result of the widening
8319 operation is used in a reduction computation only, the order doesn't
8320 matter (because when vectorizing a reduction we change the order of
8321 the computation). Some targets can take advantage of this and
8322 generate more efficient code. For example, targets like Altivec,
8323 that support widen_mult using a sequence of {mult_even,mult_odd}
8324 generate the following vectors:
8325 vect1: [res1,res3,res5,res7],
8326 vect2: [res2,res4,res6,res8].
8327
8328 When vectorizing outer-loops, we execute the inner-loop sequentially
8329 (each vectorized inner-loop iteration contributes to VF outer-loop
8330 iterations in parallel). We therefore don't allow changing the
8331 order of the computation in the inner-loop during outer-loop
8332 vectorization. */
8333 /* TODO: Another case in which order doesn't *really* matter is when we
8334 widen and then contract again, e.g. (short)((int)x * y >> 8).
8335 Normally, pack_trunc performs an even/odd permute, whereas the
8336 repack from an even/odd expansion would be an interleave, which
8337 would be significantly simpler for e.g. AVX2. */
8338 /* In any case, in order to avoid duplicating the code below, recurse
8339 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
8340 are properly set up for the caller. If we fail, we'll continue with
8341 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
8342 if (vect_loop
8343 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
8344 && !nested_in_vect_loop_p (vect_loop, stmt)
8345 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
8346 stmt, vectype_out, vectype_in,
8347 code1, code2, multi_step_cvt,
8348 interm_types))
8349 {
8350 /* Elements in a vector with vect_used_by_reduction property cannot
8351 be reordered if the use chain with this property does not have the
8352 same operation. One such example is s += a * b, where elements
8353 in a and b cannot be reordered. Here we check if the vector defined
8354 by STMT is only directly used in the reduction statement. */
8355 tree lhs = gimple_assign_lhs (stmt);
8356 use_operand_p dummy;
8357 gimple *use_stmt;
8358 stmt_vec_info use_stmt_info = NULL;
8359 if (single_imm_use (lhs, &dummy, &use_stmt)
8360 && (use_stmt_info = vinfo_for_stmt (use_stmt))
8361 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
8362 return true;
8363 }
8364 c1 = VEC_WIDEN_MULT_LO_EXPR;
8365 c2 = VEC_WIDEN_MULT_HI_EXPR;
8366 break;
8367
8368 case DOT_PROD_EXPR:
8369 c1 = DOT_PROD_EXPR;
8370 c2 = DOT_PROD_EXPR;
8371 break;
8372
8373 case SAD_EXPR:
8374 c1 = SAD_EXPR;
8375 c2 = SAD_EXPR;
8376 break;
8377
8378 case VEC_WIDEN_MULT_EVEN_EXPR:
8379 /* Support the recursion induced just above. */
8380 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
8381 c2 = VEC_WIDEN_MULT_ODD_EXPR;
8382 break;
8383
8384 case WIDEN_LSHIFT_EXPR:
8385 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
8386 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
8387 break;
8388
8389 CASE_CONVERT:
8390 c1 = VEC_UNPACK_LO_EXPR;
8391 c2 = VEC_UNPACK_HI_EXPR;
8392 break;
8393
8394 case FLOAT_EXPR:
8395 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
8396 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
8397 break;
8398
8399 case FIX_TRUNC_EXPR:
8400 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
8401 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
8402 computing the operation. */
8403 return false;
8404
8405 default:
8406 gcc_unreachable ();
8407 }
8408
8409 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
8410 std::swap (c1, c2);
8411
8412 if (code == FIX_TRUNC_EXPR)
8413 {
8414 /* The signedness is determined from the output operand. */
8415 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8416 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
8417 }
8418 else
8419 {
8420 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8421 optab2 = optab_for_tree_code (c2, vectype, optab_default);
8422 }
8423
8424 if (!optab1 || !optab2)
8425 return false;
8426
8427 vec_mode = TYPE_MODE (vectype);
8428 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
8429 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
8430 return false;
8431
8432 *code1 = c1;
8433 *code2 = c2;
8434
8435 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8436 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8437 return true;
8438
8439 /* Check if it's a multi-step conversion that can be done using intermediate
8440 types. */
8441
8442 prev_type = vectype;
8443 prev_mode = vec_mode;
8444
8445 if (!CONVERT_EXPR_CODE_P (code))
8446 return false;
8447
8448 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8449 intermediate steps in the promotion sequence. We try
8450 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
8451 not. */
8452 interm_types->create (MAX_INTERM_CVT_STEPS);
8453 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8454 {
8455 intermediate_mode = insn_data[icode1].operand[0].mode;
8456 intermediate_type
8457 = lang_hooks.types.type_for_mode (intermediate_mode,
8458 TYPE_UNSIGNED (prev_type));
8459 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
8460 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
8461
8462 if (!optab3 || !optab4
8463 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
8464 || insn_data[icode1].operand[0].mode != intermediate_mode
8465 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
8466 || insn_data[icode2].operand[0].mode != intermediate_mode
8467 || ((icode1 = optab_handler (optab3, intermediate_mode))
8468 == CODE_FOR_nothing)
8469 || ((icode2 = optab_handler (optab4, intermediate_mode))
8470 == CODE_FOR_nothing))
8471 break;
8472
8473 interm_types->quick_push (intermediate_type);
8474 (*multi_step_cvt)++;
8475
8476 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8477 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8478 return true;
8479
8480 prev_type = intermediate_type;
8481 prev_mode = intermediate_mode;
8482 }
8483
8484 interm_types->release ();
8485 return false;
8486 }
8487
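/* A hypothetical example (not part of GCC) of a multi-step widening
   conversion as described above: on typical targets each unpack step only
   doubles the element width, so converting char to int goes
   char -> short -> int, MULTI_STEP_CVT is 1 and INTERM_TYPES holds the
   intermediate short vector type.  */

void
vect_doc_widen_example (int *out, const signed char *in, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = in[i];  /* NOP conversion widened via VEC_UNPACK_LO/HI_EXPR.  */
}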
8488
8489 /* Function supportable_narrowing_operation
8490
8491 Check whether an operation represented by the code CODE is a
8492 narrowing operation that is supported by the target platform in
8493 vector form (i.e., when operating on arguments of type VECTYPE_IN
8494 and producing a result of type VECTYPE_OUT).
8495
8496 Narrowing operations we currently support are NOP (CONVERT) and
8497 FIX_TRUNC. This function checks if these operations are supported by
8498 the target platform directly via vector tree-codes.
8499
8500 Output:
8501 - CODE1 is the code of a vector operation to be used when
8502 vectorizing the operation, if available.
8503 - MULTI_STEP_CVT determines the number of required intermediate steps in
8504 case of multi-step conversion (like int->short->char - in that case
8505 MULTI_STEP_CVT will be 1).
8506 - INTERM_TYPES contains the intermediate type required to perform the
8507 narrowing operation (short in the above example). */
8508
8509 bool
8510 supportable_narrowing_operation (enum tree_code code,
8511 tree vectype_out, tree vectype_in,
8512 enum tree_code *code1, int *multi_step_cvt,
8513 vec<tree> *interm_types)
8514 {
8515 machine_mode vec_mode;
8516 enum insn_code icode1;
8517 optab optab1, interm_optab;
8518 tree vectype = vectype_in;
8519 tree narrow_vectype = vectype_out;
8520 enum tree_code c1;
8521 tree intermediate_type;
8522 machine_mode intermediate_mode, prev_mode;
8523 int i;
8524 bool uns;
8525
8526 *multi_step_cvt = 0;
8527 switch (code)
8528 {
8529 CASE_CONVERT:
8530 c1 = VEC_PACK_TRUNC_EXPR;
8531 break;
8532
8533 case FIX_TRUNC_EXPR:
8534 c1 = VEC_PACK_FIX_TRUNC_EXPR;
8535 break;
8536
8537 case FLOAT_EXPR:
8538 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
8539 tree code and optabs used for computing the operation. */
8540 return false;
8541
8542 default:
8543 gcc_unreachable ();
8544 }
8545
8546 if (code == FIX_TRUNC_EXPR)
8547 /* The signedness is determined from the output operand. */
8548 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8549 else
8550 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8551
8552 if (!optab1)
8553 return false;
8554
8555 vec_mode = TYPE_MODE (vectype);
8556 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
8557 return false;
8558
8559 *code1 = c1;
8560
8561 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8562 return true;
8563
8564 /* Check if it's a multi-step conversion that can be done using intermediate
8565 types. */
8566 prev_mode = vec_mode;
8567 if (code == FIX_TRUNC_EXPR)
8568 uns = TYPE_UNSIGNED (vectype_out);
8569 else
8570 uns = TYPE_UNSIGNED (vectype);
8571
8572 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
8573 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
8574 costly than signed. */
8575 if (code == FIX_TRUNC_EXPR && uns)
8576 {
8577 enum insn_code icode2;
8578
8579 intermediate_type
8580 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
8581 interm_optab
8582 = optab_for_tree_code (c1, intermediate_type, optab_default);
8583 if (interm_optab != unknown_optab
8584 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
8585 && insn_data[icode1].operand[0].mode
8586 == insn_data[icode2].operand[0].mode)
8587 {
8588 uns = false;
8589 optab1 = interm_optab;
8590 icode1 = icode2;
8591 }
8592 }
8593
8594 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8595 intermediate steps in the narrowing sequence. We try
8596 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
8597 interm_types->create (MAX_INTERM_CVT_STEPS);
8598 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8599 {
8600 intermediate_mode = insn_data[icode1].operand[0].mode;
8601 intermediate_type
8602 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
8603 interm_optab
8604 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
8605 optab_default);
8606 if (!interm_optab
8607 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
8608 || insn_data[icode1].operand[0].mode != intermediate_mode
8609 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
8610 == CODE_FOR_nothing))
8611 break;
8612
8613 interm_types->quick_push (intermediate_type);
8614 (*multi_step_cvt)++;
8615
8616 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8617 return true;
8618
8619 prev_mode = intermediate_mode;
8620 optab1 = interm_optab;
8621 }
8622
8623 interm_types->release ();
8624 return false;
8625 }
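
/* A hypothetical example (not part of GCC) of the multi-step narrowing case
   described above: converting int to char typically packs twice,
   int -> short -> char, using VEC_PACK_TRUNC_EXPR, so MULTI_STEP_CVT is 1
   and INTERM_TYPES holds the intermediate short vector type.  */

void
vect_doc_narrow_example (signed char *out, const int *in, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = (signed char) in[i];
}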