1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "backend.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "rtl.h"
30 #include "ssa.h"
31 #include "alias.h"
32 #include "fold-const.h"
33 #include "stor-layout.h"
34 #include "target.h"
35 #include "gimple-pretty-print.h"
36 #include "internal-fn.h"
37 #include "tree-eh.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-cfg.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "cfgloop.h"
44 #include "tree-ssa-loop.h"
45 #include "tree-scalar-evolution.h"
46 #include "flags.h"
47 #include "insn-config.h"
48 #include "recog.h" /* FIXME: for insn_data */
49 #include "insn-codes.h"
50 #include "optabs-tree.h"
51 #include "diagnostic-core.h"
52 #include "tree-vectorizer.h"
53 #include "cgraph.h"
54 #include "builtins.h"
55
56 /* For lang_hooks.types.type_for_mode. */
57 #include "langhooks.h"
58
59 /* Return the vectorized type for the given statement. */
60
61 tree
62 stmt_vectype (struct _stmt_vec_info *stmt_info)
63 {
64 return STMT_VINFO_VECTYPE (stmt_info);
65 }
66
67 /* Return TRUE iff the given statement is in an inner loop relative to
68 the loop being vectorized. */
69 bool
70 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
71 {
72 gimple *stmt = STMT_VINFO_STMT (stmt_info);
73 basic_block bb = gimple_bb (stmt);
74 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
75 struct loop* loop;
76
77 if (!loop_vinfo)
78 return false;
79
80 loop = LOOP_VINFO_LOOP (loop_vinfo);
81
82 return (bb->loop_father == loop->inner);
83 }
84
85 /* Record the cost of a statement, either by directly informing the
86 target model or by saving it in a vector for later processing.
87 Return a preliminary estimate of the statement's cost. */
88
89 unsigned
90 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
91 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
92 int misalign, enum vect_cost_model_location where)
93 {
94 if (body_cost_vec)
95 {
96 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
97 stmt_info_for_cost si = { count, kind,
98 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
99 misalign };
100 body_cost_vec->safe_push (si);
101 return (unsigned)
102 (builtin_vectorization_cost (kind, vectype, misalign) * count);
103 }
104 else
105 return add_stmt_cost (stmt_info->vinfo->target_cost_data,
106 count, kind, stmt_info, misalign, where);
107 }
108
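/* For illustration (the values here are hypothetical), a caller typically
   records the inside-of-loop cost of NCOPIES vector statements and,
   separately, a prologue cost for materializing an invariant operand:

     inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
                                     stmt_info, 0, vect_body);
     prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
                                        stmt_info, 0, vect_prologue);

   as done by the cost-model helpers later in this file.  */
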
109 /* Return a variable of type ELEM_TYPE[NELEMS]. */
110
111 static tree
112 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
113 {
114 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
115 "vect_array");
116 }
117
118 /* ARRAY is an array of vectors created by create_vector_array.
119 Return an SSA_NAME for the vector in index N. The reference
120 is part of the vectorization of STMT and the vector is associated
121 with scalar destination SCALAR_DEST. */
122
123 static tree
124 read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
125 tree array, unsigned HOST_WIDE_INT n)
126 {
127 tree vect_type, vect, vect_name, array_ref;
128 gimple *new_stmt;
129
130 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
131 vect_type = TREE_TYPE (TREE_TYPE (array));
132 vect = vect_create_destination_var (scalar_dest, vect_type);
133 array_ref = build4 (ARRAY_REF, vect_type, array,
134 build_int_cst (size_type_node, n),
135 NULL_TREE, NULL_TREE);
136
137 new_stmt = gimple_build_assign (vect, array_ref);
138 vect_name = make_ssa_name (vect, new_stmt);
139 gimple_assign_set_lhs (new_stmt, vect_name);
140 vect_finish_stmt_generation (stmt, new_stmt, gsi);
141
142 return vect_name;
143 }
144
145 /* ARRAY is an array of vectors created by create_vector_array.
146 Emit code to store SSA_NAME VECT in index N of the array.
147 The store is part of the vectorization of STMT. */
148
149 static void
150 write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
151 tree array, unsigned HOST_WIDE_INT n)
152 {
153 tree array_ref;
154 gimple *new_stmt;
155
156 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
157 build_int_cst (size_type_node, n),
158 NULL_TREE, NULL_TREE);
159
160 new_stmt = gimple_build_assign (array_ref, vect);
161 vect_finish_stmt_generation (stmt, new_stmt, gsi);
162 }
163
164 /* PTR is a pointer to an array of type TYPE. Return a representation
165 of *PTR. The memory reference replaces those in FIRST_DR
166 (and its group). */
167
168 static tree
169 create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
170 {
171 tree mem_ref, alias_ptr_type;
172
173 alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
174 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
175 /* Arrays have the same alignment as their type. */
176 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
177 return mem_ref;
178 }
179
180 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
181
182 /* Function vect_mark_relevant.
183
184 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
185
186 static void
187 vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
188 enum vect_relevant relevant, bool live_p,
189 bool used_in_pattern)
190 {
191 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
192 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
193 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
194 gimple *pattern_stmt;
195
196 if (dump_enabled_p ())
197 dump_printf_loc (MSG_NOTE, vect_location,
198 "mark relevant %d, live %d.\n", relevant, live_p);
199
200 /* If this stmt is an original stmt in a pattern, we might need to mark its
201 related pattern stmt instead of the original stmt. However, such stmts
202 may have their own uses that are not in any pattern; in such cases the
203 stmt itself should be marked. */
204 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
205 {
206 bool found = false;
207 if (!used_in_pattern)
208 {
209 imm_use_iterator imm_iter;
210 use_operand_p use_p;
211 gimple *use_stmt;
212 tree lhs;
213 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
214 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
215
216 if (is_gimple_assign (stmt))
217 lhs = gimple_assign_lhs (stmt);
218 else
219 lhs = gimple_call_lhs (stmt);
220
221 /* This use is outside the pattern; if LHS has other uses that are
222 pattern uses, we should mark the stmt itself, and not the pattern
223 stmt. */
224 if (lhs && TREE_CODE (lhs) == SSA_NAME)
225 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
226 {
227 if (is_gimple_debug (USE_STMT (use_p)))
228 continue;
229 use_stmt = USE_STMT (use_p);
230
231 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
232 continue;
233
234 if (vinfo_for_stmt (use_stmt)
235 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
236 {
237 found = true;
238 break;
239 }
240 }
241 }
242
243 if (!found)
244 {
245 /* This is the last stmt in a sequence that was detected as a
246 pattern that can potentially be vectorized. Don't mark the stmt
247 as relevant/live because it's not going to be vectorized.
248 Instead mark the pattern-stmt that replaces it. */
249
250 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
251
252 if (dump_enabled_p ())
253 dump_printf_loc (MSG_NOTE, vect_location,
254 "last stmt in pattern. don't mark"
255 " relevant/live.\n");
256 stmt_info = vinfo_for_stmt (pattern_stmt);
257 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
258 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
259 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
260 stmt = pattern_stmt;
261 }
262 }
263
264 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
265 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
266 STMT_VINFO_RELEVANT (stmt_info) = relevant;
267
268 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
269 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
270 {
271 if (dump_enabled_p ())
272 dump_printf_loc (MSG_NOTE, vect_location,
273 "already marked relevant/live.\n");
274 return;
275 }
276
277 worklist->safe_push (stmt);
278 }
279
280
281 /* Function vect_stmt_relevant_p.
282
283 Return true if STMT, in the loop represented by LOOP_VINFO, is
284 "relevant for vectorization".
285
286 A stmt is considered "relevant for vectorization" if:
287 - it has uses outside the loop.
288 - it has vdefs (it alters memory).
289 - it is a control stmt in the loop (except for the exit condition).
290
291 CHECKME: what other side effects would the vectorizer allow? */
292
293 static bool
294 vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
295 enum vect_relevant *relevant, bool *live_p)
296 {
297 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
298 ssa_op_iter op_iter;
299 imm_use_iterator imm_iter;
300 use_operand_p use_p;
301 def_operand_p def_p;
302
303 *relevant = vect_unused_in_scope;
304 *live_p = false;
305
306 /* cond stmt other than loop exit cond. */
307 if (is_ctrl_stmt (stmt)
308 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
309 != loop_exit_ctrl_vec_info_type)
310 *relevant = vect_used_in_scope;
311
312 /* changing memory. */
313 if (gimple_code (stmt) != GIMPLE_PHI)
314 if (gimple_vdef (stmt)
315 && !gimple_clobber_p (stmt))
316 {
317 if (dump_enabled_p ())
318 dump_printf_loc (MSG_NOTE, vect_location,
319 "vec_stmt_relevant_p: stmt has vdefs.\n");
320 *relevant = vect_used_in_scope;
321 }
322
323 /* uses outside the loop. */
324 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
325 {
326 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
327 {
328 basic_block bb = gimple_bb (USE_STMT (use_p));
329 if (!flow_bb_inside_loop_p (loop, bb))
330 {
331 if (dump_enabled_p ())
332 dump_printf_loc (MSG_NOTE, vect_location,
333 "vec_stmt_relevant_p: used out of loop.\n");
334
335 if (is_gimple_debug (USE_STMT (use_p)))
336 continue;
337
338 /* We expect all such uses to be in the loop exit phis
339 (because the loop is in loop-closed SSA form). */
340 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
341 gcc_assert (bb == single_exit (loop)->dest);
342
343 *live_p = true;
344 }
345 }
346 }
347
348 return (*live_p || *relevant);
349 }
350
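/* For illustration, in a scalar loop such as

     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + 1;      <-- S1
         sum = sum + b[i];     <-- S2
       }
     ... = sum;

   S1 is relevant because it has a vdef (it stores to memory), and S2 is
   live because its result is used after the loop, through the loop exit
   phi required by loop-closed SSA form.  The labels S1/S2 are only for
   this sketch.  */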
351
352 /* Function exist_non_indexing_operands_for_use_p
353
354 USE is one of the uses attached to STMT. Check if USE is
355 used in STMT for anything other than indexing an array. */
356
357 static bool
358 exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
359 {
360 tree operand;
361 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
362
363 /* USE corresponds to some operand in STMT. If there is no data
364 reference in STMT, then any operand that corresponds to USE
365 is not indexing an array. */
366 if (!STMT_VINFO_DATA_REF (stmt_info))
367 return true;
368
369 /* STMT has a data_ref. FORNOW this means that it is of one of
370 the following forms:
371 -1- ARRAY_REF = var
372 -2- var = ARRAY_REF
373 (This should have been verified in analyze_data_refs).
374
375 'var' in the second case corresponds to a def, not a use,
376 so USE cannot correspond to any operands that are not used
377 for array indexing.
378
379 Therefore, all we need to check is if STMT falls into the
380 first case, and whether var corresponds to USE. */
381
382 if (!gimple_assign_copy_p (stmt))
383 {
384 if (is_gimple_call (stmt)
385 && gimple_call_internal_p (stmt))
386 switch (gimple_call_internal_fn (stmt))
387 {
388 case IFN_MASK_STORE:
389 operand = gimple_call_arg (stmt, 3);
390 if (operand == use)
391 return true;
392 /* FALLTHRU */
393 case IFN_MASK_LOAD:
394 operand = gimple_call_arg (stmt, 2);
395 if (operand == use)
396 return true;
397 break;
398 default:
399 break;
400 }
401 return false;
402 }
403
404 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
405 return false;
406 operand = gimple_assign_rhs1 (stmt);
407 if (TREE_CODE (operand) != SSA_NAME)
408 return false;
409
410 if (operand == use)
411 return true;
412
413 return false;
414 }
415
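/* For illustration, given the scalar store

     a[i_1] = x_2;

   the use of i_1 only feeds the address computation of the array
   reference, so this predicate returns false for it, while the use of
   x_2 is the stored value and the predicate returns true.  The SSA
   names are hypothetical.  */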
416
417 /*
418 Function process_use.
419
420 Inputs:
421 - a USE in STMT in a loop represented by LOOP_VINFO
422 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
423 that defined USE. This is done by calling mark_relevant and passing it
424 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
425 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
426 be performed.
427
428 Outputs:
429 Generally, LIVE_P and RELEVANT are used to define the liveness and
430 relevance info of the DEF_STMT of this USE:
431 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
432 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
433 Exceptions:
434 - case 1: If USE is used only for address computations (e.g. array indexing),
435 which does not need to be directly vectorized, then the liveness/relevance
436 of the respective DEF_STMT is left unchanged.
437 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
438 skip DEF_STMT because it has already been processed.
439 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
440 be modified accordingly.
441
442 Return true if everything is as expected. Return false otherwise. */
443
444 static bool
445 process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
446 enum vect_relevant relevant, vec<gimple *> *worklist,
447 bool force)
448 {
449 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
450 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
451 stmt_vec_info dstmt_vinfo;
452 basic_block bb, def_bb;
453 gimple *def_stmt;
454 enum vect_def_type dt;
455
456 /* case 1: we are only interested in uses that need to be vectorized. Uses
457 that are used for address computation are not considered relevant. */
458 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
459 return true;
460
461 if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
462 {
463 if (dump_enabled_p ())
464 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
465 "not vectorized: unsupported use in stmt.\n");
466 return false;
467 }
468
469 if (!def_stmt || gimple_nop_p (def_stmt))
470 return true;
471
472 def_bb = gimple_bb (def_stmt);
473 if (!flow_bb_inside_loop_p (loop, def_bb))
474 {
475 if (dump_enabled_p ())
476 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
477 return true;
478 }
479
480 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
481 DEF_STMT must have already been processed, because this should be the
482 only way that STMT, which is a reduction-phi, was put in the worklist,
483 as there should be no other uses for DEF_STMT in the loop. So we just
484 check that everything is as expected, and we are done. */
485 dstmt_vinfo = vinfo_for_stmt (def_stmt);
486 bb = gimple_bb (stmt);
487 if (gimple_code (stmt) == GIMPLE_PHI
488 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
489 && gimple_code (def_stmt) != GIMPLE_PHI
490 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
491 && bb->loop_father == def_bb->loop_father)
492 {
493 if (dump_enabled_p ())
494 dump_printf_loc (MSG_NOTE, vect_location,
495 "reduc-stmt defining reduc-phi in the same nest.\n");
496 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
497 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
498 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
499 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
500 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
501 return true;
502 }
503
504 /* case 3a: outer-loop stmt defining an inner-loop stmt:
505 outer-loop-header-bb:
506 d = def_stmt
507 inner-loop:
508 stmt # use (d)
509 outer-loop-tail-bb:
510 ... */
511 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
512 {
513 if (dump_enabled_p ())
514 dump_printf_loc (MSG_NOTE, vect_location,
515 "outer-loop def-stmt defining inner-loop stmt.\n");
516
517 switch (relevant)
518 {
519 case vect_unused_in_scope:
520 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
521 vect_used_in_scope : vect_unused_in_scope;
522 break;
523
524 case vect_used_in_outer_by_reduction:
525 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
526 relevant = vect_used_by_reduction;
527 break;
528
529 case vect_used_in_outer:
530 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
531 relevant = vect_used_in_scope;
532 break;
533
534 case vect_used_in_scope:
535 break;
536
537 default:
538 gcc_unreachable ();
539 }
540 }
541
542 /* case 3b: inner-loop stmt defining an outer-loop stmt:
543 outer-loop-header-bb:
544 ...
545 inner-loop:
546 d = def_stmt
547 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
548 stmt # use (d) */
549 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
550 {
551 if (dump_enabled_p ())
552 dump_printf_loc (MSG_NOTE, vect_location,
553 "inner-loop def-stmt defining outer-loop stmt.\n");
554
555 switch (relevant)
556 {
557 case vect_unused_in_scope:
558 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
559 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
560 vect_used_in_outer_by_reduction : vect_unused_in_scope;
561 break;
562
563 case vect_used_by_reduction:
564 relevant = vect_used_in_outer_by_reduction;
565 break;
566
567 case vect_used_in_scope:
568 relevant = vect_used_in_outer;
569 break;
570
571 default:
572 gcc_unreachable ();
573 }
574 }
575
576 vect_mark_relevant (worklist, def_stmt, relevant, live_p,
577 is_pattern_stmt_p (stmt_vinfo));
578 return true;
579 }
580
581
582 /* Function vect_mark_stmts_to_be_vectorized.
583
584 Not all stmts in the loop need to be vectorized. For example:
585
586 for i...
587 for j...
588 1. T0 = i + j
589 2. T1 = a[T0]
590
591 3. j = j + 1
592
593 Stmts 1 and 3 do not need to be vectorized, because loop control and
594 addressing of vectorized data-refs are handled differently.
595
596 This pass detects such stmts. */
597
598 bool
599 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
600 {
601 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
602 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
603 unsigned int nbbs = loop->num_nodes;
604 gimple_stmt_iterator si;
605 gimple *stmt;
606 unsigned int i;
607 stmt_vec_info stmt_vinfo;
608 basic_block bb;
609 gimple *phi;
610 bool live_p;
611 enum vect_relevant relevant, tmp_relevant;
612 enum vect_def_type def_type;
613
614 if (dump_enabled_p ())
615 dump_printf_loc (MSG_NOTE, vect_location,
616 "=== vect_mark_stmts_to_be_vectorized ===\n");
617
618 auto_vec<gimple *, 64> worklist;
619
620 /* 1. Init worklist. */
621 for (i = 0; i < nbbs; i++)
622 {
623 bb = bbs[i];
624 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
625 {
626 phi = gsi_stmt (si);
627 if (dump_enabled_p ())
628 {
629 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
630 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
631 }
632
633 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
634 vect_mark_relevant (&worklist, phi, relevant, live_p, false);
635 }
636 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
637 {
638 stmt = gsi_stmt (si);
639 if (dump_enabled_p ())
640 {
641 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
642 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
643 }
644
645 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
646 vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
647 }
648 }
649
650 /* 2. Process_worklist */
651 while (worklist.length () > 0)
652 {
653 use_operand_p use_p;
654 ssa_op_iter iter;
655
656 stmt = worklist.pop ();
657 if (dump_enabled_p ())
658 {
659 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
660 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
661 }
662
663 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
664 (DEF_STMT) as relevant/irrelevant and live/dead according to the
665 liveness and relevance properties of STMT. */
666 stmt_vinfo = vinfo_for_stmt (stmt);
667 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
668 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
669
670 /* Generally, the liveness and relevance properties of STMT are
671 propagated as is to the DEF_STMTs of its USEs:
672 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
673 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
674
675 One exception is when STMT has been identified as defining a reduction
676 variable; in this case we set the liveness/relevance as follows:
677 live_p = false
678 relevant = vect_used_by_reduction
679 This is because we distinguish between two kinds of relevant stmts -
680 those that are used by a reduction computation, and those that are
681 (also) used by a regular computation. This allows us later on to
682 identify stmts that are used solely by a reduction, and therefore the
683 order of the results that they produce does not have to be kept. */
684
685 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
686 tmp_relevant = relevant;
687 switch (def_type)
688 {
689 case vect_reduction_def:
690 switch (tmp_relevant)
691 {
692 case vect_unused_in_scope:
693 relevant = vect_used_by_reduction;
694 break;
695
696 case vect_used_by_reduction:
697 if (gimple_code (stmt) == GIMPLE_PHI)
698 break;
699 /* fall through */
700
701 default:
702 if (dump_enabled_p ())
703 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
704 "unsupported use of reduction.\n");
705 return false;
706 }
707
708 live_p = false;
709 break;
710
711 case vect_nested_cycle:
712 if (tmp_relevant != vect_unused_in_scope
713 && tmp_relevant != vect_used_in_outer_by_reduction
714 && tmp_relevant != vect_used_in_outer)
715 {
716 if (dump_enabled_p ())
717 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
718 "unsupported use of nested cycle.\n");
719
720 return false;
721 }
722
723 live_p = false;
724 break;
725
726 case vect_double_reduction_def:
727 if (tmp_relevant != vect_unused_in_scope
728 && tmp_relevant != vect_used_by_reduction)
729 {
730 if (dump_enabled_p ())
731 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
732 "unsupported use of double reduction.\n");
733
734 return false;
735 }
736
737 live_p = false;
738 break;
739
740 default:
741 break;
742 }
743
744 if (is_pattern_stmt_p (stmt_vinfo))
745 {
746 /* Pattern statements are not inserted into the code, so
747 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
748 have to scan the RHS or function arguments instead. */
749 if (is_gimple_assign (stmt))
750 {
751 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
752 tree op = gimple_assign_rhs1 (stmt);
753
754 i = 1;
755 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
756 {
757 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
758 live_p, relevant, &worklist, false)
759 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
760 live_p, relevant, &worklist, false))
761 return false;
762 i = 2;
763 }
764 for (; i < gimple_num_ops (stmt); i++)
765 {
766 op = gimple_op (stmt, i);
767 if (TREE_CODE (op) == SSA_NAME
768 && !process_use (stmt, op, loop_vinfo, live_p, relevant,
769 &worklist, false))
770 return false;
771 }
772 }
773 else if (is_gimple_call (stmt))
774 {
775 for (i = 0; i < gimple_call_num_args (stmt); i++)
776 {
777 tree arg = gimple_call_arg (stmt, i);
778 if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
779 &worklist, false))
780 return false;
781 }
782 }
783 }
784 else
785 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
786 {
787 tree op = USE_FROM_PTR (use_p);
788 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
789 &worklist, false))
790 return false;
791 }
792
793 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
794 {
795 tree off;
796 tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
797 gcc_assert (decl);
798 if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
799 &worklist, true))
800 return false;
801 }
802 } /* while worklist */
803
804 return true;
805 }
806
807
808 /* Function vect_model_simple_cost.
809
810 Models cost for simple operations, i.e. those that only emit ncopies of a
811 single op. Right now, this does not account for multiple insns that could
812 be generated for the single vector op. We will handle that shortly. */
813
814 void
815 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
816 enum vect_def_type *dt,
817 stmt_vector_for_cost *prologue_cost_vec,
818 stmt_vector_for_cost *body_cost_vec)
819 {
820 int i;
821 int inside_cost = 0, prologue_cost = 0;
822
823 /* The SLP costs were already calculated during SLP tree build. */
824 if (PURE_SLP_STMT (stmt_info))
825 return;
826
827 /* FORNOW: Assuming a maximum of 2 args per stmt. */
828 for (i = 0; i < 2; i++)
829 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
830 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
831 stmt_info, 0, vect_prologue);
832
833 /* Pass the inside-of-loop statements to the target-specific cost model. */
834 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
835 stmt_info, 0, vect_body);
836
837 if (dump_enabled_p ())
838 dump_printf_loc (MSG_NOTE, vect_location,
839 "vect_model_simple_cost: inside_cost = %d, "
840 "prologue_cost = %d .\n", inside_cost, prologue_cost);
841 }
842
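/* For illustration, for a simple vector operation with NCOPIES == 2 and
   one constant operand, the code above records one vector_stmt prologue
   cost (to set up the invariant operand) and two vector_stmt costs in
   the loop body; the actual numbers returned depend on the target's
   cost hooks.  */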
843
844 /* Model cost for type demotion and promotion operations. PWR is normally
845 zero for single-step promotions and demotions. It will be one if
846 two-step promotion/demotion is required, and so on. Each additional
847 step doubles the number of instructions required. */
848
849 static void
850 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
851 enum vect_def_type *dt, int pwr)
852 {
853 int i, tmp;
854 int inside_cost = 0, prologue_cost = 0;
855 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
856 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
857 void *target_cost_data;
858
859 /* The SLP costs were already calculated during SLP tree build. */
860 if (PURE_SLP_STMT (stmt_info))
861 return;
862
863 if (loop_vinfo)
864 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
865 else
866 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
867
868 for (i = 0; i < pwr + 1; i++)
869 {
870 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
871 (i + 1) : i;
872 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
873 vec_promote_demote, stmt_info, 0,
874 vect_body);
875 }
876
877 /* FORNOW: Assuming a maximum of 2 args per stmt. */
878 for (i = 0; i < 2; i++)
879 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
880 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
881 stmt_info, 0, vect_prologue);
882
883 if (dump_enabled_p ())
884 dump_printf_loc (MSG_NOTE, vect_location,
885 "vect_model_promotion_demotion_cost: inside_cost = %d, "
886 "prologue_cost = %d .\n", inside_cost, prologue_cost);
887 }
888
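/* For illustration, assuming vect_pow2 (X) computes 2**X: a two-step
   promotion (PWR == 1) is charged vect_pow2 (1) + vect_pow2 (2)
   == 2 + 4 == 6 vec_promote_demote operations by the loop above, while
   a two-step demotion is charged vect_pow2 (0) + vect_pow2 (1)
   == 1 + 2 == 3.  */
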
889 /* Function vect_cost_group_size
890
891 For grouped load or store, return the group_size only if it is the first
892 load or store of a group, else return 1. This ensures that group size is
893 only returned once per group. */
894
895 static int
896 vect_cost_group_size (stmt_vec_info stmt_info)
897 {
898 gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
899
900 if (first_stmt == STMT_VINFO_STMT (stmt_info))
901 return GROUP_SIZE (stmt_info);
902
903 return 1;
904 }
905
906
907 /* Function vect_model_store_cost
908
909 Models cost for stores. In the case of grouped accesses, one access
910 has the overhead of the grouped access attributed to it. */
911
912 void
913 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
914 bool store_lanes_p, enum vect_def_type dt,
915 slp_tree slp_node,
916 stmt_vector_for_cost *prologue_cost_vec,
917 stmt_vector_for_cost *body_cost_vec)
918 {
919 int group_size;
920 unsigned int inside_cost = 0, prologue_cost = 0;
921 struct data_reference *first_dr;
922 gimple *first_stmt;
923
924 if (dt == vect_constant_def || dt == vect_external_def)
925 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
926 stmt_info, 0, vect_prologue);
927
928 /* Grouped access? */
929 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
930 {
931 if (slp_node)
932 {
933 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
934 group_size = 1;
935 }
936 else
937 {
938 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
939 group_size = vect_cost_group_size (stmt_info);
940 }
941
942 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
943 }
944 /* Not a grouped access. */
945 else
946 {
947 group_size = 1;
948 first_dr = STMT_VINFO_DATA_REF (stmt_info);
949 }
950
951 /* We assume that the cost of a single store-lanes instruction is
952 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
953 access is instead being provided by a permute-and-store operation,
954 include the cost of the permutes. */
955 if (!store_lanes_p && group_size > 1
956 && !STMT_VINFO_STRIDED_P (stmt_info))
957 {
958 /* Uses high and low interleave or shuffle operations for each
959 needed permute. */
960 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
961 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
962 stmt_info, 0, vect_body);
963
964 if (dump_enabled_p ())
965 dump_printf_loc (MSG_NOTE, vect_location,
966 "vect_model_store_cost: strided group_size = %d .\n",
967 group_size);
968 }
969
970 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
971 /* Costs of the stores. */
972 if (STMT_VINFO_STRIDED_P (stmt_info)
973 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
974 {
975 /* N scalar stores plus extracting the elements. */
976 inside_cost += record_stmt_cost (body_cost_vec,
977 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
978 scalar_store, stmt_info, 0, vect_body);
979 }
980 else
981 vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
982
983 if (STMT_VINFO_STRIDED_P (stmt_info))
984 inside_cost += record_stmt_cost (body_cost_vec,
985 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
986 vec_to_scalar, stmt_info, 0, vect_body);
987
988 if (dump_enabled_p ())
989 dump_printf_loc (MSG_NOTE, vect_location,
990 "vect_model_store_cost: inside_cost = %d, "
991 "prologue_cost = %d .\n", inside_cost, prologue_cost);
992 }
993
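/* For illustration, an interleaved store group with GROUP_SIZE == 4
   that does not use store-lanes is charged
   ncopies * ceil_log2 (4) * 4 == ncopies * 8 vec_perm operations by the
   formula above, in addition to the cost of the stores themselves.  */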
994
995 /* Calculate cost of DR's memory access. */
996 void
997 vect_get_store_cost (struct data_reference *dr, int ncopies,
998 unsigned int *inside_cost,
999 stmt_vector_for_cost *body_cost_vec)
1000 {
1001 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1002 gimple *stmt = DR_STMT (dr);
1003 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1004
1005 switch (alignment_support_scheme)
1006 {
1007 case dr_aligned:
1008 {
1009 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1010 vector_store, stmt_info, 0,
1011 vect_body);
1012
1013 if (dump_enabled_p ())
1014 dump_printf_loc (MSG_NOTE, vect_location,
1015 "vect_model_store_cost: aligned.\n");
1016 break;
1017 }
1018
1019 case dr_unaligned_supported:
1020 {
1021 /* Here, we assign an additional cost for the unaligned store. */
1022 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1023 unaligned_store, stmt_info,
1024 DR_MISALIGNMENT (dr), vect_body);
1025 if (dump_enabled_p ())
1026 dump_printf_loc (MSG_NOTE, vect_location,
1027 "vect_model_store_cost: unaligned supported by "
1028 "hardware.\n");
1029 break;
1030 }
1031
1032 case dr_unaligned_unsupported:
1033 {
1034 *inside_cost = VECT_MAX_COST;
1035
1036 if (dump_enabled_p ())
1037 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1038 "vect_model_store_cost: unsupported access.\n");
1039 break;
1040 }
1041
1042 default:
1043 gcc_unreachable ();
1044 }
1045 }
1046
1047
1048 /* Function vect_model_load_cost
1049
1050 Models cost for loads. In the case of grouped accesses, the last access
1051 has the overhead of the grouped access attributed to it. Since unaligned
1052 accesses are supported for loads, we also account for the costs of the
1053 access scheme chosen. */
1054
1055 void
1056 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1057 bool load_lanes_p, slp_tree slp_node,
1058 stmt_vector_for_cost *prologue_cost_vec,
1059 stmt_vector_for_cost *body_cost_vec)
1060 {
1061 int group_size;
1062 gimple *first_stmt;
1063 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
1064 unsigned int inside_cost = 0, prologue_cost = 0;
1065
1066 /* Grouped accesses? */
1067 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1068 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
1069 {
1070 group_size = vect_cost_group_size (stmt_info);
1071 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1072 }
1073 /* Not a grouped access. */
1074 else
1075 {
1076 group_size = 1;
1077 first_dr = dr;
1078 }
1079
1080 /* We assume that the cost of a single load-lanes instruction is
1081 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1082 access is instead being provided by a load-and-permute operation,
1083 include the cost of the permutes. */
1084 if (!load_lanes_p && group_size > 1
1085 && !STMT_VINFO_STRIDED_P (stmt_info))
1086 {
1087 /* Uses even and odd extract operations or shuffle operations
1088 for each needed permute. */
1089 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1090 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1091 stmt_info, 0, vect_body);
1092
1093 if (dump_enabled_p ())
1094 dump_printf_loc (MSG_NOTE, vect_location,
1095 "vect_model_load_cost: strided group_size = %d .\n",
1096 group_size);
1097 }
1098
1099 /* The loads themselves. */
1100 if (STMT_VINFO_STRIDED_P (stmt_info)
1101 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1102 {
1103 /* N scalar loads plus gathering them into a vector. */
1104 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1105 inside_cost += record_stmt_cost (body_cost_vec,
1106 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1107 scalar_load, stmt_info, 0, vect_body);
1108 }
1109 else
1110 vect_get_load_cost (first_dr, ncopies,
1111 ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1112 || group_size > 1 || slp_node),
1113 &inside_cost, &prologue_cost,
1114 prologue_cost_vec, body_cost_vec, true);
1115 if (STMT_VINFO_STRIDED_P (stmt_info))
1116 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1117 stmt_info, 0, vect_body);
1118
1119 if (dump_enabled_p ())
1120 dump_printf_loc (MSG_NOTE, vect_location,
1121 "vect_model_load_cost: inside_cost = %d, "
1122 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1123 }
1124
1125
1126 /* Calculate cost of DR's memory access. */
1127 void
1128 vect_get_load_cost (struct data_reference *dr, int ncopies,
1129 bool add_realign_cost, unsigned int *inside_cost,
1130 unsigned int *prologue_cost,
1131 stmt_vector_for_cost *prologue_cost_vec,
1132 stmt_vector_for_cost *body_cost_vec,
1133 bool record_prologue_costs)
1134 {
1135 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1136 gimple *stmt = DR_STMT (dr);
1137 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1138
1139 switch (alignment_support_scheme)
1140 {
1141 case dr_aligned:
1142 {
1143 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1144 stmt_info, 0, vect_body);
1145
1146 if (dump_enabled_p ())
1147 dump_printf_loc (MSG_NOTE, vect_location,
1148 "vect_model_load_cost: aligned.\n");
1149
1150 break;
1151 }
1152 case dr_unaligned_supported:
1153 {
1154 /* Here, we assign an additional cost for the unaligned load. */
1155 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1156 unaligned_load, stmt_info,
1157 DR_MISALIGNMENT (dr), vect_body);
1158
1159 if (dump_enabled_p ())
1160 dump_printf_loc (MSG_NOTE, vect_location,
1161 "vect_model_load_cost: unaligned supported by "
1162 "hardware.\n");
1163
1164 break;
1165 }
1166 case dr_explicit_realign:
1167 {
1168 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1169 vector_load, stmt_info, 0, vect_body);
1170 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1171 vec_perm, stmt_info, 0, vect_body);
1172
1173 /* FIXME: If the misalignment remains fixed across the iterations of
1174 the containing loop, the following cost should be added to the
1175 prologue costs. */
1176 if (targetm.vectorize.builtin_mask_for_load)
1177 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1178 stmt_info, 0, vect_body);
1179
1180 if (dump_enabled_p ())
1181 dump_printf_loc (MSG_NOTE, vect_location,
1182 "vect_model_load_cost: explicit realign\n");
1183
1184 break;
1185 }
1186 case dr_explicit_realign_optimized:
1187 {
1188 if (dump_enabled_p ())
1189 dump_printf_loc (MSG_NOTE, vect_location,
1190 "vect_model_load_cost: unaligned software "
1191 "pipelined.\n");
1192
1193 /* Unaligned software pipeline has a load of an address, an initial
1194 load, and possibly a mask operation to "prime" the loop. However,
1195 if this is an access in a group of loads that together provide a
1196 grouped access, then the above cost should only be considered for one
1197 access in the group. Inside the loop, there is a load op
1198 and a realignment op. */
1199
1200 if (add_realign_cost && record_prologue_costs)
1201 {
1202 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1203 vector_stmt, stmt_info,
1204 0, vect_prologue);
1205 if (targetm.vectorize.builtin_mask_for_load)
1206 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1207 vector_stmt, stmt_info,
1208 0, vect_prologue);
1209 }
1210
1211 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1212 stmt_info, 0, vect_body);
1213 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1214 stmt_info, 0, vect_body);
1215
1216 if (dump_enabled_p ())
1217 dump_printf_loc (MSG_NOTE, vect_location,
1218 "vect_model_load_cost: explicit realign optimized"
1219 "\n");
1220
1221 break;
1222 }
1223
1224 case dr_unaligned_unsupported:
1225 {
1226 *inside_cost = VECT_MAX_COST;
1227
1228 if (dump_enabled_p ())
1229 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1230 "vect_model_load_cost: unsupported access.\n");
1231 break;
1232 }
1233
1234 default:
1235 gcc_unreachable ();
1236 }
1237 }
1238
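/* For illustration, the dr_explicit_realign case above charges each copy
   two vector_load operations plus one vec_perm, and a single extra
   vector_stmt when the target provides builtin_mask_for_load; the
   software-pipelined dr_explicit_realign_optimized case charges one
   vector_load and one vec_perm per copy in the body, with the remaining
   setup accounted as prologue cost when ADD_REALIGN_COST is set.  */
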
1239 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1240 the loop preheader for the vectorized stmt STMT. */
1241
1242 static void
1243 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1244 {
1245 if (gsi)
1246 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1247 else
1248 {
1249 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1250 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1251
1252 if (loop_vinfo)
1253 {
1254 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1255 basic_block new_bb;
1256 edge pe;
1257
1258 if (nested_in_vect_loop_p (loop, stmt))
1259 loop = loop->inner;
1260
1261 pe = loop_preheader_edge (loop);
1262 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1263 gcc_assert (!new_bb);
1264 }
1265 else
1266 {
1267 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1268 basic_block bb;
1269 gimple_stmt_iterator gsi_bb_start;
1270
1271 gcc_assert (bb_vinfo);
1272 bb = BB_VINFO_BB (bb_vinfo);
1273 gsi_bb_start = gsi_after_labels (bb);
1274 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1275 }
1276 }
1277
1278 if (dump_enabled_p ())
1279 {
1280 dump_printf_loc (MSG_NOTE, vect_location,
1281 "created new init_stmt: ");
1282 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1283 }
1284 }
1285
1286 /* Function vect_init_vector.
1287
1288 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1289 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1290 a vector type, a vector with all elements equal to VAL is created first.
1291 Place the initialization at GSI if it is not NULL. Otherwise, place the
1292 initialization at the loop preheader.
1293 Return the DEF of INIT_STMT.
1294 It will be used in the vectorization of STMT. */
1295
1296 tree
1297 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1298 {
1299 gimple *init_stmt;
1300 tree new_temp;
1301
1302 if (TREE_CODE (type) == VECTOR_TYPE
1303 && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
1304 {
1305 if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1306 {
1307 if (CONSTANT_CLASS_P (val))
1308 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
1309 else
1310 {
1311 new_temp = make_ssa_name (TREE_TYPE (type));
1312 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1313 vect_init_vector_1 (stmt, init_stmt, gsi);
1314 val = new_temp;
1315 }
1316 }
1317 val = build_vector_from_val (type, val);
1318 }
1319
1320 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1321 init_stmt = gimple_build_assign (new_temp, val);
1322 vect_init_vector_1 (stmt, init_stmt, gsi);
1323 return new_temp;
1324 }
1325
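/* For illustration, assuming a 4-element integer vector type, a call
   like

     vect_init_vector (stmt, build_int_cst (integer_type_node, 7),
                       v4si_type, NULL)

   emits something along the lines of

     cst_1 = { 7, 7, 7, 7 };

   in the loop preheader and returns cst_1.  The names v4si_type and
   cst_1 are placeholders for this sketch.  */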
1326
1327 /* Function vect_get_vec_def_for_operand.
1328
1329 OP is an operand in STMT. This function returns a (vector) def that will be
1330 used in the vectorized stmt for STMT.
1331
1332 In the case that OP is an SSA_NAME that is defined in the loop,
1333 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1334
1335 In case OP is an invariant or constant, a new stmt that creates a vector def
1336 needs to be introduced. */
1337
1338 tree
1339 vect_get_vec_def_for_operand (tree op, gimple *stmt)
1340 {
1341 tree vec_oprnd;
1342 gimple *vec_stmt;
1343 gimple *def_stmt;
1344 stmt_vec_info def_stmt_info = NULL;
1345 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1346 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1347 enum vect_def_type dt;
1348 bool is_simple_use;
1349 tree vector_type;
1350
1351 if (dump_enabled_p ())
1352 {
1353 dump_printf_loc (MSG_NOTE, vect_location,
1354 "vect_get_vec_def_for_operand: ");
1355 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1356 dump_printf (MSG_NOTE, "\n");
1357 }
1358
1359 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1360 gcc_assert (is_simple_use);
1361 if (dump_enabled_p ())
1362 {
1363 int loc_printed = 0;
1364 if (def_stmt)
1365 {
1366 if (loc_printed)
1367 dump_printf (MSG_NOTE, " def_stmt = ");
1368 else
1369 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1370 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1371 }
1372 }
1373
1374 switch (dt)
1375 {
1376 /* operand is a constant or a loop invariant. */
1377 case vect_constant_def:
1378 case vect_external_def:
1379 {
1380 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1381 gcc_assert (vector_type);
1382 return vect_init_vector (stmt, op, vector_type, NULL);
1383 }
1384
1385 /* operand is defined inside the loop. */
1386 case vect_internal_def:
1387 {
1388 /* Get the def from the vectorized stmt. */
1389 def_stmt_info = vinfo_for_stmt (def_stmt);
1390
1391 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1392 /* Get vectorized pattern statement. */
1393 if (!vec_stmt
1394 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1395 && !STMT_VINFO_RELEVANT (def_stmt_info))
1396 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1397 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1398 gcc_assert (vec_stmt);
1399 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1400 vec_oprnd = PHI_RESULT (vec_stmt);
1401 else if (is_gimple_call (vec_stmt))
1402 vec_oprnd = gimple_call_lhs (vec_stmt);
1403 else
1404 vec_oprnd = gimple_assign_lhs (vec_stmt);
1405 return vec_oprnd;
1406 }
1407
1408 /* operand is defined by a loop header phi - reduction */
1409 case vect_reduction_def:
1410 case vect_double_reduction_def:
1411 case vect_nested_cycle:
1412 /* Code should use get_initial_def_for_reduction. */
1413 gcc_unreachable ();
1414
1415 /* operand is defined by loop-header phi - induction. */
1416 case vect_induction_def:
1417 {
1418 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1419
1420 /* Get the def from the vectorized stmt. */
1421 def_stmt_info = vinfo_for_stmt (def_stmt);
1422 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1423 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1424 vec_oprnd = PHI_RESULT (vec_stmt);
1425 else
1426 vec_oprnd = gimple_get_lhs (vec_stmt);
1427 return vec_oprnd;
1428 }
1429
1430 default:
1431 gcc_unreachable ();
1432 }
1433 }
1434
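/* For illustration, if OP is the constant 5 and STMT operates on a
   4-element integer vector type, the vect_constant_def case above hands
   back the def of a preheader statement building { 5, 5, 5, 5 }; for an
   OP defined by another vectorized stmt inside the loop, the function
   simply returns the lhs (or PHI result) of that definition's
   STMT_VINFO_VEC_STMT.  */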
1435
1436 /* Function vect_get_vec_def_for_stmt_copy
1437
1438 Return a vector-def for an operand. This function is used when the
1439 vectorized stmt to be created (by the caller to this function) is a "copy"
1440 created in case the vectorized result cannot fit in one vector, and several
1441 copies of the vector-stmt are required. In this case the vector-def is
1442 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1443 of the stmt that defines VEC_OPRND.
1444 DT is the type of the vector def VEC_OPRND.
1445
1446 Context:
1447 In case the vectorization factor (VF) is bigger than the number
1448 of elements that can fit in a vectype (nunits), we have to generate
1449 more than one vector stmt to vectorize the scalar stmt. This situation
1450 arises when there are multiple data-types operated upon in the loop; the
1451 smallest data-type determines the VF, and as a result, when vectorizing
1452 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1453 vector stmt (each computing a vector of 'nunits' results, and together
1454 computing 'VF' results in each iteration). This function is called when
1455 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1456 which VF=16 and nunits=4, so the number of copies required is 4):
1457
1458 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1459
1460 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1461 VS1.1: vx.1 = memref1 VS1.2
1462 VS1.2: vx.2 = memref2 VS1.3
1463 VS1.3: vx.3 = memref3
1464
1465 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1466 VSnew.1: vz1 = vx.1 + ... VSnew.2
1467 VSnew.2: vz2 = vx.2 + ... VSnew.3
1468 VSnew.3: vz3 = vx.3 + ...
1469
1470 The vectorization of S1 is explained in vectorizable_load.
1471 The vectorization of S2:
1472 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1473 the function 'vect_get_vec_def_for_operand' is called to
1474 get the relevant vector-def for each operand of S2. For operand x it
1475 returns the vector-def 'vx.0'.
1476
1477 To create the remaining copies of the vector-stmt (VSnew.j), this
1478 function is called to get the relevant vector-def for each operand. It is
1479 obtained from the respective VS1.j stmt, which is recorded in the
1480 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1481
1482 For example, to obtain the vector-def 'vx.1' in order to create the
1483 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1484 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1485 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1486 and return its def ('vx.1').
1487 Overall, to create the above sequence this function will be called 3 times:
1488 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1489 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1490 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1491
1492 tree
1493 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1494 {
1495 gimple *vec_stmt_for_operand;
1496 stmt_vec_info def_stmt_info;
1497
1498 /* Do nothing; can reuse same def. */
1499 if (dt == vect_external_def || dt == vect_constant_def )
1500 return vec_oprnd;
1501
1502 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1503 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1504 gcc_assert (def_stmt_info);
1505 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1506 gcc_assert (vec_stmt_for_operand);
1507 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1508 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1509 else
1510 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1511 return vec_oprnd;
1512 }
1513
1514
1515 /* Get vectorized definitions for the operands to create a copy of an original
1516 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1517
1518 static void
1519 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1520 vec<tree> *vec_oprnds0,
1521 vec<tree> *vec_oprnds1)
1522 {
1523 tree vec_oprnd = vec_oprnds0->pop ();
1524
1525 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1526 vec_oprnds0->quick_push (vec_oprnd);
1527
1528 if (vec_oprnds1 && vec_oprnds1->length ())
1529 {
1530 vec_oprnd = vec_oprnds1->pop ();
1531 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1532 vec_oprnds1->quick_push (vec_oprnd);
1533 }
1534 }
1535
1536
1537 /* Get vectorized definitions for OP0 and OP1.
1538 REDUC_INDEX is the index of reduction operand in case of reduction,
1539 and -1 otherwise. */
1540
1541 void
1542 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1543 vec<tree> *vec_oprnds0,
1544 vec<tree> *vec_oprnds1,
1545 slp_tree slp_node, int reduc_index)
1546 {
1547 if (slp_node)
1548 {
1549 int nops = (op1 == NULL_TREE) ? 1 : 2;
1550 auto_vec<tree> ops (nops);
1551 auto_vec<vec<tree> > vec_defs (nops);
1552
1553 ops.quick_push (op0);
1554 if (op1)
1555 ops.quick_push (op1);
1556
1557 vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);
1558
1559 *vec_oprnds0 = vec_defs[0];
1560 if (op1)
1561 *vec_oprnds1 = vec_defs[1];
1562 }
1563 else
1564 {
1565 tree vec_oprnd;
1566
1567 vec_oprnds0->create (1);
1568 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1569 vec_oprnds0->quick_push (vec_oprnd);
1570
1571 if (op1)
1572 {
1573 vec_oprnds1->create (1);
1574 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1575 vec_oprnds1->quick_push (vec_oprnd);
1576 }
1577 }
1578 }
1579
1580
1581 /* Function vect_finish_stmt_generation.
1582
1583 Insert a new stmt. */
1584
1585 void
1586 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1587 gimple_stmt_iterator *gsi)
1588 {
1589 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1590 vec_info *vinfo = stmt_info->vinfo;
1591
1592 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1593
1594 if (!gsi_end_p (*gsi)
1595 && gimple_has_mem_ops (vec_stmt))
1596 {
1597 gimple *at_stmt = gsi_stmt (*gsi);
1598 tree vuse = gimple_vuse (at_stmt);
1599 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1600 {
1601 tree vdef = gimple_vdef (at_stmt);
1602 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1603 /* If we have an SSA vuse and insert a store, update virtual
1604 SSA form to avoid triggering the renamer. Do so only
1605 if we can easily see all uses - which is what almost always
1606 happens with the way vectorized stmts are inserted. */
1607 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1608 && ((is_gimple_assign (vec_stmt)
1609 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1610 || (is_gimple_call (vec_stmt)
1611 && !(gimple_call_flags (vec_stmt)
1612 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1613 {
1614 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1615 gimple_set_vdef (vec_stmt, new_vdef);
1616 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1617 }
1618 }
1619 }
1620 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1621
1622 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1623
1624 if (dump_enabled_p ())
1625 {
1626 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1627 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1628 }
1629
1630 gimple_set_location (vec_stmt, gimple_location (stmt));
1631
1632 /* While EH edges will generally prevent vectorization, stmt might
1633 e.g. be in a must-not-throw region. Ensure newly created stmts
1634 that could throw are part of the same region. */
1635 int lp_nr = lookup_stmt_eh_lp (stmt);
1636 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1637 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1638 }
1639
1640 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1641 a function declaration if the target has a vectorized version
1642 of the function, or NULL_TREE if the function cannot be vectorized. */
1643
1644 tree
1645 vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
1646 {
1647 tree fndecl = gimple_call_fndecl (call);
1648
1649 /* We only handle functions that do not read or clobber memory -- i.e.
1650 const or novops ones. */
1651 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1652 return NULL_TREE;
1653
1654 if (!fndecl
1655 || TREE_CODE (fndecl) != FUNCTION_DECL
1656 || !DECL_BUILT_IN (fndecl))
1657 return NULL_TREE;
1658
1659 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1660 vectype_in);
1661 }
1662
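/* For illustration, for a call such as

     y_1 = __builtin_sqrtf (x_2);

   compiled so that the call is known not to clobber memory (e.g. with
   -fno-math-errno), the hook above may return the decl of a
   target-provided vectorized sqrt variant for the given vector types,
   or NULL_TREE when no such variant exists; the outcome is entirely
   target dependent.  */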
1663
1664 static tree permute_vec_elements (tree, tree, tree, gimple *,
1665 gimple_stmt_iterator *);
1666
1667
1668 /* Function vectorizable_mask_load_store.
1669
1670 Check if STMT performs a conditional load or store that can be vectorized.
1671 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1672 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1673 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1674
1675 static bool
1676 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
1677 gimple **vec_stmt, slp_tree slp_node)
1678 {
1679 tree vec_dest = NULL;
1680 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1681 stmt_vec_info prev_stmt_info;
1682 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1683 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1684 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
1685 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1686 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1687 tree elem_type;
1688 gimple *new_stmt;
1689 tree dummy;
1690 tree dataref_ptr = NULL_TREE;
1691 gimple *ptr_incr;
1692 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1693 int ncopies;
1694 int i, j;
1695 bool inv_p;
1696 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
1697 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
1698 int gather_scale = 1;
1699 enum vect_def_type gather_dt = vect_unknown_def_type;
1700 bool is_store;
1701 tree mask;
1702 gimple *def_stmt;
1703 enum vect_def_type dt;
1704
1705 if (slp_node != NULL)
1706 return false;
1707
1708 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1709 gcc_assert (ncopies >= 1);
1710
1711 is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
1712 mask = gimple_call_arg (stmt, 2);
1713 if (TYPE_PRECISION (TREE_TYPE (mask))
1714 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))))
1715 return false;
1716
1717 /* FORNOW. This restriction should be relaxed. */
1718 if (nested_in_vect_loop && ncopies > 1)
1719 {
1720 if (dump_enabled_p ())
1721 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1722 "multiple types in nested loop.");
1723 return false;
1724 }
1725
1726 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1727 return false;
1728
1729 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1730 return false;
1731
1732 if (!STMT_VINFO_DATA_REF (stmt_info))
1733 return false;
1734
1735 elem_type = TREE_TYPE (vectype);
1736
1737 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1738 return false;
1739
1740 if (STMT_VINFO_STRIDED_P (stmt_info))
1741 return false;
1742
1743 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1744 {
1745 gimple *def_stmt;
1746 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
1747 &gather_off, &gather_scale);
1748 gcc_assert (gather_decl);
1749 if (!vect_is_simple_use (gather_off, loop_vinfo, &def_stmt, &gather_dt,
1750 &gather_off_vectype))
1751 {
1752 if (dump_enabled_p ())
1753 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1754 "gather index use not simple.");
1755 return false;
1756 }
1757
1758 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1759 tree masktype
1760 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1761 if (TREE_CODE (masktype) == INTEGER_TYPE)
1762 {
1763 if (dump_enabled_p ())
1764 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1765 "masked gather with integer mask not supported.");
1766 return false;
1767 }
1768 }
1769 else if (tree_int_cst_compare (nested_in_vect_loop
1770 ? STMT_VINFO_DR_STEP (stmt_info)
1771 : DR_STEP (dr), size_zero_node) <= 0)
1772 return false;
1773 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1774 || !can_vec_mask_load_store_p (TYPE_MODE (vectype), !is_store))
1775 return false;
1776
1777 if (TREE_CODE (mask) != SSA_NAME)
1778 return false;
1779
1780 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt))
1781 return false;
1782
1783 if (is_store)
1784 {
1785 tree rhs = gimple_call_arg (stmt, 3);
1786 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt))
1787 return false;
1788 }
1789
1790 if (!vec_stmt) /* transformation not required. */
1791 {
1792 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1793 if (is_store)
1794 vect_model_store_cost (stmt_info, ncopies, false, dt,
1795 NULL, NULL, NULL);
1796 else
1797 vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
1798 return true;
1799 }
1800
1801 /** Transform. **/
1802
1803 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1804 {
1805 tree vec_oprnd0 = NULL_TREE, op;
1806 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1807 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
1808 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
1809 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
1810 tree mask_perm_mask = NULL_TREE;
1811 edge pe = loop_preheader_edge (loop);
1812 gimple_seq seq;
1813 basic_block new_bb;
1814 enum { NARROW, NONE, WIDEN } modifier;
1815 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
1816
1817 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
1818 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1819 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1820 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1821 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1822 scaletype = TREE_VALUE (arglist);
1823 gcc_checking_assert (types_compatible_p (srctype, rettype)
1824 && types_compatible_p (srctype, masktype));
1825
1826 if (nunits == gather_off_nunits)
1827 modifier = NONE;
1828 else if (nunits == gather_off_nunits / 2)
1829 {
1830 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1831 modifier = WIDEN;
1832
1833 for (i = 0; i < gather_off_nunits; ++i)
1834 sel[i] = i | nunits;
1835
1836 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
1837 }
1838 else if (nunits == gather_off_nunits * 2)
1839 {
1840 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1841 modifier = NARROW;
1842
1843 for (i = 0; i < nunits; ++i)
1844 sel[i] = i < gather_off_nunits
1845 ? i : i + nunits - gather_off_nunits;
1846
1847 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
1848 ncopies *= 2;
1849 for (i = 0; i < nunits; ++i)
1850 sel[i] = i | gather_off_nunits;
1851 mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
1852 }
1853 else
1854 gcc_unreachable ();
1855
1856 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1857
1858 ptr = fold_convert (ptrtype, gather_base);
1859 if (!is_gimple_min_invariant (ptr))
1860 {
1861 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1862 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1863 gcc_assert (!new_bb);
1864 }
1865
1866 scale = build_int_cst (scaletype, gather_scale);
1867
1868 prev_stmt_info = NULL;
1869 for (j = 0; j < ncopies; ++j)
1870 {
1871 if (modifier == WIDEN && (j & 1))
1872 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1873 perm_mask, stmt, gsi);
1874 else if (j == 0)
1875 op = vec_oprnd0
1876 = vect_get_vec_def_for_operand (gather_off, stmt);
1877 else
1878 op = vec_oprnd0
1879 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1880
1881 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1882 {
1883 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1884 == TYPE_VECTOR_SUBPARTS (idxtype));
1885 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
1886 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1887 new_stmt
1888 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1889 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1890 op = var;
1891 }
1892
1893 if (mask_perm_mask && (j & 1))
1894 mask_op = permute_vec_elements (mask_op, mask_op,
1895 mask_perm_mask, stmt, gsi);
1896 else
1897 {
1898 if (j == 0)
1899 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
1900 else
1901 {
1902 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
1903 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1904 }
1905
1906 mask_op = vec_mask;
1907 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
1908 {
1909 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
1910 == TYPE_VECTOR_SUBPARTS (masktype));
1911 var = vect_get_new_ssa_name (masktype, vect_simple_var);
1912 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
1913 new_stmt
1914 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
1915 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1916 mask_op = var;
1917 }
1918 }
1919
1920 new_stmt
1921 = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
1922 scale);
1923
1924 if (!useless_type_conversion_p (vectype, rettype))
1925 {
1926 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
1927 == TYPE_VECTOR_SUBPARTS (rettype));
1928 op = vect_get_new_ssa_name (rettype, vect_simple_var);
1929 gimple_call_set_lhs (new_stmt, op);
1930 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1931 var = make_ssa_name (vec_dest);
1932 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
1933 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1934 }
1935 else
1936 {
1937 var = make_ssa_name (vec_dest, new_stmt);
1938 gimple_call_set_lhs (new_stmt, var);
1939 }
1940
1941 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1942
1943 if (modifier == NARROW)
1944 {
1945 if ((j & 1) == 0)
1946 {
1947 prev_res = var;
1948 continue;
1949 }
1950 var = permute_vec_elements (prev_res, var,
1951 perm_mask, stmt, gsi);
1952 new_stmt = SSA_NAME_DEF_STMT (var);
1953 }
1954
1955 if (prev_stmt_info == NULL)
1956 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1957 else
1958 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1959 prev_stmt_info = vinfo_for_stmt (new_stmt);
1960 }
1961
1962 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
1963 from the IL. */
1964 tree lhs = gimple_call_lhs (stmt);
1965 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
1966 set_vinfo_for_stmt (new_stmt, stmt_info);
1967 set_vinfo_for_stmt (stmt, NULL);
1968 STMT_VINFO_STMT (stmt_info) = new_stmt;
1969 gsi_replace (gsi, new_stmt, true);
1970 return true;
1971 }
1972 else if (is_store)
1973 {
1974 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
1975 prev_stmt_info = NULL;
1976 for (i = 0; i < ncopies; i++)
1977 {
1978 unsigned align, misalign;
1979
1980 if (i == 0)
1981 {
1982 tree rhs = gimple_call_arg (stmt, 3);
1983 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
1984 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
1985 /* We should have caught mismatched types earlier.  */
1986 gcc_assert (useless_type_conversion_p (vectype,
1987 TREE_TYPE (vec_rhs)));
1988 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
1989 NULL_TREE, &dummy, gsi,
1990 &ptr_incr, false, &inv_p);
1991 gcc_assert (!inv_p);
1992 }
1993 else
1994 {
1995 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
1996 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
1997 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
1998 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1999 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2000 TYPE_SIZE_UNIT (vectype));
2001 }
2002
2003 align = TYPE_ALIGN_UNIT (vectype);
2004 if (aligned_access_p (dr))
2005 misalign = 0;
2006 else if (DR_MISALIGNMENT (dr) == -1)
2007 {
2008 align = TYPE_ALIGN_UNIT (elem_type);
2009 misalign = 0;
2010 }
2011 else
2012 misalign = DR_MISALIGNMENT (dr);
2013 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2014 misalign);
2015 new_stmt
2016 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2017 gimple_call_arg (stmt, 1),
2018 vec_mask, vec_rhs);
2019 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2020 if (i == 0)
2021 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2022 else
2023 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2024 prev_stmt_info = vinfo_for_stmt (new_stmt);
2025 }
2026 }
2027 else
2028 {
2029 tree vec_mask = NULL_TREE;
2030 prev_stmt_info = NULL;
2031 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2032 for (i = 0; i < ncopies; i++)
2033 {
2034 unsigned align, misalign;
2035
2036 if (i == 0)
2037 {
2038 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2039 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2040 NULL_TREE, &dummy, gsi,
2041 &ptr_incr, false, &inv_p);
2042 gcc_assert (!inv_p);
2043 }
2044 else
2045 {
2046 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2047 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2048 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2049 TYPE_SIZE_UNIT (vectype));
2050 }
2051
2052 align = TYPE_ALIGN_UNIT (vectype);
2053 if (aligned_access_p (dr))
2054 misalign = 0;
2055 else if (DR_MISALIGNMENT (dr) == -1)
2056 {
2057 align = TYPE_ALIGN_UNIT (elem_type);
2058 misalign = 0;
2059 }
2060 else
2061 misalign = DR_MISALIGNMENT (dr);
2062 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2063 misalign);
2064 new_stmt
2065 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2066 gimple_call_arg (stmt, 1),
2067 vec_mask);
2068 gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
2069 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2070 if (i == 0)
2071 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2072 else
2073 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2074 prev_stmt_info = vinfo_for_stmt (new_stmt);
2075 }
2076 }
2077
2078 if (!is_store)
2079 {
2080 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2081 from the IL. */
2082 tree lhs = gimple_call_lhs (stmt);
2083 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2084 set_vinfo_for_stmt (new_stmt, stmt_info);
2085 set_vinfo_for_stmt (stmt, NULL);
2086 STMT_VINFO_STMT (stmt_info) = new_stmt;
2087 gsi_replace (gsi, new_stmt, true);
2088 }
2089
2090 return true;
2091 }
2092
2093
2094 /* Function vectorizable_call.
2095
2096 Check if GS performs a function call that can be vectorized.
2097 If VEC_STMT is also passed, vectorize GS: create a vectorized
2098 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2099 Return FALSE if GS is not a vectorizable call, TRUE otherwise. */
2100
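/* Editorial illustration (not part of the original sources): a loop such as

     for (i = 0; i < n; i++)
       x[i] = copysignf (y[i], z[i]);

   is handled here only when the target advertises a vectorized form of the
   builtin (see vectorizable_function above); the scalar call is then
   replaced by calls operating on whole vectors, with the NARROW/WIDEN
   modifiers covering mismatched input and output vector lane counts.
   The array names above are hypothetical.  */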
2101 static bool
2102 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2103 slp_tree slp_node)
2104 {
2105 gcall *stmt;
2106 tree vec_dest;
2107 tree scalar_dest;
2108 tree op, type;
2109 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2110 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2111 tree vectype_out, vectype_in;
2112 int nunits_in;
2113 int nunits_out;
2114 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2115 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2116 vec_info *vinfo = stmt_info->vinfo;
2117 tree fndecl, new_temp, rhs_type;
2118 gimple *def_stmt;
2119 enum vect_def_type dt[3]
2120 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2121 gimple *new_stmt = NULL;
2122 int ncopies, j;
2123 vec<tree> vargs = vNULL;
2124 enum { NARROW, NONE, WIDEN } modifier;
2125 size_t i, nargs;
2126 tree lhs;
2127
2128 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2129 return false;
2130
2131 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2132 return false;
2133
2134 /* Is GS a vectorizable call? */
2135 stmt = dyn_cast <gcall *> (gs);
2136 if (!stmt)
2137 return false;
2138
2139 if (gimple_call_internal_p (stmt)
2140 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2141 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2142 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2143 slp_node);
2144
2145 if (gimple_call_lhs (stmt) == NULL_TREE
2146 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2147 return false;
2148
2149 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2150
2151 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2152
2153 /* Process function arguments. */
2154 rhs_type = NULL_TREE;
2155 vectype_in = NULL_TREE;
2156 nargs = gimple_call_num_args (stmt);
2157
2158 /* Bail out if the function has more than three arguments; we do not have
2159 interesting builtin functions to vectorize with more than two arguments
2160 except for fma.  A call with no arguments is not handled either. */
2161 if (nargs == 0 || nargs > 3)
2162 return false;
2163
2164 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2165 if (gimple_call_internal_p (stmt)
2166 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2167 {
2168 nargs = 0;
2169 rhs_type = unsigned_type_node;
2170 }
2171
2172 for (i = 0; i < nargs; i++)
2173 {
2174 tree opvectype;
2175
2176 op = gimple_call_arg (stmt, i);
2177
2178 /* We can only handle calls with arguments of the same type. */
2179 if (rhs_type
2180 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2181 {
2182 if (dump_enabled_p ())
2183 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2184 "argument types differ.\n");
2185 return false;
2186 }
2187 if (!rhs_type)
2188 rhs_type = TREE_TYPE (op);
2189
2190 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2191 {
2192 if (dump_enabled_p ())
2193 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2194 "use not simple.\n");
2195 return false;
2196 }
2197
2198 if (!vectype_in)
2199 vectype_in = opvectype;
2200 else if (opvectype
2201 && opvectype != vectype_in)
2202 {
2203 if (dump_enabled_p ())
2204 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2205 "argument vector types differ.\n");
2206 return false;
2207 }
2208 }
2209 /* If all arguments are external or constant defs, use a vector type with
2210 the same size as the output vector type. */
2211 if (!vectype_in)
2212 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2213 if (vec_stmt)
2214 gcc_assert (vectype_in);
2215 if (!vectype_in)
2216 {
2217 if (dump_enabled_p ())
2218 {
2219 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2220 "no vectype for scalar type ");
2221 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2222 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2223 }
2224
2225 return false;
2226 }
2227
2228 /* FORNOW */
2229 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2230 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2231 if (nunits_in == nunits_out / 2)
2232 modifier = NARROW;
2233 else if (nunits_out == nunits_in)
2234 modifier = NONE;
2235 else if (nunits_out == nunits_in / 2)
2236 modifier = WIDEN;
2237 else
2238 return false;
2239
2240 /* For now, we only vectorize functions if a target specific builtin
2241 is available. TODO -- in some cases, it might be profitable to
2242 insert the calls for pieces of the vector, in order to be able
2243 to vectorize other operations in the loop. */
2244 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
2245 if (fndecl == NULL_TREE)
2246 {
2247 if (gimple_call_internal_p (stmt)
2248 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
2249 && !slp_node
2250 && loop_vinfo
2251 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2252 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2253 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2254 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2255 {
2256 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2257 { 0, 1, 2, ... vf - 1 } vector. */
2258 gcc_assert (nargs == 0);
2259 }
2260 else
2261 {
2262 if (dump_enabled_p ())
2263 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2264 "function is not vectorizable.\n");
2265 return false;
2266 }
2267 }
2268
2269 gcc_assert (!gimple_vuse (stmt));
2270
2271 if (slp_node || PURE_SLP_STMT (stmt_info))
2272 ncopies = 1;
2273 else if (modifier == NARROW)
2274 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2275 else
2276 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2277
2278 /* Sanity check: make sure that at least one copy of the vectorized stmt
2279 needs to be generated. */
2280 gcc_assert (ncopies >= 1);
2281
2282 if (!vec_stmt) /* transformation not required. */
2283 {
2284 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2285 if (dump_enabled_p ())
2286 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2287 "\n");
2288 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2289 return true;
2290 }
2291
2292 /** Transform. **/
2293
2294 if (dump_enabled_p ())
2295 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2296
2297 /* Handle def. */
2298 scalar_dest = gimple_call_lhs (stmt);
2299 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2300
2301 prev_stmt_info = NULL;
2302 switch (modifier)
2303 {
2304 case NONE:
2305 for (j = 0; j < ncopies; ++j)
2306 {
2307 /* Build argument list for the vectorized call. */
2308 if (j == 0)
2309 vargs.create (nargs);
2310 else
2311 vargs.truncate (0);
2312
2313 if (slp_node)
2314 {
2315 auto_vec<vec<tree> > vec_defs (nargs);
2316 vec<tree> vec_oprnds0;
2317
2318 for (i = 0; i < nargs; i++)
2319 vargs.quick_push (gimple_call_arg (stmt, i));
2320 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2321 vec_oprnds0 = vec_defs[0];
2322
2323 /* Arguments are ready. Create the new vector stmt. */
2324 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2325 {
2326 size_t k;
2327 for (k = 0; k < nargs; k++)
2328 {
2329 vec<tree> vec_oprndsk = vec_defs[k];
2330 vargs[k] = vec_oprndsk[i];
2331 }
2332 new_stmt = gimple_build_call_vec (fndecl, vargs);
2333 new_temp = make_ssa_name (vec_dest, new_stmt);
2334 gimple_call_set_lhs (new_stmt, new_temp);
2335 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2336 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2337 }
2338
2339 for (i = 0; i < nargs; i++)
2340 {
2341 vec<tree> vec_oprndsi = vec_defs[i];
2342 vec_oprndsi.release ();
2343 }
2344 continue;
2345 }
2346
2347 for (i = 0; i < nargs; i++)
2348 {
2349 op = gimple_call_arg (stmt, i);
2350 if (j == 0)
2351 vec_oprnd0
2352 = vect_get_vec_def_for_operand (op, stmt);
2353 else
2354 {
2355 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2356 vec_oprnd0
2357 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2358 }
2359
2360 vargs.quick_push (vec_oprnd0);
2361 }
2362
2363 if (gimple_call_internal_p (stmt)
2364 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2365 {
2366 tree *v = XALLOCAVEC (tree, nunits_out);
2367 int k;
2368 for (k = 0; k < nunits_out; ++k)
2369 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2370 tree cst = build_vector (vectype_out, v);
2371 tree new_var
2372 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
2373 gimple *init_stmt = gimple_build_assign (new_var, cst);
2374 vect_init_vector_1 (stmt, init_stmt, NULL);
2375 new_temp = make_ssa_name (vec_dest);
2376 new_stmt = gimple_build_assign (new_temp, new_var);
2377 }
2378 else
2379 {
2380 new_stmt = gimple_build_call_vec (fndecl, vargs);
2381 new_temp = make_ssa_name (vec_dest, new_stmt);
2382 gimple_call_set_lhs (new_stmt, new_temp);
2383 }
2384 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2385
2386 if (j == 0)
2387 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2388 else
2389 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2390
2391 prev_stmt_info = vinfo_for_stmt (new_stmt);
2392 }
2393
2394 break;
2395
2396 case NARROW:
2397 for (j = 0; j < ncopies; ++j)
2398 {
2399 /* Build argument list for the vectorized call. */
2400 if (j == 0)
2401 vargs.create (nargs * 2);
2402 else
2403 vargs.truncate (0);
2404
2405 if (slp_node)
2406 {
2407 auto_vec<vec<tree> > vec_defs (nargs);
2408 vec<tree> vec_oprnds0;
2409
2410 for (i = 0; i < nargs; i++)
2411 vargs.quick_push (gimple_call_arg (stmt, i));
2412 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2413 vec_oprnds0 = vec_defs[0];
2414
2415 /* Arguments are ready. Create the new vector stmt. */
2416 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2417 {
2418 size_t k;
2419 vargs.truncate (0);
2420 for (k = 0; k < nargs; k++)
2421 {
2422 vec<tree> vec_oprndsk = vec_defs[k];
2423 vargs.quick_push (vec_oprndsk[i]);
2424 vargs.quick_push (vec_oprndsk[i + 1]);
2425 }
2426 new_stmt = gimple_build_call_vec (fndecl, vargs);
2427 new_temp = make_ssa_name (vec_dest, new_stmt);
2428 gimple_call_set_lhs (new_stmt, new_temp);
2429 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2430 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2431 }
2432
2433 for (i = 0; i < nargs; i++)
2434 {
2435 vec<tree> vec_oprndsi = vec_defs[i];
2436 vec_oprndsi.release ();
2437 }
2438 continue;
2439 }
2440
2441 for (i = 0; i < nargs; i++)
2442 {
2443 op = gimple_call_arg (stmt, i);
2444 if (j == 0)
2445 {
2446 vec_oprnd0
2447 = vect_get_vec_def_for_operand (op, stmt);
2448 vec_oprnd1
2449 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2450 }
2451 else
2452 {
2453 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2454 vec_oprnd0
2455 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2456 vec_oprnd1
2457 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2458 }
2459
2460 vargs.quick_push (vec_oprnd0);
2461 vargs.quick_push (vec_oprnd1);
2462 }
2463
2464 new_stmt = gimple_build_call_vec (fndecl, vargs);
2465 new_temp = make_ssa_name (vec_dest, new_stmt);
2466 gimple_call_set_lhs (new_stmt, new_temp);
2467 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2468
2469 if (j == 0)
2470 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2471 else
2472 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2473
2474 prev_stmt_info = vinfo_for_stmt (new_stmt);
2475 }
2476
2477 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2478
2479 break;
2480
2481 case WIDEN:
2482 /* No current target implements this case. */
2483 return false;
2484 }
2485
2486 vargs.release ();
2487
2488 /* The call in STMT might prevent it from being removed in dce.
2489 We however cannot remove it here, due to the way the ssa name
2490 it defines is mapped to the new definition.  So just replace the
2491 rhs of the statement with something harmless. */
2492
2493 if (slp_node)
2494 return true;
2495
2496 type = TREE_TYPE (scalar_dest);
2497 if (is_pattern_stmt_p (stmt_info))
2498 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2499 else
2500 lhs = gimple_call_lhs (stmt);
2501
2502 if (gimple_call_internal_p (stmt)
2503 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2504 {
2505 /* Replace uses of the lhs of the GOMP_SIMD_LANE call outside the loop
2506 with vf - 1 rather than 0, i.e. with the last iteration of the
2507 vectorized loop. */
2508 imm_use_iterator iter;
2509 use_operand_p use_p;
2510 gimple *use_stmt;
2511 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2512 {
2513 basic_block use_bb = gimple_bb (use_stmt);
2514 if (use_bb
2515 && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
2516 {
2517 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2518 SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
2519 ncopies * nunits_out - 1));
2520 update_stmt (use_stmt);
2521 }
2522 }
2523 }
2524
2525 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2526 set_vinfo_for_stmt (new_stmt, stmt_info);
2527 set_vinfo_for_stmt (stmt, NULL);
2528 STMT_VINFO_STMT (stmt_info) = new_stmt;
2529 gsi_replace (gsi, new_stmt, false);
2530
2531 return true;
2532 }
2533
2534
2535 struct simd_call_arg_info
2536 {
2537 tree vectype;
2538 tree op;
2539 enum vect_def_type dt;
2540 HOST_WIDE_INT linear_step;
2541 unsigned int align;
2542 bool simd_lane_linear;
2543 };
2544
2545 /* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
2546 is linear within a simd lane (but not within the whole loop), note it in
2547 *ARGINFO. */
2548
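/* Editorial illustration (not part of the original sources): the pattern
   recognized here is roughly

     op = base + simd_lane * step

   where the POINTER_PLUS_EXPR offset chains through PLUS_EXPR, MULT_EXPR
   and conversions down to the result of an IFN_GOMP_SIMD_LANE call, i.e.
   an address that advances by a constant step per simd lane even though it
   is not a simple induction over the whole loop.  The spelling above is
   schematic only.  */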
2549 static void
2550 vect_simd_lane_linear (tree op, struct loop *loop,
2551 struct simd_call_arg_info *arginfo)
2552 {
2553 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
2554
2555 if (!is_gimple_assign (def_stmt)
2556 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
2557 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
2558 return;
2559
2560 tree base = gimple_assign_rhs1 (def_stmt);
2561 HOST_WIDE_INT linear_step = 0;
2562 tree v = gimple_assign_rhs2 (def_stmt);
2563 while (TREE_CODE (v) == SSA_NAME)
2564 {
2565 tree t;
2566 def_stmt = SSA_NAME_DEF_STMT (v);
2567 if (is_gimple_assign (def_stmt))
2568 switch (gimple_assign_rhs_code (def_stmt))
2569 {
2570 case PLUS_EXPR:
2571 t = gimple_assign_rhs2 (def_stmt);
2572 if (linear_step || TREE_CODE (t) != INTEGER_CST)
2573 return;
2574 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
2575 v = gimple_assign_rhs1 (def_stmt);
2576 continue;
2577 case MULT_EXPR:
2578 t = gimple_assign_rhs2 (def_stmt);
2579 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
2580 return;
2581 linear_step = tree_to_shwi (t);
2582 v = gimple_assign_rhs1 (def_stmt);
2583 continue;
2584 CASE_CONVERT:
2585 t = gimple_assign_rhs1 (def_stmt);
2586 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
2587 || (TYPE_PRECISION (TREE_TYPE (v))
2588 < TYPE_PRECISION (TREE_TYPE (t))))
2589 return;
2590 if (!linear_step)
2591 linear_step = 1;
2592 v = t;
2593 continue;
2594 default:
2595 return;
2596 }
2597 else if (is_gimple_call (def_stmt)
2598 && gimple_call_internal_p (def_stmt)
2599 && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
2600 && loop->simduid
2601 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
2602 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
2603 == loop->simduid))
2604 {
2605 if (!linear_step)
2606 linear_step = 1;
2607 arginfo->linear_step = linear_step;
2608 arginfo->op = base;
2609 arginfo->simd_lane_linear = true;
2610 return;
2611 }
2612 }
2613 }
2614
2615 /* Function vectorizable_simd_clone_call.
2616
2617 Check if STMT performs a function call that can be vectorized
2618 by calling a simd clone of the function.
2619 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2620 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2621 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2622
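/* Editorial illustration (not part of the original sources): given a
   declaration like

     #pragma omp declare simd uniform(s) linear(p) notinbranch
     int foo (int *p, int s);

   a call to foo inside a vectorizable loop can be replaced by a call to one
   of foo's simd clones.  The code below scores each available clone by its
   simdlen and by how well its argument kinds (vector, uniform, linear)
   match what the loop actually provides, and picks the least-bad one.
   The declaration above is hypothetical.  */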
2623 static bool
2624 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
2625 gimple **vec_stmt, slp_tree slp_node)
2626 {
2627 tree vec_dest;
2628 tree scalar_dest;
2629 tree op, type;
2630 tree vec_oprnd0 = NULL_TREE;
2631 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2632 tree vectype;
2633 unsigned int nunits;
2634 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2635 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2636 vec_info *vinfo = stmt_info->vinfo;
2637 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2638 tree fndecl, new_temp;
2639 gimple *def_stmt;
2640 gimple *new_stmt = NULL;
2641 int ncopies, j;
2642 vec<simd_call_arg_info> arginfo = vNULL;
2643 vec<tree> vargs = vNULL;
2644 size_t i, nargs;
2645 tree lhs, rtype, ratype;
2646 vec<constructor_elt, va_gc> *ret_ctor_elts;
2647
2648 /* Is STMT a vectorizable call? */
2649 if (!is_gimple_call (stmt))
2650 return false;
2651
2652 fndecl = gimple_call_fndecl (stmt);
2653 if (fndecl == NULL_TREE)
2654 return false;
2655
2656 struct cgraph_node *node = cgraph_node::get (fndecl);
2657 if (node == NULL || node->simd_clones == NULL)
2658 return false;
2659
2660 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2661 return false;
2662
2663 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2664 return false;
2665
2666 if (gimple_call_lhs (stmt)
2667 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2668 return false;
2669
2670 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2671
2672 vectype = STMT_VINFO_VECTYPE (stmt_info);
2673
2674 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2675 return false;
2676
2677 /* FORNOW */
2678 if (slp_node || PURE_SLP_STMT (stmt_info))
2679 return false;
2680
2681 /* Process function arguments. */
2682 nargs = gimple_call_num_args (stmt);
2683
2684 /* Bail out if the function has zero arguments. */
2685 if (nargs == 0)
2686 return false;
2687
2688 arginfo.create (nargs);
2689
2690 for (i = 0; i < nargs; i++)
2691 {
2692 simd_call_arg_info thisarginfo;
2693 affine_iv iv;
2694
2695 thisarginfo.linear_step = 0;
2696 thisarginfo.align = 0;
2697 thisarginfo.op = NULL_TREE;
2698 thisarginfo.simd_lane_linear = false;
2699
2700 op = gimple_call_arg (stmt, i);
2701 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
2702 &thisarginfo.vectype)
2703 || thisarginfo.dt == vect_uninitialized_def)
2704 {
2705 if (dump_enabled_p ())
2706 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2707 "use not simple.\n");
2708 arginfo.release ();
2709 return false;
2710 }
2711
2712 if (thisarginfo.dt == vect_constant_def
2713 || thisarginfo.dt == vect_external_def)
2714 gcc_assert (thisarginfo.vectype == NULL_TREE);
2715 else
2716 gcc_assert (thisarginfo.vectype != NULL_TREE);
2717
2718 /* For linear arguments, the analyze phase should have saved
2719 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
2720 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
2721 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
2722 {
2723 gcc_assert (vec_stmt);
2724 thisarginfo.linear_step
2725 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
2726 thisarginfo.op
2727 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
2728 thisarginfo.simd_lane_linear
2729 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
2730 == boolean_true_node);
2731 /* If the loop has been peeled for alignment, we need to adjust it. */
2732 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
2733 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
2734 if (n1 != n2 && !thisarginfo.simd_lane_linear)
2735 {
2736 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
2737 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
2738 tree opt = TREE_TYPE (thisarginfo.op);
2739 bias = fold_convert (TREE_TYPE (step), bias);
2740 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
2741 thisarginfo.op
2742 = fold_build2 (POINTER_TYPE_P (opt)
2743 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
2744 thisarginfo.op, bias);
2745 }
2746 }
2747 else if (!vec_stmt
2748 && thisarginfo.dt != vect_constant_def
2749 && thisarginfo.dt != vect_external_def
2750 && loop_vinfo
2751 && TREE_CODE (op) == SSA_NAME
2752 && simple_iv (loop, loop_containing_stmt (stmt), op,
2753 &iv, false)
2754 && tree_fits_shwi_p (iv.step))
2755 {
2756 thisarginfo.linear_step = tree_to_shwi (iv.step);
2757 thisarginfo.op = iv.base;
2758 }
2759 else if ((thisarginfo.dt == vect_constant_def
2760 || thisarginfo.dt == vect_external_def)
2761 && POINTER_TYPE_P (TREE_TYPE (op)))
2762 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2763 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
2764 linear too. */
2765 if (POINTER_TYPE_P (TREE_TYPE (op))
2766 && !thisarginfo.linear_step
2767 && !vec_stmt
2768 && thisarginfo.dt != vect_constant_def
2769 && thisarginfo.dt != vect_external_def
2770 && loop_vinfo
2771 && !slp_node
2772 && TREE_CODE (op) == SSA_NAME)
2773 vect_simd_lane_linear (op, loop, &thisarginfo);
2774
2775 arginfo.quick_push (thisarginfo);
2776 }
2777
2778 unsigned int badness = 0;
2779 struct cgraph_node *bestn = NULL;
2780 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
2781 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
2782 else
2783 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2784 n = n->simdclone->next_clone)
2785 {
2786 unsigned int this_badness = 0;
2787 if (n->simdclone->simdlen
2788 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2789 || n->simdclone->nargs != nargs)
2790 continue;
2791 if (n->simdclone->simdlen
2792 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2793 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2794 - exact_log2 (n->simdclone->simdlen)) * 1024;
2795 if (n->simdclone->inbranch)
2796 this_badness += 2048;
2797 int target_badness = targetm.simd_clone.usable (n);
2798 if (target_badness < 0)
2799 continue;
2800 this_badness += target_badness * 512;
2801 /* FORNOW: Have to add code to add the mask argument. */
2802 if (n->simdclone->inbranch)
2803 continue;
2804 for (i = 0; i < nargs; i++)
2805 {
2806 switch (n->simdclone->args[i].arg_type)
2807 {
2808 case SIMD_CLONE_ARG_TYPE_VECTOR:
2809 if (!useless_type_conversion_p
2810 (n->simdclone->args[i].orig_type,
2811 TREE_TYPE (gimple_call_arg (stmt, i))))
2812 i = -1;
2813 else if (arginfo[i].dt == vect_constant_def
2814 || arginfo[i].dt == vect_external_def
2815 || arginfo[i].linear_step)
2816 this_badness += 64;
2817 break;
2818 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2819 if (arginfo[i].dt != vect_constant_def
2820 && arginfo[i].dt != vect_external_def)
2821 i = -1;
2822 break;
2823 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2824 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
2825 if (arginfo[i].dt == vect_constant_def
2826 || arginfo[i].dt == vect_external_def
2827 || (arginfo[i].linear_step
2828 != n->simdclone->args[i].linear_step))
2829 i = -1;
2830 break;
2831 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2832 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
2833 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
2834 /* FORNOW */
2835 i = -1;
2836 break;
2837 case SIMD_CLONE_ARG_TYPE_MASK:
2838 gcc_unreachable ();
2839 }
2840 if (i == (size_t) -1)
2841 break;
2842 if (n->simdclone->args[i].alignment > arginfo[i].align)
2843 {
2844 i = -1;
2845 break;
2846 }
2847 if (arginfo[i].align)
2848 this_badness += (exact_log2 (arginfo[i].align)
2849 - exact_log2 (n->simdclone->args[i].alignment));
2850 }
2851 if (i == (size_t) -1)
2852 continue;
2853 if (bestn == NULL || this_badness < badness)
2854 {
2855 bestn = n;
2856 badness = this_badness;
2857 }
2858 }
2859
2860 if (bestn == NULL)
2861 {
2862 arginfo.release ();
2863 return false;
2864 }
2865
2866 for (i = 0; i < nargs; i++)
2867 if ((arginfo[i].dt == vect_constant_def
2868 || arginfo[i].dt == vect_external_def)
2869 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2870 {
2871 arginfo[i].vectype
2872 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2873 i)));
2874 if (arginfo[i].vectype == NULL
2875 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2876 > bestn->simdclone->simdlen))
2877 {
2878 arginfo.release ();
2879 return false;
2880 }
2881 }
2882
2883 fndecl = bestn->decl;
2884 nunits = bestn->simdclone->simdlen;
2885 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2886
2887 /* If the function isn't const, only allow it in simd loops where the user
2888 has asserted that at least nunits consecutive iterations can be
2889 performed using SIMD instructions. */
2890 if ((loop == NULL || (unsigned) loop->safelen < nunits)
2891 && gimple_vuse (stmt))
2892 {
2893 arginfo.release ();
2894 return false;
2895 }
2896
2897 /* Sanity check: make sure that at least one copy of the vectorized stmt
2898 needs to be generated. */
2899 gcc_assert (ncopies >= 1);
2900
2901 if (!vec_stmt) /* transformation not required. */
2902 {
2903 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
2904 for (i = 0; i < nargs; i++)
2905 if (bestn->simdclone->args[i].arg_type
2906 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
2907 {
2908 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
2909 + 1);
2910 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
2911 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
2912 ? size_type_node : TREE_TYPE (arginfo[i].op);
2913 tree ls = build_int_cst (lst, arginfo[i].linear_step);
2914 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
2915 tree sll = arginfo[i].simd_lane_linear
2916 ? boolean_true_node : boolean_false_node;
2917 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
2918 }
2919 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
2920 if (dump_enabled_p ())
2921 dump_printf_loc (MSG_NOTE, vect_location,
2922 "=== vectorizable_simd_clone_call ===\n");
2923 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2924 arginfo.release ();
2925 return true;
2926 }
2927
2928 /** Transform. **/
2929
2930 if (dump_enabled_p ())
2931 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2932
2933 /* Handle def. */
2934 scalar_dest = gimple_call_lhs (stmt);
2935 vec_dest = NULL_TREE;
2936 rtype = NULL_TREE;
2937 ratype = NULL_TREE;
2938 if (scalar_dest)
2939 {
2940 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2941 rtype = TREE_TYPE (TREE_TYPE (fndecl));
2942 if (TREE_CODE (rtype) == ARRAY_TYPE)
2943 {
2944 ratype = rtype;
2945 rtype = TREE_TYPE (ratype);
2946 }
2947 }
2948
2949 prev_stmt_info = NULL;
2950 for (j = 0; j < ncopies; ++j)
2951 {
2952 /* Build argument list for the vectorized call. */
2953 if (j == 0)
2954 vargs.create (nargs);
2955 else
2956 vargs.truncate (0);
2957
2958 for (i = 0; i < nargs; i++)
2959 {
2960 unsigned int k, l, m, o;
2961 tree atype;
2962 op = gimple_call_arg (stmt, i);
2963 switch (bestn->simdclone->args[i].arg_type)
2964 {
2965 case SIMD_CLONE_ARG_TYPE_VECTOR:
2966 atype = bestn->simdclone->args[i].vector_type;
2967 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
2968 for (m = j * o; m < (j + 1) * o; m++)
2969 {
2970 if (TYPE_VECTOR_SUBPARTS (atype)
2971 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
2972 {
2973 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
2974 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2975 / TYPE_VECTOR_SUBPARTS (atype));
2976 gcc_assert ((k & (k - 1)) == 0);
2977 if (m == 0)
2978 vec_oprnd0
2979 = vect_get_vec_def_for_operand (op, stmt);
2980 else
2981 {
2982 vec_oprnd0 = arginfo[i].op;
2983 if ((m & (k - 1)) == 0)
2984 vec_oprnd0
2985 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2986 vec_oprnd0);
2987 }
2988 arginfo[i].op = vec_oprnd0;
2989 vec_oprnd0
2990 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
2991 size_int (prec),
2992 bitsize_int ((m & (k - 1)) * prec));
2993 new_stmt
2994 = gimple_build_assign (make_ssa_name (atype),
2995 vec_oprnd0);
2996 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2997 vargs.safe_push (gimple_assign_lhs (new_stmt));
2998 }
2999 else
3000 {
3001 k = (TYPE_VECTOR_SUBPARTS (atype)
3002 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3003 gcc_assert ((k & (k - 1)) == 0);
3004 vec<constructor_elt, va_gc> *ctor_elts;
3005 if (k != 1)
3006 vec_alloc (ctor_elts, k);
3007 else
3008 ctor_elts = NULL;
3009 for (l = 0; l < k; l++)
3010 {
3011 if (m == 0 && l == 0)
3012 vec_oprnd0
3013 = vect_get_vec_def_for_operand (op, stmt);
3014 else
3015 vec_oprnd0
3016 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3017 arginfo[i].op);
3018 arginfo[i].op = vec_oprnd0;
3019 if (k == 1)
3020 break;
3021 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3022 vec_oprnd0);
3023 }
3024 if (k == 1)
3025 vargs.safe_push (vec_oprnd0);
3026 else
3027 {
3028 vec_oprnd0 = build_constructor (atype, ctor_elts);
3029 new_stmt
3030 = gimple_build_assign (make_ssa_name (atype),
3031 vec_oprnd0);
3032 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3033 vargs.safe_push (gimple_assign_lhs (new_stmt));
3034 }
3035 }
3036 }
3037 break;
3038 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3039 vargs.safe_push (op);
3040 break;
3041 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3042 if (j == 0)
3043 {
3044 gimple_seq stmts;
3045 arginfo[i].op
3046 = force_gimple_operand (arginfo[i].op, &stmts, true,
3047 NULL_TREE);
3048 if (stmts != NULL)
3049 {
3050 basic_block new_bb;
3051 edge pe = loop_preheader_edge (loop);
3052 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3053 gcc_assert (!new_bb);
3054 }
3055 if (arginfo[i].simd_lane_linear)
3056 {
3057 vargs.safe_push (arginfo[i].op);
3058 break;
3059 }
3060 tree phi_res = copy_ssa_name (op);
3061 gphi *new_phi = create_phi_node (phi_res, loop->header);
3062 set_vinfo_for_stmt (new_phi,
3063 new_stmt_vec_info (new_phi, loop_vinfo));
3064 add_phi_arg (new_phi, arginfo[i].op,
3065 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3066 enum tree_code code
3067 = POINTER_TYPE_P (TREE_TYPE (op))
3068 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3069 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3070 ? sizetype : TREE_TYPE (op);
3071 widest_int cst
3072 = wi::mul (bestn->simdclone->args[i].linear_step,
3073 ncopies * nunits);
3074 tree tcst = wide_int_to_tree (type, cst);
3075 tree phi_arg = copy_ssa_name (op);
3076 new_stmt
3077 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3078 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3079 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3080 set_vinfo_for_stmt (new_stmt,
3081 new_stmt_vec_info (new_stmt, loop_vinfo));
3082 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3083 UNKNOWN_LOCATION);
3084 arginfo[i].op = phi_res;
3085 vargs.safe_push (phi_res);
3086 }
3087 else
3088 {
3089 enum tree_code code
3090 = POINTER_TYPE_P (TREE_TYPE (op))
3091 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3092 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3093 ? sizetype : TREE_TYPE (op);
3094 widest_int cst
3095 = wi::mul (bestn->simdclone->args[i].linear_step,
3096 j * nunits);
3097 tree tcst = wide_int_to_tree (type, cst);
3098 new_temp = make_ssa_name (TREE_TYPE (op));
3099 new_stmt = gimple_build_assign (new_temp, code,
3100 arginfo[i].op, tcst);
3101 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3102 vargs.safe_push (new_temp);
3103 }
3104 break;
3105 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3106 default:
3107 gcc_unreachable ();
3108 }
3109 }
3110
3111 new_stmt = gimple_build_call_vec (fndecl, vargs);
3112 if (vec_dest)
3113 {
3114 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3115 if (ratype)
3116 new_temp = create_tmp_var (ratype);
3117 else if (TYPE_VECTOR_SUBPARTS (vectype)
3118 == TYPE_VECTOR_SUBPARTS (rtype))
3119 new_temp = make_ssa_name (vec_dest, new_stmt);
3120 else
3121 new_temp = make_ssa_name (rtype, new_stmt);
3122 gimple_call_set_lhs (new_stmt, new_temp);
3123 }
3124 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3125
3126 if (vec_dest)
3127 {
3128 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3129 {
3130 unsigned int k, l;
3131 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3132 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3133 gcc_assert ((k & (k - 1)) == 0);
3134 for (l = 0; l < k; l++)
3135 {
3136 tree t;
3137 if (ratype)
3138 {
3139 t = build_fold_addr_expr (new_temp);
3140 t = build2 (MEM_REF, vectype, t,
3141 build_int_cst (TREE_TYPE (t),
3142 l * prec / BITS_PER_UNIT));
3143 }
3144 else
3145 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3146 size_int (prec), bitsize_int (l * prec));
3147 new_stmt
3148 = gimple_build_assign (make_ssa_name (vectype), t);
3149 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3150 if (j == 0 && l == 0)
3151 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3152 else
3153 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3154
3155 prev_stmt_info = vinfo_for_stmt (new_stmt);
3156 }
3157
3158 if (ratype)
3159 {
3160 tree clobber = build_constructor (ratype, NULL);
3161 TREE_THIS_VOLATILE (clobber) = 1;
3162 new_stmt = gimple_build_assign (new_temp, clobber);
3163 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3164 }
3165 continue;
3166 }
3167 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3168 {
3169 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3170 / TYPE_VECTOR_SUBPARTS (rtype));
3171 gcc_assert ((k & (k - 1)) == 0);
3172 if ((j & (k - 1)) == 0)
3173 vec_alloc (ret_ctor_elts, k);
3174 if (ratype)
3175 {
3176 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3177 for (m = 0; m < o; m++)
3178 {
3179 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3180 size_int (m), NULL_TREE, NULL_TREE);
3181 new_stmt
3182 = gimple_build_assign (make_ssa_name (rtype), tem);
3183 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3184 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3185 gimple_assign_lhs (new_stmt));
3186 }
3187 tree clobber = build_constructor (ratype, NULL);
3188 TREE_THIS_VOLATILE (clobber) = 1;
3189 new_stmt = gimple_build_assign (new_temp, clobber);
3190 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3191 }
3192 else
3193 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3194 if ((j & (k - 1)) != k - 1)
3195 continue;
3196 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3197 new_stmt
3198 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3199 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3200
3201 if ((unsigned) j == k - 1)
3202 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3203 else
3204 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3205
3206 prev_stmt_info = vinfo_for_stmt (new_stmt);
3207 continue;
3208 }
3209 else if (ratype)
3210 {
3211 tree t = build_fold_addr_expr (new_temp);
3212 t = build2 (MEM_REF, vectype, t,
3213 build_int_cst (TREE_TYPE (t), 0));
3214 new_stmt
3215 = gimple_build_assign (make_ssa_name (vec_dest), t);
3216 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3217 tree clobber = build_constructor (ratype, NULL);
3218 TREE_THIS_VOLATILE (clobber) = 1;
3219 vect_finish_stmt_generation (stmt,
3220 gimple_build_assign (new_temp,
3221 clobber), gsi);
3222 }
3223 }
3224
3225 if (j == 0)
3226 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3227 else
3228 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3229
3230 prev_stmt_info = vinfo_for_stmt (new_stmt);
3231 }
3232
3233 vargs.release ();
3234
3235 /* The call in STMT might prevent it from being removed in dce.
3236 We however cannot remove it here, due to the way the ssa name
3237 it defines is mapped to the new definition.  So just replace the
3238 rhs of the statement with something harmless. */
3239
3240 if (slp_node)
3241 return true;
3242
3243 if (scalar_dest)
3244 {
3245 type = TREE_TYPE (scalar_dest);
3246 if (is_pattern_stmt_p (stmt_info))
3247 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3248 else
3249 lhs = gimple_call_lhs (stmt);
3250 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3251 }
3252 else
3253 new_stmt = gimple_build_nop ();
3254 set_vinfo_for_stmt (new_stmt, stmt_info);
3255 set_vinfo_for_stmt (stmt, NULL);
3256 STMT_VINFO_STMT (stmt_info) = new_stmt;
3257 gsi_replace (gsi, new_stmt, true);
3258 unlink_stmt_vdef (stmt);
3259
3260 return true;
3261 }
3262
3263
3264 /* Function vect_gen_widened_results_half
3265
3266 Create a vector stmt whose code is CODE, whose operation type (number of
3267 arguments) is OP_TYPE, and whose result variable is VEC_DEST; its arguments
3268 are VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at GSI.
3269 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3270 needs to be created (DECL is a function-decl of a target-builtin).
3271 STMT is the original scalar stmt that we are vectorizing. */
3272
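/* Editorial note (not part of the original sources): widening an N-element
   vector yields two N/2-element vectors of the wider type; this helper
   emits one of them, e.g. the VEC_WIDEN_MULT_LO_EXPR or
   VEC_WIDEN_MULT_HI_EXPR statement of a widening multiplication, or the
   matching half of a target builtin call when CODE is CALL_EXPR.  */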
3273 static gimple *
3274 vect_gen_widened_results_half (enum tree_code code,
3275 tree decl,
3276 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3277 tree vec_dest, gimple_stmt_iterator *gsi,
3278 gimple *stmt)
3279 {
3280 gimple *new_stmt;
3281 tree new_temp;
3282
3283 /* Generate half of the widened result: */
3284 if (code == CALL_EXPR)
3285 {
3286 /* Target specific support */
3287 if (op_type == binary_op)
3288 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3289 else
3290 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3291 new_temp = make_ssa_name (vec_dest, new_stmt);
3292 gimple_call_set_lhs (new_stmt, new_temp);
3293 }
3294 else
3295 {
3296 /* Generic support */
3297 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3298 if (op_type != binary_op)
3299 vec_oprnd1 = NULL;
3300 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3301 new_temp = make_ssa_name (vec_dest, new_stmt);
3302 gimple_assign_set_lhs (new_stmt, new_temp);
3303 }
3304 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3305
3306 return new_stmt;
3307 }
3308
3309
3310 /* Get vectorized definitions for loop-based vectorization. For the first
3311 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3312 the scalar operand), and for the rest we get a copy with
3313 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3314 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3315 The vectors are collected into VEC_OPRNDS. */
3316
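/* Editorial note (not part of the original sources): each invocation pushes
   two vector defs; with MULTI_STEP_CVT == 1, for example, the recursion
   collects four defs in total (the initial def plus three stmt copies),
   which the multi-step narrowing code below then consumes in pairs.  */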
3317 static void
3318 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3319 vec<tree> *vec_oprnds, int multi_step_cvt)
3320 {
3321 tree vec_oprnd;
3322
3323 /* Get first vector operand. */
3324 /* All the vector operands except the very first one (which is the scalar
3325 operand) are stmt copies. */
3326 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3327 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3328 else
3329 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3330
3331 vec_oprnds->quick_push (vec_oprnd);
3332
3333 /* Get second vector operand. */
3334 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3335 vec_oprnds->quick_push (vec_oprnd);
3336
3337 *oprnd = vec_oprnd;
3338
3339 /* For conversion in multiple steps, continue to get operands
3340 recursively. */
3341 if (multi_step_cvt)
3342 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3343 }
3344
3345
3346 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3347 For multi-step conversions store the resulting vectors and call the function
3348 recursively. */
3349
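/* Editorial illustration (not part of the original sources): demoting int
   elements to chars can go int -> short -> char.  At the first level pairs
   of V4SImode operands are packed into V8HImode vectors; the recursive call
   then packs pairs of those, using VEC_PACK_TRUNC_EXPR, into the final
   V16QImode vectors.  The modes are examples only.  */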
3350 static void
3351 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3352 int multi_step_cvt, gimple *stmt,
3353 vec<tree> vec_dsts,
3354 gimple_stmt_iterator *gsi,
3355 slp_tree slp_node, enum tree_code code,
3356 stmt_vec_info *prev_stmt_info)
3357 {
3358 unsigned int i;
3359 tree vop0, vop1, new_tmp, vec_dest;
3360 gimple *new_stmt;
3361 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3362
3363 vec_dest = vec_dsts.pop ();
3364
3365 for (i = 0; i < vec_oprnds->length (); i += 2)
3366 {
3367 /* Create demotion operation. */
3368 vop0 = (*vec_oprnds)[i];
3369 vop1 = (*vec_oprnds)[i + 1];
3370 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3371 new_tmp = make_ssa_name (vec_dest, new_stmt);
3372 gimple_assign_set_lhs (new_stmt, new_tmp);
3373 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3374
3375 if (multi_step_cvt)
3376 /* Store the resulting vector for next recursive call. */
3377 (*vec_oprnds)[i/2] = new_tmp;
3378 else
3379 {
3380 /* This is the last step of the conversion sequence.  Store the
3381 vectors in SLP_NODE or in the vector info of the scalar statement
3382 (or in the STMT_VINFO_RELATED_STMT chain). */
3383 if (slp_node)
3384 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3385 else
3386 {
3387 if (!*prev_stmt_info)
3388 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3389 else
3390 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3391
3392 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3393 }
3394 }
3395 }
3396
3397 /* For multi-step demotion operations we first generate demotion operations
3398 from the source type to the intermediate types, and then combine the
3399 results (stored in VEC_OPRNDS) with a demotion operation to the destination
3400 type. */
3401 if (multi_step_cvt)
3402 {
3403 /* At each level of recursion we have half of the operands we had at the
3404 previous level. */
3405 vec_oprnds->truncate ((i+1)/2);
3406 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3407 stmt, vec_dsts, gsi, slp_node,
3408 VEC_PACK_TRUNC_EXPR,
3409 prev_stmt_info);
3410 }
3411
3412 vec_dsts.quick_push (vec_dest);
3413 }
3414
3415
3416 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3417 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3418 the resulting vectors and call the function recursively. */
3419
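/* Editorial note (not part of the original sources): every input vector
   produces two output vectors here, e.g. a V8HImode operand widened to two
   V4SImode vectors via a CODE1/CODE2 pair such as VEC_UNPACK_LO_EXPR and
   VEC_UNPACK_HI_EXPR (or two halves of a target builtin call); the results
   are stored back into VEC_OPRNDS0, lo/hi interleaved, for the next step.
   The modes are examples only.  */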
3420 static void
3421 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3422 vec<tree> *vec_oprnds1,
3423 gimple *stmt, tree vec_dest,
3424 gimple_stmt_iterator *gsi,
3425 enum tree_code code1,
3426 enum tree_code code2, tree decl1,
3427 tree decl2, int op_type)
3428 {
3429 int i;
3430 tree vop0, vop1, new_tmp1, new_tmp2;
3431 gimple *new_stmt1, *new_stmt2;
3432 vec<tree> vec_tmp = vNULL;
3433
3434 vec_tmp.create (vec_oprnds0->length () * 2);
3435 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3436 {
3437 if (op_type == binary_op)
3438 vop1 = (*vec_oprnds1)[i];
3439 else
3440 vop1 = NULL_TREE;
3441
3442 /* Generate the two halves of the promotion operation. */
3443 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3444 op_type, vec_dest, gsi, stmt);
3445 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3446 op_type, vec_dest, gsi, stmt);
3447 if (is_gimple_call (new_stmt1))
3448 {
3449 new_tmp1 = gimple_call_lhs (new_stmt1);
3450 new_tmp2 = gimple_call_lhs (new_stmt2);
3451 }
3452 else
3453 {
3454 new_tmp1 = gimple_assign_lhs (new_stmt1);
3455 new_tmp2 = gimple_assign_lhs (new_stmt2);
3456 }
3457
3458 /* Store the results for the next step. */
3459 vec_tmp.quick_push (new_tmp1);
3460 vec_tmp.quick_push (new_tmp2);
3461 }
3462
3463 vec_oprnds0->release ();
3464 *vec_oprnds0 = vec_tmp;
3465 }
3466
3467
3468 /* Check if STMT performs a conversion operation that can be vectorized.
3469 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3470 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3471 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3472
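/* Editorial illustration (not part of the original sources): for

     for (i = 0; i < n; i++)
       d[i] = (double) f[i];

   the input vector has more lanes than the output vector, so the WIDEN path
   below is taken and two vector statements are emitted per input vector;
   the reverse (double to float) conversion takes the NARROW path, and
   same-width conversions such as int to float take the NONE path.
   The array names above are hypothetical.  */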
3473 static bool
3474 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
3475 gimple **vec_stmt, slp_tree slp_node)
3476 {
3477 tree vec_dest;
3478 tree scalar_dest;
3479 tree op0, op1 = NULL_TREE;
3480 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3481 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3482 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3483 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3484 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3485 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3486 tree new_temp;
3487 gimple *def_stmt;
3488 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3489 gimple *new_stmt = NULL;
3490 stmt_vec_info prev_stmt_info;
3491 int nunits_in;
3492 int nunits_out;
3493 tree vectype_out, vectype_in;
3494 int ncopies, i, j;
3495 tree lhs_type, rhs_type;
3496 enum { NARROW, NONE, WIDEN } modifier;
3497 vec<tree> vec_oprnds0 = vNULL;
3498 vec<tree> vec_oprnds1 = vNULL;
3499 tree vop0;
3500 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3501 vec_info *vinfo = stmt_info->vinfo;
3502 int multi_step_cvt = 0;
3503 vec<tree> vec_dsts = vNULL;
3504 vec<tree> interm_types = vNULL;
3505 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3506 int op_type;
3507 machine_mode rhs_mode;
3508 unsigned short fltsz;
3509
3510 /* Is STMT a vectorizable conversion? */
3511
3512 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3513 return false;
3514
3515 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3516 return false;
3517
3518 if (!is_gimple_assign (stmt))
3519 return false;
3520
3521 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3522 return false;
3523
3524 code = gimple_assign_rhs_code (stmt);
3525 if (!CONVERT_EXPR_CODE_P (code)
3526 && code != FIX_TRUNC_EXPR
3527 && code != FLOAT_EXPR
3528 && code != WIDEN_MULT_EXPR
3529 && code != WIDEN_LSHIFT_EXPR)
3530 return false;
3531
3532 op_type = TREE_CODE_LENGTH (code);
3533
3534 /* Check types of lhs and rhs. */
3535 scalar_dest = gimple_assign_lhs (stmt);
3536 lhs_type = TREE_TYPE (scalar_dest);
3537 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3538
3539 op0 = gimple_assign_rhs1 (stmt);
3540 rhs_type = TREE_TYPE (op0);
3541
3542 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3543 && !((INTEGRAL_TYPE_P (lhs_type)
3544 && INTEGRAL_TYPE_P (rhs_type))
3545 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3546 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3547 return false;
3548
3549 if ((INTEGRAL_TYPE_P (lhs_type)
3550 && (TYPE_PRECISION (lhs_type)
3551 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3552 || (INTEGRAL_TYPE_P (rhs_type)
3553 && (TYPE_PRECISION (rhs_type)
3554 != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
3555 {
3556 if (dump_enabled_p ())
3557 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3558 "type conversion to/from bit-precision unsupported."
3559 "\n");
3560 return false;
3561 }
3562
3563 /* Check the operands of the operation. */
3564 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
3565 {
3566 if (dump_enabled_p ())
3567 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3568 "use not simple.\n");
3569 return false;
3570 }
3571 if (op_type == binary_op)
3572 {
3573 bool ok;
3574
3575 op1 = gimple_assign_rhs2 (stmt);
3576 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3577 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3578 OP1. */
3579 if (CONSTANT_CLASS_P (op0))
3580 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
3581 else
3582 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
3583
3584 if (!ok)
3585 {
3586 if (dump_enabled_p ())
3587 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3588 "use not simple.\n");
3589 return false;
3590 }
3591 }
3592
3593 /* If op0 is an external or constant def, use a vector type of
3594 the same size as the output vector type. */
3595 if (!vectype_in)
3596 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3597 if (vec_stmt)
3598 gcc_assert (vectype_in);
3599 if (!vectype_in)
3600 {
3601 if (dump_enabled_p ())
3602 {
3603 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3604 "no vectype for scalar type ");
3605 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3606 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3607 }
3608
3609 return false;
3610 }
3611
3612 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3613 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3614 if (nunits_in < nunits_out)
3615 modifier = NARROW;
3616 else if (nunits_out == nunits_in)
3617 modifier = NONE;
3618 else
3619 modifier = WIDEN;
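/* For example, with 128-bit vectors a short -> int conversion has
   nunits_in == 8 and nunits_out == 4, so MODIFIER is WIDEN; the
   reverse int -> short direction gives NARROW, and float -> int,
   where the element widths match, gives NONE.  */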
3620
3621 /* Multiple types in SLP are handled by creating the appropriate number of
3622 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3623 case of SLP. */
3624 if (slp_node || PURE_SLP_STMT (stmt_info))
3625 ncopies = 1;
3626 else if (modifier == NARROW)
3627 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3628 else
3629 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
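/* For example, with a vectorization factor of 16 and 128-bit vectors,
   a widening short -> int conversion has nunits_in == 8 and therefore
   needs ncopies == 2; the narrowing int -> short direction has
   nunits_out == 8 and likewise needs two copies.  */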
3630
3631 /* Sanity check: make sure that at least one copy of the vectorized stmt
3632 needs to be generated. */
3633 gcc_assert (ncopies >= 1);
3634
3635 /* Supportable by target? */
3636 switch (modifier)
3637 {
3638 case NONE:
3639 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3640 return false;
3641 if (supportable_convert_operation (code, vectype_out, vectype_in,
3642 &decl1, &code1))
3643 break;
3644 /* FALLTHRU */
3645 unsupported:
3646 if (dump_enabled_p ())
3647 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3648 "conversion not supported by target.\n");
3649 return false;
3650
3651 case WIDEN:
3652 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3653 &code1, &code2, &multi_step_cvt,
3654 &interm_types))
3655 {
3656 /* Binary widening operation can only be supported directly by the
3657 architecture. */
3658 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3659 break;
3660 }
3661
3662 if (code != FLOAT_EXPR
3663 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3664 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3665 goto unsupported;
3666
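/* For example, a short -> double conversion (FLOAT_EXPR) may not be
   provided directly by the target; the loop below searches for an
   intermediate integer type so that the conversion can be done as a
   widening short -> int step followed by int -> double.  */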
3667 rhs_mode = TYPE_MODE (rhs_type);
3668 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3669 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3670 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3671 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3672 {
3673 cvt_type
3674 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3675 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3676 if (cvt_type == NULL_TREE)
3677 goto unsupported;
3678
3679 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3680 {
3681 if (!supportable_convert_operation (code, vectype_out,
3682 cvt_type, &decl1, &codecvt1))
3683 goto unsupported;
3684 }
3685 else if (!supportable_widening_operation (code, stmt, vectype_out,
3686 cvt_type, &codecvt1,
3687 &codecvt2, &multi_step_cvt,
3688 &interm_types))
3689 continue;
3690 else
3691 gcc_assert (multi_step_cvt == 0);
3692
3693 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3694 vectype_in, &code1, &code2,
3695 &multi_step_cvt, &interm_types))
3696 break;
3697 }
3698
3699 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3700 goto unsupported;
3701
3702 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3703 codecvt2 = ERROR_MARK;
3704 else
3705 {
3706 multi_step_cvt++;
3707 interm_types.safe_push (cvt_type);
3708 cvt_type = NULL_TREE;
3709 }
3710 break;
3711
3712 case NARROW:
3713 gcc_assert (op_type == unary_op);
3714 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3715 &code1, &multi_step_cvt,
3716 &interm_types))
3717 break;
3718
3719 if (code != FIX_TRUNC_EXPR
3720 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3721 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3722 goto unsupported;
3723
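/* For example, a double -> short conversion (FIX_TRUNC_EXPR) is
   handled as double -> 64-bit integer followed by a (possibly
   multi-step) integer narrowing down to short.  */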
3724 rhs_mode = TYPE_MODE (rhs_type);
3725 cvt_type
3726 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3727 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3728 if (cvt_type == NULL_TREE)
3729 goto unsupported;
3730 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3731 &decl1, &codecvt1))
3732 goto unsupported;
3733 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3734 &code1, &multi_step_cvt,
3735 &interm_types))
3736 break;
3737 goto unsupported;
3738
3739 default:
3740 gcc_unreachable ();
3741 }
3742
3743 if (!vec_stmt) /* transformation not required. */
3744 {
3745 if (dump_enabled_p ())
3746 dump_printf_loc (MSG_NOTE, vect_location,
3747 "=== vectorizable_conversion ===\n");
3748 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3749 {
3750 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3751 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3752 }
3753 else if (modifier == NARROW)
3754 {
3755 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3756 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3757 }
3758 else
3759 {
3760 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3761 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3762 }
3763 interm_types.release ();
3764 return true;
3765 }
3766
3767 /** Transform. **/
3768 if (dump_enabled_p ())
3769 dump_printf_loc (MSG_NOTE, vect_location,
3770 "transform conversion. ncopies = %d.\n", ncopies);
3771
3772 if (op_type == binary_op)
3773 {
3774 if (CONSTANT_CLASS_P (op0))
3775 op0 = fold_convert (TREE_TYPE (op1), op0);
3776 else if (CONSTANT_CLASS_P (op1))
3777 op1 = fold_convert (TREE_TYPE (op0), op1);
3778 }
3779
3780 /* In case of multi-step conversion, we first generate conversion operations
3781 to the intermediate types, and then from those types to the final one.
3782 We create vector destinations for the intermediate type (TYPES) received
3783 from supportable_*_operation, and store them in the correct order
3784 for future use in vect_create_vectorized_*_stmts (). */
3785 vec_dsts.create (multi_step_cvt + 1);
3786 vec_dest = vect_create_destination_var (scalar_dest,
3787 (cvt_type && modifier == WIDEN)
3788 ? cvt_type : vectype_out);
3789 vec_dsts.quick_push (vec_dest);
3790
3791 if (multi_step_cvt)
3792 {
3793 for (i = interm_types.length () - 1;
3794 interm_types.iterate (i, &intermediate_type); i--)
3795 {
3796 vec_dest = vect_create_destination_var (scalar_dest,
3797 intermediate_type);
3798 vec_dsts.quick_push (vec_dest);
3799 }
3800 }
3801
3802 if (cvt_type)
3803 vec_dest = vect_create_destination_var (scalar_dest,
3804 modifier == WIDEN
3805 ? vectype_out : cvt_type);
3806
3807 if (!slp_node)
3808 {
3809 if (modifier == WIDEN)
3810 {
3811 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3812 if (op_type == binary_op)
3813 vec_oprnds1.create (1);
3814 }
3815 else if (modifier == NARROW)
3816 vec_oprnds0.create (
3817 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3818 }
3819 else if (code == WIDEN_LSHIFT_EXPR)
3820 vec_oprnds1.create (slp_node->vec_stmts_size);
3821
3822 last_oprnd = op0;
3823 prev_stmt_info = NULL;
3824 switch (modifier)
3825 {
3826 case NONE:
3827 for (j = 0; j < ncopies; j++)
3828 {
3829 if (j == 0)
3830 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3831 -1);
3832 else
3833 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3834
3835 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3836 {
3837 /* Arguments are ready, create the new vector stmt. */
3838 if (code1 == CALL_EXPR)
3839 {
3840 new_stmt = gimple_build_call (decl1, 1, vop0);
3841 new_temp = make_ssa_name (vec_dest, new_stmt);
3842 gimple_call_set_lhs (new_stmt, new_temp);
3843 }
3844 else
3845 {
3846 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3847 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
3848 new_temp = make_ssa_name (vec_dest, new_stmt);
3849 gimple_assign_set_lhs (new_stmt, new_temp);
3850 }
3851
3852 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3853 if (slp_node)
3854 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3855 else
3856 {
3857 if (!prev_stmt_info)
3858 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3859 else
3860 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3861 prev_stmt_info = vinfo_for_stmt (new_stmt);
3862 }
3863 }
3864 }
3865 break;
3866
3867 case WIDEN:
3868 /* In case the vectorization factor (VF) is bigger than the number
3869 of elements that we can fit in a vectype (nunits), we have to
3870 generate more than one vector stmt - i.e., we need to "unroll"
3871 the vector stmt by a factor VF/nunits. */
3872 for (j = 0; j < ncopies; j++)
3873 {
3874 /* Handle uses. */
3875 if (j == 0)
3876 {
3877 if (slp_node)
3878 {
3879 if (code == WIDEN_LSHIFT_EXPR)
3880 {
3881 unsigned int k;
3882
3883 vec_oprnd1 = op1;
3884 /* Store vec_oprnd1 for every vector stmt to be created
3885 for SLP_NODE. We check during the analysis that all
3886 the shift arguments are the same. */
3887 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3888 vec_oprnds1.quick_push (vec_oprnd1);
3889
3890 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3891 slp_node, -1);
3892 }
3893 else
3894 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3895 &vec_oprnds1, slp_node, -1);
3896 }
3897 else
3898 {
3899 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
3900 vec_oprnds0.quick_push (vec_oprnd0);
3901 if (op_type == binary_op)
3902 {
3903 if (code == WIDEN_LSHIFT_EXPR)
3904 vec_oprnd1 = op1;
3905 else
3906 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
3907 vec_oprnds1.quick_push (vec_oprnd1);
3908 }
3909 }
3910 }
3911 else
3912 {
3913 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3914 vec_oprnds0.truncate (0);
3915 vec_oprnds0.quick_push (vec_oprnd0);
3916 if (op_type == binary_op)
3917 {
3918 if (code == WIDEN_LSHIFT_EXPR)
3919 vec_oprnd1 = op1;
3920 else
3921 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
3922 vec_oprnd1);
3923 vec_oprnds1.truncate (0);
3924 vec_oprnds1.quick_push (vec_oprnd1);
3925 }
3926 }
3927
3928 /* Arguments are ready. Create the new vector stmts. */
3929 for (i = multi_step_cvt; i >= 0; i--)
3930 {
3931 tree this_dest = vec_dsts[i];
3932 enum tree_code c1 = code1, c2 = code2;
3933 if (i == 0 && codecvt2 != ERROR_MARK)
3934 {
3935 c1 = codecvt1;
3936 c2 = codecvt2;
3937 }
3938 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
3939 &vec_oprnds1,
3940 stmt, this_dest, gsi,
3941 c1, c2, decl1, decl2,
3942 op_type);
3943 }
3944
3945 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3946 {
3947 if (cvt_type)
3948 {
3949 if (codecvt1 == CALL_EXPR)
3950 {
3951 new_stmt = gimple_build_call (decl1, 1, vop0);
3952 new_temp = make_ssa_name (vec_dest, new_stmt);
3953 gimple_call_set_lhs (new_stmt, new_temp);
3954 }
3955 else
3956 {
3957 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3958 new_temp = make_ssa_name (vec_dest);
3959 new_stmt = gimple_build_assign (new_temp, codecvt1,
3960 vop0);
3961 }
3962
3963 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3964 }
3965 else
3966 new_stmt = SSA_NAME_DEF_STMT (vop0);
3967
3968 if (slp_node)
3969 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3970 else
3971 {
3972 if (!prev_stmt_info)
3973 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3974 else
3975 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3976 prev_stmt_info = vinfo_for_stmt (new_stmt);
3977 }
3978 }
3979 }
3980
3981 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3982 break;
3983
3984 case NARROW:
3985 /* In case the vectorization factor (VF) is bigger than the number
3986 of elements that we can fit in a vectype (nunits), we have to
3987 generate more than one vector stmt - i.e., we need to "unroll"
3988 the vector stmt by a factor VF/nunits. */
3989 for (j = 0; j < ncopies; j++)
3990 {
3991 /* Handle uses. */
3992 if (slp_node)
3993 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3994 slp_node, -1);
3995 else
3996 {
3997 vec_oprnds0.truncate (0);
3998 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
3999 vect_pow2 (multi_step_cvt) - 1);
4000 }
4001
4002 /* Arguments are ready. Create the new vector stmts. */
4003 if (cvt_type)
4004 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4005 {
4006 if (codecvt1 == CALL_EXPR)
4007 {
4008 new_stmt = gimple_build_call (decl1, 1, vop0);
4009 new_temp = make_ssa_name (vec_dest, new_stmt);
4010 gimple_call_set_lhs (new_stmt, new_temp);
4011 }
4012 else
4013 {
4014 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4015 new_temp = make_ssa_name (vec_dest);
4016 new_stmt = gimple_build_assign (new_temp, codecvt1,
4017 vop0);
4018 }
4019
4020 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4021 vec_oprnds0[i] = new_temp;
4022 }
4023
4024 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4025 stmt, vec_dsts, gsi,
4026 slp_node, code1,
4027 &prev_stmt_info);
4028 }
4029
4030 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4031 break;
4032 }
4033
4034 vec_oprnds0.release ();
4035 vec_oprnds1.release ();
4036 vec_dsts.release ();
4037 interm_types.release ();
4038
4039 return true;
4040 }
4041
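/* As an illustration, a widening conversion such as

     short s[N]; int d[N];
     for (i = 0; i < N; i++)
       d[i] = (int) s[i];

   is handled above with MODIFIER == WIDEN: each copy reads one vector
   of shorts and produces two vectors of ints via the hi/lo operations
   returned by supportable_widening_operation.  A narrowing conversion
   works the other way round: vect_create_vectorized_demotion_stmts
   packs two input vectors into one output vector per statement it
   creates.  */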
4042
4043 /* Function vectorizable_assignment.
4044
4045 Check if STMT performs an assignment (copy) that can be vectorized.
4046 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4047 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4048 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4049
4050 static bool
4051 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4052 gimple **vec_stmt, slp_tree slp_node)
4053 {
4054 tree vec_dest;
4055 tree scalar_dest;
4056 tree op;
4057 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4058 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4059 tree new_temp;
4060 gimple *def_stmt;
4061 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4062 int ncopies;
4063 int i, j;
4064 vec<tree> vec_oprnds = vNULL;
4065 tree vop;
4066 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4067 vec_info *vinfo = stmt_info->vinfo;
4068 gimple *new_stmt = NULL;
4069 stmt_vec_info prev_stmt_info = NULL;
4070 enum tree_code code;
4071 tree vectype_in;
4072
4073 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4074 return false;
4075
4076 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4077 return false;
4078
4079 /* Is vectorizable assignment? */
4080 if (!is_gimple_assign (stmt))
4081 return false;
4082
4083 scalar_dest = gimple_assign_lhs (stmt);
4084 if (TREE_CODE (scalar_dest) != SSA_NAME)
4085 return false;
4086
4087 code = gimple_assign_rhs_code (stmt);
4088 if (gimple_assign_single_p (stmt)
4089 || code == PAREN_EXPR
4090 || CONVERT_EXPR_CODE_P (code))
4091 op = gimple_assign_rhs1 (stmt);
4092 else
4093 return false;
4094
4095 if (code == VIEW_CONVERT_EXPR)
4096 op = TREE_OPERAND (op, 0);
4097
4098 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4099 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4100
4101 /* Multiple types in SLP are handled by creating the appropriate number of
4102 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4103 case of SLP. */
4104 if (slp_node || PURE_SLP_STMT (stmt_info))
4105 ncopies = 1;
4106 else
4107 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4108
4109 gcc_assert (ncopies >= 1);
4110
4111 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4112 {
4113 if (dump_enabled_p ())
4114 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4115 "use not simple.\n");
4116 return false;
4117 }
4118
4119 /* We can handle NOP_EXPR conversions that do not change the number
4120 of elements or the vector size. */
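/* For example, an int -> unsigned int conversion, or a
   VIEW_CONVERT_EXPR between types of the same size, only
   reinterprets the bits and can be vectorized as a plain copy.  */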
4121 if ((CONVERT_EXPR_CODE_P (code)
4122 || code == VIEW_CONVERT_EXPR)
4123 && (!vectype_in
4124 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4125 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4126 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4127 return false;
4128
4129 /* We do not handle bit-precision changes. */
4130 if ((CONVERT_EXPR_CODE_P (code)
4131 || code == VIEW_CONVERT_EXPR)
4132 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4133 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4134 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4135 || ((TYPE_PRECISION (TREE_TYPE (op))
4136 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4137 /* But a conversion that does not change the bit-pattern is ok. */
4138 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4139 > TYPE_PRECISION (TREE_TYPE (op)))
4140 && TYPE_UNSIGNED (TREE_TYPE (op))))
4141 {
4142 if (dump_enabled_p ())
4143 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4144 "type conversion to/from bit-precision "
4145 "unsupported.\n");
4146 return false;
4147 }
4148
4149 if (!vec_stmt) /* transformation not required. */
4150 {
4151 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4152 if (dump_enabled_p ())
4153 dump_printf_loc (MSG_NOTE, vect_location,
4154 "=== vectorizable_assignment ===\n");
4155 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4156 return true;
4157 }
4158
4159 /** Transform. **/
4160 if (dump_enabled_p ())
4161 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4162
4163 /* Handle def. */
4164 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4165
4166 /* Handle use. */
4167 for (j = 0; j < ncopies; j++)
4168 {
4169 /* Handle uses. */
4170 if (j == 0)
4171 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4172 else
4173 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4174
4175 /* Arguments are ready. Create the new vector stmt. */
4176 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4177 {
4178 if (CONVERT_EXPR_CODE_P (code)
4179 || code == VIEW_CONVERT_EXPR)
4180 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4181 new_stmt = gimple_build_assign (vec_dest, vop);
4182 new_temp = make_ssa_name (vec_dest, new_stmt);
4183 gimple_assign_set_lhs (new_stmt, new_temp);
4184 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4185 if (slp_node)
4186 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4187 }
4188
4189 if (slp_node)
4190 continue;
4191
4192 if (j == 0)
4193 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4194 else
4195 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4196
4197 prev_stmt_info = vinfo_for_stmt (new_stmt);
4198 }
4199
4200 vec_oprnds.release ();
4201 return true;
4202 }
4203
4204
4205 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4206 either as shift by a scalar or by a vector. */
4207
4208 bool
4209 vect_supportable_shift (enum tree_code code, tree scalar_type)
4210 {
4211
4212 machine_mode vec_mode;
4213 optab optab;
4214 int icode;
4215 tree vectype;
4216
4217 vectype = get_vectype_for_scalar_type (scalar_type);
4218 if (!vectype)
4219 return false;
4220
4221 optab = optab_for_tree_code (code, vectype, optab_scalar);
4222 if (!optab
4223 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4224 {
4225 optab = optab_for_tree_code (code, vectype, optab_vector);
4226 if (!optab
4227 || (optab_handler (optab, TYPE_MODE (vectype))
4228 == CODE_FOR_nothing))
4229 return false;
4230 }
4231
4232 vec_mode = TYPE_MODE (vectype);
4233 icode = (int) optab_handler (optab, vec_mode);
4234 if (icode == CODE_FOR_nothing)
4235 return false;
4236
4237 return true;
4238 }
4239
4240
4241 /* Function vectorizable_shift.
4242
4243 Check if STMT performs a shift operation that can be vectorized.
4244 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4245 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4246 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4247
4248 static bool
4249 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4250 gimple **vec_stmt, slp_tree slp_node)
4251 {
4252 tree vec_dest;
4253 tree scalar_dest;
4254 tree op0, op1 = NULL;
4255 tree vec_oprnd1 = NULL_TREE;
4256 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4257 tree vectype;
4258 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4259 enum tree_code code;
4260 machine_mode vec_mode;
4261 tree new_temp;
4262 optab optab;
4263 int icode;
4264 machine_mode optab_op2_mode;
4265 gimple *def_stmt;
4266 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4267 gimple *new_stmt = NULL;
4268 stmt_vec_info prev_stmt_info;
4269 int nunits_in;
4270 int nunits_out;
4271 tree vectype_out;
4272 tree op1_vectype;
4273 int ncopies;
4274 int j, i;
4275 vec<tree> vec_oprnds0 = vNULL;
4276 vec<tree> vec_oprnds1 = vNULL;
4277 tree vop0, vop1;
4278 unsigned int k;
4279 bool scalar_shift_arg = true;
4280 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4281 vec_info *vinfo = stmt_info->vinfo;
4282 int vf;
4283
4284 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4285 return false;
4286
4287 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4288 return false;
4289
4290 /* Is STMT a vectorizable binary/unary operation? */
4291 if (!is_gimple_assign (stmt))
4292 return false;
4293
4294 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4295 return false;
4296
4297 code = gimple_assign_rhs_code (stmt);
4298
4299 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4300 || code == RROTATE_EXPR))
4301 return false;
4302
4303 scalar_dest = gimple_assign_lhs (stmt);
4304 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4305 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4306 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4307 {
4308 if (dump_enabled_p ())
4309 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4310 "bit-precision shifts not supported.\n");
4311 return false;
4312 }
4313
4314 op0 = gimple_assign_rhs1 (stmt);
4315 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4316 {
4317 if (dump_enabled_p ())
4318 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4319 "use not simple.\n");
4320 return false;
4321 }
4322 /* If op0 is an external or constant def, use a vector type with
4323 the same size as the output vector type. */
4324 if (!vectype)
4325 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4326 if (vec_stmt)
4327 gcc_assert (vectype);
4328 if (!vectype)
4329 {
4330 if (dump_enabled_p ())
4331 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4332 "no vectype for scalar type\n");
4333 return false;
4334 }
4335
4336 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4337 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4338 if (nunits_out != nunits_in)
4339 return false;
4340
4341 op1 = gimple_assign_rhs2 (stmt);
4342 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4343 {
4344 if (dump_enabled_p ())
4345 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4346 "use not simple.\n");
4347 return false;
4348 }
4349
4350 if (loop_vinfo)
4351 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4352 else
4353 vf = 1;
4354
4355 /* Multiple types in SLP are handled by creating the appropriate number of
4356 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4357 case of SLP. */
4358 if (slp_node || PURE_SLP_STMT (stmt_info))
4359 ncopies = 1;
4360 else
4361 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4362
4363 gcc_assert (ncopies >= 1);
4364
4365 /* Determine whether the shift amount is a vector, or scalar. If the
4366 shift/rotate amount is a vector, use the vector/vector shift optabs. */
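/* For example, in 'a[i] = b[i] << c[i]' the shift amount is a
   vect_internal_def and the vector/vector optab must be used, whereas
   'a[i] = b[i] << n' with a loop-invariant n can use the
   vector/scalar optab.  */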
4367
4368 if ((dt[1] == vect_internal_def
4369 || dt[1] == vect_induction_def)
4370 && !slp_node)
4371 scalar_shift_arg = false;
4372 else if (dt[1] == vect_constant_def
4373 || dt[1] == vect_external_def
4374 || dt[1] == vect_internal_def)
4375 {
4376 /* In SLP, we need to check whether the shift count is the same for
4377 all the stmts; in loops, if it is a constant or invariant, it is
4378 always a scalar shift. */
4379 if (slp_node)
4380 {
4381 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4382 gimple *slpstmt;
4383
4384 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4385 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4386 scalar_shift_arg = false;
4387 }
4388 }
4389 else
4390 {
4391 if (dump_enabled_p ())
4392 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4393 "operand mode requires invariant argument.\n");
4394 return false;
4395 }
4396
4397 /* Vector shifted by vector. */
4398 if (!scalar_shift_arg)
4399 {
4400 optab = optab_for_tree_code (code, vectype, optab_vector);
4401 if (dump_enabled_p ())
4402 dump_printf_loc (MSG_NOTE, vect_location,
4403 "vector/vector shift/rotate found.\n");
4404
4405 if (!op1_vectype)
4406 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4407 if (op1_vectype == NULL_TREE
4408 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4409 {
4410 if (dump_enabled_p ())
4411 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4412 "unusable type for last operand in"
4413 " vector/vector shift/rotate.\n");
4414 return false;
4415 }
4416 }
4417 /* See if the machine has a vector shifted by scalar insn and if not
4418 then see if it has a vector shifted by vector insn. */
4419 else
4420 {
4421 optab = optab_for_tree_code (code, vectype, optab_scalar);
4422 if (optab
4423 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4424 {
4425 if (dump_enabled_p ())
4426 dump_printf_loc (MSG_NOTE, vect_location,
4427 "vector/scalar shift/rotate found.\n");
4428 }
4429 else
4430 {
4431 optab = optab_for_tree_code (code, vectype, optab_vector);
4432 if (optab
4433 && (optab_handler (optab, TYPE_MODE (vectype))
4434 != CODE_FOR_nothing))
4435 {
4436 scalar_shift_arg = false;
4437
4438 if (dump_enabled_p ())
4439 dump_printf_loc (MSG_NOTE, vect_location,
4440 "vector/vector shift/rotate found.\n");
4441
4442 /* Unlike the other binary operators, shifts/rotates have
4443 an rhs of type int instead of the same type as the lhs,
4444 so make sure the scalar is the right type if we are
4445 dealing with vectors of long long/long/short/char. */
4446 if (dt[1] == vect_constant_def)
4447 op1 = fold_convert (TREE_TYPE (vectype), op1);
4448 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4449 TREE_TYPE (op1)))
4450 {
4451 if (slp_node
4452 && TYPE_MODE (TREE_TYPE (vectype))
4453 != TYPE_MODE (TREE_TYPE (op1)))
4454 {
4455 if (dump_enabled_p ())
4456 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4457 "unusable type for last operand in"
4458 " vector/vector shift/rotate.\n");
4459 return false;
4460 }
4461 if (vec_stmt && !slp_node)
4462 {
4463 op1 = fold_convert (TREE_TYPE (vectype), op1);
4464 op1 = vect_init_vector (stmt, op1,
4465 TREE_TYPE (vectype), NULL);
4466 }
4467 }
4468 }
4469 }
4470 }
4471
4472 /* Supportable by target? */
4473 if (!optab)
4474 {
4475 if (dump_enabled_p ())
4476 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4477 "no optab.\n");
4478 return false;
4479 }
4480 vec_mode = TYPE_MODE (vectype);
4481 icode = (int) optab_handler (optab, vec_mode);
4482 if (icode == CODE_FOR_nothing)
4483 {
4484 if (dump_enabled_p ())
4485 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4486 "op not supported by target.\n");
4487 /* Check only during analysis. */
4488 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4489 || (vf < vect_min_worthwhile_factor (code)
4490 && !vec_stmt))
4491 return false;
4492 if (dump_enabled_p ())
4493 dump_printf_loc (MSG_NOTE, vect_location,
4494 "proceeding using word mode.\n");
4495 }
4496
4497 /* Worthwhile without SIMD support? Check only during analysis. */
4498 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4499 && vf < vect_min_worthwhile_factor (code)
4500 && !vec_stmt)
4501 {
4502 if (dump_enabled_p ())
4503 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4504 "not worthwhile without SIMD support.\n");
4505 return false;
4506 }
4507
4508 if (!vec_stmt) /* transformation not required. */
4509 {
4510 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4511 if (dump_enabled_p ())
4512 dump_printf_loc (MSG_NOTE, vect_location,
4513 "=== vectorizable_shift ===\n");
4514 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4515 return true;
4516 }
4517
4518 /** Transform. **/
4519
4520 if (dump_enabled_p ())
4521 dump_printf_loc (MSG_NOTE, vect_location,
4522 "transform binary/unary operation.\n");
4523
4524 /* Handle def. */
4525 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4526
4527 prev_stmt_info = NULL;
4528 for (j = 0; j < ncopies; j++)
4529 {
4530 /* Handle uses. */
4531 if (j == 0)
4532 {
4533 if (scalar_shift_arg)
4534 {
4535 /* Vector shl and shr insn patterns can be defined with scalar
4536 operand 2 (shift operand). In this case, use constant or loop
4537 invariant op1 directly, without extending it to vector mode
4538 first. */
4539 optab_op2_mode = insn_data[icode].operand[2].mode;
4540 if (!VECTOR_MODE_P (optab_op2_mode))
4541 {
4542 if (dump_enabled_p ())
4543 dump_printf_loc (MSG_NOTE, vect_location,
4544 "operand 1 using scalar mode.\n");
4545 vec_oprnd1 = op1;
4546 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4547 vec_oprnds1.quick_push (vec_oprnd1);
4548 if (slp_node)
4549 {
4550 /* Store vec_oprnd1 for every vector stmt to be created
4551 for SLP_NODE. We check during the analysis that all
4552 the shift arguments are the same.
4553 TODO: Allow different constants for different vector
4554 stmts generated for an SLP instance. */
4555 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4556 vec_oprnds1.quick_push (vec_oprnd1);
4557 }
4558 }
4559 }
4560
4561 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4562 (a special case for certain kinds of vector shifts); otherwise,
4563 operand 1 should be of a vector type (the usual case). */
4564 if (vec_oprnd1)
4565 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4566 slp_node, -1);
4567 else
4568 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4569 slp_node, -1);
4570 }
4571 else
4572 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4573
4574 /* Arguments are ready. Create the new vector stmt. */
4575 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4576 {
4577 vop1 = vec_oprnds1[i];
4578 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4579 new_temp = make_ssa_name (vec_dest, new_stmt);
4580 gimple_assign_set_lhs (new_stmt, new_temp);
4581 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4582 if (slp_node)
4583 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4584 }
4585
4586 if (slp_node)
4587 continue;
4588
4589 if (j == 0)
4590 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4591 else
4592 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4593 prev_stmt_info = vinfo_for_stmt (new_stmt);
4594 }
4595
4596 vec_oprnds0.release ();
4597 vec_oprnds1.release ();
4598
4599 return true;
4600 }
4601
4602
4603 /* Function vectorizable_operation.
4604
4605 Check if STMT performs a binary, unary or ternary operation that can
4606 be vectorized.
4607 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4608 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4609 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4610
4611 static bool
4612 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
4613 gimple **vec_stmt, slp_tree slp_node)
4614 {
4615 tree vec_dest;
4616 tree scalar_dest;
4617 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4618 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4619 tree vectype;
4620 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4621 enum tree_code code;
4622 machine_mode vec_mode;
4623 tree new_temp;
4624 int op_type;
4625 optab optab;
4626 bool target_support_p;
4627 gimple *def_stmt;
4628 enum vect_def_type dt[3]
4629 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4630 gimple *new_stmt = NULL;
4631 stmt_vec_info prev_stmt_info;
4632 int nunits_in;
4633 int nunits_out;
4634 tree vectype_out;
4635 int ncopies;
4636 int j, i;
4637 vec<tree> vec_oprnds0 = vNULL;
4638 vec<tree> vec_oprnds1 = vNULL;
4639 vec<tree> vec_oprnds2 = vNULL;
4640 tree vop0, vop1, vop2;
4641 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4642 vec_info *vinfo = stmt_info->vinfo;
4643 int vf;
4644
4645 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4646 return false;
4647
4648 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4649 return false;
4650
4651 /* Is STMT a vectorizable binary/unary operation? */
4652 if (!is_gimple_assign (stmt))
4653 return false;
4654
4655 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4656 return false;
4657
4658 code = gimple_assign_rhs_code (stmt);
4659
4660 /* For pointer addition, we should use the normal plus for
4661 the vector addition. */
4662 if (code == POINTER_PLUS_EXPR)
4663 code = PLUS_EXPR;
4664
4665 /* Support only unary, binary or ternary operations. */
4666 op_type = TREE_CODE_LENGTH (code);
4667 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
4668 {
4669 if (dump_enabled_p ())
4670 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4671 "num. args = %d (not unary/binary/ternary op).\n",
4672 op_type);
4673 return false;
4674 }
4675
4676 scalar_dest = gimple_assign_lhs (stmt);
4677 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4678
4679 /* Most operations cannot handle bit-precision types without extra
4680 truncations. */
4681 if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4682 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4683 /* Exceptions are the bitwise binary operations. */
4684 && code != BIT_IOR_EXPR
4685 && code != BIT_XOR_EXPR
4686 && code != BIT_AND_EXPR)
4687 {
4688 if (dump_enabled_p ())
4689 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4690 "bit-precision arithmetic not supported.\n");
4691 return false;
4692 }
4693
4694 op0 = gimple_assign_rhs1 (stmt);
4695 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4696 {
4697 if (dump_enabled_p ())
4698 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4699 "use not simple.\n");
4700 return false;
4701 }
4702 /* If op0 is an external or constant def, use a vector type with
4703 the same size as the output vector type. */
4704 if (!vectype)
4705 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4706 if (vec_stmt)
4707 gcc_assert (vectype);
4708 if (!vectype)
4709 {
4710 if (dump_enabled_p ())
4711 {
4712 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4713 "no vectype for scalar type ");
4714 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4715 TREE_TYPE (op0));
4716 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4717 }
4718
4719 return false;
4720 }
4721
4722 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4723 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4724 if (nunits_out != nunits_in)
4725 return false;
4726
4727 if (op_type == binary_op || op_type == ternary_op)
4728 {
4729 op1 = gimple_assign_rhs2 (stmt);
4730 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
4731 {
4732 if (dump_enabled_p ())
4733 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4734 "use not simple.\n");
4735 return false;
4736 }
4737 }
4738 if (op_type == ternary_op)
4739 {
4740 op2 = gimple_assign_rhs3 (stmt);
4741 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
4742 {
4743 if (dump_enabled_p ())
4744 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4745 "use not simple.\n");
4746 return false;
4747 }
4748 }
4749
4750 if (loop_vinfo)
4751 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4752 else
4753 vf = 1;
4754
4755 /* Multiple types in SLP are handled by creating the appropriate number of
4756 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4757 case of SLP. */
4758 if (slp_node || PURE_SLP_STMT (stmt_info))
4759 ncopies = 1;
4760 else
4761 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4762
4763 gcc_assert (ncopies >= 1);
4764
4765 /* Shifts are handled in vectorizable_shift (). */
4766 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4767 || code == RROTATE_EXPR)
4768 return false;
4769
4770 /* Supportable by target? */
4771
4772 vec_mode = TYPE_MODE (vectype);
4773 if (code == MULT_HIGHPART_EXPR)
4774 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
4775 else
4776 {
4777 optab = optab_for_tree_code (code, vectype, optab_default);
4778 if (!optab)
4779 {
4780 if (dump_enabled_p ())
4781 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4782 "no optab.\n");
4783 return false;
4784 }
4785 target_support_p = (optab_handler (optab, vec_mode)
4786 != CODE_FOR_nothing);
4787 }
4788
4789 if (!target_support_p)
4790 {
4791 if (dump_enabled_p ())
4792 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4793 "op not supported by target.\n");
4794 /* Check only during analysis. */
4795 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4796 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
4797 return false;
4798 if (dump_enabled_p ())
4799 dump_printf_loc (MSG_NOTE, vect_location,
4800 "proceeding using word mode.\n");
4801 }
4802
4803 /* Worthwhile without SIMD support? Check only during analysis. */
4804 if (!VECTOR_MODE_P (vec_mode)
4805 && !vec_stmt
4806 && vf < vect_min_worthwhile_factor (code))
4807 {
4808 if (dump_enabled_p ())
4809 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4810 "not worthwhile without SIMD support.\n");
4811 return false;
4812 }
4813
4814 if (!vec_stmt) /* transformation not required. */
4815 {
4816 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
4817 if (dump_enabled_p ())
4818 dump_printf_loc (MSG_NOTE, vect_location,
4819 "=== vectorizable_operation ===\n");
4820 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4821 return true;
4822 }
4823
4824 /** Transform. **/
4825
4826 if (dump_enabled_p ())
4827 dump_printf_loc (MSG_NOTE, vect_location,
4828 "transform binary/unary operation.\n");
4829
4830 /* Handle def. */
4831 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4832
4833 /* In case the vectorization factor (VF) is bigger than the number
4834 of elements that we can fit in a vectype (nunits), we have to generate
4835 more than one vector stmt - i.e., we need to "unroll" the
4836 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4837 from one copy of the vector stmt to the next, in the field
4838 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4839 stages to find the correct vector defs to be used when vectorizing
4840 stmts that use the defs of the current stmt. The example below
4841 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4842 we need to create 4 vectorized stmts):
4843
4844 before vectorization:
4845 RELATED_STMT VEC_STMT
4846 S1: x = memref - -
4847 S2: z = x + 1 - -
4848
4849 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4850 there):
4851 RELATED_STMT VEC_STMT
4852 VS1_0: vx0 = memref0 VS1_1 -
4853 VS1_1: vx1 = memref1 VS1_2 -
4854 VS1_2: vx2 = memref2 VS1_3 -
4855 VS1_3: vx3 = memref3 - -
4856 S1: x = load - VS1_0
4857 S2: z = x + 1 - -
4858
4859 step2: vectorize stmt S2 (done here):
4860 To vectorize stmt S2 we first need to find the relevant vector
4861 def for the first operand 'x'. This is, as usual, obtained from
4862 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4863 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4864 relevant vector def 'vx0'. Having found 'vx0' we can generate
4865 the vector stmt VS2_0, and as usual, record it in the
4866 STMT_VINFO_VEC_STMT of stmt S2.
4867 When creating the second copy (VS2_1), we obtain the relevant vector
4868 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4869 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4870 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4871 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4872 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4873 chain of stmts and pointers:
4874 RELATED_STMT VEC_STMT
4875 VS1_0: vx0 = memref0 VS1_1 -
4876 VS1_1: vx1 = memref1 VS1_2 -
4877 VS1_2: vx2 = memref2 VS1_3 -
4878 VS1_3: vx3 = memref3 - -
4879 S1: x = load - VS1_0
4880 VS2_0: vz0 = vx0 + v1 VS2_1 -
4881 VS2_1: vz1 = vx1 + v1 VS2_2 -
4882 VS2_2: vz2 = vx2 + v1 VS2_3 -
4883 VS2_3: vz3 = vx3 + v1 - -
4884 S2: z = x + 1 - VS2_0 */
4885
4886 prev_stmt_info = NULL;
4887 for (j = 0; j < ncopies; j++)
4888 {
4889 /* Handle uses. */
4890 if (j == 0)
4891 {
4892 if (op_type == binary_op || op_type == ternary_op)
4893 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4894 slp_node, -1);
4895 else
4896 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4897 slp_node, -1);
4898 if (op_type == ternary_op)
4899 {
4900 vec_oprnds2.create (1);
4901 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
4902 stmt));
4903 }
4904 }
4905 else
4906 {
4907 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4908 if (op_type == ternary_op)
4909 {
4910 tree vec_oprnd = vec_oprnds2.pop ();
4911 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
4912 vec_oprnd));
4913 }
4914 }
4915
4916 /* Arguments are ready. Create the new vector stmt. */
4917 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4918 {
4919 vop1 = ((op_type == binary_op || op_type == ternary_op)
4920 ? vec_oprnds1[i] : NULL_TREE);
4921 vop2 = ((op_type == ternary_op)
4922 ? vec_oprnds2[i] : NULL_TREE);
4923 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
4924 new_temp = make_ssa_name (vec_dest, new_stmt);
4925 gimple_assign_set_lhs (new_stmt, new_temp);
4926 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4927 if (slp_node)
4928 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4929 }
4930
4931 if (slp_node)
4932 continue;
4933
4934 if (j == 0)
4935 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4936 else
4937 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4938 prev_stmt_info = vinfo_for_stmt (new_stmt);
4939 }
4940
4941 vec_oprnds0.release ();
4942 vec_oprnds1.release ();
4943 vec_oprnds2.release ();
4944
4945 return true;
4946 }
4947
4948 /* A helper function to ensure data reference DR's base alignment
4949 for STMT_INFO. */
4950
4951 static void
4952 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
4953 {
4954 if (!dr->aux)
4955 return;
4956
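/* For example, if the base object is a local array whose natural
   alignment is smaller than TYPE_ALIGN of the vector type, increase
   the declared alignment so that aligned vector accesses can be
   used.  */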
4957 if (DR_VECT_AUX (dr)->base_misaligned)
4958 {
4959 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4960 tree base_decl = DR_VECT_AUX (dr)->base_decl;
4961
4962 if (decl_in_symtab_p (base_decl))
4963 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
4964 else
4965 {
4966 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
4967 DECL_USER_ALIGN (base_decl) = 1;
4968 }
4969 DR_VECT_AUX (dr)->base_misaligned = false;
4970 }
4971 }
4972
4973
4974 /* Given a vector type VECTYPE, return the VECTOR_CST mask that implements
4975 reversal of the vector elements. If that is impossible to do,
4976 returns NULL. */
4977
4978 static tree
4979 perm_mask_for_reverse (tree vectype)
4980 {
4981 int i, nunits;
4982 unsigned char *sel;
4983
4984 nunits = TYPE_VECTOR_SUBPARTS (vectype);
4985 sel = XALLOCAVEC (unsigned char, nunits);
4986
4987 for (i = 0; i < nunits; ++i)
4988 sel[i] = nunits - 1 - i;
4989
4990 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
4991 return NULL_TREE;
4992 return vect_gen_perm_mask_checked (vectype, sel);
4993 }
4994
4995 /* Function vectorizable_store.
4996
4997 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
4998 can be vectorized.
4999 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5000 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5001 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5002
5003 static bool
5004 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5005 slp_tree slp_node)
5006 {
5007 tree scalar_dest;
5008 tree data_ref;
5009 tree op;
5010 tree vec_oprnd = NULL_TREE;
5011 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5012 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5013 tree elem_type;
5014 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5015 struct loop *loop = NULL;
5016 machine_mode vec_mode;
5017 tree dummy;
5018 enum dr_alignment_support alignment_support_scheme;
5019 gimple *def_stmt;
5020 enum vect_def_type dt;
5021 stmt_vec_info prev_stmt_info = NULL;
5022 tree dataref_ptr = NULL_TREE;
5023 tree dataref_offset = NULL_TREE;
5024 gimple *ptr_incr = NULL;
5025 int ncopies;
5026 int j;
5027 gimple *next_stmt, *first_stmt = NULL;
5028 bool grouped_store = false;
5029 bool store_lanes_p = false;
5030 unsigned int group_size, i;
5031 vec<tree> dr_chain = vNULL;
5032 vec<tree> oprnds = vNULL;
5033 vec<tree> result_chain = vNULL;
5034 bool inv_p;
5035 bool negative = false;
5036 tree offset = NULL_TREE;
5037 vec<tree> vec_oprnds = vNULL;
5038 bool slp = (slp_node != NULL);
5039 unsigned int vec_num;
5040 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5041 vec_info *vinfo = stmt_info->vinfo;
5042 tree aggr_type;
5043 tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
5044 tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
5045 int scatter_scale = 1;
5046 enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
5047 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5048 gimple *new_stmt;
5049
5050 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5051 return false;
5052
5053 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5054 return false;
5055
5056 /* Is vectorizable store? */
5057
5058 if (!is_gimple_assign (stmt))
5059 return false;
5060
5061 scalar_dest = gimple_assign_lhs (stmt);
5062 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5063 && is_pattern_stmt_p (stmt_info))
5064 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5065 if (TREE_CODE (scalar_dest) != ARRAY_REF
5066 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5067 && TREE_CODE (scalar_dest) != INDIRECT_REF
5068 && TREE_CODE (scalar_dest) != COMPONENT_REF
5069 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5070 && TREE_CODE (scalar_dest) != REALPART_EXPR
5071 && TREE_CODE (scalar_dest) != MEM_REF)
5072 return false;
5073
5074 gcc_assert (gimple_assign_single_p (stmt));
5075
5076 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5077 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5078
5079 if (loop_vinfo)
5080 loop = LOOP_VINFO_LOOP (loop_vinfo);
5081
5082 /* Multiple types in SLP are handled by creating the appropriate number of
5083 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5084 case of SLP. */
5085 if (slp || PURE_SLP_STMT (stmt_info))
5086 ncopies = 1;
5087 else
5088 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5089
5090 gcc_assert (ncopies >= 1);
5091
5092 /* FORNOW. This restriction should be relaxed. */
5093 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5094 {
5095 if (dump_enabled_p ())
5096 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5097 "multiple types in nested loop.\n");
5098 return false;
5099 }
5100
5101 op = gimple_assign_rhs1 (stmt);
5102 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
5103 {
5104 if (dump_enabled_p ())
5105 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5106 "use not simple.\n");
5107 return false;
5108 }
5109
5110 elem_type = TREE_TYPE (vectype);
5111 vec_mode = TYPE_MODE (vectype);
5112
5113 /* FORNOW. In some cases we can vectorize even if the data-type is not
5114 supported (e.g. array initialization with 0). */
5115 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5116 return false;
5117
5118 if (!STMT_VINFO_DATA_REF (stmt_info))
5119 return false;
5120
5121 if (!STMT_VINFO_STRIDED_P (stmt_info))
5122 {
5123 negative =
5124 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5125 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5126 size_zero_node) < 0;
5127 if (negative && ncopies > 1)
5128 {
5129 if (dump_enabled_p ())
5130 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5131 "multiple types with negative step.\n");
5132 return false;
5133 }
5134 if (negative)
5135 {
5136 gcc_assert (!grouped_store);
5137 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5138 if (alignment_support_scheme != dr_aligned
5139 && alignment_support_scheme != dr_unaligned_supported)
5140 {
5141 if (dump_enabled_p ())
5142 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5143 "negative step but alignment required.\n");
5144 return false;
5145 }
5146 if (dt != vect_constant_def
5147 && dt != vect_external_def
5148 && !perm_mask_for_reverse (vectype))
5149 {
5150 if (dump_enabled_p ())
5151 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5152 "negative step and reversing not supported.\n");
5153 return false;
5154 }
5155 }
5156 }
5157
5158 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5159 {
5160 grouped_store = true;
5161 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5162 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5163 if (!slp
5164 && !PURE_SLP_STMT (stmt_info)
5165 && !STMT_VINFO_STRIDED_P (stmt_info))
5166 {
5167 if (vect_store_lanes_supported (vectype, group_size))
5168 store_lanes_p = true;
5169 else if (!vect_grouped_store_supported (vectype, group_size))
5170 return false;
5171 }
5172
5173 if (STMT_VINFO_STRIDED_P (stmt_info)
5174 && (slp || PURE_SLP_STMT (stmt_info))
5175 && (group_size > nunits
5176 || nunits % group_size != 0))
5177 {
5178 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5179 "unhandled strided group store\n");
5180 return false;
5181 }
5182
5183 if (first_stmt == stmt)
5184 {
5185 /* STMT is the leader of the group. Check the operands of all the
5186 stmts of the group. */
5187 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5188 while (next_stmt)
5189 {
5190 gcc_assert (gimple_assign_single_p (next_stmt));
5191 op = gimple_assign_rhs1 (next_stmt);
5192 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
5193 {
5194 if (dump_enabled_p ())
5195 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5196 "use not simple.\n");
5197 return false;
5198 }
5199 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5200 }
5201 }
5202 }
5203
5204 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5205 {
5206 gimple *def_stmt;
5207 scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
5208 &scatter_off, &scatter_scale);
5209 gcc_assert (scatter_decl);
5210 if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
5211 &scatter_off_vectype))
5212 {
5213 if (dump_enabled_p ())
5214 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5215 "scatter index use not simple.");
5216 return false;
5217 }
5218 }
5219
5220 if (!vec_stmt) /* transformation not required. */
5221 {
5222 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5223 /* The SLP costs are calculated during SLP analysis. */
5224 if (!PURE_SLP_STMT (stmt_info))
5225 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5226 NULL, NULL, NULL);
5227 return true;
5228 }
5229
5230 /** Transform. **/
5231
5232 ensure_base_align (stmt_info, dr);
5233
5234 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5235 {
5236 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5237 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
5238 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5239 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5240 edge pe = loop_preheader_edge (loop);
5241 gimple_seq seq;
5242 basic_block new_bb;
5243 enum { NARROW, NONE, WIDEN } modifier;
5244 int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);
5245
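/* For example, scattering a V8SI value with V4DI offsets gives
   nunits == 2 * scatter_off_nunits, so MODIFIER is NARROW below and
   NCOPIES is doubled, with each scatter call handling half of the
   data elements; the WIDEN case is the mirror situation, where the
   offset vector has twice as many elements as the data vector.  */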
5246 if (nunits == (unsigned int) scatter_off_nunits)
5247 modifier = NONE;
5248 else if (nunits == (unsigned int) scatter_off_nunits / 2)
5249 {
5250 unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
5251 modifier = WIDEN;
5252
5253 for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
5254 sel[i] = i | nunits;
5255
5256 perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
5257 gcc_assert (perm_mask != NULL_TREE);
5258 }
5259 else if (nunits == (unsigned int) scatter_off_nunits * 2)
5260 {
5261 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
5262 modifier = NARROW;
5263
5264 for (i = 0; i < (unsigned int) nunits; ++i)
5265 sel[i] = i | scatter_off_nunits;
5266
5267 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
5268 gcc_assert (perm_mask != NULL_TREE);
5269 ncopies *= 2;
5270 }
5271 else
5272 gcc_unreachable ();
5273
5274 rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
5275 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5276 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5277 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5278 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5279 scaletype = TREE_VALUE (arglist);
5280
5281 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5282 && TREE_CODE (rettype) == VOID_TYPE);
5283
5284 ptr = fold_convert (ptrtype, scatter_base);
5285 if (!is_gimple_min_invariant (ptr))
5286 {
5287 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5288 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5289 gcc_assert (!new_bb);
5290 }
5291
5292 /* Currently we support only unconditional scatter stores,
5293 so mask should be all ones. */
5294 mask = build_int_cst (masktype, -1);
5295 mask = vect_init_vector (stmt, mask, masktype, NULL);
5296
5297 scale = build_int_cst (scaletype, scatter_scale);
5298
5299 prev_stmt_info = NULL;
5300 for (j = 0; j < ncopies; ++j)
5301 {
5302 if (j == 0)
5303 {
5304 src = vec_oprnd1
5305 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5306 op = vec_oprnd0
5307 = vect_get_vec_def_for_operand (scatter_off, stmt);
5308 }
5309 else if (modifier != NONE && (j & 1))
5310 {
5311 if (modifier == WIDEN)
5312 {
5313 src = vec_oprnd1
5314 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5315 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5316 stmt, gsi);
5317 }
5318 else if (modifier == NARROW)
5319 {
5320 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5321 stmt, gsi);
5322 op = vec_oprnd0
5323 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5324 }
5325 else
5326 gcc_unreachable ();
5327 }
5328 else
5329 {
5330 src = vec_oprnd1
5331 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5332 op = vec_oprnd0
5333 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5334 }
5335
5336 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5337 {
5338 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
5339 == TYPE_VECTOR_SUBPARTS (srctype));
5340 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5341 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5342 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5343 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5344 src = var;
5345 }
5346
5347 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5348 {
5349 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5350 == TYPE_VECTOR_SUBPARTS (idxtype));
5351 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5352 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5353 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5354 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5355 op = var;
5356 }
5357
5358 new_stmt
5359 = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);
5360
5361 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5362
5363 if (prev_stmt_info == NULL)
5364 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5365 else
5366 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5367 prev_stmt_info = vinfo_for_stmt (new_stmt);
5368 }
5369 return true;
5370 }
5371
5372 if (grouped_store)
5373 {
5374 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5375 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5376
5377 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5378
5379 /* FORNOW */
5380 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5381
5382 /* We vectorize all the stmts of the interleaving group when we
5383 reach the last stmt in the group. */
5384 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5385 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5386 && !slp)
5387 {
5388 *vec_stmt = NULL;
5389 return true;
5390 }
5391
5392 if (slp)
5393 {
5394 grouped_store = false;
5395 /* VEC_NUM is the number of vect stmts to be created for this
5396 group. */
5397 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5398 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5399 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5400 op = gimple_assign_rhs1 (first_stmt);
5401 }
5402 else
5403 /* VEC_NUM is the number of vect stmts to be created for this
5404 group. */
5405 vec_num = group_size;
5406 }
5407 else
5408 {
5409 first_stmt = stmt;
5410 first_dr = dr;
5411 group_size = vec_num = 1;
5412 }
5413
5414 if (dump_enabled_p ())
5415 dump_printf_loc (MSG_NOTE, vect_location,
5416 "transform store. ncopies = %d\n", ncopies);
5417
5418 if (STMT_VINFO_STRIDED_P (stmt_info))
5419 {
5420 gimple_stmt_iterator incr_gsi;
5421 bool insert_after;
5422 gimple *incr;
5423 tree offvar;
5424 tree ivstep;
5425 tree running_off;
5426 gimple_seq stmts = NULL;
5427 tree stride_base, stride_step, alias_off;
5428 tree vec_oprnd;
5429 unsigned int g;
5430
5431 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5432
5433 stride_base
5434 = fold_build_pointer_plus
5435 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
5436 size_binop (PLUS_EXPR,
5437 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
5438 convert_to_ptrofftype (DR_INIT (first_dr))));
5439 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
5440
5441 /* For a store with a loop-invariant stride that is not a power of 2
5442 (i.e. not a grouped access) like so:
5443
5444 for (i = 0; i < n; i += stride)
5445 array[i] = ...;
5446
5447 we generate a new induction variable and new stores from
5448 the components of the (vectorized) rhs:
5449
5450 for (j = 0; ; j += VF*stride)
5451 vectemp = ...;
5452 tmp1 = vectemp[0];
5453 array[j] = tmp1;
5454 tmp2 = vectemp[1];
5455 array[j + stride] = tmp2;
5456 ...
5457 */
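      /* A concrete (illustrative) instantiation of the scheme above,
	 assuming V4SI vectors, no SLP and ncopies == 1: nstores == 4, so the
	 pointer IV created below steps by

	   ivstep = stride_step * ncopies * nstores   (= 4 * DR_STEP bytes)

	 per loop iteration, and each copy extracts the four lanes of the
	 vectorized rhs with BIT_FIELD_REFs and stores them stride_step
	 bytes apart.  */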
5458
5459 unsigned nstores = nunits;
5460 tree ltype = elem_type;
5461 if (slp)
5462 {
5463 nstores = nunits / group_size;
5464 if (group_size < nunits)
5465 ltype = build_vector_type (elem_type, group_size);
5466 else
5467 ltype = vectype;
5468 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
5469 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5470 group_size = 1;
5471 }
5472
5473 ivstep = stride_step;
5474 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
5475 build_int_cst (TREE_TYPE (ivstep),
5476 ncopies * nstores));
5477
5478 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5479
5480 create_iv (stride_base, ivstep, NULL,
5481 loop, &incr_gsi, insert_after,
5482 &offvar, NULL);
5483 incr = gsi_stmt (incr_gsi);
5484 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
5485
5486 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
5487 if (stmts)
5488 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
5489
5490 prev_stmt_info = NULL;
5491 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
5492 next_stmt = first_stmt;
5493 for (g = 0; g < group_size; g++)
5494 {
5495 running_off = offvar;
5496 if (g)
5497 {
5498 tree size = TYPE_SIZE_UNIT (ltype);
5499 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
5500 size);
5501 tree newoff = copy_ssa_name (running_off, NULL);
5502 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5503 running_off, pos);
5504 vect_finish_stmt_generation (stmt, incr, gsi);
5505 running_off = newoff;
5506 }
5507 for (j = 0; j < ncopies; j++)
5508 {
5509 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
5510 and first_stmt == stmt. */
5511 if (j == 0)
5512 {
5513 if (slp)
5514 {
5515 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
5516 slp_node, -1);
5517 vec_oprnd = vec_oprnds[0];
5518 }
5519 else
5520 {
5521 gcc_assert (gimple_assign_single_p (next_stmt));
5522 op = gimple_assign_rhs1 (next_stmt);
5523 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5524 }
5525 }
5526 else
5527 {
5528 if (slp)
5529 vec_oprnd = vec_oprnds[j];
5530 else
5531 {
5532 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
5533 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
5534 }
5535 }
5536
5537 for (i = 0; i < nstores; i++)
5538 {
5539 tree newref, newoff;
5540 gimple *incr, *assign;
5541 tree size = TYPE_SIZE (ltype);
5542 /* Extract the i'th component. */
5543 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
5544 bitsize_int (i), size);
5545 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
5546 size, pos);
5547
5548 elem = force_gimple_operand_gsi (gsi, elem, true,
5549 NULL_TREE, true,
5550 GSI_SAME_STMT);
5551
5552 newref = build2 (MEM_REF, ltype,
5553 running_off, alias_off);
5554
5555 /* And store it to *running_off. */
5556 assign = gimple_build_assign (newref, elem);
5557 vect_finish_stmt_generation (stmt, assign, gsi);
5558
5559 newoff = copy_ssa_name (running_off, NULL);
5560 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5561 running_off, stride_step);
5562 vect_finish_stmt_generation (stmt, incr, gsi);
5563
5564 running_off = newoff;
5565 if (g == group_size - 1
5566 && !slp)
5567 {
5568 if (j == 0 && i == 0)
5569 STMT_VINFO_VEC_STMT (stmt_info)
5570 = *vec_stmt = assign;
5571 else
5572 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
5573 prev_stmt_info = vinfo_for_stmt (assign);
5574 }
5575 }
5576 }
5577 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5578 }
5579 return true;
5580 }
5581
5582 dr_chain.create (group_size);
5583 oprnds.create (group_size);
5584
5585 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
5586 gcc_assert (alignment_support_scheme);
5587 /* Targets with store-lane instructions must not require explicit
5588 realignment. */
5589 gcc_assert (!store_lanes_p
5590 || alignment_support_scheme == dr_aligned
5591 || alignment_support_scheme == dr_unaligned_supported);
5592
5593 if (negative)
5594 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
5595
5596 if (store_lanes_p)
5597 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5598 else
5599 aggr_type = vectype;
5600
5601 /* In case the vectorization factor (VF) is bigger than the number
5602 of elements that we can fit in a vectype (nunits), we have to generate
5603 more than one vector stmt - i.e., we need to "unroll" the
5604 vector stmt by a factor of VF/nunits. For more details see the
5605 documentation in vect_get_vec_def_for_stmt_copy. */
5606
5607 /* In case of interleaving (non-unit grouped access):
5608
5609 S1: &base + 2 = x2
5610 S2: &base = x0
5611 S3: &base + 1 = x1
5612 S4: &base + 3 = x3
5613
5614 We create vectorized stores starting from the base address (the access of
5615 the first stmt in the chain, S2 in the above example) when the last store
5616 stmt of the chain (S4) is reached:
5617
5618 VS1: &base = vx2
5619 VS2: &base + vec_size*1 = vx0
5620 VS3: &base + vec_size*2 = vx1
5621 VS4: &base + vec_size*3 = vx3
5622
5623 Then permutation statements are generated:
5624
5625 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5626 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5627 ...
5628
5629 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5630 (the order of the data-refs in the output of vect_permute_store_chain
5631 corresponds to the order of scalar stmts in the interleaving chain - see
5632 the documentation of vect_permute_store_chain()).
5633
5634 In case of both multiple types and interleaving, above vector stores and
5635 permutation stmts are created for every copy. The result vector stmts are
5636 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5637 STMT_VINFO_RELATED_STMT for the next copies.
5638 */
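   /* For instance (illustrative only), interleaving a group of two V4SI
      vectors vx0 = {a0,a1,a2,a3} and vx1 = {b0,b1,b2,b3} uses the masks

	VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 4, 1, 5 } >
	VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 2, 6, 3, 7 } >

      giving {a0,b0,a1,b1} and {a2,b2,a3,b3}, i.e. the elements laid out in
      memory order for the two-store group.  */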
5639
5640 prev_stmt_info = NULL;
5641 for (j = 0; j < ncopies; j++)
5642 {
5643
5644 if (j == 0)
5645 {
5646 if (slp)
5647 {
5648 /* Get vectorized arguments for SLP_NODE. */
5649 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5650 NULL, slp_node, -1);
5651
5652 vec_oprnd = vec_oprnds[0];
5653 }
5654 else
5655 {
5656 /* For interleaved stores we collect vectorized defs for all the
5657 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5658 used as an input to vect_permute_store_chain(), and OPRNDS as
5659 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5660
5661 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5662 OPRNDS are of size 1. */
5663 next_stmt = first_stmt;
5664 for (i = 0; i < group_size; i++)
5665 {
5666 /* Since gaps are not supported for interleaved stores,
5667 GROUP_SIZE is the exact number of stmts in the chain.
5668 Therefore, NEXT_STMT can't be NULL_TREE. In case that
5669 there is no interleaving, GROUP_SIZE is 1, and only one
5670 iteration of the loop will be executed. */
5671 gcc_assert (next_stmt
5672 && gimple_assign_single_p (next_stmt));
5673 op = gimple_assign_rhs1 (next_stmt);
5674
5675 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5676 dr_chain.quick_push (vec_oprnd);
5677 oprnds.quick_push (vec_oprnd);
5678 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5679 }
5680 }
5681
5682 /* We should have caught mismatched types earlier. */
5683 gcc_assert (useless_type_conversion_p (vectype,
5684 TREE_TYPE (vec_oprnd)));
5685 bool simd_lane_access_p
5686 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5687 if (simd_lane_access_p
5688 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5689 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5690 && integer_zerop (DR_OFFSET (first_dr))
5691 && integer_zerop (DR_INIT (first_dr))
5692 && alias_sets_conflict_p (get_alias_set (aggr_type),
5693 get_alias_set (DR_REF (first_dr))))
5694 {
5695 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5696 dataref_offset = build_int_cst (reference_alias_ptr_type
5697 (DR_REF (first_dr)), 0);
5698 inv_p = false;
5699 }
5700 else
5701 dataref_ptr
5702 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5703 simd_lane_access_p ? loop : NULL,
5704 offset, &dummy, gsi, &ptr_incr,
5705 simd_lane_access_p, &inv_p);
5706 gcc_assert (bb_vinfo || !inv_p);
5707 }
5708 else
5709 {
5710 /* For interleaved stores we created vectorized defs for all the
5711 defs stored in OPRNDS in the previous iteration (previous copy).
5712 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5713 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5714 next copy.
5715 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5716 OPRNDS are of size 1. */
5717 for (i = 0; i < group_size; i++)
5718 {
5719 op = oprnds[i];
5720 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
5721 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5722 dr_chain[i] = vec_oprnd;
5723 oprnds[i] = vec_oprnd;
5724 }
5725 if (dataref_offset)
5726 dataref_offset
5727 = int_const_binop (PLUS_EXPR, dataref_offset,
5728 TYPE_SIZE_UNIT (aggr_type));
5729 else
5730 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5731 TYPE_SIZE_UNIT (aggr_type));
5732 }
5733
5734 if (store_lanes_p)
5735 {
5736 tree vec_array;
5737
5738 /* Combine all the vectors into an array. */
5739 vec_array = create_vector_array (vectype, vec_num);
5740 for (i = 0; i < vec_num; i++)
5741 {
5742 vec_oprnd = dr_chain[i];
5743 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
5744 }
5745
5746 /* Emit:
5747 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5748 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5749 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5750 gimple_call_set_lhs (new_stmt, data_ref);
5751 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5752 }
5753 else
5754 {
5755 new_stmt = NULL;
5756 if (grouped_store)
5757 {
5758 if (j == 0)
5759 result_chain.create (group_size);
5760 /* Permute. */
5761 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5762 &result_chain);
5763 }
5764
5765 next_stmt = first_stmt;
5766 for (i = 0; i < vec_num; i++)
5767 {
5768 unsigned align, misalign;
5769
5770 if (i > 0)
5771 /* Bump the vector pointer. */
5772 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5773 stmt, NULL_TREE);
5774
5775 if (slp)
5776 vec_oprnd = vec_oprnds[i];
5777 else if (grouped_store)
5778 /* For grouped stores vectorized defs are interleaved in
5779 vect_permute_store_chain(). */
5780 vec_oprnd = result_chain[i];
5781
5782 data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
5783 dataref_ptr,
5784 dataref_offset
5785 ? dataref_offset
5786 : build_int_cst (reference_alias_ptr_type
5787 (DR_REF (first_dr)), 0));
5788 align = TYPE_ALIGN_UNIT (vectype);
5789 if (aligned_access_p (first_dr))
5790 misalign = 0;
5791 else if (DR_MISALIGNMENT (first_dr) == -1)
5792 {
5793 if (DR_VECT_AUX (first_dr)->base_element_aligned)
5794 align = TYPE_ALIGN_UNIT (elem_type);
5795 else
5796 align = get_object_alignment (DR_REF (first_dr))
5797 / BITS_PER_UNIT;
5798 misalign = 0;
5799 TREE_TYPE (data_ref)
5800 = build_aligned_type (TREE_TYPE (data_ref),
5801 align * BITS_PER_UNIT);
5802 }
5803 else
5804 {
5805 TREE_TYPE (data_ref)
5806 = build_aligned_type (TREE_TYPE (data_ref),
5807 TYPE_ALIGN (elem_type));
5808 misalign = DR_MISALIGNMENT (first_dr);
5809 }
5810 if (dataref_offset == NULL_TREE
5811 && TREE_CODE (dataref_ptr) == SSA_NAME)
5812 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
5813 misalign);
5814
5815 if (negative
5816 && dt != vect_constant_def
5817 && dt != vect_external_def)
5818 {
5819 tree perm_mask = perm_mask_for_reverse (vectype);
5820 tree perm_dest
5821 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
5822 vectype);
5823 tree new_temp = make_ssa_name (perm_dest);
5824
5825 /* Generate the permute statement. */
5826 gimple *perm_stmt
5827 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
5828 vec_oprnd, perm_mask);
5829 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5830
5831 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
5832 vec_oprnd = new_temp;
5833 }
5834
5835 /* Arguments are ready. Create the new vector stmt. */
5836 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5837 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5838
5839 if (slp)
5840 continue;
5841
5842 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5843 if (!next_stmt)
5844 break;
5845 }
5846 }
5847 if (!slp)
5848 {
5849 if (j == 0)
5850 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5851 else
5852 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5853 prev_stmt_info = vinfo_for_stmt (new_stmt);
5854 }
5855 }
5856
5857 dr_chain.release ();
5858 oprnds.release ();
5859 result_chain.release ();
5860 vec_oprnds.release ();
5861
5862 return true;
5863 }
5864
5865 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
5866 VECTOR_CST mask. No checks are made that the target platform supports the
5867 mask, so callers may wish to test can_vec_perm_p separately, or use
5868 vect_gen_perm_mask_checked. */
5869
5870 tree
5871 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
5872 {
5873 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
5874 int i, nunits;
5875
5876 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5877
5878 mask_elt_type = lang_hooks.types.type_for_mode
5879 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
5880 mask_type = get_vectype_for_scalar_type (mask_elt_type);
5881
5882 mask_elts = XALLOCAVEC (tree, nunits);
5883 for (i = nunits - 1; i >= 0; i--)
5884 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5885 mask_vec = build_vector (mask_type, mask_elts);
5886
5887 return mask_vec;
5888 }
5889
5890 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
5891 i.e. that the target supports the pattern _for arbitrary input vectors_. */
5892
5893 tree
5894 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
5895 {
5896 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
5897 return vect_gen_perm_mask_any (vectype, sel);
5898 }
5899
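/* For example (an assumed, illustrative use), a mask reversing the elements
   of a four-element vector can be obtained with

     unsigned char sel[4] = { 3, 2, 1, 0 };
     tree mask = vect_gen_perm_mask_checked (vectype, sel);

   which is essentially what perm_mask_for_reverse does (after first checking
   can_vec_perm_p) before the mask is used in a VEC_PERM_EXPR.  */
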
5900 /* Given vector variables X and Y that were generated for the scalar
5901 STMT, generate instructions to permute the vector elements of X and Y
5902 using permutation mask MASK_VEC, insert them at *GSI and return the
5903 permuted vector variable. */
5904
5905 static tree
5906 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
5907 gimple_stmt_iterator *gsi)
5908 {
5909 tree vectype = TREE_TYPE (x);
5910 tree perm_dest, data_ref;
5911 gimple *perm_stmt;
5912
5913 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
5914 data_ref = make_ssa_name (perm_dest);
5915
5916 /* Generate the permute statement. */
5917 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
5918 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5919
5920 return data_ref;
5921 }
5922
5923 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5924 inserting them on the loop's preheader edge. Returns true if we
5925 were successful in doing so (and thus STMT can then be moved),
5926 otherwise returns false. */
5927
5928 static bool
5929 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
5930 {
5931 ssa_op_iter i;
5932 tree op;
5933 bool any = false;
5934
5935 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5936 {
5937 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
5938 if (!gimple_nop_p (def_stmt)
5939 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5940 {
5941 /* Make sure we don't need to recurse. While we could do
5942 so in simple cases, when there are more complex use webs
5943 we don't have an easy way to preserve stmt order to fulfil
5944 dependencies within them. */
5945 tree op2;
5946 ssa_op_iter i2;
5947 if (gimple_code (def_stmt) == GIMPLE_PHI)
5948 return false;
5949 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
5950 {
5951 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
5952 if (!gimple_nop_p (def_stmt2)
5953 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
5954 return false;
5955 }
5956 any = true;
5957 }
5958 }
5959
5960 if (!any)
5961 return true;
5962
5963 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5964 {
5965 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
5966 if (!gimple_nop_p (def_stmt)
5967 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5968 {
5969 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5970 gsi_remove (&gsi, false);
5971 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
5972 }
5973 }
5974
5975 return true;
5976 }
5977
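/* An illustrative (assumed) example: for a load that is invariant in

     for (i = 0; i < n; ++i)
       a[i] = *p_3;

   where p_3 is defined inside the loop purely from loop-invariant SSA names,
   hoist_defs_of_uses moves the definition of p_3 to the preheader;
   vectorizable_load can then emit the scalar load *p_3 on the preheader edge
   and splat its value into a vector.  */
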
5978 /* vectorizable_load.
5979
5980 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
5981 can be vectorized.
5982 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5983 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5984 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5985
5986 static bool
5987 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5988 slp_tree slp_node, slp_instance slp_node_instance)
5989 {
5990 tree scalar_dest;
5991 tree vec_dest = NULL;
5992 tree data_ref = NULL;
5993 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5994 stmt_vec_info prev_stmt_info;
5995 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5996 struct loop *loop = NULL;
5997 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
5998 bool nested_in_vect_loop = false;
5999 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6000 tree elem_type;
6001 tree new_temp;
6002 machine_mode mode;
6003 gimple *new_stmt = NULL;
6004 tree dummy;
6005 enum dr_alignment_support alignment_support_scheme;
6006 tree dataref_ptr = NULL_TREE;
6007 tree dataref_offset = NULL_TREE;
6008 gimple *ptr_incr = NULL;
6009 int ncopies;
6010 int i, j, group_size = -1, group_gap_adj;
6011 tree msq = NULL_TREE, lsq;
6012 tree offset = NULL_TREE;
6013 tree byte_offset = NULL_TREE;
6014 tree realignment_token = NULL_TREE;
6015 gphi *phi = NULL;
6016 vec<tree> dr_chain = vNULL;
6017 bool grouped_load = false;
6018 bool load_lanes_p = false;
6019 gimple *first_stmt;
6020 bool inv_p;
6021 bool negative = false;
6022 bool compute_in_loop = false;
6023 struct loop *at_loop;
6024 int vec_num;
6025 bool slp = (slp_node != NULL);
6026 bool slp_perm = false;
6027 enum tree_code code;
6028 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6029 int vf;
6030 tree aggr_type;
6031 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
6032 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
6033 int gather_scale = 1;
6034 enum vect_def_type gather_dt = vect_unknown_def_type;
6035 vec_info *vinfo = stmt_info->vinfo;
6036
6037 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6038 return false;
6039
6040 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
6041 return false;
6042
6043 /* Is vectorizable load? */
6044 if (!is_gimple_assign (stmt))
6045 return false;
6046
6047 scalar_dest = gimple_assign_lhs (stmt);
6048 if (TREE_CODE (scalar_dest) != SSA_NAME)
6049 return false;
6050
6051 code = gimple_assign_rhs_code (stmt);
6052 if (code != ARRAY_REF
6053 && code != BIT_FIELD_REF
6054 && code != INDIRECT_REF
6055 && code != COMPONENT_REF
6056 && code != IMAGPART_EXPR
6057 && code != REALPART_EXPR
6058 && code != MEM_REF
6059 && TREE_CODE_CLASS (code) != tcc_declaration)
6060 return false;
6061
6062 if (!STMT_VINFO_DATA_REF (stmt_info))
6063 return false;
6064
6065 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6066 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6067
6068 if (loop_vinfo)
6069 {
6070 loop = LOOP_VINFO_LOOP (loop_vinfo);
6071 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6072 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6073 }
6074 else
6075 vf = 1;
6076
6077 /* Multiple types in SLP are handled by creating the appropriate number of
6078 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6079 case of SLP. */
6080 if (slp || PURE_SLP_STMT (stmt_info))
6081 ncopies = 1;
6082 else
6083 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6084
6085 gcc_assert (ncopies >= 1);
6086
6087 /* FORNOW. This restriction should be relaxed. */
6088 if (nested_in_vect_loop && ncopies > 1)
6089 {
6090 if (dump_enabled_p ())
6091 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6092 "multiple types in nested loop.\n");
6093 return false;
6094 }
6095
6096 /* Invalidate assumptions made by dependence analysis when vectorization
6097 on the unrolled body effectively re-orders stmts. */
6098 if (ncopies > 1
6099 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6100 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6101 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6102 {
6103 if (dump_enabled_p ())
6104 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6105 "cannot perform implicit CSE when unrolling "
6106 "with negative dependence distance\n");
6107 return false;
6108 }
6109
6110 elem_type = TREE_TYPE (vectype);
6111 mode = TYPE_MODE (vectype);
6112
6113 /* FORNOW. In some cases we can vectorize even if the data-type is not
6114 supported (e.g. data copies). */
6115 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6116 {
6117 if (dump_enabled_p ())
6118 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6119 "Aligned load, but unsupported type.\n");
6120 return false;
6121 }
6122
6123 /* Check if the load is a part of an interleaving chain. */
6124 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6125 {
6126 grouped_load = true;
6127 /* FORNOW */
6128 gcc_assert (!nested_in_vect_loop && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6129
6130 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6131
6132 /* If this is single-element interleaving with an element distance
6133 that leaves unused vector loads around, punt - we would at least create
6134 very sub-optimal code in that case (and blow up memory,
6135 see PR65518). */
6136 if (first_stmt == stmt
6137 && !GROUP_NEXT_ELEMENT (stmt_info)
6138 && GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
6139 {
6140 if (dump_enabled_p ())
6141 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6142 "single-element interleaving not supported "
6143 "for not adjacent vector loads\n");
6144 return false;
6145 }
6146
6147 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6148 slp_perm = true;
6149
6150 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6151 if (!slp
6152 && !PURE_SLP_STMT (stmt_info)
6153 && !STMT_VINFO_STRIDED_P (stmt_info))
6154 {
6155 if (vect_load_lanes_supported (vectype, group_size))
6156 load_lanes_p = true;
6157 else if (!vect_grouped_load_supported (vectype, group_size))
6158 return false;
6159 }
6160
6161 /* Invalidate assumptions made by dependence analysis when vectorization
6162 on the unrolled body effectively re-orders stmts. */
6163 if (!PURE_SLP_STMT (stmt_info)
6164 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6165 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6166 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6167 {
6168 if (dump_enabled_p ())
6169 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6170 "cannot perform implicit CSE when performing "
6171 "group loads with negative dependence distance\n");
6172 return false;
6173 }
6174
6175 /* Similarly, when the stmt is a load that is both part of an SLP
6176 instance and a loop-vectorized stmt via the same-dr mechanism,
6177 we have to give up. */
6178 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6179 && (STMT_SLP_TYPE (stmt_info)
6180 != STMT_SLP_TYPE (vinfo_for_stmt
6181 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6182 {
6183 if (dump_enabled_p ())
6184 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6185 "conflicting SLP types for CSEd load\n");
6186 return false;
6187 }
6188 }
6189
6190
6191 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6192 {
6193 gimple *def_stmt;
6194 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
6195 &gather_off, &gather_scale);
6196 gcc_assert (gather_decl);
6197 if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
6198 &gather_off_vectype))
6199 {
6200 if (dump_enabled_p ())
6201 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6202 "gather index use not simple.\n");
6203 return false;
6204 }
6205 }
6206 else if (STMT_VINFO_STRIDED_P (stmt_info))
6207 {
6208 if ((grouped_load
6209 && (slp || PURE_SLP_STMT (stmt_info)))
6210 && (group_size > nunits
6211 || nunits % group_size != 0))
6212 {
6213 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6214 "unhandled strided group load\n");
6215 return false;
6216 }
6217 }
6218 else
6219 {
6220 negative = tree_int_cst_compare (nested_in_vect_loop
6221 ? STMT_VINFO_DR_STEP (stmt_info)
6222 : DR_STEP (dr),
6223 size_zero_node) < 0;
6224 if (negative && ncopies > 1)
6225 {
6226 if (dump_enabled_p ())
6227 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6228 "multiple types with negative step.\n");
6229 return false;
6230 }
6231
6232 if (negative)
6233 {
6234 if (grouped_load)
6235 {
6236 if (dump_enabled_p ())
6237 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6238 "negative step for group load not supported"
6239 "\n");
6240 return false;
6241 }
6242 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
6243 if (alignment_support_scheme != dr_aligned
6244 && alignment_support_scheme != dr_unaligned_supported)
6245 {
6246 if (dump_enabled_p ())
6247 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6248 "negative step but alignment required.\n");
6249 return false;
6250 }
6251 if (!perm_mask_for_reverse (vectype))
6252 {
6253 if (dump_enabled_p ())
6254 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6255 "negative step and reversing not supported."
6256 "\n");
6257 return false;
6258 }
6259 }
6260 }
6261
6262 if (!vec_stmt) /* transformation not required. */
6263 {
6264 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6265 /* The SLP costs are calculated during SLP analysis. */
6266 if (!PURE_SLP_STMT (stmt_info))
6267 vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
6268 NULL, NULL, NULL);
6269 return true;
6270 }
6271
6272 if (dump_enabled_p ())
6273 dump_printf_loc (MSG_NOTE, vect_location,
6274 "transform load. ncopies = %d\n", ncopies);
6275
6276 /** Transform. **/
6277
6278 ensure_base_align (stmt_info, dr);
6279
6280 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6281 {
6282 tree vec_oprnd0 = NULL_TREE, op;
6283 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
6284 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6285 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6286 edge pe = loop_preheader_edge (loop);
6287 gimple_seq seq;
6288 basic_block new_bb;
6289 enum { NARROW, NONE, WIDEN } modifier;
6290 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
6291
6292 if (nunits == gather_off_nunits)
6293 modifier = NONE;
6294 else if (nunits == gather_off_nunits / 2)
6295 {
6296 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
6297 modifier = WIDEN;
6298
6299 for (i = 0; i < gather_off_nunits; ++i)
6300 sel[i] = i | nunits;
6301
6302 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
6303 }
6304 else if (nunits == gather_off_nunits * 2)
6305 {
6306 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
6307 modifier = NARROW;
6308
6309 for (i = 0; i < nunits; ++i)
6310 sel[i] = i < gather_off_nunits
6311 ? i : i + nunits - gather_off_nunits;
6312
6313 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
6314 ncopies *= 2;
6315 }
6316 else
6317 gcc_unreachable ();
6318
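      /* Illustrative example of the NARROW case above (assumed shapes):
	 loading V8SF data with a V4DI offset vector gives nunits == 8 and
	 gather_off_nunits == 4, so each vector of results takes two gather
	 calls; PERM_MASK is then { 0, 1, 2, 3, 8, 9, 10, 11 }, combining the
	 low halves of the two partial results, and NCOPIES is doubled.  */
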
6319 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
6320 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6321 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6322 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6323 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6324 scaletype = TREE_VALUE (arglist);
6325 gcc_checking_assert (types_compatible_p (srctype, rettype));
6326
6327 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6328
6329 ptr = fold_convert (ptrtype, gather_base);
6330 if (!is_gimple_min_invariant (ptr))
6331 {
6332 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6333 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6334 gcc_assert (!new_bb);
6335 }
6336
6337 /* Currently we support only unconditional gather loads,
6338 so mask should be all ones. */
6339 if (TREE_CODE (masktype) == INTEGER_TYPE)
6340 mask = build_int_cst (masktype, -1);
6341 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6342 {
6343 mask = build_int_cst (TREE_TYPE (masktype), -1);
6344 mask = build_vector_from_val (masktype, mask);
6345 mask = vect_init_vector (stmt, mask, masktype, NULL);
6346 }
6347 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6348 {
6349 REAL_VALUE_TYPE r;
6350 long tmp[6];
6351 for (j = 0; j < 6; ++j)
6352 tmp[j] = -1;
6353 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6354 mask = build_real (TREE_TYPE (masktype), r);
6355 mask = build_vector_from_val (masktype, mask);
6356 mask = vect_init_vector (stmt, mask, masktype, NULL);
6357 }
6358 else
6359 gcc_unreachable ();
6360
6361 scale = build_int_cst (scaletype, gather_scale);
6362
6363 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6364 merge = build_int_cst (TREE_TYPE (rettype), 0);
6365 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6366 {
6367 REAL_VALUE_TYPE r;
6368 long tmp[6];
6369 for (j = 0; j < 6; ++j)
6370 tmp[j] = 0;
6371 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6372 merge = build_real (TREE_TYPE (rettype), r);
6373 }
6374 else
6375 gcc_unreachable ();
6376 merge = build_vector_from_val (rettype, merge);
6377 merge = vect_init_vector (stmt, merge, rettype, NULL);
6378
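      /* Illustrative sketch (not generated verbatim): each copy J emits a
	 call of the form

	   vec_tmp = GATHER_FN (merge, ptr, idx_vec, mask, scale);

	 which, for every lane i enabled by MASK (all ones here), loads
	 *(ptr + idx_vec[i] * scale) and takes the corresponding lane of MERGE
	 otherwise.  GATHER_FN stands for the target builtin returned by
	 vect_check_gather_scatter; its exact name and operand types are
	 target-specific.  */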
6379 prev_stmt_info = NULL;
6380 for (j = 0; j < ncopies; ++j)
6381 {
6382 if (modifier == WIDEN && (j & 1))
6383 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6384 perm_mask, stmt, gsi);
6385 else if (j == 0)
6386 op = vec_oprnd0
6387 = vect_get_vec_def_for_operand (gather_off, stmt);
6388 else
6389 op = vec_oprnd0
6390 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
6391
6392 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6393 {
6394 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6395 == TYPE_VECTOR_SUBPARTS (idxtype));
6396 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6397 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6398 new_stmt
6399 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6400 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6401 op = var;
6402 }
6403
6404 new_stmt
6405 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
6406
6407 if (!useless_type_conversion_p (vectype, rettype))
6408 {
6409 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6410 == TYPE_VECTOR_SUBPARTS (rettype));
6411 op = vect_get_new_ssa_name (rettype, vect_simple_var);
6412 gimple_call_set_lhs (new_stmt, op);
6413 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6414 var = make_ssa_name (vec_dest);
6415 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6416 new_stmt
6417 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6418 }
6419 else
6420 {
6421 var = make_ssa_name (vec_dest, new_stmt);
6422 gimple_call_set_lhs (new_stmt, var);
6423 }
6424
6425 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6426
6427 if (modifier == NARROW)
6428 {
6429 if ((j & 1) == 0)
6430 {
6431 prev_res = var;
6432 continue;
6433 }
6434 var = permute_vec_elements (prev_res, var,
6435 perm_mask, stmt, gsi);
6436 new_stmt = SSA_NAME_DEF_STMT (var);
6437 }
6438
6439 if (prev_stmt_info == NULL)
6440 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6441 else
6442 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6443 prev_stmt_info = vinfo_for_stmt (new_stmt);
6444 }
6445 return true;
6446 }
6447 else if (STMT_VINFO_STRIDED_P (stmt_info))
6448 {
6449 gimple_stmt_iterator incr_gsi;
6450 bool insert_after;
6451 gimple *incr;
6452 tree offvar;
6453 tree ivstep;
6454 tree running_off;
6455 vec<constructor_elt, va_gc> *v = NULL;
6456 gimple_seq stmts = NULL;
6457 tree stride_base, stride_step, alias_off;
6458
6459 gcc_assert (!nested_in_vect_loop);
6460
6461 if (slp && grouped_load)
6462 first_dr = STMT_VINFO_DATA_REF
6463 (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
6464 else
6465 first_dr = dr;
6466
6467 stride_base
6468 = fold_build_pointer_plus
6469 (DR_BASE_ADDRESS (first_dr),
6470 size_binop (PLUS_EXPR,
6471 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6472 convert_to_ptrofftype (DR_INIT (first_dr))));
6473 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
6474
6475 /* For a load with a loop-invariant stride that is not a power of 2
6476 (i.e. not a grouped access) like so:
6477
6478 for (i = 0; i < n; i += stride)
6479 ... = array[i];
6480
6481 we generate a new induction variable and new accesses to
6482 form a new vector (or vectors, depending on ncopies):
6483
6484 for (j = 0; ; j += VF*stride)
6485 tmp1 = array[j];
6486 tmp2 = array[j + stride];
6487 ...
6488 vectemp = {tmp1, tmp2, ...}
6489 */
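      /* A concrete (illustrative) instantiation: with V4SI vectors and
	 vf == 4 (ncopies == 1) the pointer IV below steps by 4 * stride_step
	 bytes per iteration, nloads == 4 scalar loads are emitted per copy,
	 and the results are collected into a CONSTRUCTOR:

	   tmp1 = *ptr;  tmp2 = *(ptr + stride);  ...
	   vectemp = { tmp1, tmp2, tmp3, tmp4 };  */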
6490
6491 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
6492 build_int_cst (TREE_TYPE (stride_step), vf));
6493
6494 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6495
6496 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
6497 loop, &incr_gsi, insert_after,
6498 &offvar, NULL);
6499 incr = gsi_stmt (incr_gsi);
6500 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6501
6502 stride_step = force_gimple_operand (unshare_expr (stride_step),
6503 &stmts, true, NULL_TREE);
6504 if (stmts)
6505 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6506
6507 prev_stmt_info = NULL;
6508 running_off = offvar;
6509 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
6510 int nloads = nunits;
6511 tree ltype = TREE_TYPE (vectype);
6512 auto_vec<tree> dr_chain;
6513 if (slp)
6514 {
6515 nloads = nunits / group_size;
6516 if (group_size < nunits)
6517 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
6518 else
6519 ltype = vectype;
6520 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
6521 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6522 if (slp_perm)
6523 dr_chain.create (ncopies);
6524 }
6525 for (j = 0; j < ncopies; j++)
6526 {
6527 tree vec_inv;
6528
6529 if (nloads > 1)
6530 {
6531 vec_alloc (v, nloads);
6532 for (i = 0; i < nloads; i++)
6533 {
6534 tree newref, newoff;
6535 gimple *incr;
6536 newref = build2 (MEM_REF, ltype, running_off, alias_off);
6537
6538 newref = force_gimple_operand_gsi (gsi, newref, true,
6539 NULL_TREE, true,
6540 GSI_SAME_STMT);
6541 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6542 newoff = copy_ssa_name (running_off);
6543 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6544 running_off, stride_step);
6545 vect_finish_stmt_generation (stmt, incr, gsi);
6546
6547 running_off = newoff;
6548 }
6549
6550 vec_inv = build_constructor (vectype, v);
6551 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6552 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6553 }
6554 else
6555 {
6556 new_stmt = gimple_build_assign (make_ssa_name (ltype),
6557 build2 (MEM_REF, ltype,
6558 running_off, alias_off));
6559 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6560
6561 tree newoff = copy_ssa_name (running_off);
6562 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6563 running_off, stride_step);
6564 vect_finish_stmt_generation (stmt, incr, gsi);
6565
6566 running_off = newoff;
6567 }
6568
6569 if (slp)
6570 {
6571 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6572 if (slp_perm)
6573 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
6574 }
6575 else
6576 {
6577 if (j == 0)
6578 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6579 else
6580 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6581 prev_stmt_info = vinfo_for_stmt (new_stmt);
6582 }
6583 }
6584 if (slp_perm)
6585 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6586 slp_node_instance, false);
6587 return true;
6588 }
6589
6590 if (grouped_load)
6591 {
6592 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6593 if (slp
6594 && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
6595 && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
6596 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6597
6598 /* Check if the chain of loads is already vectorized. */
6599 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6600 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6601 ??? But we can only do so if there is exactly one
6602 as we have no way to get at the rest. Leave the CSE
6603 opportunity alone.
6604 ??? With the group load eventually participating
6605 in multiple different permutations (having multiple
6606 slp nodes which refer to the same group) the CSE
6607 is even wrong code. See PR56270. */
6608 && !slp)
6609 {
6610 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6611 return true;
6612 }
6613 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6614 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6615 group_gap_adj = 0;
6616
6617 /* VEC_NUM is the number of vect stmts to be created for this group. */
6618 if (slp)
6619 {
6620 grouped_load = false;
6621 /* For SLP permutation support we need to load the whole group,
6622 not only the number of vector stmts the permutation result
6623 fits in. */
6624 if (slp_perm)
6625 vec_num = (group_size * vf + nunits - 1) / nunits;
6626 else
6627 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6628 group_gap_adj = vf * group_size - nunits * vec_num;
6629 }
6630 else
6631 vec_num = group_size;
6632 }
6633 else
6634 {
6635 first_stmt = stmt;
6636 first_dr = dr;
6637 group_size = vec_num = 1;
6638 group_gap_adj = 0;
6639 }
6640
6641 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6642 gcc_assert (alignment_support_scheme);
6643 /* Targets with load-lane instructions must not require explicit
6644 realignment. */
6645 gcc_assert (!load_lanes_p
6646 || alignment_support_scheme == dr_aligned
6647 || alignment_support_scheme == dr_unaligned_supported);
6648
6649 /* In case the vectorization factor (VF) is bigger than the number
6650 of elements that we can fit in a vectype (nunits), we have to generate
6651 more than one vector stmt - i.e., we need to "unroll" the
6652 vector stmt by a factor of VF/nunits. In doing so, we record a pointer
6653 from one copy of the vector stmt to the next, in the field
6654 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6655 stages to find the correct vector defs to be used when vectorizing
6656 stmts that use the defs of the current stmt. The example below
6657 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6658 need to create 4 vectorized stmts):
6659
6660 before vectorization:
6661 RELATED_STMT VEC_STMT
6662 S1: x = memref - -
6663 S2: z = x + 1 - -
6664
6665 step 1: vectorize stmt S1:
6666 We first create the vector stmt VS1_0, and, as usual, record a
6667 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6668 Next, we create the vector stmt VS1_1, and record a pointer to
6669 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6670 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6671 stmts and pointers:
6672 RELATED_STMT VEC_STMT
6673 VS1_0: vx0 = memref0 VS1_1 -
6674 VS1_1: vx1 = memref1 VS1_2 -
6675 VS1_2: vx2 = memref2 VS1_3 -
6676 VS1_3: vx3 = memref3 - -
6677 S1: x = load - VS1_0
6678 S2: z = x + 1 - -
6679
6680 See in documentation in vect_get_vec_def_for_stmt_copy for how the
6681 information we recorded in RELATED_STMT field is used to vectorize
6682 stmt S2. */
6683
6684 /* In case of interleaving (non-unit grouped access):
6685
6686 S1: x2 = &base + 2
6687 S2: x0 = &base
6688 S3: x1 = &base + 1
6689 S4: x3 = &base + 3
6690
6691 Vectorized loads are created in the order of memory accesses
6692 starting from the access of the first stmt of the chain:
6693
6694 VS1: vx0 = &base
6695 VS2: vx1 = &base + vec_size*1
6696 VS3: vx3 = &base + vec_size*2
6697 VS4: vx4 = &base + vec_size*3
6698
6699 Then permutation statements are generated:
6700
6701 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6702 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6703 ...
6704
6705 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6706 (the order of the data-refs in the output of vect_permute_load_chain
6707 corresponds to the order of scalar stmts in the interleaving chain - see
6708 the documentation of vect_permute_load_chain()).
6709 The generation of permutation stmts and recording them in
6710 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6711
6712 In case of both multiple types and interleaving, the vector loads and
6713 permutation stmts above are created for every copy. The result vector
6714 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6715 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
6716
6717 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6718 on a target that supports unaligned accesses (dr_unaligned_supported)
6719 we generate the following code:
6720 p = initial_addr;
6721 indx = 0;
6722 loop {
6723 p = p + indx * vectype_size;
6724 vec_dest = *(p);
6725 indx = indx + 1;
6726 }
6727
6728 Otherwise, the data reference is potentially unaligned on a target that
6729 does not support unaligned accesses (dr_explicit_realign_optimized) -
6730 then generate the following code, in which the data in each iteration is
6731 obtained by two vector loads, one from the previous iteration, and one
6732 from the current iteration:
6733 p1 = initial_addr;
6734 msq_init = *(floor(p1))
6735 p2 = initial_addr + VS - 1;
6736 realignment_token = call target_builtin;
6737 indx = 0;
6738 loop {
6739 p2 = p2 + indx * vectype_size
6740 lsq = *(floor(p2))
6741 vec_dest = realign_load (msq, lsq, realignment_token)
6742 indx = indx + 1;
6743 msq = lsq;
6744 } */
6745
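   /* For example (illustrative, assuming 16-byte V4SF vectors and an access
      at p with p % 16 == 4): msq is loaded from p & -16, lsq from
      (p + 15) & -16, and

	vec_dest = realign_load (msq, lsq, realignment_token)

      concatenates the two aligned loads and extracts the 16 bytes starting
      at the original misalignment, yielding the desired unaligned vector.  */
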
6746 /* If the misalignment remains the same throughout the execution of the
6747 loop, we can create the init_addr and permutation mask at the loop
6748 preheader. Otherwise, it needs to be created inside the loop.
6749 This can only occur when vectorizing memory accesses in the inner-loop
6750 nested within an outer-loop that is being vectorized. */
6751
6752 if (nested_in_vect_loop
6753 && (TREE_INT_CST_LOW (DR_STEP (dr))
6754 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6755 {
6756 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6757 compute_in_loop = true;
6758 }
6759
6760 if ((alignment_support_scheme == dr_explicit_realign_optimized
6761 || alignment_support_scheme == dr_explicit_realign)
6762 && !compute_in_loop)
6763 {
6764 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6765 alignment_support_scheme, NULL_TREE,
6766 &at_loop);
6767 if (alignment_support_scheme == dr_explicit_realign_optimized)
6768 {
6769 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
6770 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6771 size_one_node);
6772 }
6773 }
6774 else
6775 at_loop = loop;
6776
6777 if (negative)
6778 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6779
6780 if (load_lanes_p)
6781 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6782 else
6783 aggr_type = vectype;
6784
6785 prev_stmt_info = NULL;
6786 for (j = 0; j < ncopies; j++)
6787 {
6788 /* 1. Create the vector or array pointer update chain. */
6789 if (j == 0)
6790 {
6791 bool simd_lane_access_p
6792 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6793 if (simd_lane_access_p
6794 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6795 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6796 && integer_zerop (DR_OFFSET (first_dr))
6797 && integer_zerop (DR_INIT (first_dr))
6798 && alias_sets_conflict_p (get_alias_set (aggr_type),
6799 get_alias_set (DR_REF (first_dr)))
6800 && (alignment_support_scheme == dr_aligned
6801 || alignment_support_scheme == dr_unaligned_supported))
6802 {
6803 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6804 dataref_offset = build_int_cst (reference_alias_ptr_type
6805 (DR_REF (first_dr)), 0);
6806 inv_p = false;
6807 }
6808 else
6809 dataref_ptr
6810 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6811 offset, &dummy, gsi, &ptr_incr,
6812 simd_lane_access_p, &inv_p,
6813 byte_offset);
6814 }
6815 else if (dataref_offset)
6816 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6817 TYPE_SIZE_UNIT (aggr_type));
6818 else
6819 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6820 TYPE_SIZE_UNIT (aggr_type));
6821
6822 if (grouped_load || slp_perm)
6823 dr_chain.create (vec_num);
6824
6825 if (load_lanes_p)
6826 {
6827 tree vec_array;
6828
6829 vec_array = create_vector_array (vectype, vec_num);
6830
6831 /* Emit:
6832 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6833 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6834 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6835 gimple_call_set_lhs (new_stmt, vec_array);
6836 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6837
6838 /* Extract each vector into an SSA_NAME. */
6839 for (i = 0; i < vec_num; i++)
6840 {
6841 new_temp = read_vector_array (stmt, gsi, scalar_dest,
6842 vec_array, i);
6843 dr_chain.quick_push (new_temp);
6844 }
6845
6846 /* Record the mapping between SSA_NAMEs and statements. */
6847 vect_record_grouped_load_vectors (stmt, dr_chain);
6848 }
6849 else
6850 {
6851 for (i = 0; i < vec_num; i++)
6852 {
6853 if (i > 0)
6854 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6855 stmt, NULL_TREE);
6856
6857 /* 2. Create the vector-load in the loop. */
6858 switch (alignment_support_scheme)
6859 {
6860 case dr_aligned:
6861 case dr_unaligned_supported:
6862 {
6863 unsigned int align, misalign;
6864
6865 data_ref
6866 = fold_build2 (MEM_REF, vectype, dataref_ptr,
6867 dataref_offset
6868 ? dataref_offset
6869 : build_int_cst (reference_alias_ptr_type
6870 (DR_REF (first_dr)), 0));
6871 align = TYPE_ALIGN_UNIT (vectype);
6872 if (alignment_support_scheme == dr_aligned)
6873 {
6874 gcc_assert (aligned_access_p (first_dr));
6875 misalign = 0;
6876 }
6877 else if (DR_MISALIGNMENT (first_dr) == -1)
6878 {
6879 if (DR_VECT_AUX (first_dr)->base_element_aligned)
6880 align = TYPE_ALIGN_UNIT (elem_type);
6881 else
6882 align = (get_object_alignment (DR_REF (first_dr))
6883 / BITS_PER_UNIT);
6884 misalign = 0;
6885 TREE_TYPE (data_ref)
6886 = build_aligned_type (TREE_TYPE (data_ref),
6887 align * BITS_PER_UNIT);
6888 }
6889 else
6890 {
6891 TREE_TYPE (data_ref)
6892 = build_aligned_type (TREE_TYPE (data_ref),
6893 TYPE_ALIGN (elem_type));
6894 misalign = DR_MISALIGNMENT (first_dr);
6895 }
6896 if (dataref_offset == NULL_TREE
6897 && TREE_CODE (dataref_ptr) == SSA_NAME)
6898 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6899 align, misalign);
6900 break;
6901 }
6902 case dr_explicit_realign:
6903 {
6904 tree ptr, bump;
6905
6906 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
6907
6908 if (compute_in_loop)
6909 msq = vect_setup_realignment (first_stmt, gsi,
6910 &realignment_token,
6911 dr_explicit_realign,
6912 dataref_ptr, NULL);
6913
6914 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6915 ptr = copy_ssa_name (dataref_ptr);
6916 else
6917 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
6918 new_stmt = gimple_build_assign
6919 (ptr, BIT_AND_EXPR, dataref_ptr,
6920 build_int_cst
6921 (TREE_TYPE (dataref_ptr),
6922 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6923 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6924 data_ref
6925 = build2 (MEM_REF, vectype, ptr,
6926 build_int_cst (reference_alias_ptr_type
6927 (DR_REF (first_dr)), 0));
6928 vec_dest = vect_create_destination_var (scalar_dest,
6929 vectype);
6930 new_stmt = gimple_build_assign (vec_dest, data_ref);
6931 new_temp = make_ssa_name (vec_dest, new_stmt);
6932 gimple_assign_set_lhs (new_stmt, new_temp);
6933 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
6934 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
6935 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6936 msq = new_temp;
6937
6938 bump = size_binop (MULT_EXPR, vs,
6939 TYPE_SIZE_UNIT (elem_type));
6940 bump = size_binop (MINUS_EXPR, bump, size_one_node);
6941 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
6942 new_stmt = gimple_build_assign
6943 (NULL_TREE, BIT_AND_EXPR, ptr,
6944 build_int_cst
6945 (TREE_TYPE (ptr),
6946 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6947 ptr = copy_ssa_name (ptr, new_stmt);
6948 gimple_assign_set_lhs (new_stmt, ptr);
6949 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6950 data_ref
6951 = build2 (MEM_REF, vectype, ptr,
6952 build_int_cst (reference_alias_ptr_type
6953 (DR_REF (first_dr)), 0));
6954 break;
6955 }
6956 case dr_explicit_realign_optimized:
6957 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6958 new_temp = copy_ssa_name (dataref_ptr);
6959 else
6960 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
6961 new_stmt = gimple_build_assign
6962 (new_temp, BIT_AND_EXPR, dataref_ptr,
6963 build_int_cst
6964 (TREE_TYPE (dataref_ptr),
6965 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6966 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6967 data_ref
6968 = build2 (MEM_REF, vectype, new_temp,
6969 build_int_cst (reference_alias_ptr_type
6970 (DR_REF (first_dr)), 0));
6971 break;
6972 default:
6973 gcc_unreachable ();
6974 }
6975 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6976 new_stmt = gimple_build_assign (vec_dest, data_ref);
6977 new_temp = make_ssa_name (vec_dest, new_stmt);
6978 gimple_assign_set_lhs (new_stmt, new_temp);
6979 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6980
6981 /* 3. Handle explicit realignment if necessary/supported.
6982 Create in loop:
6983 vec_dest = realign_load (msq, lsq, realignment_token) */
6984 if (alignment_support_scheme == dr_explicit_realign_optimized
6985 || alignment_support_scheme == dr_explicit_realign)
6986 {
6987 lsq = gimple_assign_lhs (new_stmt);
6988 if (!realignment_token)
6989 realignment_token = dataref_ptr;
6990 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6991 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
6992 msq, lsq, realignment_token);
6993 new_temp = make_ssa_name (vec_dest, new_stmt);
6994 gimple_assign_set_lhs (new_stmt, new_temp);
6995 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6996
6997 if (alignment_support_scheme == dr_explicit_realign_optimized)
6998 {
6999 gcc_assert (phi);
7000 if (i == vec_num - 1 && j == ncopies - 1)
7001 add_phi_arg (phi, lsq,
7002 loop_latch_edge (containing_loop),
7003 UNKNOWN_LOCATION);
7004 msq = lsq;
7005 }
7006 }
7007
7008 /* 4. Handle invariant-load. */
7009 if (inv_p && !bb_vinfo)
7010 {
7011 gcc_assert (!grouped_load);
7012 /* If we have versioned for aliasing or the loop doesn't
7013 have any data dependencies that would preclude this,
7014 then we are sure this is a loop invariant load and
7015 thus we can insert it on the preheader edge. */
7016 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7017 && !nested_in_vect_loop
7018 && hoist_defs_of_uses (stmt, loop))
7019 {
7020 if (dump_enabled_p ())
7021 {
7022 dump_printf_loc (MSG_NOTE, vect_location,
7023 "hoisting out of the vectorized "
7024 "loop: ");
7025 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7026 }
7027 tree tem = copy_ssa_name (scalar_dest);
7028 gsi_insert_on_edge_immediate
7029 (loop_preheader_edge (loop),
7030 gimple_build_assign (tem,
7031 unshare_expr
7032 (gimple_assign_rhs1 (stmt))));
7033 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7034 }
7035 else
7036 {
7037 gimple_stmt_iterator gsi2 = *gsi;
7038 gsi_next (&gsi2);
7039 new_temp = vect_init_vector (stmt, scalar_dest,
7040 vectype, &gsi2);
7041 }
7042 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7043 set_vinfo_for_stmt (new_stmt,
7044 new_stmt_vec_info (new_stmt, vinfo));
7045 }
7046
7047 if (negative)
7048 {
7049 tree perm_mask = perm_mask_for_reverse (vectype);
7050 new_temp = permute_vec_elements (new_temp, new_temp,
7051 perm_mask, stmt, gsi);
7052 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7053 }
7054
7055 /* Collect vector loads and later create their permutation in
7056 vect_transform_grouped_load (). */
7057 if (grouped_load || slp_perm)
7058 dr_chain.quick_push (new_temp);
7059
7060 /* Store vector loads in the corresponding SLP_NODE. */
7061 if (slp && !slp_perm)
7062 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7063 }
7064 /* Bump the vector pointer to account for a gap or for excess
7065 elements loaded for a permuted SLP load. */
7066 if (group_gap_adj != 0)
7067 {
7068 bool ovf;
7069 tree bump
7070 = wide_int_to_tree (sizetype,
7071 wi::smul (TYPE_SIZE_UNIT (elem_type),
7072 group_gap_adj, &ovf));
7073 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7074 stmt, bump);
7075 }
7076 }
7077
7078 if (slp && !slp_perm)
7079 continue;
7080
7081 if (slp_perm)
7082 {
7083 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7084 slp_node_instance, false))
7085 {
7086 dr_chain.release ();
7087 return false;
7088 }
7089 }
7090 else
7091 {
7092 if (grouped_load)
7093 {
7094 if (!load_lanes_p)
7095 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7096 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7097 }
7098 else
7099 {
7100 if (j == 0)
7101 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7102 else
7103 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7104 prev_stmt_info = vinfo_for_stmt (new_stmt);
7105 }
7106 }
7107 dr_chain.release ();
7108 }
7109
7110 return true;
7111 }
7112
7113 /* Function vect_is_simple_cond.
7114
7115 Input:
7116 VINFO - the vect info for the loop or basic block that is being vectorized.
7117 COND - Condition that is checked for simple use.
7118
7119 Output:
7120 *COMP_VECTYPE - the vector type for the comparison.
7121
7122 Returns whether COND can be vectorized. Checks whether the
7123 condition operands are supportable using vect_is_simple_use. */
7124
7125 static bool
7126 vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
7127 {
7128 tree lhs, rhs;
7129 enum vect_def_type dt;
7130 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7131
7132 if (!COMPARISON_CLASS_P (cond))
7133 return false;
7134
7135 lhs = TREE_OPERAND (cond, 0);
7136 rhs = TREE_OPERAND (cond, 1);
7137
7138 if (TREE_CODE (lhs) == SSA_NAME)
7139 {
7140 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7141 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
7142 return false;
7143 }
7144 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
7145 && TREE_CODE (lhs) != FIXED_CST)
7146 return false;
7147
7148 if (TREE_CODE (rhs) == SSA_NAME)
7149 {
7150 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7151 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
7152 return false;
7153 }
7154 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
7155 && TREE_CODE (rhs) != FIXED_CST)
7156 return false;
7157
7158 *comp_vectype = vectype1 ? vectype1 : vectype2;
7159 return true;
7160 }
7161
7162 /* vectorizable_condition.
7163
7164 Check if STMT is a conditional modify expression that can be vectorized.
7165 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7166 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7167 at GSI.
7168
7169 When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
7170 to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
7171 else clause if it is 2).
7172
7173 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
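/* As an illustrative sketch (made-up SSA names, not from any testcase),
   a scalar statement of the form

     x_1 = a_2 < b_3 ? c_4 : d_5;

   is replaced by a vector statement of the form

     vx_6 = VEC_COND_EXPR <va_7 < vb_8, vc_9, vd_10>;

   where the embedded comparison is built in the truth vector type
   obtained from build_same_sized_truth_vector_type below.  */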
7174
7175 bool
7176 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7177 gimple **vec_stmt, tree reduc_def, int reduc_index,
7178 slp_tree slp_node)
7179 {
7180 tree scalar_dest = NULL_TREE;
7181 tree vec_dest = NULL_TREE;
7182 tree cond_expr, then_clause, else_clause;
7183 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7184 tree comp_vectype = NULL_TREE;
7185 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7186 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7187 tree vec_compare, vec_cond_expr;
7188 tree new_temp;
7189 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7190 enum vect_def_type dt, dts[4];
7191 int ncopies;
7192 enum tree_code code;
7193 stmt_vec_info prev_stmt_info = NULL;
7194 int i, j;
7195 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7196 vec<tree> vec_oprnds0 = vNULL;
7197 vec<tree> vec_oprnds1 = vNULL;
7198 vec<tree> vec_oprnds2 = vNULL;
7199 vec<tree> vec_oprnds3 = vNULL;
7200 tree vec_cmp_type;
7201
7202 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7203 return false;
7204
7205 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
7206 {
7207 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7208 return false;
7209
7210 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7211 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7212 && reduc_def))
7213 return false;
7214
7215 /* FORNOW: not yet supported. */
7216 if (STMT_VINFO_LIVE_P (stmt_info))
7217 {
7218 if (dump_enabled_p ())
7219 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7220 "value used after loop.\n");
7221 return false;
7222 }
7223 }
7224
7225 /* Is vectorizable conditional operation? */
7226 if (!is_gimple_assign (stmt))
7227 return false;
7228
7229 code = gimple_assign_rhs_code (stmt);
7230
7231 if (code != COND_EXPR)
7232 return false;
7233
7234 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7235 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
7236
7237 if (slp_node || PURE_SLP_STMT (stmt_info))
7238 ncopies = 1;
7239 else
7240 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7241
7242 gcc_assert (ncopies >= 1);
7243 if (reduc_index && ncopies > 1)
7244 return false; /* FORNOW */
7245
7246 cond_expr = gimple_assign_rhs1 (stmt);
7247 then_clause = gimple_assign_rhs2 (stmt);
7248 else_clause = gimple_assign_rhs3 (stmt);
7249
7250 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
7251 || !comp_vectype)
7252 return false;
7253
7254 gimple *def_stmt;
7255 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt))
7256 return false;
7257 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt))
7258 return false;
7259
7260 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
7261 if (vec_cmp_type == NULL_TREE)
7262 return false;
7263
7264 if (!vec_stmt)
7265 {
7266 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
7267 return expand_vec_cond_expr_p (vectype, comp_vectype);
7268 }
7269
7270 /* Transform. */
7271
7272 if (!slp_node)
7273 {
7274 vec_oprnds0.create (1);
7275 vec_oprnds1.create (1);
7276 vec_oprnds2.create (1);
7277 vec_oprnds3.create (1);
7278 }
7279
7280 /* Handle def. */
7281 scalar_dest = gimple_assign_lhs (stmt);
7282 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7283
7284 /* Handle cond expr. */
7285 for (j = 0; j < ncopies; j++)
7286 {
7287 gassign *new_stmt = NULL;
7288 if (j == 0)
7289 {
7290 if (slp_node)
7291 {
7292 auto_vec<tree, 4> ops;
7293 auto_vec<vec<tree>, 4> vec_defs;
7294
7295 ops.safe_push (TREE_OPERAND (cond_expr, 0));
7296 ops.safe_push (TREE_OPERAND (cond_expr, 1));
7297 ops.safe_push (then_clause);
7298 ops.safe_push (else_clause);
7299 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7300 vec_oprnds3 = vec_defs.pop ();
7301 vec_oprnds2 = vec_defs.pop ();
7302 vec_oprnds1 = vec_defs.pop ();
7303 vec_oprnds0 = vec_defs.pop ();
7304
7305 ops.release ();
7306 vec_defs.release ();
7307 }
7308 else
7309 {
7310 gimple *gtemp;
7311 vec_cond_lhs =
7312 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt);
7313 vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
7314 loop_vinfo, &gtemp, &dts[0]);
7315
7316 vec_cond_rhs =
7317 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
7318 stmt);
7319 vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
7320 loop_vinfo, &gtemp, &dts[1]);
7321 if (reduc_index == 1)
7322 vec_then_clause = reduc_def;
7323 else
7324 {
7325 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
7326 stmt);
7327 vect_is_simple_use (then_clause, loop_vinfo,
7328 &gtemp, &dts[2]);
7329 }
7330 if (reduc_index == 2)
7331 vec_else_clause = reduc_def;
7332 else
7333 {
7334 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
7335 stmt);
7336 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
7337 }
7338 }
7339 }
7340 else
7341 {
7342 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
7343 vec_oprnds0.pop ());
7344 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
7345 vec_oprnds1.pop ());
7346 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
7347 vec_oprnds2.pop ());
7348 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
7349 vec_oprnds3.pop ());
7350 }
7351
7352 if (!slp_node)
7353 {
7354 vec_oprnds0.quick_push (vec_cond_lhs);
7355 vec_oprnds1.quick_push (vec_cond_rhs);
7356 vec_oprnds2.quick_push (vec_then_clause);
7357 vec_oprnds3.quick_push (vec_else_clause);
7358 }
7359
7360 /* Arguments are ready. Create the new vector stmt. */
7361 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
7362 {
7363 vec_cond_rhs = vec_oprnds1[i];
7364 vec_then_clause = vec_oprnds2[i];
7365 vec_else_clause = vec_oprnds3[i];
7366
7367 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
7368 vec_cond_lhs, vec_cond_rhs);
7369 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
7370 vec_compare, vec_then_clause, vec_else_clause);
7371
7372 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
7373 new_temp = make_ssa_name (vec_dest, new_stmt);
7374 gimple_assign_set_lhs (new_stmt, new_temp);
7375 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7376 if (slp_node)
7377 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7378 }
7379
7380 if (slp_node)
7381 continue;
7382
7383 if (j == 0)
7384 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7385 else
7386 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7387
7388 prev_stmt_info = vinfo_for_stmt (new_stmt);
7389 }
7390
7391 vec_oprnds0.release ();
7392 vec_oprnds1.release ();
7393 vec_oprnds2.release ();
7394 vec_oprnds3.release ();
7395
7396 return true;
7397 }
7398
7399
7400 /* Make sure the statement is vectorizable. */
7401
7402 bool
7403 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
7404 {
7405 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7406 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7407 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
7408 bool ok;
7409 tree scalar_type, vectype;
7410 gimple *pattern_stmt;
7411 gimple_seq pattern_def_seq;
7412
7413 if (dump_enabled_p ())
7414 {
7415 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
7416 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7417 }
7418
7419 if (gimple_has_volatile_ops (stmt))
7420 {
7421 if (dump_enabled_p ())
7422 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7423 "not vectorized: stmt has volatile operands\n");
7424
7425 return false;
7426 }
7427
7428 /* Skip stmts that do not need to be vectorized. In loops this is expected
7429 to include:
7430 - the COND_EXPR which is the loop exit condition
7431 - any LABEL_EXPRs in the loop
7432 - computations that are used only for array indexing or loop control.
7433 In basic blocks we only analyze statements that are a part of some SLP
7434 instance; therefore, all the statements are relevant.
7435
7436 A pattern statement needs to be analyzed instead of the original statement
7437 if the original statement is not relevant. Otherwise, we analyze both
7438 statements. In basic blocks we are called from some SLP instance
7439 traversal, so we do not analyze pattern stmts instead; the pattern stmts
7440 will already be part of the SLP instance. */
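/* As an illustrative example: when the pattern recognizer has replaced
   a statement, the original stmt is marked STMT_VINFO_IN_PATTERN_P and
   its STMT_VINFO_RELATED_STMT points to the pattern stmt; if only the
   pattern stmt is relevant, it is the one examined below.  */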
7441
7442 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7443 if (!STMT_VINFO_RELEVANT_P (stmt_info)
7444 && !STMT_VINFO_LIVE_P (stmt_info))
7445 {
7446 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7447 && pattern_stmt
7448 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7449 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7450 {
7451 /* Analyze PATTERN_STMT instead of the original stmt. */
7452 stmt = pattern_stmt;
7453 stmt_info = vinfo_for_stmt (pattern_stmt);
7454 if (dump_enabled_p ())
7455 {
7456 dump_printf_loc (MSG_NOTE, vect_location,
7457 "==> examining pattern statement: ");
7458 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7459 }
7460 }
7461 else
7462 {
7463 if (dump_enabled_p ())
7464 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
7465
7466 return true;
7467 }
7468 }
7469 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7470 && node == NULL
7471 && pattern_stmt
7472 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7473 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7474 {
7475 /* Analyze PATTERN_STMT too. */
7476 if (dump_enabled_p ())
7477 {
7478 dump_printf_loc (MSG_NOTE, vect_location,
7479 "==> examining pattern statement: ");
7480 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7481 }
7482
7483 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7484 return false;
7485 }
7486
7487 if (is_pattern_stmt_p (stmt_info)
7488 && node == NULL
7489 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
7490 {
7491 gimple_stmt_iterator si;
7492
7493 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
7494 {
7495 gimple *pattern_def_stmt = gsi_stmt (si);
7496 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
7497 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
7498 {
7499 /* Analyze def stmt of STMT if it's a pattern stmt. */
7500 if (dump_enabled_p ())
7501 {
7502 dump_printf_loc (MSG_NOTE, vect_location,
7503 "==> examining pattern def statement: ");
7504 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
7505 }
7506
7507 if (!vect_analyze_stmt (pattern_def_stmt,
7508 need_to_vectorize, node))
7509 return false;
7510 }
7511 }
7512 }
7513
7514 switch (STMT_VINFO_DEF_TYPE (stmt_info))
7515 {
7516 case vect_internal_def:
7517 break;
7518
7519 case vect_reduction_def:
7520 case vect_nested_cycle:
7521 gcc_assert (!bb_vinfo
7522 && (relevance == vect_used_in_outer
7523 || relevance == vect_used_in_outer_by_reduction
7524 || relevance == vect_used_by_reduction
7525 || relevance == vect_unused_in_scope));
7526 break;
7527
7528 case vect_induction_def:
7529 case vect_constant_def:
7530 case vect_external_def:
7531 case vect_unknown_def_type:
7532 default:
7533 gcc_unreachable ();
7534 }
7535
7536 if (bb_vinfo)
7537 {
7538 gcc_assert (PURE_SLP_STMT (stmt_info));
7539
7540 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
7541 if (dump_enabled_p ())
7542 {
7543 dump_printf_loc (MSG_NOTE, vect_location,
7544 "get vectype for scalar type: ");
7545 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
7546 dump_printf (MSG_NOTE, "\n");
7547 }
7548
7549 vectype = get_vectype_for_scalar_type (scalar_type);
7550 if (!vectype)
7551 {
7552 if (dump_enabled_p ())
7553 {
7554 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7555 "not SLPed: unsupported data-type ");
7556 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
7557 scalar_type);
7558 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7559 }
7560 return false;
7561 }
7562
7563 if (dump_enabled_p ())
7564 {
7565 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
7566 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
7567 dump_printf (MSG_NOTE, "\n");
7568 }
7569
7570 STMT_VINFO_VECTYPE (stmt_info) = vectype;
7571 }
7572
7573 if (STMT_VINFO_RELEVANT_P (stmt_info))
7574 {
7575 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
7576 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
7577 || (is_gimple_call (stmt)
7578 && gimple_call_lhs (stmt) == NULL_TREE));
7579 *need_to_vectorize = true;
7580 }
7581
7582 if (PURE_SLP_STMT (stmt_info) && !node)
7583 {
7584 dump_printf_loc (MSG_NOTE, vect_location,
7585 "handled only by SLP analysis\n");
7586 return true;
7587 }
7588
7589 ok = true;
7590 if (!bb_vinfo
7591 && (STMT_VINFO_RELEVANT_P (stmt_info)
7592 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
7593 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7594 || vectorizable_conversion (stmt, NULL, NULL, node)
7595 || vectorizable_shift (stmt, NULL, NULL, node)
7596 || vectorizable_operation (stmt, NULL, NULL, node)
7597 || vectorizable_assignment (stmt, NULL, NULL, node)
7598 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7599 || vectorizable_call (stmt, NULL, NULL, node)
7600 || vectorizable_store (stmt, NULL, NULL, node)
7601 || vectorizable_reduction (stmt, NULL, NULL, node)
7602 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7603 else
7604 {
7605 if (bb_vinfo)
7606 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7607 || vectorizable_conversion (stmt, NULL, NULL, node)
7608 || vectorizable_shift (stmt, NULL, NULL, node)
7609 || vectorizable_operation (stmt, NULL, NULL, node)
7610 || vectorizable_assignment (stmt, NULL, NULL, node)
7611 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7612 || vectorizable_call (stmt, NULL, NULL, node)
7613 || vectorizable_store (stmt, NULL, NULL, node)
7614 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7615 }
7616
7617 if (!ok)
7618 {
7619 if (dump_enabled_p ())
7620 {
7621 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7622 "not vectorized: relevant stmt not ");
7623 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7624 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7625 }
7626
7627 return false;
7628 }
7629
7630 if (bb_vinfo)
7631 return true;
7632
7633 /* Stmts that are (also) "live" (i.e. used outside of the loop)
7634 need extra handling, except for vectorizable reductions. */
7635 if (STMT_VINFO_LIVE_P (stmt_info)
7636 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7637 ok = vectorizable_live_operation (stmt, NULL, NULL);
7638
7639 if (!ok)
7640 {
7641 if (dump_enabled_p ())
7642 {
7643 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7644 "not vectorized: live stmt not ");
7645 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7646 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7647 }
7648
7649 return false;
7650 }
7651
7652 return true;
7653 }
7654
7655
7656 /* Function vect_transform_stmt.
7657
7658 Create a vectorized stmt to replace STMT, and insert it at GSI. */
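/* As an illustrative note: a stmt classified as load_vec_info_type is
   handed to vectorizable_load below and the resulting vector stmt is
   recorded in STMT_VINFO_VEC_STMT; for a grouped (interleaved) store,
   vector code is generated only once the last store of the chain is
   reached, and *GROUPED_STORE is set to tell the caller about it.  */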
7659
7660 bool
7661 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
7662 bool *grouped_store, slp_tree slp_node,
7663 slp_instance slp_node_instance)
7664 {
7665 bool is_store = false;
7666 gimple *vec_stmt = NULL;
7667 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7668 bool done;
7669
7670 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7671
7672 switch (STMT_VINFO_TYPE (stmt_info))
7673 {
7674 case type_demotion_vec_info_type:
7675 case type_promotion_vec_info_type:
7676 case type_conversion_vec_info_type:
7677 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7678 gcc_assert (done);
7679 break;
7680
7681 case induc_vec_info_type:
7682 gcc_assert (!slp_node);
7683 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7684 gcc_assert (done);
7685 break;
7686
7687 case shift_vec_info_type:
7688 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
7689 gcc_assert (done);
7690 break;
7691
7692 case op_vec_info_type:
7693 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7694 gcc_assert (done);
7695 break;
7696
7697 case assignment_vec_info_type:
7698 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7699 gcc_assert (done);
7700 break;
7701
7702 case load_vec_info_type:
7703 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
7704 slp_node_instance);
7705 gcc_assert (done);
7706 break;
7707
7708 case store_vec_info_type:
7709 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
7710 gcc_assert (done);
7711 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
7712 {
7713 /* In case of interleaving, the whole chain is vectorized when the
7714 last store in the chain is reached. Store stmts before the last
7715 one are skipped, and their vec_stmt_info shouldn't be freed
7716 meanwhile. */
7717 *grouped_store = true;
7718 if (STMT_VINFO_VEC_STMT (stmt_info))
7719 is_store = true;
7720 }
7721 else
7722 is_store = true;
7723 break;
7724
7725 case condition_vec_info_type:
7726 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
7727 gcc_assert (done);
7728 break;
7729
7730 case call_vec_info_type:
7731 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
7732 stmt = gsi_stmt (*gsi);
7733 if (is_gimple_call (stmt)
7734 && gimple_call_internal_p (stmt)
7735 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
7736 is_store = true;
7737 break;
7738
7739 case call_simd_clone_vec_info_type:
7740 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
7741 stmt = gsi_stmt (*gsi);
7742 break;
7743
7744 case reduc_vec_info_type:
7745 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
7746 gcc_assert (done);
7747 break;
7748
7749 default:
7750 if (!STMT_VINFO_LIVE_P (stmt_info))
7751 {
7752 if (dump_enabled_p ())
7753 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7754 "stmt not supported.\n");
7755 gcc_unreachable ();
7756 }
7757 }
7758
7759 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
7760 This would break hybrid SLP vectorization. */
7761 if (slp_node)
7762 gcc_assert (!vec_stmt
7763 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
7764
7765 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
7766 is being vectorized, but outside the immediately enclosing loop. */
7767 if (vec_stmt
7768 && STMT_VINFO_LOOP_VINFO (stmt_info)
7769 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
7770 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
7771 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
7772 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
7773 || STMT_VINFO_RELEVANT (stmt_info) ==
7774 vect_used_in_outer_by_reduction))
7775 {
7776 struct loop *innerloop = LOOP_VINFO_LOOP (
7777 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
7778 imm_use_iterator imm_iter;
7779 use_operand_p use_p;
7780 tree scalar_dest;
7781 gimple *exit_phi;
7782
7783 if (dump_enabled_p ())
7784 dump_printf_loc (MSG_NOTE, vect_location,
7785 "Record the vdef for outer-loop vectorization.\n");
7786
7787 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
7788 (to be used when vectorizing outer-loop stmts that use the DEF of
7789 STMT). */
7790 if (gimple_code (stmt) == GIMPLE_PHI)
7791 scalar_dest = PHI_RESULT (stmt);
7792 else
7793 scalar_dest = gimple_assign_lhs (stmt);
7794
7795 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
7796 {
7797 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
7798 {
7799 exit_phi = USE_STMT (use_p);
7800 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
7801 }
7802 }
7803 }
7804
7805 /* Handle stmts whose DEF is used outside the loop-nest that is
7806 being vectorized. */
7807 if (STMT_VINFO_LIVE_P (stmt_info)
7808 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7809 {
7810 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
7811 gcc_assert (done);
7812 }
7813
7814 if (vec_stmt)
7815 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
7816
7817 return is_store;
7818 }
7819
7820
7821 /* Remove a group of stores (for SLP or interleaving), free their
7822 stmt_vec_info. */
7823
7824 void
7825 vect_remove_stores (gimple *first_stmt)
7826 {
7827 gimple *next = first_stmt;
7828 gimple *tmp;
7829 gimple_stmt_iterator next_si;
7830
7831 while (next)
7832 {
7833 stmt_vec_info stmt_info = vinfo_for_stmt (next);
7834
7835 tmp = GROUP_NEXT_ELEMENT (stmt_info);
7836 if (is_pattern_stmt_p (stmt_info))
7837 next = STMT_VINFO_RELATED_STMT (stmt_info);
7838 /* Free the attached stmt_vec_info and remove the stmt. */
7839 next_si = gsi_for_stmt (next);
7840 unlink_stmt_vdef (next);
7841 gsi_remove (&next_si, true);
7842 release_defs (next);
7843 free_stmt_vec_info (next);
7844 next = tmp;
7845 }
7846 }
7847
7848
7849 /* Function new_stmt_vec_info.
7850
7851 Create and initialize a new stmt_vec_info struct for STMT. */
7852
7853 stmt_vec_info
7854 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
7855 {
7856 stmt_vec_info res;
7857 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
7858
7859 STMT_VINFO_TYPE (res) = undef_vec_info_type;
7860 STMT_VINFO_STMT (res) = stmt;
7861 res->vinfo = vinfo;
7862 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
7863 STMT_VINFO_LIVE_P (res) = false;
7864 STMT_VINFO_VECTYPE (res) = NULL;
7865 STMT_VINFO_VEC_STMT (res) = NULL;
7866 STMT_VINFO_VECTORIZABLE (res) = true;
7867 STMT_VINFO_IN_PATTERN_P (res) = false;
7868 STMT_VINFO_RELATED_STMT (res) = NULL;
7869 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
7870 STMT_VINFO_DATA_REF (res) = NULL;
7871 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
7872
7873 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
7874 STMT_VINFO_DR_OFFSET (res) = NULL;
7875 STMT_VINFO_DR_INIT (res) = NULL;
7876 STMT_VINFO_DR_STEP (res) = NULL;
7877 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
7878
7879 if (gimple_code (stmt) == GIMPLE_PHI
7880 && is_loop_header_bb_p (gimple_bb (stmt)))
7881 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
7882 else
7883 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
7884
7885 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
7886 STMT_SLP_TYPE (res) = loop_vect;
7887 GROUP_FIRST_ELEMENT (res) = NULL;
7888 GROUP_NEXT_ELEMENT (res) = NULL;
7889 GROUP_SIZE (res) = 0;
7890 GROUP_STORE_COUNT (res) = 0;
7891 GROUP_GAP (res) = 0;
7892 GROUP_SAME_DR_STMT (res) = NULL;
7893
7894 return res;
7895 }
7896
7897
7898 /* Create a vector of stmt_vec_info structs. */
7899
7900 void
7901 init_stmt_vec_info_vec (void)
7902 {
7903 gcc_assert (!stmt_vec_info_vec.exists ());
7904 stmt_vec_info_vec.create (50);
7905 }
7906
7907
7908 /* Free the vector of stmt_vec_info structs. */
7909
7910 void
7911 free_stmt_vec_info_vec (void)
7912 {
7913 unsigned int i;
7914 stmt_vec_info info;
7915 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
7916 if (info != NULL)
7917 free_stmt_vec_info (STMT_VINFO_STMT (info));
7918 gcc_assert (stmt_vec_info_vec.exists ());
7919 stmt_vec_info_vec.release ();
7920 }
7921
7922
7923 /* Free stmt vectorization related info. */
7924
7925 void
7926 free_stmt_vec_info (gimple *stmt)
7927 {
7928 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7929
7930 if (!stmt_info)
7931 return;
7932
7933 /* Check if this statement has a related "pattern stmt"
7934 (introduced by the vectorizer during the pattern recognition
7935 pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
7936 too. */
7937 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
7938 {
7939 stmt_vec_info patt_info
7940 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7941 if (patt_info)
7942 {
7943 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
7944 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
7945 gimple_set_bb (patt_stmt, NULL);
7946 tree lhs = gimple_get_lhs (patt_stmt);
7947 if (TREE_CODE (lhs) == SSA_NAME)
7948 release_ssa_name (lhs);
7949 if (seq)
7950 {
7951 gimple_stmt_iterator si;
7952 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
7953 {
7954 gimple *seq_stmt = gsi_stmt (si);
7955 gimple_set_bb (seq_stmt, NULL);
7956 lhs = gimple_get_lhs (seq_stmt);
7957 if (TREE_CODE (lhs) == SSA_NAME)
7958 release_ssa_name (lhs);
7959 free_stmt_vec_info (seq_stmt);
7960 }
7961 }
7962 free_stmt_vec_info (patt_stmt);
7963 }
7964 }
7965
7966 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
7967 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
7968 set_vinfo_for_stmt (stmt, NULL);
7969 free (stmt_info);
7970 }
7971
7972
7973 /* Function get_vectype_for_scalar_type_and_size.
7974
7975 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
7976 by the target. */
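/* As an illustrative, target-dependent example: for SCALAR_TYPE 'int'
   (a 4-byte SImode type) and SIZE 16 this would yield a four-element
   integer vector type, while SIZE 0 lets
   targetm.vectorize.preferred_simd_mode pick the vector mode for the
   element mode.  */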
7977
7978 static tree
7979 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
7980 {
7981 machine_mode inner_mode = TYPE_MODE (scalar_type);
7982 machine_mode simd_mode;
7983 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
7984 int nunits;
7985 tree vectype;
7986
7987 if (nbytes == 0)
7988 return NULL_TREE;
7989
7990 if (GET_MODE_CLASS (inner_mode) != MODE_INT
7991 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
7992 return NULL_TREE;
7993
7994 /* For vector types of elements whose mode precision doesn't
7995 match their type's precision we use an element type of mode
7996 precision. The vectorization routines will have to make sure
7997 they support the proper result truncation/extension.
7998 We also make sure to build vector types with INTEGER_TYPE
7999 component type only. */
8000 if (INTEGRAL_TYPE_P (scalar_type)
8001 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
8002 || TREE_CODE (scalar_type) != INTEGER_TYPE))
8003 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
8004 TYPE_UNSIGNED (scalar_type));
8005
8006 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
8007 When the component mode passes the above test simply use a type
8008 corresponding to that mode. The theory is that any use that
8009 would cause problems with this will disable vectorization anyway. */
8010 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
8011 && !INTEGRAL_TYPE_P (scalar_type))
8012 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
8013
8014 /* We can't build a vector type of elements with alignment bigger than
8015 their size. */
8016 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
8017 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
8018 TYPE_UNSIGNED (scalar_type));
8019
8020 /* If we fell back to using the mode, fail if there was
8021 no scalar type for it. */
8022 if (scalar_type == NULL_TREE)
8023 return NULL_TREE;
8024
8025 /* If no size was supplied use the mode the target prefers. Otherwise
8026 look up a vector mode of the specified size. */
8027 if (size == 0)
8028 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
8029 else
8030 simd_mode = mode_for_vector (inner_mode, size / nbytes);
8031 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
8032 if (nunits <= 1)
8033 return NULL_TREE;
8034
8035 vectype = build_vector_type (scalar_type, nunits);
8036
8037 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
8038 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
8039 return NULL_TREE;
8040
8041 return vectype;
8042 }
8043
8044 unsigned int current_vector_size;
8045
8046 /* Function get_vectype_for_scalar_type.
8047
8048 Returns the vector type corresponding to SCALAR_TYPE as supported
8049 by the target. */
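/* As an illustrative note: the first successful query latches
   current_vector_size to the byte size of the chosen vector mode, so
   that later queries for other scalar types return vector types of the
   same overall size.  */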
8050
8051 tree
8052 get_vectype_for_scalar_type (tree scalar_type)
8053 {
8054 tree vectype;
8055 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
8056 current_vector_size);
8057 if (vectype
8058 && current_vector_size == 0)
8059 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
8060 return vectype;
8061 }
8062
8063 /* Function get_same_sized_vectype
8064
8065 Returns a vector type corresponding to SCALAR_TYPE of size
8066 VECTOR_TYPE if supported by the target. */
8067
8068 tree
8069 get_same_sized_vectype (tree scalar_type, tree vector_type)
8070 {
8071 if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
8072 return build_same_sized_truth_vector_type (vector_type);
8073
8074 return get_vectype_for_scalar_type_and_size
8075 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
8076 }
8077
8078 /* Function vect_is_simple_use.
8079
8080 Input:
8081 VINFO - the vect info of the loop or basic block that is being vectorized.
8082 OPERAND - operand in the loop or bb.
8083 Output:
8084 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
8085 DT - the type of definition
8086
8087 Returns whether a stmt with OPERAND can be vectorized.
8088 For loops, supportable operands are constants, loop invariants, and operands
8089 that are defined by the current iteration of the loop. Unsupportable
8090 operands are those that are defined by a previous iteration of the loop (as
8091 is the case in reduction/induction computations).
8092 For basic blocks, supportable operands are constants and bb invariants.
8093 For now, operands defined outside the basic block are not supported. */
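/* As an illustrative example: a constant operand yields
   *DT == vect_constant_def; an SSA_NAME defined outside the loop or
   basic block being vectorized yields vect_external_def; an SSA_NAME
   defined by a stmt inside the region yields the def type recorded in
   that stmt's STMT_VINFO_DEF_TYPE (e.g. vect_internal_def or
   vect_induction_def).  */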
8094
8095 bool
8096 vect_is_simple_use (tree operand, vec_info *vinfo,
8097 gimple **def_stmt, enum vect_def_type *dt)
8098 {
8099 *def_stmt = NULL;
8100 *dt = vect_unknown_def_type;
8101
8102 if (dump_enabled_p ())
8103 {
8104 dump_printf_loc (MSG_NOTE, vect_location,
8105 "vect_is_simple_use: operand ");
8106 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
8107 dump_printf (MSG_NOTE, "\n");
8108 }
8109
8110 if (CONSTANT_CLASS_P (operand))
8111 {
8112 *dt = vect_constant_def;
8113 return true;
8114 }
8115
8116 if (is_gimple_min_invariant (operand))
8117 {
8118 *dt = vect_external_def;
8119 return true;
8120 }
8121
8122 if (TREE_CODE (operand) != SSA_NAME)
8123 {
8124 if (dump_enabled_p ())
8125 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8126 "not ssa-name.\n");
8127 return false;
8128 }
8129
8130 if (SSA_NAME_IS_DEFAULT_DEF (operand))
8131 {
8132 *dt = vect_external_def;
8133 return true;
8134 }
8135
8136 *def_stmt = SSA_NAME_DEF_STMT (operand);
8137 if (dump_enabled_p ())
8138 {
8139 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
8140 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
8141 }
8142
8143 basic_block bb = gimple_bb (*def_stmt);
8144 if ((is_a <loop_vec_info> (vinfo)
8145 && !flow_bb_inside_loop_p (as_a <loop_vec_info> (vinfo)->loop, bb))
8146 || (is_a <bb_vec_info> (vinfo)
8147 && (bb != as_a <bb_vec_info> (vinfo)->bb
8148 || gimple_code (*def_stmt) == GIMPLE_PHI)))
8149 *dt = vect_external_def;
8150 else
8151 {
8152 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
8153 if (is_a <bb_vec_info> (vinfo) && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
8154 *dt = vect_external_def;
8155 else
8156 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
8157 }
8158
8159 if (dump_enabled_p ())
8160 {
8161 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
8162 switch (*dt)
8163 {
8164 case vect_uninitialized_def:
8165 dump_printf (MSG_NOTE, "uninitialized\n");
8166 break;
8167 case vect_constant_def:
8168 dump_printf (MSG_NOTE, "constant\n");
8169 break;
8170 case vect_external_def:
8171 dump_printf (MSG_NOTE, "external\n");
8172 break;
8173 case vect_internal_def:
8174 dump_printf (MSG_NOTE, "internal\n");
8175 break;
8176 case vect_induction_def:
8177 dump_printf (MSG_NOTE, "induction\n");
8178 break;
8179 case vect_reduction_def:
8180 dump_printf (MSG_NOTE, "reduction\n");
8181 break;
8182 case vect_double_reduction_def:
8183 dump_printf (MSG_NOTE, "double reduction\n");
8184 break;
8185 case vect_nested_cycle:
8186 dump_printf (MSG_NOTE, "nested cycle\n");
8187 break;
8188 case vect_unknown_def_type:
8189 dump_printf (MSG_NOTE, "unknown\n");
8190 break;
8191 }
8192 }
8193
8194 if (*dt == vect_unknown_def_type)
8195 {
8196 if (dump_enabled_p ())
8197 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8198 "Unsupported pattern.\n");
8199 return false;
8200 }
8201
8202 switch (gimple_code (*def_stmt))
8203 {
8204 case GIMPLE_PHI:
8205 case GIMPLE_ASSIGN:
8206 case GIMPLE_CALL:
8207 break;
8208 default:
8209 if (dump_enabled_p ())
8210 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8211 "unsupported defining stmt:\n");
8212 return false;
8213 }
8214
8215 return true;
8216 }
8217
8218 /* Function vect_is_simple_use.
8219
8220 Same as vect_is_simple_use but also determines the vector operand
8221 type of OPERAND and stores it to *VECTYPE. If the definition of
8222 OPERAND is vect_uninitialized_def, vect_constant_def or
8223 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
8224 is responsible to compute the best suited vector type for the
8225 scalar operand. */
8226
8227 bool
8228 vect_is_simple_use (tree operand, vec_info *vinfo,
8229 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
8230 {
8231 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
8232 return false;
8233
8234 /* Now get a vector type if the def is internal, otherwise supply
8235 NULL_TREE and leave it up to the caller to figure out a proper
8236 type for the use stmt. */
8237 if (*dt == vect_internal_def
8238 || *dt == vect_induction_def
8239 || *dt == vect_reduction_def
8240 || *dt == vect_double_reduction_def
8241 || *dt == vect_nested_cycle)
8242 {
8243 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
8244
8245 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8246 && !STMT_VINFO_RELEVANT (stmt_info)
8247 && !STMT_VINFO_LIVE_P (stmt_info))
8248 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
8249
8250 *vectype = STMT_VINFO_VECTYPE (stmt_info);
8251 gcc_assert (*vectype != NULL_TREE);
8252 }
8253 else if (*dt == vect_uninitialized_def
8254 || *dt == vect_constant_def
8255 || *dt == vect_external_def)
8256 *vectype = NULL_TREE;
8257 else
8258 gcc_unreachable ();
8259
8260 return true;
8261 }
8262
8263
8264 /* Function supportable_widening_operation
8265
8266 Check whether an operation represented by the code CODE is a
8267 widening operation that is supported by the target platform in
8268 vector form (i.e., when operating on arguments of type VECTYPE_IN
8269 producing a result of type VECTYPE_OUT).
8270
8271 Widening operations we currently support are NOP (CONVERT), FLOAT
8272 and WIDEN_MULT. This function checks if these operations are supported
8273 by the target platform either directly (via vector tree-codes), or via
8274 target builtins.
8275
8276 Output:
8277 - CODE1 and CODE2 are codes of vector operations to be used when
8278 vectorizing the operation, if available.
8279 - MULTI_STEP_CVT determines the number of required intermediate steps in
8280 case of multi-step conversion (like char->short->int - in that case
8281 MULTI_STEP_CVT will be 1).
8282 - INTERM_TYPES contains the intermediate type required to perform the
8283 widening operation (short in the above example). */
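/* As an illustrative sketch: for a WIDEN_MULT_EXPR with QImode-element
   VECTYPE_IN and HImode-element VECTYPE_OUT on a target providing the
   corresponding lo/hi widening multiply patterns, *CODE1 and *CODE2
   become VEC_WIDEN_MULT_LO_EXPR / VEC_WIDEN_MULT_HI_EXPR (or the
   EVEN/ODD variants when the result feeds a reduction) and
   *MULTI_STEP_CVT stays 0; the char -> int conversion mentioned above
   instead unpacks twice, pushing the intermediate short vector type
   onto INTERM_TYPES.  */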
8284
8285 bool
8286 supportable_widening_operation (enum tree_code code, gimple *stmt,
8287 tree vectype_out, tree vectype_in,
8288 enum tree_code *code1, enum tree_code *code2,
8289 int *multi_step_cvt,
8290 vec<tree> *interm_types)
8291 {
8292 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8293 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
8294 struct loop *vect_loop = NULL;
8295 machine_mode vec_mode;
8296 enum insn_code icode1, icode2;
8297 optab optab1, optab2;
8298 tree vectype = vectype_in;
8299 tree wide_vectype = vectype_out;
8300 enum tree_code c1, c2;
8301 int i;
8302 tree prev_type, intermediate_type;
8303 machine_mode intermediate_mode, prev_mode;
8304 optab optab3, optab4;
8305
8306 *multi_step_cvt = 0;
8307 if (loop_info)
8308 vect_loop = LOOP_VINFO_LOOP (loop_info);
8309
8310 switch (code)
8311 {
8312 case WIDEN_MULT_EXPR:
8313 /* The result of a vectorized widening operation usually requires
8314 two vectors (because the widened results do not fit into one vector).
8315 The generated vector results would normally be expected to be
8316 generated in the same order as in the original scalar computation,
8317 i.e. if 8 results are generated in each vector iteration, they are
8318 to be organized as follows:
8319 vect1: [res1,res2,res3,res4],
8320 vect2: [res5,res6,res7,res8].
8321
8322 However, in the special case that the result of the widening
8323 operation is used in a reduction computation only, the order doesn't
8324 matter (because when vectorizing a reduction we change the order of
8325 the computation). Some targets can take advantage of this and
8326 generate more efficient code. For example, targets like Altivec,
8327 that support widen_mult using a sequence of {mult_even,mult_odd}
8328 generate the following vectors:
8329 vect1: [res1,res3,res5,res7],
8330 vect2: [res2,res4,res6,res8].
8331
8332 When vectorizing outer-loops, we execute the inner-loop sequentially
8333 (each vectorized inner-loop iteration contributes to VF outer-loop
8334 iterations in parallel). We therefore don't allow changing the
8335 order of the computation in the inner-loop during outer-loop
8336 vectorization. */
8337 /* TODO: Another case in which order doesn't *really* matter is when we
8338 widen and then contract again, e.g. (short)((int)x * y >> 8).
8339 Normally, pack_trunc performs an even/odd permute, whereas the
8340 repack from an even/odd expansion would be an interleave, which
8341 would be significantly simpler for e.g. AVX2. */
8342 /* In any case, in order to avoid duplicating the code below, recurse
8343 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
8344 are properly set up for the caller. If we fail, we'll continue with
8345 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
8346 if (vect_loop
8347 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
8348 && !nested_in_vect_loop_p (vect_loop, stmt)
8349 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
8350 stmt, vectype_out, vectype_in,
8351 code1, code2, multi_step_cvt,
8352 interm_types))
8353 {
8354 /* Elements in a vector with vect_used_by_reduction property cannot
8355 be reordered if the use chain with this property does not have the
8356 same operation. One such example is s += a * b, where elements
8357 in a and b cannot be reordered. Here we check if the vector defined
8358 by STMT is only directly used in the reduction statement. */
8359 tree lhs = gimple_assign_lhs (stmt);
8360 use_operand_p dummy;
8361 gimple *use_stmt;
8362 stmt_vec_info use_stmt_info = NULL;
8363 if (single_imm_use (lhs, &dummy, &use_stmt)
8364 && (use_stmt_info = vinfo_for_stmt (use_stmt))
8365 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
8366 return true;
8367 }
8368 c1 = VEC_WIDEN_MULT_LO_EXPR;
8369 c2 = VEC_WIDEN_MULT_HI_EXPR;
8370 break;
8371
8372 case DOT_PROD_EXPR:
8373 c1 = DOT_PROD_EXPR;
8374 c2 = DOT_PROD_EXPR;
8375 break;
8376
8377 case SAD_EXPR:
8378 c1 = SAD_EXPR;
8379 c2 = SAD_EXPR;
8380 break;
8381
8382 case VEC_WIDEN_MULT_EVEN_EXPR:
8383 /* Support the recursion induced just above. */
8384 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
8385 c2 = VEC_WIDEN_MULT_ODD_EXPR;
8386 break;
8387
8388 case WIDEN_LSHIFT_EXPR:
8389 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
8390 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
8391 break;
8392
8393 CASE_CONVERT:
8394 c1 = VEC_UNPACK_LO_EXPR;
8395 c2 = VEC_UNPACK_HI_EXPR;
8396 break;
8397
8398 case FLOAT_EXPR:
8399 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
8400 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
8401 break;
8402
8403 case FIX_TRUNC_EXPR:
8404 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
8405 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
8406 computing the operation. */
8407 return false;
8408
8409 default:
8410 gcc_unreachable ();
8411 }
8412
8413 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
8414 std::swap (c1, c2);
8415
8416 if (code == FIX_TRUNC_EXPR)
8417 {
8418 /* The signedness is determined from output operand. */
8419 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8420 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
8421 }
8422 else
8423 {
8424 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8425 optab2 = optab_for_tree_code (c2, vectype, optab_default);
8426 }
8427
8428 if (!optab1 || !optab2)
8429 return false;
8430
8431 vec_mode = TYPE_MODE (vectype);
8432 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
8433 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
8434 return false;
8435
8436 *code1 = c1;
8437 *code2 = c2;
8438
8439 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8440 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8441 return true;
8442
8443 /* Check if it's a multi-step conversion that can be done using intermediate
8444 types. */
8445
8446 prev_type = vectype;
8447 prev_mode = vec_mode;
8448
8449 if (!CONVERT_EXPR_CODE_P (code))
8450 return false;
8451
8452 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8453 intermediate steps in the promotion sequence. We try
8454 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
8455 not. */
8456 interm_types->create (MAX_INTERM_CVT_STEPS);
8457 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8458 {
8459 intermediate_mode = insn_data[icode1].operand[0].mode;
8460 intermediate_type
8461 = lang_hooks.types.type_for_mode (intermediate_mode,
8462 TYPE_UNSIGNED (prev_type));
8463 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
8464 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
8465
8466 if (!optab3 || !optab4
8467 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
8468 || insn_data[icode1].operand[0].mode != intermediate_mode
8469 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
8470 || insn_data[icode2].operand[0].mode != intermediate_mode
8471 || ((icode1 = optab_handler (optab3, intermediate_mode))
8472 == CODE_FOR_nothing)
8473 || ((icode2 = optab_handler (optab4, intermediate_mode))
8474 == CODE_FOR_nothing))
8475 break;
8476
8477 interm_types->quick_push (intermediate_type);
8478 (*multi_step_cvt)++;
8479
8480 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8481 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8482 return true;
8483
8484 prev_type = intermediate_type;
8485 prev_mode = intermediate_mode;
8486 }
8487
8488 interm_types->release ();
8489 return false;
8490 }
8491
8492
8493 /* Function supportable_narrowing_operation
8494
8495 Check whether an operation represented by the code CODE is a
8496 narrowing operation that is supported by the target platform in
8497 vector form (i.e., when operating on arguments of type VECTYPE_IN
8498 and producing a result of type VECTYPE_OUT).
8499
8500 Narrowing operations we currently support are NOP (CONVERT) and
8501 FIX_TRUNC. This function checks if these operations are supported by
8502 the target platform directly via vector tree-codes.
8503
8504 Output:
8505 - CODE1 is the code of a vector operation to be used when
8506 vectorizing the operation, if available.
8507 - MULTI_STEP_CVT determines the number of required intermediate steps in
8508 case of multi-step conversion (like int->short->char - in that case
8509 MULTI_STEP_CVT will be 1).
8510 - INTERM_TYPES contains the intermediate type required to perform the
8511 narrowing operation (short in the above example). */
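/* As an illustrative sketch: narrowing short -> char, when the target
   has a pack-truncate pattern for the input vector mode, sets *CODE1
   to VEC_PACK_TRUNC_EXPR with *MULTI_STEP_CVT == 0; the int -> char
   case mentioned above needs one intermediate step, with the short
   vector type recorded in INTERM_TYPES.  */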
8512
8513 bool
8514 supportable_narrowing_operation (enum tree_code code,
8515 tree vectype_out, tree vectype_in,
8516 enum tree_code *code1, int *multi_step_cvt,
8517 vec<tree> *interm_types)
8518 {
8519 machine_mode vec_mode;
8520 enum insn_code icode1;
8521 optab optab1, interm_optab;
8522 tree vectype = vectype_in;
8523 tree narrow_vectype = vectype_out;
8524 enum tree_code c1;
8525 tree intermediate_type;
8526 machine_mode intermediate_mode, prev_mode;
8527 int i;
8528 bool uns;
8529
8530 *multi_step_cvt = 0;
8531 switch (code)
8532 {
8533 CASE_CONVERT:
8534 c1 = VEC_PACK_TRUNC_EXPR;
8535 break;
8536
8537 case FIX_TRUNC_EXPR:
8538 c1 = VEC_PACK_FIX_TRUNC_EXPR;
8539 break;
8540
8541 case FLOAT_EXPR:
8542 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
8543 tree code and optabs used for computing the operation. */
8544 return false;
8545
8546 default:
8547 gcc_unreachable ();
8548 }
8549
8550 if (code == FIX_TRUNC_EXPR)
8551 /* The signedness is determined from output operand. */
8552 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8553 else
8554 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8555
8556 if (!optab1)
8557 return false;
8558
8559 vec_mode = TYPE_MODE (vectype);
8560 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
8561 return false;
8562
8563 *code1 = c1;
8564
8565 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8566 return true;
8567
8568 /* Check if it's a multi-step conversion that can be done using intermediate
8569 types. */
8570 prev_mode = vec_mode;
8571 if (code == FIX_TRUNC_EXPR)
8572 uns = TYPE_UNSIGNED (vectype_out);
8573 else
8574 uns = TYPE_UNSIGNED (vectype);
8575
8576 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
8577 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
8578 costly than signed. */
8579 if (code == FIX_TRUNC_EXPR && uns)
8580 {
8581 enum insn_code icode2;
8582
8583 intermediate_type
8584 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
8585 interm_optab
8586 = optab_for_tree_code (c1, intermediate_type, optab_default);
8587 if (interm_optab != unknown_optab
8588 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
8589 && insn_data[icode1].operand[0].mode
8590 == insn_data[icode2].operand[0].mode)
8591 {
8592 uns = false;
8593 optab1 = interm_optab;
8594 icode1 = icode2;
8595 }
8596 }
8597
8598 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8599 intermediate steps in the demotion sequence. We try
8600 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
8601 interm_types->create (MAX_INTERM_CVT_STEPS);
8602 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8603 {
8604 intermediate_mode = insn_data[icode1].operand[0].mode;
8605 intermediate_type
8606 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
8607 interm_optab
8608 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
8609 optab_default);
8610 if (!interm_optab
8611 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
8612 || insn_data[icode1].operand[0].mode != intermediate_mode
8613 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
8614 == CODE_FOR_nothing))
8615 break;
8616
8617 interm_types->quick_push (intermediate_type);
8618 (*multi_step_cvt)++;
8619
8620 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8621 return true;
8622
8623 prev_mode = intermediate_mode;
8624 optab1 = interm_optab;
8625 }
8626
8627 interm_types->release ();
8628 return false;
8629 }