gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "tree-pretty-print.h"
32 #include "gimple-pretty-print.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "cfglayout.h"
37 #include "expr.h"
38 #include "recog.h"
39 #include "optabs.h"
40 #include "diagnostic-core.h"
41 #include "tree-vectorizer.h"
42 #include "langhooks.h"
43
44
45 /* Return a variable of type ELEM_TYPE[NELEMS]. */
46
47 static tree
48 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
49 {
50 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
51 "vect_array");
52 }
53
54 /* ARRAY is an array of vectors created by create_vector_array.
55 Return an SSA_NAME for the vector in index N. The reference
56 is part of the vectorization of STMT and the vector is associated
57 with scalar destination SCALAR_DEST. */
58
59 static tree
60 read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
61 tree array, unsigned HOST_WIDE_INT n)
62 {
63 tree vect_type, vect, vect_name, array_ref;
64 gimple new_stmt;
65
66 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
67 vect_type = TREE_TYPE (TREE_TYPE (array));
68 vect = vect_create_destination_var (scalar_dest, vect_type);
69 array_ref = build4 (ARRAY_REF, vect_type, array,
70 build_int_cst (size_type_node, n),
71 NULL_TREE, NULL_TREE);
72
73 new_stmt = gimple_build_assign (vect, array_ref);
74 vect_name = make_ssa_name (vect, new_stmt);
75 gimple_assign_set_lhs (new_stmt, vect_name);
76 vect_finish_stmt_generation (stmt, new_stmt, gsi);
77 mark_symbols_for_renaming (new_stmt);
78
79 return vect_name;
80 }
81
82 /* ARRAY is an array of vectors created by create_vector_array.
83 Emit code to store SSA_NAME VECT in index N of the array.
84 The store is part of the vectorization of STMT. */
85
86 static void
87 write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
88 tree array, unsigned HOST_WIDE_INT n)
89 {
90 tree array_ref;
91 gimple new_stmt;
92
93 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
94 build_int_cst (size_type_node, n),
95 NULL_TREE, NULL_TREE);
96
97 new_stmt = gimple_build_assign (array_ref, vect);
98 vect_finish_stmt_generation (stmt, new_stmt, gsi);
99 mark_symbols_for_renaming (new_stmt);
100 }
101
102 /* PTR is a pointer to an array of type TYPE. Return a representation
103 of *PTR. The memory reference replaces those in FIRST_DR
104 (and its group). */
105
106 static tree
107 create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
108 {
109 struct ptr_info_def *pi;
110 tree mem_ref, alias_ptr_type;
111
112 alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
113 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
114 /* Arrays have the same alignment as their type. */
115 pi = get_ptr_info (ptr);
116 pi->align = TYPE_ALIGN_UNIT (type);
117 pi->misalign = 0;
118 return mem_ref;
119 }
120
121 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
122
123 /* Function vect_mark_relevant.
124
125 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
126
127 static void
128 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
129 enum vect_relevant relevant, bool live_p)
130 {
131 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
132 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
133 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
134
135 if (vect_print_dump_info (REPORT_DETAILS))
136 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
137
138 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
139 {
140 gimple pattern_stmt;
141
142 /* This is the last stmt in a sequence that was detected as a
143 pattern that can potentially be vectorized. Don't mark the stmt
144 as relevant/live because it's not going to be vectorized.
145 Instead mark the pattern-stmt that replaces it. */
146
147 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
148
149 if (vect_print_dump_info (REPORT_DETAILS))
150 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
151 stmt_info = vinfo_for_stmt (pattern_stmt);
152 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
153 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
154 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
155 stmt = pattern_stmt;
156 }
157
158 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
159 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
160 STMT_VINFO_RELEVANT (stmt_info) = relevant;
161
162 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
163 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
164 {
165 if (vect_print_dump_info (REPORT_DETAILS))
166 fprintf (vect_dump, "already marked relevant/live.");
167 return;
168 }
169
170 VEC_safe_push (gimple, heap, *worklist, stmt);
171 }
172
173
174 /* Function vect_stmt_relevant_p.
175
176 Return true if STMT in loop that is represented by LOOP_VINFO is
177 "relevant for vectorization".
178
179 A stmt is considered "relevant for vectorization" if:
180 - it has uses outside the loop.
181 - it has vdefs (it alters memory).
182 - it is a control stmt in the loop (except for the exit condition).
183
184 CHECKME: what other side effects would the vectorizer allow? */
185
186 static bool
187 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
188 enum vect_relevant *relevant, bool *live_p)
189 {
190 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
191 ssa_op_iter op_iter;
192 imm_use_iterator imm_iter;
193 use_operand_p use_p;
194 def_operand_p def_p;
195
196 *relevant = vect_unused_in_scope;
197 *live_p = false;
198
199 /* cond stmt other than loop exit cond. */
200 if (is_ctrl_stmt (stmt)
201 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
202 != loop_exit_ctrl_vec_info_type)
203 *relevant = vect_used_in_scope;
204
205 /* changing memory. */
206 if (gimple_code (stmt) != GIMPLE_PHI)
207 if (gimple_vdef (stmt))
208 {
209 if (vect_print_dump_info (REPORT_DETAILS))
210 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
211 *relevant = vect_used_in_scope;
212 }
213
214 /* uses outside the loop. */
215 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
216 {
217 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
218 {
219 basic_block bb = gimple_bb (USE_STMT (use_p));
220 if (!flow_bb_inside_loop_p (loop, bb))
221 {
222 if (vect_print_dump_info (REPORT_DETAILS))
223 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
224
225 if (is_gimple_debug (USE_STMT (use_p)))
226 continue;
227
228 /* We expect all such uses to be in the loop exit phis
229 (because of loop closed form) */
230 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
231 gcc_assert (bb == single_exit (loop)->dest);
232
233 *live_p = true;
234 }
235 }
236 }
237
238 return (*live_p || *relevant);
239 }
240
241
242 /* Function exist_non_indexing_operands_for_use_p
243
244 USE is one of the uses attached to STMT. Check if USE is
245 used in STMT for anything other than indexing an array. */
246
247 static bool
248 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
249 {
250 tree operand;
251 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
252
253 /* USE corresponds to some operand in STMT. If there is no data
254 reference in STMT, then any operand that corresponds to USE
255 is not indexing an array. */
256 if (!STMT_VINFO_DATA_REF (stmt_info))
257 return true;
258
259 /* STMT has a data_ref. FORNOW this means that it is one of
260 the following forms:
261 -1- ARRAY_REF = var
262 -2- var = ARRAY_REF
263 (This should have been verified in analyze_data_refs).
264
265 'var' in the second case corresponds to a def, not a use,
266 so USE cannot correspond to any operands that are not used
267 for array indexing.
268
269 Therefore, all we need to check is if STMT falls into the
270 first case, and whether var corresponds to USE. */
271
272 if (!gimple_assign_copy_p (stmt))
273 return false;
274 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
275 return false;
276 operand = gimple_assign_rhs1 (stmt);
277 if (TREE_CODE (operand) != SSA_NAME)
278 return false;
279
280 if (operand == use)
281 return true;
282
283 return false;
284 }
285
286
287 /*
288 Function process_use.
289
290 Inputs:
291 - a USE in STMT in a loop represented by LOOP_VINFO
292 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
293 that defined USE. This is done by calling mark_relevant and passing it
294 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
295
296 Outputs:
297 Generally, LIVE_P and RELEVANT are used to define the liveness and
298 relevance info of the DEF_STMT of this USE:
299 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
300 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
301 Exceptions:
302 - case 1: If USE is used only for address computations (e.g. array indexing),
303 which does not need to be directly vectorized, then the liveness/relevance
304 of the respective DEF_STMT is left unchanged.
305 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
306 skip DEF_STMT because it has already been processed.
307 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
308 be modified accordingly.
309
310 Return true if everything is as expected. Return false otherwise. */
311
312 static bool
313 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
314 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
315 {
316 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
317 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
318 stmt_vec_info dstmt_vinfo;
319 basic_block bb, def_bb;
320 tree def;
321 gimple def_stmt;
322 enum vect_def_type dt;
323
324 /* case 1: we are only interested in uses that need to be vectorized. Uses
325 that are used for address computation are not considered relevant. */
326 if (!exist_non_indexing_operands_for_use_p (use, stmt))
327 return true;
328
329 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
330 {
331 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
332 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
333 return false;
334 }
335
336 if (!def_stmt || gimple_nop_p (def_stmt))
337 return true;
338
339 def_bb = gimple_bb (def_stmt);
340 if (!flow_bb_inside_loop_p (loop, def_bb))
341 {
342 if (vect_print_dump_info (REPORT_DETAILS))
343 fprintf (vect_dump, "def_stmt is out of loop.");
344 return true;
345 }
346
347 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
348 DEF_STMT must have already been processed, because this should be the
349 only way that STMT, which is a reduction-phi, was put in the worklist,
350 as there should be no other uses for DEF_STMT in the loop. So we just
351 check that everything is as expected, and we are done. */
352 dstmt_vinfo = vinfo_for_stmt (def_stmt);
353 bb = gimple_bb (stmt);
354 if (gimple_code (stmt) == GIMPLE_PHI
355 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
356 && gimple_code (def_stmt) != GIMPLE_PHI
357 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
358 && bb->loop_father == def_bb->loop_father)
359 {
360 if (vect_print_dump_info (REPORT_DETAILS))
361 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
362 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
363 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
364 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
365 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
366 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
367 return true;
368 }
369
370 /* case 3a: outer-loop stmt defining an inner-loop stmt:
371 outer-loop-header-bb:
372 d = def_stmt
373 inner-loop:
374 stmt # use (d)
375 outer-loop-tail-bb:
376 ... */
377 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
378 {
379 if (vect_print_dump_info (REPORT_DETAILS))
380 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
381
382 switch (relevant)
383 {
384 case vect_unused_in_scope:
385 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
386 vect_used_in_scope : vect_unused_in_scope;
387 break;
388
389 case vect_used_in_outer_by_reduction:
390 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
391 relevant = vect_used_by_reduction;
392 break;
393
394 case vect_used_in_outer:
395 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
396 relevant = vect_used_in_scope;
397 break;
398
399 case vect_used_in_scope:
400 break;
401
402 default:
403 gcc_unreachable ();
404 }
405 }
406
407 /* case 3b: inner-loop stmt defining an outer-loop stmt:
408 outer-loop-header-bb:
409 ...
410 inner-loop:
411 d = def_stmt
412 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
413 stmt # use (d) */
414 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
415 {
416 if (vect_print_dump_info (REPORT_DETAILS))
417 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
418
419 switch (relevant)
420 {
421 case vect_unused_in_scope:
422 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
423 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
424 vect_used_in_outer_by_reduction : vect_unused_in_scope;
425 break;
426
427 case vect_used_by_reduction:
428 relevant = vect_used_in_outer_by_reduction;
429 break;
430
431 case vect_used_in_scope:
432 relevant = vect_used_in_outer;
433 break;
434
435 default:
436 gcc_unreachable ();
437 }
438 }
439
440 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
441 return true;
442 }
443
444
445 /* Function vect_mark_stmts_to_be_vectorized.
446
447 Not all stmts in the loop need to be vectorized. For example:
448
449 for i...
450 for j...
451 1. T0 = i + j
452 2. T1 = a[T0]
453
454 3. j = j + 1
455
456 Stmts 1 and 3 do not need to be vectorized, because loop control and
457 addressing of vectorized data-refs are handled differently.
458
459 This pass detects such stmts. */
460
461 bool
462 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
463 {
464 VEC(gimple,heap) *worklist;
465 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
466 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
467 unsigned int nbbs = loop->num_nodes;
468 gimple_stmt_iterator si;
469 gimple stmt;
470 unsigned int i;
471 stmt_vec_info stmt_vinfo;
472 basic_block bb;
473 gimple phi;
474 bool live_p;
475 enum vect_relevant relevant, tmp_relevant;
476 enum vect_def_type def_type;
477
478 if (vect_print_dump_info (REPORT_DETAILS))
479 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
480
481 worklist = VEC_alloc (gimple, heap, 64);
482
483 /* 1. Init worklist. */
484 for (i = 0; i < nbbs; i++)
485 {
486 bb = bbs[i];
487 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
488 {
489 phi = gsi_stmt (si);
490 if (vect_print_dump_info (REPORT_DETAILS))
491 {
492 fprintf (vect_dump, "init: phi relevant? ");
493 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
494 }
495
496 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
497 vect_mark_relevant (&worklist, phi, relevant, live_p);
498 }
499 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
500 {
501 stmt = gsi_stmt (si);
502 if (vect_print_dump_info (REPORT_DETAILS))
503 {
504 fprintf (vect_dump, "init: stmt relevant? ");
505 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
506 }
507
508 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
509 vect_mark_relevant (&worklist, stmt, relevant, live_p);
510 }
511 }
512
513 /* 2. Process_worklist */
514 while (VEC_length (gimple, worklist) > 0)
515 {
516 use_operand_p use_p;
517 ssa_op_iter iter;
518
519 stmt = VEC_pop (gimple, worklist);
520 if (vect_print_dump_info (REPORT_DETAILS))
521 {
522 fprintf (vect_dump, "worklist: examine stmt: ");
523 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
524 }
525
526 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
527 (DEF_STMT) as relevant/irrelevant and live/dead according to the
528 liveness and relevance properties of STMT. */
529 stmt_vinfo = vinfo_for_stmt (stmt);
530 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
531 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
532
533 /* Generally, the liveness and relevance properties of STMT are
534 propagated as is to the DEF_STMTs of its USEs:
535 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
536 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
537
538 One exception is when STMT has been identified as defining a reduction
539 variable; in this case we set the liveness/relevance as follows:
540 live_p = false
541 relevant = vect_used_by_reduction
542 This is because we distinguish between two kinds of relevant stmts -
543 those that are used by a reduction computation, and those that are
544 (also) used by a regular computation. This allows us later on to
545 identify stmts that are used solely by a reduction, and therefore the
546 order of the results that they produce does not have to be kept. */
547
548 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
549 tmp_relevant = relevant;
550 switch (def_type)
551 {
552 case vect_reduction_def:
553 switch (tmp_relevant)
554 {
555 case vect_unused_in_scope:
556 relevant = vect_used_by_reduction;
557 break;
558
559 case vect_used_by_reduction:
560 if (gimple_code (stmt) == GIMPLE_PHI)
561 break;
562 /* fall through */
563
564 default:
565 if (vect_print_dump_info (REPORT_DETAILS))
566 fprintf (vect_dump, "unsupported use of reduction.");
567
568 VEC_free (gimple, heap, worklist);
569 return false;
570 }
571
572 live_p = false;
573 break;
574
575 case vect_nested_cycle:
576 if (tmp_relevant != vect_unused_in_scope
577 && tmp_relevant != vect_used_in_outer_by_reduction
578 && tmp_relevant != vect_used_in_outer)
579 {
580 if (vect_print_dump_info (REPORT_DETAILS))
581 fprintf (vect_dump, "unsupported use of nested cycle.");
582
583 VEC_free (gimple, heap, worklist);
584 return false;
585 }
586
587 live_p = false;
588 break;
589
590 case vect_double_reduction_def:
591 if (tmp_relevant != vect_unused_in_scope
592 && tmp_relevant != vect_used_by_reduction)
593 {
594 if (vect_print_dump_info (REPORT_DETAILS))
595 fprintf (vect_dump, "unsupported use of double reduction.");
596
597 VEC_free (gimple, heap, worklist);
598 return false;
599 }
600
601 live_p = false;
602 break;
603
604 default:
605 break;
606 }
607
608 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
609 {
610 tree op = USE_FROM_PTR (use_p);
611 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
612 {
613 VEC_free (gimple, heap, worklist);
614 return false;
615 }
616 }
617 } /* while worklist */
618
619 VEC_free (gimple, heap, worklist);
620 return true;
621 }
622
623
624 /* Get the cost of a statement kind by calling the target cost builtin. */
625
626 static inline int
627 vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
628 {
629 tree dummy_type = NULL;
630 int dummy = 0;
631
632 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
633 dummy_type, dummy);
634 }
635
636
637 /* Get cost for STMT. */
638
639 int
640 cost_for_stmt (gimple stmt)
641 {
642 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
643
644 switch (STMT_VINFO_TYPE (stmt_info))
645 {
646 case load_vec_info_type:
647 return vect_get_stmt_cost (scalar_load);
648 case store_vec_info_type:
649 return vect_get_stmt_cost (scalar_store);
650 case op_vec_info_type:
651 case condition_vec_info_type:
652 case assignment_vec_info_type:
653 case reduc_vec_info_type:
654 case induc_vec_info_type:
655 case type_promotion_vec_info_type:
656 case type_demotion_vec_info_type:
657 case type_conversion_vec_info_type:
658 case call_vec_info_type:
659 return vect_get_stmt_cost (scalar_stmt);
660 case undef_vec_info_type:
661 default:
662 gcc_unreachable ();
663 }
664 }
665
666 /* Function vect_model_simple_cost.
667
668 Models cost for simple operations, i.e. those that only emit ncopies of a
669 single op. Right now, this does not account for multiple insns that could
670 be generated for the single vector op. We will handle that shortly. */
671
672 void
673 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
674 enum vect_def_type *dt, slp_tree slp_node)
675 {
676 int i;
677 int inside_cost = 0, outside_cost = 0;
678
679 /* The SLP costs were already calculated during SLP tree build. */
680 if (PURE_SLP_STMT (stmt_info))
681 return;
682
683 inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);
684
685 /* FORNOW: Assuming maximum 2 args per stmt. */
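/* A constant or loop-invariant operand requires building the invariant
   vector outside the loop; this is counted below as one vector_stmt per
   such operand. */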
686 for (i = 0; i < 2; i++)
687 {
688 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
689 outside_cost += vect_get_stmt_cost (vector_stmt);
690 }
691
692 if (vect_print_dump_info (REPORT_COST))
693 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
694 "outside_cost = %d .", inside_cost, outside_cost);
695
696 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
697 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
698 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
699 }
700
701
702 /* Function vect_cost_strided_group_size
703
704 For strided load or store, return the group_size only if it is the first
705 load or store of a group, else return 1. This ensures that group size is
706 only returned once per group. */
707
708 static int
709 vect_cost_strided_group_size (stmt_vec_info stmt_info)
710 {
711 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
712
713 if (first_stmt == STMT_VINFO_STMT (stmt_info))
714 return DR_GROUP_SIZE (stmt_info);
715
716 return 1;
717 }
718
719
720 /* Function vect_model_store_cost
721
722 Models cost for stores. In the case of strided accesses, one access
723 has the overhead of the strided access attributed to it. */
724
725 void
726 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
727 bool store_lanes_p, enum vect_def_type dt,
728 slp_tree slp_node)
729 {
730 int group_size;
731 unsigned int inside_cost = 0, outside_cost = 0;
732 struct data_reference *first_dr;
733 gimple first_stmt;
734
735 /* The SLP costs were already calculated during SLP tree build. */
736 if (PURE_SLP_STMT (stmt_info))
737 return;
738
739 if (dt == vect_constant_def || dt == vect_external_def)
740 outside_cost = vect_get_stmt_cost (scalar_to_vec);
741
742 /* Strided access? */
743 if (DR_GROUP_FIRST_DR (stmt_info))
744 {
745 if (slp_node)
746 {
747 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
748 group_size = 1;
749 }
750 else
751 {
752 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
753 group_size = vect_cost_strided_group_size (stmt_info);
754 }
755
756 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
757 }
758 /* Not a strided access. */
759 else
760 {
761 group_size = 1;
762 first_dr = STMT_VINFO_DATA_REF (stmt_info);
763 }
764
765 /* We assume that the cost of a single store-lanes instruction is
766 equivalent to the cost of GROUP_SIZE separate stores. If a strided
767 access is instead being provided by a permute-and-store operation,
768 include the cost of the permutes. */
769 if (!store_lanes_p && group_size > 1)
770 {
771 /* Uses a high and low interleave operation for each needed permute. */
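/* E.g. for GROUP_SIZE == 4 this accounts for log2 (4) * 4 == 8 interleave
   stmts per copy. */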
772 inside_cost = ncopies * exact_log2 (group_size) * group_size
773 * vect_get_stmt_cost (vector_stmt);
774
775 if (vect_print_dump_info (REPORT_COST))
776 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
777 group_size);
778
779 }
780
781 /* Costs of the stores. */
782 vect_get_store_cost (first_dr, ncopies, &inside_cost);
783
784 if (vect_print_dump_info (REPORT_COST))
785 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
786 "outside_cost = %d .", inside_cost, outside_cost);
787
788 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
789 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
790 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
791 }
792
793
794 /* Calculate cost of DR's memory access. */
795 void
796 vect_get_store_cost (struct data_reference *dr, int ncopies,
797 unsigned int *inside_cost)
798 {
799 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
800
801 switch (alignment_support_scheme)
802 {
803 case dr_aligned:
804 {
805 *inside_cost += ncopies * vect_get_stmt_cost (vector_store);
806
807 if (vect_print_dump_info (REPORT_COST))
808 fprintf (vect_dump, "vect_model_store_cost: aligned.");
809
810 break;
811 }
812
813 case dr_unaligned_supported:
814 {
815 gimple stmt = DR_STMT (dr);
816 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
817 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
818
819 /* Here, we assign an additional cost for the unaligned store. */
820 *inside_cost += ncopies
821 * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
822 vectype, DR_MISALIGNMENT (dr));
823
824 if (vect_print_dump_info (REPORT_COST))
825 fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
826 "hardware.");
827
828 break;
829 }
830
831 default:
832 gcc_unreachable ();
833 }
834 }
835
836
837 /* Function vect_model_load_cost
838
839 Models cost for loads. In the case of strided accesses, the last access
840 has the overhead of the strided access attributed to it. Since unaligned
841 accesses are supported for loads, we also account for the costs of the
842 access scheme chosen. */
843
844 void
845 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
846 slp_tree slp_node)
847 {
848 int group_size;
849 gimple first_stmt;
850 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
851 unsigned int inside_cost = 0, outside_cost = 0;
852
853 /* The SLP costs were already calculated during SLP tree build. */
854 if (PURE_SLP_STMT (stmt_info))
855 return;
856
857 /* Strided accesses? */
858 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
859 if (first_stmt && !slp_node)
860 {
861 group_size = vect_cost_strided_group_size (stmt_info);
862 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
863 }
864 /* Not a strided access. */
865 else
866 {
867 group_size = 1;
868 first_dr = dr;
869 }
870
871 /* We assume that the cost of a single load-lanes instruction is
872 equivalent to the cost of GROUP_SIZE separate loads. If a strided
873 access is instead being provided by a load-and-permute operation,
874 include the cost of the permutes. */
875 if (!load_lanes_p && group_size > 1)
876 {
877 /* Uses an even and an odd extract operation for each needed permute. */
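/* E.g. for GROUP_SIZE == 4 this accounts for log2 (4) * 4 == 8 extract
   stmts per copy. */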
878 inside_cost = ncopies * exact_log2 (group_size) * group_size
879 * vect_get_stmt_cost (vector_stmt);
880
881 if (vect_print_dump_info (REPORT_COST))
882 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
883 group_size);
884 }
885
886 /* The loads themselves. */
887 vect_get_load_cost (first_dr, ncopies,
888 ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node),
889 &inside_cost, &outside_cost);
890
891 if (vect_print_dump_info (REPORT_COST))
892 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
893 "outside_cost = %d .", inside_cost, outside_cost);
894
895 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
896 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
897 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
898 }
899
900
901 /* Calculate cost of DR's memory access. */
902 void
903 vect_get_load_cost (struct data_reference *dr, int ncopies,
904 bool add_realign_cost, unsigned int *inside_cost,
905 unsigned int *outside_cost)
906 {
907 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
908
909 switch (alignment_support_scheme)
910 {
911 case dr_aligned:
912 {
913 *inside_cost += ncopies * vect_get_stmt_cost (vector_load);
914
915 if (vect_print_dump_info (REPORT_COST))
916 fprintf (vect_dump, "vect_model_load_cost: aligned.");
917
918 break;
919 }
920 case dr_unaligned_supported:
921 {
922 gimple stmt = DR_STMT (dr);
923 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
924 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
925
926 /* Here, we assign an additional cost for the unaligned load. */
927 *inside_cost += ncopies
928 * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
929 vectype, DR_MISALIGNMENT (dr));
930 if (vect_print_dump_info (REPORT_COST))
931 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
932 "hardware.");
933
934 break;
935 }
936 case dr_explicit_realign:
937 {
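/* Each copy issues the two aligned loads that straddle the unaligned
   address plus the stmt that combines them, hence the 2 * vector_load
   + vector_stmt cost below. */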
938 *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
939 + vect_get_stmt_cost (vector_stmt));
940
941 /* FIXME: If the misalignment remains fixed across the iterations of
942 the containing loop, the following cost should be added to the
943 outside costs. */
944 if (targetm.vectorize.builtin_mask_for_load)
945 *inside_cost += vect_get_stmt_cost (vector_stmt);
946
947 break;
948 }
949 case dr_explicit_realign_optimized:
950 {
951 if (vect_print_dump_info (REPORT_COST))
952 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
953 "pipelined.");
954
955 /* Unaligned software pipeline has a load of an address, an initial
956 load, and possibly a mask operation to "prime" the loop. However,
957 if this is an access in a group of loads, which provide strided
958 access, then the above cost should only be considered for one
959 access in the group. Inside the loop, there is a load op
960 and a realignment op. */
961
962 if (add_realign_cost)
963 {
964 *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
965 if (targetm.vectorize.builtin_mask_for_load)
966 *outside_cost += vect_get_stmt_cost (vector_stmt);
967 }
968
969 *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
970 + vect_get_stmt_cost (vector_stmt));
971 break;
972 }
973
974 default:
975 gcc_unreachable ();
976 }
977 }
978
979
980 /* Function vect_init_vector.
981
982 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
983 the vector elements of VECTOR_VAR. Place the initialization at GSI if it
984 is not NULL. Otherwise, place the initialization at the loop preheader.
985 Return the DEF of INIT_STMT.
986 It will be used in the vectorization of STMT. */
987
988 tree
989 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
990 gimple_stmt_iterator *gsi)
991 {
992 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
993 tree new_var;
994 gimple init_stmt;
995 tree vec_oprnd;
996 edge pe;
997 tree new_temp;
998 basic_block new_bb;
999
1000 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
1001 add_referenced_var (new_var);
1002 init_stmt = gimple_build_assign (new_var, vector_var);
1003 new_temp = make_ssa_name (new_var, init_stmt);
1004 gimple_assign_set_lhs (init_stmt, new_temp);
1005
1006 if (gsi)
1007 vect_finish_stmt_generation (stmt, init_stmt, gsi);
1008 else
1009 {
1010 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1011
1012 if (loop_vinfo)
1013 {
1014 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1015
1016 if (nested_in_vect_loop_p (loop, stmt))
1017 loop = loop->inner;
1018
1019 pe = loop_preheader_edge (loop);
1020 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
1021 gcc_assert (!new_bb);
1022 }
1023 else
1024 {
1025 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1026 basic_block bb;
1027 gimple_stmt_iterator gsi_bb_start;
1028
1029 gcc_assert (bb_vinfo);
1030 bb = BB_VINFO_BB (bb_vinfo);
1031 gsi_bb_start = gsi_after_labels (bb);
1032 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
1033 }
1034 }
1035
1036 if (vect_print_dump_info (REPORT_DETAILS))
1037 {
1038 fprintf (vect_dump, "created new init_stmt: ");
1039 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
1040 }
1041
1042 vec_oprnd = gimple_assign_lhs (init_stmt);
1043 return vec_oprnd;
1044 }
1045
1046
1047 /* Function vect_get_vec_def_for_operand.
1048
1049 OP is an operand in STMT. This function returns a (vector) def that will be
1050 used in the vectorized stmt for STMT.
1051
1052 In the case that OP is an SSA_NAME which is defined in the loop, then
1053 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1054
1055 In case OP is an invariant or constant, a new stmt that creates a vector def
1056 needs to be introduced. */
1057
1058 tree
1059 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
1060 {
1061 tree vec_oprnd;
1062 gimple vec_stmt;
1063 gimple def_stmt;
1064 stmt_vec_info def_stmt_info = NULL;
1065 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1066 unsigned int nunits;
1067 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1068 tree vec_inv;
1069 tree vec_cst;
1070 tree t = NULL_TREE;
1071 tree def;
1072 int i;
1073 enum vect_def_type dt;
1074 bool is_simple_use;
1075 tree vector_type;
1076
1077 if (vect_print_dump_info (REPORT_DETAILS))
1078 {
1079 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
1080 print_generic_expr (vect_dump, op, TDF_SLIM);
1081 }
1082
1083 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
1084 &dt);
1085 gcc_assert (is_simple_use);
1086 if (vect_print_dump_info (REPORT_DETAILS))
1087 {
1088 if (def)
1089 {
1090 fprintf (vect_dump, "def = ");
1091 print_generic_expr (vect_dump, def, TDF_SLIM);
1092 }
1093 if (def_stmt)
1094 {
1095 fprintf (vect_dump, " def_stmt = ");
1096 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1097 }
1098 }
1099
1100 switch (dt)
1101 {
1102 /* Case 1: operand is a constant. */
1103 case vect_constant_def:
1104 {
1105 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1106 gcc_assert (vector_type);
1107 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1108
1109 if (scalar_def)
1110 *scalar_def = op;
1111
1112 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1113 if (vect_print_dump_info (REPORT_DETAILS))
1114 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
1115
1116 vec_cst = build_vector_from_val (vector_type, op);
1117 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
1118 }
1119
1120 /* Case 2: operand is defined outside the loop - loop invariant. */
1121 case vect_external_def:
1122 {
1123 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1124 gcc_assert (vector_type);
1125 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1126
1127 if (scalar_def)
1128 *scalar_def = def;
1129
1130 /* Create 'vec_inv = {inv,inv,..,inv}' */
1131 if (vect_print_dump_info (REPORT_DETAILS))
1132 fprintf (vect_dump, "Create vector_inv.");
1133
1134 for (i = nunits - 1; i >= 0; --i)
1135 {
1136 t = tree_cons (NULL_TREE, def, t);
1137 }
1138
1139 /* FIXME: use build_constructor directly. */
1140 vec_inv = build_constructor_from_list (vector_type, t);
1141 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
1142 }
1143
1144 /* Case 3: operand is defined inside the loop. */
1145 case vect_internal_def:
1146 {
1147 if (scalar_def)
1148 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1149
1150 /* Get the def from the vectorized stmt. */
1151 def_stmt_info = vinfo_for_stmt (def_stmt);
1152 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1153 gcc_assert (vec_stmt);
1154 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1155 vec_oprnd = PHI_RESULT (vec_stmt);
1156 else if (is_gimple_call (vec_stmt))
1157 vec_oprnd = gimple_call_lhs (vec_stmt);
1158 else
1159 vec_oprnd = gimple_assign_lhs (vec_stmt);
1160 return vec_oprnd;
1161 }
1162
1163 /* Case 4: operand is defined by a loop header phi - reduction */
1164 case vect_reduction_def:
1165 case vect_double_reduction_def:
1166 case vect_nested_cycle:
1167 {
1168 struct loop *loop;
1169
1170 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1171 loop = (gimple_bb (def_stmt))->loop_father;
1172
1173 /* Get the def before the loop */
1174 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1175 return get_initial_def_for_reduction (stmt, op, scalar_def);
1176 }
1177
1178 /* Case 5: operand is defined by loop-header phi - induction. */
1179 case vect_induction_def:
1180 {
1181 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1182
1183 /* Get the def from the vectorized stmt. */
1184 def_stmt_info = vinfo_for_stmt (def_stmt);
1185 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1186 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1187 vec_oprnd = PHI_RESULT (vec_stmt);
1188 else
1189 vec_oprnd = gimple_get_lhs (vec_stmt);
1190 return vec_oprnd;
1191 }
1192
1193 default:
1194 gcc_unreachable ();
1195 }
1196 }
1197
1198
1199 /* Function vect_get_vec_def_for_stmt_copy
1200
1201 Return a vector-def for an operand. This function is used when the
1202 vectorized stmt to be created (by the caller to this function) is a "copy"
1203 created in case the vectorized result cannot fit in one vector, and several
1204 copies of the vector-stmt are required. In this case the vector-def is
1205 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1206 of the stmt that defines VEC_OPRND.
1207 DT is the type of the vector def VEC_OPRND.
1208
1209 Context:
1210 In case the vectorization factor (VF) is bigger than the number
1211 of elements that can fit in a vectype (nunits), we have to generate
1212 more than one vector stmt to vectorize the scalar stmt. This situation
1213 arises when there are multiple data-types operated upon in the loop; the
1214 smallest data-type determines the VF, and as a result, when vectorizing
1215 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1216 vector stmt (each computing a vector of 'nunits' results, and together
1217 computing 'VF' results in each iteration). This function is called when
1218 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1219 which VF=16 and nunits=4, so the number of copies required is 4):
1220
1221 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1222
1223 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1224 VS1.1: vx.1 = memref1 VS1.2
1225 VS1.2: vx.2 = memref2 VS1.3
1226 VS1.3: vx.3 = memref3
1227
1228 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1229 VSnew.1: vz1 = vx.1 + ... VSnew.2
1230 VSnew.2: vz2 = vx.2 + ... VSnew.3
1231 VSnew.3: vz3 = vx.3 + ...
1232
1233 The vectorization of S1 is explained in vectorizable_load.
1234 The vectorization of S2:
1235 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1236 the function 'vect_get_vec_def_for_operand' is called to
1237 get the relevant vector-def for each operand of S2. For operand x it
1238 returns the vector-def 'vx.0'.
1239
1240 To create the remaining copies of the vector-stmt (VSnew.j), this
1241 function is called to get the relevant vector-def for each operand. It is
1242 obtained from the respective VS1.j stmt, which is recorded in the
1243 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1244
1245 For example, to obtain the vector-def 'vx.1' in order to create the
1246 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1247 Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
1248 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1249 and return its def ('vx.1').
1250 Overall, to create the above sequence this function will be called 3 times:
1251 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1252 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1253 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1254
1255 tree
1256 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1257 {
1258 gimple vec_stmt_for_operand;
1259 stmt_vec_info def_stmt_info;
1260
1261 /* Do nothing; can reuse same def. */
1262 if (dt == vect_external_def || dt == vect_constant_def)
1263 return vec_oprnd;
1264
1265 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1266 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1267 gcc_assert (def_stmt_info);
1268 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1269 gcc_assert (vec_stmt_for_operand);
1270 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1271 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1272 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1273 else
1274 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1275 return vec_oprnd;
1276 }
1277
1278
1279 /* Get vectorized definitions for the operands to create a copy of an original
1280 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1281
1282 static void
1283 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1284 VEC(tree,heap) **vec_oprnds0,
1285 VEC(tree,heap) **vec_oprnds1)
1286 {
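/* Replace the defs obtained for the previous copy with those for the next
   copy; vect_get_vec_def_for_stmt_copy follows STMT_VINFO_RELATED_STMT of
   the defining stmt to find them. */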
1287 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1288
1289 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1290 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1291
1292 if (vec_oprnds1 && *vec_oprnds1)
1293 {
1294 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1295 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1296 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1297 }
1298 }
1299
1300
1301 /* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
1302 NULL. */
1303
1304 static void
1305 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1306 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1307 slp_tree slp_node)
1308 {
1309 if (slp_node)
1310 vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
1311 else
1312 {
1313 tree vec_oprnd;
1314
1315 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1316 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1317 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1318
1319 if (op1)
1320 {
1321 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1322 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1323 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1324 }
1325 }
1326 }
1327
1328
1329 /* Function vect_finish_stmt_generation.
1330
1331 Insert a new stmt. */
1332
1333 void
1334 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1335 gimple_stmt_iterator *gsi)
1336 {
1337 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1338 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1339 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1340
1341 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1342
1343 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1344
1345 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1346 bb_vinfo));
1347
1348 if (vect_print_dump_info (REPORT_DETAILS))
1349 {
1350 fprintf (vect_dump, "add new stmt: ");
1351 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1352 }
1353
1354 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1355 }
1356
1357 /* Checks if CALL can be vectorized with types VECTYPE_OUT and VECTYPE_IN.
1358 Returns a function declaration if the target has a vectorized version
1359 of the function, or NULL_TREE if the function cannot be vectorized. */
1360
1361 tree
1362 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1363 {
1364 tree fndecl = gimple_call_fndecl (call);
1365
1366 /* We only handle functions that do not read or clobber memory -- i.e.
1367 const or novops ones. */
1368 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1369 return NULL_TREE;
1370
1371 if (!fndecl
1372 || TREE_CODE (fndecl) != FUNCTION_DECL
1373 || !DECL_BUILT_IN (fndecl))
1374 return NULL_TREE;
1375
1376 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1377 vectype_in);
1378 }
1379
1380 /* Function vectorizable_call.
1381
1382 Check if STMT performs a function call that can be vectorized.
1383 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1384 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1385 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1386
1387 static bool
1388 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1389 {
1390 tree vec_dest;
1391 tree scalar_dest;
1392 tree op, type;
1393 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1394 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1395 tree vectype_out, vectype_in;
1396 int nunits_in;
1397 int nunits_out;
1398 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1399 tree fndecl, new_temp, def, rhs_type;
1400 gimple def_stmt;
1401 enum vect_def_type dt[3]
1402 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
1403 gimple new_stmt = NULL;
1404 int ncopies, j;
1405 VEC(tree, heap) *vargs = NULL;
1406 enum { NARROW, NONE, WIDEN } modifier;
1407 size_t i, nargs;
1408
1409 /* FORNOW: unsupported in basic block SLP. */
1410 gcc_assert (loop_vinfo);
1411
1412 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1413 return false;
1414
1415 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1416 return false;
1417
1418 /* FORNOW: SLP not supported. */
1419 if (STMT_SLP_TYPE (stmt_info))
1420 return false;
1421
1422 /* Is STMT a vectorizable call? */
1423 if (!is_gimple_call (stmt))
1424 return false;
1425
1426 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1427 return false;
1428
1429 if (stmt_can_throw_internal (stmt))
1430 return false;
1431
1432 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1433
1434 /* Process function arguments. */
1435 rhs_type = NULL_TREE;
1436 vectype_in = NULL_TREE;
1437 nargs = gimple_call_num_args (stmt);
1438
1439 /* Bail out if the function has more than three arguments; we do not have
1440 interesting builtin functions to vectorize with more than two arguments
1441 except for fma. Calls with no arguments are not supported either. */
1442 if (nargs == 0 || nargs > 3)
1443 return false;
1444
1445 for (i = 0; i < nargs; i++)
1446 {
1447 tree opvectype;
1448
1449 op = gimple_call_arg (stmt, i);
1450
1451 /* We can only handle calls with arguments of the same type. */
1452 if (rhs_type
1453 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
1454 {
1455 if (vect_print_dump_info (REPORT_DETAILS))
1456 fprintf (vect_dump, "argument types differ.");
1457 return false;
1458 }
1459 if (!rhs_type)
1460 rhs_type = TREE_TYPE (op);
1461
1462 if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
1463 &def_stmt, &def, &dt[i], &opvectype))
1464 {
1465 if (vect_print_dump_info (REPORT_DETAILS))
1466 fprintf (vect_dump, "use not simple.");
1467 return false;
1468 }
1469
1470 if (!vectype_in)
1471 vectype_in = opvectype;
1472 else if (opvectype
1473 && opvectype != vectype_in)
1474 {
1475 if (vect_print_dump_info (REPORT_DETAILS))
1476 fprintf (vect_dump, "argument vector types differ.");
1477 return false;
1478 }
1479 }
1480 /* If all arguments are external or constant defs, use a vector type with
1481 the same size as the output vector type. */
1482 if (!vectype_in)
1483 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1484 if (vec_stmt)
1485 gcc_assert (vectype_in);
1486 if (!vectype_in)
1487 {
1488 if (vect_print_dump_info (REPORT_DETAILS))
1489 {
1490 fprintf (vect_dump, "no vectype for scalar type ");
1491 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1492 }
1493
1494 return false;
1495 }
1496
1497 /* FORNOW */
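/* Derive the widening/narrowing kind from the element counts. E.g. a call
   taking V2DF arguments and producing a V4SF result has nunits_in == 2 and
   nunits_out == 4, so it is a NARROW operation and consumes two vector defs
   per argument for each result vector (see the NARROW case below). */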
1498 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1499 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1500 if (nunits_in == nunits_out / 2)
1501 modifier = NARROW;
1502 else if (nunits_out == nunits_in)
1503 modifier = NONE;
1504 else if (nunits_out == nunits_in / 2)
1505 modifier = WIDEN;
1506 else
1507 return false;
1508
1509 /* For now, we only vectorize functions if a target specific builtin
1510 is available. TODO -- in some cases, it might be profitable to
1511 insert the calls for pieces of the vector, in order to be able
1512 to vectorize other operations in the loop. */
1513 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1514 if (fndecl == NULL_TREE)
1515 {
1516 if (vect_print_dump_info (REPORT_DETAILS))
1517 fprintf (vect_dump, "function is not vectorizable.");
1518
1519 return false;
1520 }
1521
1522 gcc_assert (!gimple_vuse (stmt));
1523
1524 if (modifier == NARROW)
1525 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1526 else
1527 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1528
1529 /* Sanity check: make sure that at least one copy of the vectorized stmt
1530 needs to be generated. */
1531 gcc_assert (ncopies >= 1);
1532
1533 if (!vec_stmt) /* transformation not required. */
1534 {
1535 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1536 if (vect_print_dump_info (REPORT_DETAILS))
1537 fprintf (vect_dump, "=== vectorizable_call ===");
1538 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1539 return true;
1540 }
1541
1542 /** Transform. **/
1543
1544 if (vect_print_dump_info (REPORT_DETAILS))
1545 fprintf (vect_dump, "transform operation.");
1546
1547 /* Handle def. */
1548 scalar_dest = gimple_call_lhs (stmt);
1549 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1550
1551 prev_stmt_info = NULL;
1552 switch (modifier)
1553 {
1554 case NONE:
1555 for (j = 0; j < ncopies; ++j)
1556 {
1557 /* Build argument list for the vectorized call. */
1558 if (j == 0)
1559 vargs = VEC_alloc (tree, heap, nargs);
1560 else
1561 VEC_truncate (tree, vargs, 0);
1562
1563 for (i = 0; i < nargs; i++)
1564 {
1565 op = gimple_call_arg (stmt, i);
1566 if (j == 0)
1567 vec_oprnd0
1568 = vect_get_vec_def_for_operand (op, stmt, NULL);
1569 else
1570 {
1571 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1572 vec_oprnd0
1573 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1574 }
1575
1576 VEC_quick_push (tree, vargs, vec_oprnd0);
1577 }
1578
1579 new_stmt = gimple_build_call_vec (fndecl, vargs);
1580 new_temp = make_ssa_name (vec_dest, new_stmt);
1581 gimple_call_set_lhs (new_stmt, new_temp);
1582
1583 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1584 mark_symbols_for_renaming (new_stmt);
1585
1586 if (j == 0)
1587 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1588 else
1589 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1590
1591 prev_stmt_info = vinfo_for_stmt (new_stmt);
1592 }
1593
1594 break;
1595
1596 case NARROW:
1597 for (j = 0; j < ncopies; ++j)
1598 {
1599 /* Build argument list for the vectorized call. */
1600 if (j == 0)
1601 vargs = VEC_alloc (tree, heap, nargs * 2);
1602 else
1603 VEC_truncate (tree, vargs, 0);
1604
1605 for (i = 0; i < nargs; i++)
1606 {
1607 op = gimple_call_arg (stmt, i);
1608 if (j == 0)
1609 {
1610 vec_oprnd0
1611 = vect_get_vec_def_for_operand (op, stmt, NULL);
1612 vec_oprnd1
1613 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1614 }
1615 else
1616 {
1617 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1618 vec_oprnd0
1619 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1620 vec_oprnd1
1621 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1622 }
1623
1624 VEC_quick_push (tree, vargs, vec_oprnd0);
1625 VEC_quick_push (tree, vargs, vec_oprnd1);
1626 }
1627
1628 new_stmt = gimple_build_call_vec (fndecl, vargs);
1629 new_temp = make_ssa_name (vec_dest, new_stmt);
1630 gimple_call_set_lhs (new_stmt, new_temp);
1631
1632 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1633 mark_symbols_for_renaming (new_stmt);
1634
1635 if (j == 0)
1636 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1637 else
1638 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1639
1640 prev_stmt_info = vinfo_for_stmt (new_stmt);
1641 }
1642
1643 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1644
1645 break;
1646
1647 case WIDEN:
1648 /* No current target implements this case. */
1649 return false;
1650 }
1651
1652 VEC_free (tree, heap, vargs);
1653
1654 /* Update the exception handling table with the vector stmt if necessary. */
1655 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1656 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1657
1658 /* The call in STMT might prevent it from being removed in dce.
1659 We however cannot remove it here, due to the way the ssa name
1660 it defines is mapped to the new definition. So just replace
1661 the rhs of the statement with something harmless. */
1662
1663 type = TREE_TYPE (scalar_dest);
1664 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1665 build_zero_cst (type));
1666 set_vinfo_for_stmt (new_stmt, stmt_info);
1667 set_vinfo_for_stmt (stmt, NULL);
1668 STMT_VINFO_STMT (stmt_info) = new_stmt;
1669 gsi_replace (gsi, new_stmt, false);
1670 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1671
1672 return true;
1673 }
1674
1675
1676 /* Function vect_gen_widened_results_half
1677
1678 Create a vector stmt whose code, number of arguments, and result
1679 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
1680 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at GSI.
1681 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1682 needs to be created (DECL is a function-decl of a target-builtin).
1683 STMT is the original scalar stmt that we are vectorizing. */
1684
1685 static gimple
1686 vect_gen_widened_results_half (enum tree_code code,
1687 tree decl,
1688 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1689 tree vec_dest, gimple_stmt_iterator *gsi,
1690 gimple stmt)
1691 {
1692 gimple new_stmt;
1693 tree new_temp;
1694
1695 /* Generate half of the widened result: */
1696 if (code == CALL_EXPR)
1697 {
1698 /* Target specific support */
1699 if (op_type == binary_op)
1700 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1701 else
1702 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1703 new_temp = make_ssa_name (vec_dest, new_stmt);
1704 gimple_call_set_lhs (new_stmt, new_temp);
1705 }
1706 else
1707 {
1708 /* Generic support */
1709 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1710 if (op_type != binary_op)
1711 vec_oprnd1 = NULL;
1712 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1713 vec_oprnd1);
1714 new_temp = make_ssa_name (vec_dest, new_stmt);
1715 gimple_assign_set_lhs (new_stmt, new_temp);
1716 }
1717 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1718
1719 return new_stmt;
1720 }
1721
1722
1723 /* Check if STMT performs a conversion operation that can be vectorized.
1724 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1725 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1726 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1727
1728 static bool
1729 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1730 gimple *vec_stmt, slp_tree slp_node)
1731 {
1732 tree vec_dest;
1733 tree scalar_dest;
1734 tree op0;
1735 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1736 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1737 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1738 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1739 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1740 tree new_temp;
1741 tree def;
1742 gimple def_stmt;
1743 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1744 gimple new_stmt = NULL;
1745 stmt_vec_info prev_stmt_info;
1746 int nunits_in;
1747 int nunits_out;
1748 tree vectype_out, vectype_in;
1749 int ncopies, j;
1750 tree rhs_type;
1751 tree builtin_decl;
1752 enum { NARROW, NONE, WIDEN } modifier;
1753 int i;
1754 VEC(tree,heap) *vec_oprnds0 = NULL;
1755 tree vop0;
1756 VEC(tree,heap) *dummy = NULL;
1757 int dummy_int;
1758
1759 /* Is STMT a vectorizable conversion? */
1760
1761 /* FORNOW: unsupported in basic block SLP. */
1762 gcc_assert (loop_vinfo);
1763
1764 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1765 return false;
1766
1767 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1768 return false;
1769
1770 if (!is_gimple_assign (stmt))
1771 return false;
1772
1773 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1774 return false;
1775
1776 code = gimple_assign_rhs_code (stmt);
1777 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1778 return false;
1779
1780 /* Check types of lhs and rhs. */
1781 scalar_dest = gimple_assign_lhs (stmt);
1782 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1783
1784 op0 = gimple_assign_rhs1 (stmt);
1785 rhs_type = TREE_TYPE (op0);
1786 /* Check the operands of the operation. */
1787 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1788 &def_stmt, &def, &dt[0], &vectype_in))
1789 {
1790 if (vect_print_dump_info (REPORT_DETAILS))
1791 fprintf (vect_dump, "use not simple.");
1792 return false;
1793 }
1794   /* If op0 is an external or constant def, use a vector type of
1795 the same size as the output vector type. */
1796 if (!vectype_in)
1797 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1798 if (vec_stmt)
1799 gcc_assert (vectype_in);
1800 if (!vectype_in)
1801 {
1802 if (vect_print_dump_info (REPORT_DETAILS))
1803 {
1804 fprintf (vect_dump, "no vectype for scalar type ");
1805 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1806 }
1807
1808 return false;
1809 }
1810
1811 /* FORNOW */
1812 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1813 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1814 if (nunits_in == nunits_out / 2)
1815 modifier = NARROW;
1816 else if (nunits_out == nunits_in)
1817 modifier = NONE;
1818 else if (nunits_out == nunits_in / 2)
1819 modifier = WIDEN;
1820 else
1821 return false;
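  /* For example: with 128-bit vectors, a FLOAT_EXPR from short to float has
     nunits_in == 8 (V8HI) and nunits_out == 4 (V4SF), so modifier == WIDEN;
     a FIX_TRUNC_EXPR from double to int has nunits_in == 2 (V2DF) and
     nunits_out == 4 (V4SI), so modifier == NARROW; int <-> float keeps the
     number of units unchanged and gives modifier == NONE.  */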
1822
1823 if (modifier == NARROW)
1824 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1825 else
1826 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1827
1828 /* Multiple types in SLP are handled by creating the appropriate number of
1829 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1830 case of SLP. */
1831 if (slp_node || PURE_SLP_STMT (stmt_info))
1832 ncopies = 1;
1833
1834 /* Sanity check: make sure that at least one copy of the vectorized stmt
1835 needs to be generated. */
1836 gcc_assert (ncopies >= 1);
1837
1838 /* Supportable by target? */
1839 if ((modifier == NONE
1840 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1841 || (modifier == WIDEN
1842 && !supportable_widening_operation (code, stmt,
1843 vectype_out, vectype_in,
1844 &decl1, &decl2,
1845 &code1, &code2,
1846 &dummy_int, &dummy))
1847 || (modifier == NARROW
1848 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1849 &code1, &dummy_int, &dummy)))
1850 {
1851 if (vect_print_dump_info (REPORT_DETAILS))
1852 fprintf (vect_dump, "conversion not supported by target.");
1853 return false;
1854 }
1855
1856 if (modifier != NONE)
1857 {
1858 /* FORNOW: SLP not supported. */
1859 if (STMT_SLP_TYPE (stmt_info))
1860 return false;
1861 }
1862
1863 if (!vec_stmt) /* transformation not required. */
1864 {
1865 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1866 return true;
1867 }
1868
1869 /** Transform. **/
1870 if (vect_print_dump_info (REPORT_DETAILS))
1871 fprintf (vect_dump, "transform conversion.");
1872
1873 /* Handle def. */
1874 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1875
1876 if (modifier == NONE && !slp_node)
1877 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1878
1879 prev_stmt_info = NULL;
1880 switch (modifier)
1881 {
1882 case NONE:
1883 for (j = 0; j < ncopies; j++)
1884 {
1885 if (j == 0)
1886 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1887 else
1888 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1889
1890 builtin_decl =
1891 targetm.vectorize.builtin_conversion (code,
1892 vectype_out, vectype_in);
1893 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
1894 {
1895 	      /* Arguments are ready.  Create the new vector stmt.  */
1896 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1897 new_temp = make_ssa_name (vec_dest, new_stmt);
1898 gimple_call_set_lhs (new_stmt, new_temp);
1899 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1900 if (slp_node)
1901 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1902 }
1903
1904 if (j == 0)
1905 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1906 else
1907 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1908 prev_stmt_info = vinfo_for_stmt (new_stmt);
1909 }
1910 break;
1911
1912 case WIDEN:
1913 /* In case the vectorization factor (VF) is bigger than the number
1914 of elements that we can fit in a vectype (nunits), we have to
1915 generate more than one vector stmt - i.e - we need to "unroll"
1916 the vector stmt by a factor VF/nunits. */
1917 for (j = 0; j < ncopies; j++)
1918 {
1919 if (j == 0)
1920 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1921 else
1922 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1923
1924 /* Generate first half of the widened result: */
1925 new_stmt
1926 = vect_gen_widened_results_half (code1, decl1,
1927 vec_oprnd0, vec_oprnd1,
1928 unary_op, vec_dest, gsi, stmt);
1929 if (j == 0)
1930 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1931 else
1932 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1933 prev_stmt_info = vinfo_for_stmt (new_stmt);
1934
1935 /* Generate second half of the widened result: */
1936 new_stmt
1937 = vect_gen_widened_results_half (code2, decl2,
1938 vec_oprnd0, vec_oprnd1,
1939 unary_op, vec_dest, gsi, stmt);
1940 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1941 prev_stmt_info = vinfo_for_stmt (new_stmt);
1942 }
1943 break;
1944
1945 case NARROW:
1946 /* In case the vectorization factor (VF) is bigger than the number
1947 of elements that we can fit in a vectype (nunits), we have to
1948 generate more than one vector stmt - i.e - we need to "unroll"
1949 the vector stmt by a factor VF/nunits. */
1950 for (j = 0; j < ncopies; j++)
1951 {
1952 /* Handle uses. */
1953 if (j == 0)
1954 {
1955 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1956 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1957 }
1958 else
1959 {
1960 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1961 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1962 }
1963
1964 /* Arguments are ready. Create the new vector stmt. */
1965 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1966 vec_oprnd1);
1967 new_temp = make_ssa_name (vec_dest, new_stmt);
1968 gimple_assign_set_lhs (new_stmt, new_temp);
1969 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1970
1971 if (j == 0)
1972 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1973 else
1974 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1975
1976 prev_stmt_info = vinfo_for_stmt (new_stmt);
1977 }
1978
1979 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1980 }
1981
1982 if (vec_oprnds0)
1983 VEC_free (tree, heap, vec_oprnds0);
1984
1985 return true;
1986 }
1987
1988
1989 /* Function vectorizable_assignment.
1990
1991 Check if STMT performs an assignment (copy) that can be vectorized.
1992 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1993 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1994 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1995
1996 static bool
1997 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1998 gimple *vec_stmt, slp_tree slp_node)
1999 {
2000 tree vec_dest;
2001 tree scalar_dest;
2002 tree op;
2003 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2004 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2005 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2006 tree new_temp;
2007 tree def;
2008 gimple def_stmt;
2009 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2010 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2011 int ncopies;
2012 int i, j;
2013 VEC(tree,heap) *vec_oprnds = NULL;
2014 tree vop;
2015 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2016 gimple new_stmt = NULL;
2017 stmt_vec_info prev_stmt_info = NULL;
2018 enum tree_code code;
2019 tree vectype_in;
2020
2021 /* Multiple types in SLP are handled by creating the appropriate number of
2022 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2023 case of SLP. */
2024 if (slp_node || PURE_SLP_STMT (stmt_info))
2025 ncopies = 1;
2026 else
2027 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2028
2029 gcc_assert (ncopies >= 1);
2030
2031 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2032 return false;
2033
2034 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2035 return false;
2036
2037 /* Is vectorizable assignment? */
2038 if (!is_gimple_assign (stmt))
2039 return false;
2040
2041 scalar_dest = gimple_assign_lhs (stmt);
2042 if (TREE_CODE (scalar_dest) != SSA_NAME)
2043 return false;
2044
2045 code = gimple_assign_rhs_code (stmt);
2046 if (gimple_assign_single_p (stmt)
2047 || code == PAREN_EXPR
2048 || CONVERT_EXPR_CODE_P (code))
2049 op = gimple_assign_rhs1 (stmt);
2050 else
2051 return false;
2052
2053 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
2054 &def_stmt, &def, &dt[0], &vectype_in))
2055 {
2056 if (vect_print_dump_info (REPORT_DETAILS))
2057 fprintf (vect_dump, "use not simple.");
2058 return false;
2059 }
2060
2061 /* We can handle NOP_EXPR conversions that do not change the number
2062 of elements or the vector size. */
2063 if (CONVERT_EXPR_CODE_P (code)
2064 && (!vectype_in
2065 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
2066 || (GET_MODE_SIZE (TYPE_MODE (vectype))
2067 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
2068 return false;
2069
2070 if (!vec_stmt) /* transformation not required. */
2071 {
2072 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
2073 if (vect_print_dump_info (REPORT_DETAILS))
2074 fprintf (vect_dump, "=== vectorizable_assignment ===");
2075 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2076 return true;
2077 }
2078
2079 /** Transform. **/
2080 if (vect_print_dump_info (REPORT_DETAILS))
2081 fprintf (vect_dump, "transform assignment.");
2082
2083 /* Handle def. */
2084 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2085
2086 /* Handle use. */
2087 for (j = 0; j < ncopies; j++)
2088 {
2089 /* Handle uses. */
2090 if (j == 0)
2091 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2092 else
2093 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2094
2095       /* Arguments are ready.  Create the new vector stmt.  */
2096 FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
2097 {
2098 if (CONVERT_EXPR_CODE_P (code))
2099 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2100 new_stmt = gimple_build_assign (vec_dest, vop);
2101 new_temp = make_ssa_name (vec_dest, new_stmt);
2102 gimple_assign_set_lhs (new_stmt, new_temp);
2103 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2104 if (slp_node)
2105 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2106 }
2107
2108 if (slp_node)
2109 continue;
2110
2111 if (j == 0)
2112 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2113 else
2114 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2115
2116 prev_stmt_info = vinfo_for_stmt (new_stmt);
2117 }
2118
2119 VEC_free (tree, heap, vec_oprnds);
2120 return true;
2121 }
2122
2123
2124 /* Function vectorizable_shift.
2125
2126 Check if STMT performs a shift operation that can be vectorized.
2127 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2128 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2129 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2130
2131 static bool
2132 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
2133 gimple *vec_stmt, slp_tree slp_node)
2134 {
2135 tree vec_dest;
2136 tree scalar_dest;
2137 tree op0, op1 = NULL;
2138 tree vec_oprnd1 = NULL_TREE;
2139 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2140 tree vectype;
2141 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2142 enum tree_code code;
2143 enum machine_mode vec_mode;
2144 tree new_temp;
2145 optab optab;
2146 int icode;
2147 enum machine_mode optab_op2_mode;
2148 tree def;
2149 gimple def_stmt;
2150 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2151 gimple new_stmt = NULL;
2152 stmt_vec_info prev_stmt_info;
2153 int nunits_in;
2154 int nunits_out;
2155 tree vectype_out;
2156 int ncopies;
2157 int j, i;
2158 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2159 tree vop0, vop1;
2160 unsigned int k;
2161 bool scalar_shift_arg = true;
2162 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2163 int vf;
2164
2165 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2166 return false;
2167
2168 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2169 return false;
2170
2171 /* Is STMT a vectorizable binary/unary operation? */
2172 if (!is_gimple_assign (stmt))
2173 return false;
2174
2175 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2176 return false;
2177
2178 code = gimple_assign_rhs_code (stmt);
2179
2180 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2181 || code == RROTATE_EXPR))
2182 return false;
2183
2184 scalar_dest = gimple_assign_lhs (stmt);
2185 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2186
2187 op0 = gimple_assign_rhs1 (stmt);
2188 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2189 &def_stmt, &def, &dt[0], &vectype))
2190 {
2191 if (vect_print_dump_info (REPORT_DETAILS))
2192 fprintf (vect_dump, "use not simple.");
2193 return false;
2194 }
2195   /* If op0 is an external or constant def, use a vector type with
2196 the same size as the output vector type. */
2197 if (!vectype)
2198 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2199 if (vec_stmt)
2200 gcc_assert (vectype);
2201 if (!vectype)
2202 {
2203 if (vect_print_dump_info (REPORT_DETAILS))
2204 {
2205 fprintf (vect_dump, "no vectype for scalar type ");
2206 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2207 }
2208
2209 return false;
2210 }
2211
2212 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2213 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2214 if (nunits_out != nunits_in)
2215 return false;
2216
2217 op1 = gimple_assign_rhs2 (stmt);
2218 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
2219 {
2220 if (vect_print_dump_info (REPORT_DETAILS))
2221 fprintf (vect_dump, "use not simple.");
2222 return false;
2223 }
2224
2225 if (loop_vinfo)
2226 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2227 else
2228 vf = 1;
2229
2230 /* Multiple types in SLP are handled by creating the appropriate number of
2231 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2232 case of SLP. */
2233 if (slp_node || PURE_SLP_STMT (stmt_info))
2234 ncopies = 1;
2235 else
2236 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2237
2238 gcc_assert (ncopies >= 1);
2239
2240   /* Determine whether the shift amount is a vector or a scalar.  If the
2241 shift/rotate amount is a vector, use the vector/vector shift optabs. */
2242
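  /* For illustration: in "a[i] = b[i] << 3" the shift amount is invariant,
     so the vector-shifted-by-scalar (optab_scalar) form is preferred; in
     "a[i] = b[i] << c[i]" the amount varies per element and the
     vector-shifted-by-vector (optab_vector) form is required.  Which forms
     the target provides is checked below.  */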
2243 if (dt[1] == vect_internal_def && !slp_node)
2244 scalar_shift_arg = false;
2245 else if (dt[1] == vect_constant_def
2246 || dt[1] == vect_external_def
2247 || dt[1] == vect_internal_def)
2248 {
2249       /* In SLP, we need to check whether the shift count is the same in
2250          all the stmts; in loops, if it is a constant or invariant, it is
2251          always a scalar shift.  */
2252 if (slp_node)
2253 {
2254 VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2255 gimple slpstmt;
2256
2257 FOR_EACH_VEC_ELT (gimple, stmts, k, slpstmt)
2258 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
2259 scalar_shift_arg = false;
2260 }
2261 }
2262 else
2263 {
2264 if (vect_print_dump_info (REPORT_DETAILS))
2265 fprintf (vect_dump, "operand mode requires invariant argument.");
2266 return false;
2267 }
2268
2269 /* Vector shifted by vector. */
2270 if (!scalar_shift_arg)
2271 {
2272 optab = optab_for_tree_code (code, vectype, optab_vector);
2273 if (vect_print_dump_info (REPORT_DETAILS))
2274 fprintf (vect_dump, "vector/vector shift/rotate found.");
2275 }
2276   /* See if the machine has a vector shifted-by-scalar insn, and if not,
2277      whether it has a vector shifted-by-vector insn.  */
2278 else
2279 {
2280 optab = optab_for_tree_code (code, vectype, optab_scalar);
2281 if (optab
2282 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2283 {
2284 if (vect_print_dump_info (REPORT_DETAILS))
2285 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2286 }
2287 else
2288 {
2289 optab = optab_for_tree_code (code, vectype, optab_vector);
2290 if (optab
2291 && (optab_handler (optab, TYPE_MODE (vectype))
2292 != CODE_FOR_nothing))
2293 {
2294 scalar_shift_arg = false;
2295
2296 if (vect_print_dump_info (REPORT_DETAILS))
2297 fprintf (vect_dump, "vector/vector shift/rotate found.");
2298
2299               /* Unlike the other binary operators, shifts/rotates have
2300                  an rhs of type int rather than of the same type as the
2301                  lhs, so make sure the scalar is of the right type if we
2302                  are dealing with vectors of short/char.  */
2303 if (dt[1] == vect_constant_def)
2304 op1 = fold_convert (TREE_TYPE (vectype), op1);
2305 }
2306 }
2307 }
2308
2309 /* Supportable by target? */
2310 if (!optab)
2311 {
2312 if (vect_print_dump_info (REPORT_DETAILS))
2313 fprintf (vect_dump, "no optab.");
2314 return false;
2315 }
2316 vec_mode = TYPE_MODE (vectype);
2317 icode = (int) optab_handler (optab, vec_mode);
2318 if (icode == CODE_FOR_nothing)
2319 {
2320 if (vect_print_dump_info (REPORT_DETAILS))
2321 fprintf (vect_dump, "op not supported by target.");
2322 /* Check only during analysis. */
2323 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2324 || (vf < vect_min_worthwhile_factor (code)
2325 && !vec_stmt))
2326 return false;
2327 if (vect_print_dump_info (REPORT_DETAILS))
2328 fprintf (vect_dump, "proceeding using word mode.");
2329 }
2330
2331 /* Worthwhile without SIMD support? Check only during analysis. */
2332 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2333 && vf < vect_min_worthwhile_factor (code)
2334 && !vec_stmt)
2335 {
2336 if (vect_print_dump_info (REPORT_DETAILS))
2337 fprintf (vect_dump, "not worthwhile without SIMD support.");
2338 return false;
2339 }
2340
2341 if (!vec_stmt) /* transformation not required. */
2342 {
2343 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
2344 if (vect_print_dump_info (REPORT_DETAILS))
2345 fprintf (vect_dump, "=== vectorizable_shift ===");
2346 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2347 return true;
2348 }
2349
2350 /** Transform. **/
2351
2352 if (vect_print_dump_info (REPORT_DETAILS))
2353 fprintf (vect_dump, "transform binary/unary operation.");
2354
2355 /* Handle def. */
2356 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2357
2358 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2359 created in the previous stages of the recursion, so no allocation is
2360 needed, except for the case of shift with scalar shift argument. In that
2361 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2362 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2363 In case of loop-based vectorization we allocate VECs of size 1. We
2364 allocate VEC_OPRNDS1 only in case of binary operation. */
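  /* For example (a rough sketch): if all four stmts of an SLP group shift by
     the constant 3 and two vector stmts must be created, VEC_OPRNDS1 simply
     ends up holding the scalar 3 twice, once per vector stmt.  */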
2365 if (!slp_node)
2366 {
2367 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2368 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2369 }
2370 else if (scalar_shift_arg)
2371 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2372
2373 prev_stmt_info = NULL;
2374 for (j = 0; j < ncopies; j++)
2375 {
2376 /* Handle uses. */
2377 if (j == 0)
2378 {
2379 if (scalar_shift_arg)
2380 {
2381 /* Vector shl and shr insn patterns can be defined with scalar
2382 operand 2 (shift operand). In this case, use constant or loop
2383 invariant op1 directly, without extending it to vector mode
2384 first. */
2385 optab_op2_mode = insn_data[icode].operand[2].mode;
2386 if (!VECTOR_MODE_P (optab_op2_mode))
2387 {
2388 if (vect_print_dump_info (REPORT_DETAILS))
2389 fprintf (vect_dump, "operand 1 using scalar mode.");
2390 vec_oprnd1 = op1;
2391 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2392 if (slp_node)
2393 {
2394 /* Store vec_oprnd1 for every vector stmt to be created
2395 for SLP_NODE. We check during the analysis that all
2396 the shift arguments are the same.
2397 TODO: Allow different constants for different vector
2398 stmts generated for an SLP instance. */
2399 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2400 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2401 }
2402 }
2403 }
2404
2405       /* vec_oprnd1 is available if operand 1 should be of a scalar type
2406          (a special case for certain kinds of vector shifts); otherwise,
2407 operand 1 should be of a vector type (the usual case). */
2408 if (vec_oprnd1)
2409 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2410 slp_node);
2411 else
2412 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2413 slp_node);
2414 }
2415 else
2416 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2417
2418 /* Arguments are ready. Create the new vector stmt. */
2419 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2420 {
2421 vop1 = VEC_index (tree, vec_oprnds1, i);
2422 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2423 new_temp = make_ssa_name (vec_dest, new_stmt);
2424 gimple_assign_set_lhs (new_stmt, new_temp);
2425 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2426 if (slp_node)
2427 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2428 }
2429
2430 if (slp_node)
2431 continue;
2432
2433 if (j == 0)
2434 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2435 else
2436 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2437 prev_stmt_info = vinfo_for_stmt (new_stmt);
2438 }
2439
2440 VEC_free (tree, heap, vec_oprnds0);
2441 VEC_free (tree, heap, vec_oprnds1);
2442
2443 return true;
2444 }
2445
2446
2447 /* Function vectorizable_operation.
2448
2449 Check if STMT performs a binary, unary or ternary operation that can
2450 be vectorized.
2451 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2452 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2453 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2454
2455 static bool
2456 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2457 gimple *vec_stmt, slp_tree slp_node)
2458 {
2459 tree vec_dest;
2460 tree scalar_dest;
2461 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
2462 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2463 tree vectype;
2464 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2465 enum tree_code code;
2466 enum machine_mode vec_mode;
2467 tree new_temp;
2468 int op_type;
2469 optab optab;
2470 int icode;
2471 tree def;
2472 gimple def_stmt;
2473 enum vect_def_type dt[3]
2474 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2475 gimple new_stmt = NULL;
2476 stmt_vec_info prev_stmt_info;
2477 int nunits_in;
2478 int nunits_out;
2479 tree vectype_out;
2480 int ncopies;
2481 int j, i;
2482 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vec_oprnds2 = NULL;
2483 tree vop0, vop1, vop2;
2484 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2485 int vf;
2486
2487 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2488 return false;
2489
2490 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2491 return false;
2492
2493 /* Is STMT a vectorizable binary/unary operation? */
2494 if (!is_gimple_assign (stmt))
2495 return false;
2496
2497 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2498 return false;
2499
2500 code = gimple_assign_rhs_code (stmt);
2501
2502 /* For pointer addition, we should use the normal plus for
2503 the vector addition. */
2504 if (code == POINTER_PLUS_EXPR)
2505 code = PLUS_EXPR;
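  /* E.g. a scalar "q = p + 4" (POINTER_PLUS_EXPR) is vectorized as an
     ordinary element-wise PLUS_EXPR on the vectorized operands.  */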
2506
2507   /* Support only unary, binary and ternary operations.  */
2508 op_type = TREE_CODE_LENGTH (code);
2509 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
2510 {
2511 if (vect_print_dump_info (REPORT_DETAILS))
2512 fprintf (vect_dump, "num. args = %d (not unary/binary/ternary op).",
2513 op_type);
2514 return false;
2515 }
2516
2517 scalar_dest = gimple_assign_lhs (stmt);
2518 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2519
2520 op0 = gimple_assign_rhs1 (stmt);
2521 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2522 &def_stmt, &def, &dt[0], &vectype))
2523 {
2524 if (vect_print_dump_info (REPORT_DETAILS))
2525 fprintf (vect_dump, "use not simple.");
2526 return false;
2527 }
2528   /* If op0 is an external or constant def, use a vector type with
2529 the same size as the output vector type. */
2530 if (!vectype)
2531 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2532 if (vec_stmt)
2533 gcc_assert (vectype);
2534 if (!vectype)
2535 {
2536 if (vect_print_dump_info (REPORT_DETAILS))
2537 {
2538 fprintf (vect_dump, "no vectype for scalar type ");
2539 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2540 }
2541
2542 return false;
2543 }
2544
2545 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2546 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2547 if (nunits_out != nunits_in)
2548 return false;
2549
2550 if (op_type == binary_op || op_type == ternary_op)
2551 {
2552 op1 = gimple_assign_rhs2 (stmt);
2553 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2554 &dt[1]))
2555 {
2556 if (vect_print_dump_info (REPORT_DETAILS))
2557 fprintf (vect_dump, "use not simple.");
2558 return false;
2559 }
2560 }
2561 if (op_type == ternary_op)
2562 {
2563 op2 = gimple_assign_rhs3 (stmt);
2564 if (!vect_is_simple_use (op2, loop_vinfo, bb_vinfo, &def_stmt, &def,
2565 &dt[2]))
2566 {
2567 if (vect_print_dump_info (REPORT_DETAILS))
2568 fprintf (vect_dump, "use not simple.");
2569 return false;
2570 }
2571 }
2572
2573 if (loop_vinfo)
2574 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2575 else
2576 vf = 1;
2577
2578 /* Multiple types in SLP are handled by creating the appropriate number of
2579 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2580 case of SLP. */
2581 if (slp_node || PURE_SLP_STMT (stmt_info))
2582 ncopies = 1;
2583 else
2584 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2585
2586 gcc_assert (ncopies >= 1);
2587
2588 /* Shifts are handled in vectorizable_shift (). */
2589 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2590 || code == RROTATE_EXPR)
2591 return false;
2592
2593 optab = optab_for_tree_code (code, vectype, optab_default);
2594
2595 /* Supportable by target? */
2596 if (!optab)
2597 {
2598 if (vect_print_dump_info (REPORT_DETAILS))
2599 fprintf (vect_dump, "no optab.");
2600 return false;
2601 }
2602 vec_mode = TYPE_MODE (vectype);
2603 icode = (int) optab_handler (optab, vec_mode);
2604 if (icode == CODE_FOR_nothing)
2605 {
2606 if (vect_print_dump_info (REPORT_DETAILS))
2607 fprintf (vect_dump, "op not supported by target.");
2608 /* Check only during analysis. */
2609 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2610 || (vf < vect_min_worthwhile_factor (code)
2611 && !vec_stmt))
2612 return false;
2613 if (vect_print_dump_info (REPORT_DETAILS))
2614 fprintf (vect_dump, "proceeding using word mode.");
2615 }
2616
2617 /* Worthwhile without SIMD support? Check only during analysis. */
2618 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2619 && vf < vect_min_worthwhile_factor (code)
2620 && !vec_stmt)
2621 {
2622 if (vect_print_dump_info (REPORT_DETAILS))
2623 fprintf (vect_dump, "not worthwhile without SIMD support.");
2624 return false;
2625 }
2626
2627 if (!vec_stmt) /* transformation not required. */
2628 {
2629 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2630 if (vect_print_dump_info (REPORT_DETAILS))
2631 fprintf (vect_dump, "=== vectorizable_operation ===");
2632 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2633 return true;
2634 }
2635
2636 /** Transform. **/
2637
2638 if (vect_print_dump_info (REPORT_DETAILS))
2639 fprintf (vect_dump, "transform binary/unary operation.");
2640
2641 /* Handle def. */
2642 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2643
2644   /* Allocate VECs for vector operands.  In case of SLP, vector operands are
2645      created in the previous stages of the recursion, so no allocation is
2646      needed.  In case of loop-based vectorization we allocate VECs of size 1.
2647      We allocate VEC_OPRNDS1 only in case of a binary or ternary operation,
2648      and VEC_OPRNDS2 only in case of a ternary operation.  */
2651 if (!slp_node)
2652 {
2653 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2654 if (op_type == binary_op || op_type == ternary_op)
2655 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2656 if (op_type == ternary_op)
2657 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2658 }
2659
2660 /* In case the vectorization factor (VF) is bigger than the number
2661 of elements that we can fit in a vectype (nunits), we have to generate
2662 more than one vector stmt - i.e - we need to "unroll" the
2663 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2664 from one copy of the vector stmt to the next, in the field
2665 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2666 stages to find the correct vector defs to be used when vectorizing
2667 stmts that use the defs of the current stmt. The example below
2668 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
2669 we need to create 4 vectorized stmts):
2670
2671 before vectorization:
2672 RELATED_STMT VEC_STMT
2673 S1: x = memref - -
2674 S2: z = x + 1 - -
2675
2676 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2677 there):
2678 RELATED_STMT VEC_STMT
2679 VS1_0: vx0 = memref0 VS1_1 -
2680 VS1_1: vx1 = memref1 VS1_2 -
2681 VS1_2: vx2 = memref2 VS1_3 -
2682 VS1_3: vx3 = memref3 - -
2683 S1: x = load - VS1_0
2684 S2: z = x + 1 - -
2685
2686 step2: vectorize stmt S2 (done here):
2687 To vectorize stmt S2 we first need to find the relevant vector
2688 def for the first operand 'x'. This is, as usual, obtained from
2689 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2690 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2691 relevant vector def 'vx0'. Having found 'vx0' we can generate
2692 the vector stmt VS2_0, and as usual, record it in the
2693 STMT_VINFO_VEC_STMT of stmt S2.
2694 When creating the second copy (VS2_1), we obtain the relevant vector
2695 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2696 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2697 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2698 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2699 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2700 chain of stmts and pointers:
2701 RELATED_STMT VEC_STMT
2702 VS1_0: vx0 = memref0 VS1_1 -
2703 VS1_1: vx1 = memref1 VS1_2 -
2704 VS1_2: vx2 = memref2 VS1_3 -
2705 VS1_3: vx3 = memref3 - -
2706 S1: x = load - VS1_0
2707 VS2_0: vz0 = vx0 + v1 VS2_1 -
2708 VS2_1: vz1 = vx1 + v1 VS2_2 -
2709 VS2_2: vz2 = vx2 + v1 VS2_3 -
2710 VS2_3: vz3 = vx3 + v1 - -
2711 S2: z = x + 1 - VS2_0 */
2712
2713 prev_stmt_info = NULL;
2714 for (j = 0; j < ncopies; j++)
2715 {
2716 /* Handle uses. */
2717 if (j == 0)
2718 {
2719 if (op_type == binary_op || op_type == ternary_op)
2720 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2721 slp_node);
2722 else
2723 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2724 slp_node);
2725 if (op_type == ternary_op)
2726 {
2727 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2728 VEC_quick_push (tree, vec_oprnds2,
2729 vect_get_vec_def_for_operand (op2, stmt, NULL));
2730 }
2731 }
2732 else
2733 {
2734 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2735 if (op_type == ternary_op)
2736 {
2737 tree vec_oprnd = VEC_pop (tree, vec_oprnds2);
2738 VEC_quick_push (tree, vec_oprnds2,
2739 vect_get_vec_def_for_stmt_copy (dt[2],
2740 vec_oprnd));
2741 }
2742 }
2743
2744 /* Arguments are ready. Create the new vector stmt. */
2745 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2746 {
2747 vop1 = ((op_type == binary_op || op_type == ternary_op)
2748 ? VEC_index (tree, vec_oprnds1, i) : NULL_TREE);
2749 vop2 = ((op_type == ternary_op)
2750 ? VEC_index (tree, vec_oprnds2, i) : NULL_TREE);
2751 new_stmt = gimple_build_assign_with_ops3 (code, vec_dest,
2752 vop0, vop1, vop2);
2753 new_temp = make_ssa_name (vec_dest, new_stmt);
2754 gimple_assign_set_lhs (new_stmt, new_temp);
2755 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2756 if (slp_node)
2757 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2758 }
2759
2760 if (slp_node)
2761 continue;
2762
2763 if (j == 0)
2764 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2765 else
2766 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2767 prev_stmt_info = vinfo_for_stmt (new_stmt);
2768 }
2769
2770 VEC_free (tree, heap, vec_oprnds0);
2771 if (vec_oprnds1)
2772 VEC_free (tree, heap, vec_oprnds1);
2773 if (vec_oprnds2)
2774 VEC_free (tree, heap, vec_oprnds2);
2775
2776 return true;
2777 }
2778
2779
2780 /* Get vectorized definitions for loop-based vectorization. For the first
2781 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2782 scalar operand), and for the rest we get a copy with
2783 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2784 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2785 The vectors are collected into VEC_OPRNDS. */
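/* For illustration: called with MULTI_STEP_CVT == 1 this collects four
   vector defs in total (two per level), which is roughly what a two-step
   demotion such as int -> short -> char consumes per result vector.  */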
2786
2787 static void
2788 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2789 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2790 {
2791 tree vec_oprnd;
2792
2793 /* Get first vector operand. */
2794   /* All the vector operands except the very first one (which is the scalar oprnd)
2795 are stmt copies. */
2796 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2797 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2798 else
2799 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2800
2801 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2802
2803 /* Get second vector operand. */
2804 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2805 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2806
2807 *oprnd = vec_oprnd;
2808
2809 /* For conversion in multiple steps, continue to get operands
2810 recursively. */
2811 if (multi_step_cvt)
2812 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2813 }
2814
2815
2816 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2817 For multi-step conversions store the resulting vectors and call the function
2818 recursively. */
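/* Sketch of a single level: the loop below pairs up the operands and
   narrows each pair, conceptually

       vshort0 = VEC_PACK_TRUNC_EXPR <vint0, vint1>;
       vshort1 = VEC_PACK_TRUNC_EXPR <vint2, vint3>;

   For a multi-step conversion the results are then fed back into a
   recursive call that packs them further (e.g. down to chars).  The
   actual CODE comes from supportable_narrowing_operation.  */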
2819
2820 static void
2821 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2822 int multi_step_cvt, gimple stmt,
2823 VEC (tree, heap) *vec_dsts,
2824 gimple_stmt_iterator *gsi,
2825 slp_tree slp_node, enum tree_code code,
2826 stmt_vec_info *prev_stmt_info)
2827 {
2828 unsigned int i;
2829 tree vop0, vop1, new_tmp, vec_dest;
2830 gimple new_stmt;
2831 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2832
2833 vec_dest = VEC_pop (tree, vec_dsts);
2834
2835 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2836 {
2837 /* Create demotion operation. */
2838 vop0 = VEC_index (tree, *vec_oprnds, i);
2839 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2840 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2841 new_tmp = make_ssa_name (vec_dest, new_stmt);
2842 gimple_assign_set_lhs (new_stmt, new_tmp);
2843 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2844
2845 if (multi_step_cvt)
2846 /* Store the resulting vector for next recursive call. */
2847 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2848 else
2849 {
2850 /* This is the last step of the conversion sequence. Store the
2851              vectors in SLP_NODE or in the vector info of the scalar statement
2852              (or in the STMT_VINFO_RELATED_STMT chain).  */
2853 if (slp_node)
2854 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2855 else
2856 {
2857 if (!*prev_stmt_info)
2858 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2859 else
2860 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2861
2862 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2863 }
2864 }
2865 }
2866
2867 /* For multi-step demotion operations we first generate demotion operations
2868 from the source type to the intermediate types, and then combine the
2869      results (stored in VEC_OPRNDS) with a demotion operation to the destination
2870 type. */
2871 if (multi_step_cvt)
2872 {
2873       /* At each level of recursion we have half of the operands we had at the
2874 previous level. */
2875 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2876 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2877 stmt, vec_dsts, gsi, slp_node,
2878 code, prev_stmt_info);
2879 }
2880 }
2881
2882
2883 /* Function vectorizable_type_demotion
2884
2885 Check if STMT performs a binary or unary operation that involves
2886 type demotion, and if it can be vectorized.
2887 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2888 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2889 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2890
2891 static bool
2892 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2893 gimple *vec_stmt, slp_tree slp_node)
2894 {
2895 tree vec_dest;
2896 tree scalar_dest;
2897 tree op0;
2898 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2899 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2900 enum tree_code code, code1 = ERROR_MARK;
2901 tree def;
2902 gimple def_stmt;
2903 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2904 stmt_vec_info prev_stmt_info;
2905 int nunits_in;
2906 int nunits_out;
2907 tree vectype_out;
2908 int ncopies;
2909 int j, i;
2910 tree vectype_in;
2911 int multi_step_cvt = 0;
2912 VEC (tree, heap) *vec_oprnds0 = NULL;
2913 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2914 tree last_oprnd, intermediate_type;
2915
2916 /* FORNOW: not supported by basic block SLP vectorization. */
2917 gcc_assert (loop_vinfo);
2918
2919 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2920 return false;
2921
2922 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2923 return false;
2924
2925 /* Is STMT a vectorizable type-demotion operation? */
2926 if (!is_gimple_assign (stmt))
2927 return false;
2928
2929 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2930 return false;
2931
2932 code = gimple_assign_rhs_code (stmt);
2933 if (!CONVERT_EXPR_CODE_P (code))
2934 return false;
2935
2936 scalar_dest = gimple_assign_lhs (stmt);
2937 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2938
2939 /* Check the operands of the operation. */
2940 op0 = gimple_assign_rhs1 (stmt);
2941 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2942 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2943 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2944 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2945 && CONVERT_EXPR_CODE_P (code))))
2946 return false;
2947 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2948 &def_stmt, &def, &dt[0], &vectype_in))
2949 {
2950 if (vect_print_dump_info (REPORT_DETAILS))
2951 fprintf (vect_dump, "use not simple.");
2952 return false;
2953 }
2954   /* If op0 is an external def, use a vector type with the
2955 same size as the output vector type if possible. */
2956 if (!vectype_in)
2957 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2958 if (vec_stmt)
2959 gcc_assert (vectype_in);
2960 if (!vectype_in)
2961 {
2962 if (vect_print_dump_info (REPORT_DETAILS))
2963 {
2964 fprintf (vect_dump, "no vectype for scalar type ");
2965 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2966 }
2967
2968 return false;
2969 }
2970
2971 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2972 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2973 if (nunits_in >= nunits_out)
2974 return false;
2975
2976 /* Multiple types in SLP are handled by creating the appropriate number of
2977 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2978 case of SLP. */
2979 if (slp_node || PURE_SLP_STMT (stmt_info))
2980 ncopies = 1;
2981 else
2982 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2983 gcc_assert (ncopies >= 1);
2984
2985 /* Supportable by target? */
2986 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2987 &code1, &multi_step_cvt, &interm_types))
2988 return false;
2989
2990 if (!vec_stmt) /* transformation not required. */
2991 {
2992 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2993 if (vect_print_dump_info (REPORT_DETAILS))
2994 fprintf (vect_dump, "=== vectorizable_demotion ===");
2995 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2996 return true;
2997 }
2998
2999 /** Transform. **/
3000 if (vect_print_dump_info (REPORT_DETAILS))
3001 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
3002 ncopies);
3003
3004 /* In case of multi-step demotion, we first generate demotion operations to
3005      the intermediate types, and then from those types to the final one.
3006 We create vector destinations for the intermediate type (TYPES) received
3007 from supportable_narrowing_operation, and store them in the correct order
3008 for future use in vect_create_vectorized_demotion_stmts(). */
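  /* For example, for a long -> char demotion with intermediate types int
     and short, the destination for char is pushed first and the
     intermediate-type destinations after it, so each recursion level can
     VEC_pop the destination matching the type it produces.  */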
3009 if (multi_step_cvt)
3010 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3011 else
3012 vec_dsts = VEC_alloc (tree, heap, 1);
3013
3014 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3015 VEC_quick_push (tree, vec_dsts, vec_dest);
3016
3017 if (multi_step_cvt)
3018 {
3019 for (i = VEC_length (tree, interm_types) - 1;
3020 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3021 {
3022 vec_dest = vect_create_destination_var (scalar_dest,
3023 intermediate_type);
3024 VEC_quick_push (tree, vec_dsts, vec_dest);
3025 }
3026 }
3027
3028 /* In case the vectorization factor (VF) is bigger than the number
3029 of elements that we can fit in a vectype (nunits), we have to generate
3030 more than one vector stmt - i.e - we need to "unroll" the
3031 vector stmt by a factor VF/nunits. */
3032 last_oprnd = op0;
3033 prev_stmt_info = NULL;
3034 for (j = 0; j < ncopies; j++)
3035 {
3036 /* Handle uses. */
3037 if (slp_node)
3038 vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
3039 else
3040 {
3041 VEC_free (tree, heap, vec_oprnds0);
3042 vec_oprnds0 = VEC_alloc (tree, heap,
3043 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
3044 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
3045 vect_pow2 (multi_step_cvt) - 1);
3046 }
3047
3048 /* Arguments are ready. Create the new vector stmts. */
3049 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3050 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
3051 multi_step_cvt, stmt, tmp_vec_dsts,
3052 gsi, slp_node, code1,
3053 &prev_stmt_info);
3054 }
3055
3056 VEC_free (tree, heap, vec_oprnds0);
3057 VEC_free (tree, heap, vec_dsts);
3058 VEC_free (tree, heap, tmp_vec_dsts);
3059 VEC_free (tree, heap, interm_types);
3060
3061 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3062 return true;
3063 }
3064
3065
3066 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3067 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3068 the resulting vectors and call the function recursively. */
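/* Sketch of a single level: each input vector is widened into two result
   vectors, conceptually

       vint_lo = VEC_UNPACK_LO_EXPR <vshort>;
       vint_hi = VEC_UNPACK_HI_EXPR <vshort>;

   (or two calls to the target builtins DECL1/DECL2).  For a multi-step
   promotion the doubled set of results is fed back into a recursive call
   that widens it again.  */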
3069
3070 static void
3071 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
3072 VEC (tree, heap) **vec_oprnds1,
3073 int multi_step_cvt, gimple stmt,
3074 VEC (tree, heap) *vec_dsts,
3075 gimple_stmt_iterator *gsi,
3076 slp_tree slp_node, enum tree_code code1,
3077 enum tree_code code2, tree decl1,
3078 tree decl2, int op_type,
3079 stmt_vec_info *prev_stmt_info)
3080 {
3081 int i;
3082 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
3083 gimple new_stmt1, new_stmt2;
3084 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3085 VEC (tree, heap) *vec_tmp;
3086
3087 vec_dest = VEC_pop (tree, vec_dsts);
3088 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
3089
3090 FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
3091 {
3092 if (op_type == binary_op)
3093 vop1 = VEC_index (tree, *vec_oprnds1, i);
3094 else
3095 vop1 = NULL_TREE;
3096
3097 /* Generate the two halves of promotion operation. */
3098 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3099 op_type, vec_dest, gsi, stmt);
3100 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3101 op_type, vec_dest, gsi, stmt);
3102 if (is_gimple_call (new_stmt1))
3103 {
3104 new_tmp1 = gimple_call_lhs (new_stmt1);
3105 new_tmp2 = gimple_call_lhs (new_stmt2);
3106 }
3107 else
3108 {
3109 new_tmp1 = gimple_assign_lhs (new_stmt1);
3110 new_tmp2 = gimple_assign_lhs (new_stmt2);
3111 }
3112
3113 if (multi_step_cvt)
3114 {
3115 /* Store the results for the recursive call. */
3116 VEC_quick_push (tree, vec_tmp, new_tmp1);
3117 VEC_quick_push (tree, vec_tmp, new_tmp2);
3118 }
3119 else
3120 {
3121 	  /* Last step of the promotion sequence - store the results.  */
3122 if (slp_node)
3123 {
3124 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
3125 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
3126 }
3127 else
3128 {
3129 if (!*prev_stmt_info)
3130 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
3131 else
3132 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
3133
3134 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
3135 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
3136 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
3137 }
3138 }
3139 }
3140
3141 if (multi_step_cvt)
3142 {
3143       /* For a multi-step promotion operation we call the function recursively
3144          for every stage.  We start from the input type,
3145 create promotion operations to the intermediate types, and then
3146 create promotions to the output type. */
3147 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
3148 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
3149 multi_step_cvt - 1, stmt,
3150 vec_dsts, gsi, slp_node, code1,
3151 code2, decl2, decl2, op_type,
3152 prev_stmt_info);
3153 }
3154
3155 VEC_free (tree, heap, vec_tmp);
3156 }
3157
3158
3159 /* Function vectorizable_type_promotion
3160
3161 Check if STMT performs a binary or unary operation that involves
3162 type promotion, and if it can be vectorized.
3163 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3164 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3165 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3166
3167 static bool
3168 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
3169 gimple *vec_stmt, slp_tree slp_node)
3170 {
3171 tree vec_dest;
3172 tree scalar_dest;
3173 tree op0, op1 = NULL;
3174 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
3175 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3176 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3177 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3178 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3179 int op_type;
3180 tree def;
3181 gimple def_stmt;
3182 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3183 stmt_vec_info prev_stmt_info;
3184 int nunits_in;
3185 int nunits_out;
3186 tree vectype_out;
3187 int ncopies;
3188 int j, i;
3189 tree vectype_in;
3190 tree intermediate_type = NULL_TREE;
3191 int multi_step_cvt = 0;
3192 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
3193 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
3194
3195 /* FORNOW: not supported by basic block SLP vectorization. */
3196 gcc_assert (loop_vinfo);
3197
3198 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3199 return false;
3200
3201 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3202 return false;
3203
3204 /* Is STMT a vectorizable type-promotion operation? */
3205 if (!is_gimple_assign (stmt))
3206 return false;
3207
3208 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3209 return false;
3210
3211 code = gimple_assign_rhs_code (stmt);
3212 if (!CONVERT_EXPR_CODE_P (code)
3213 && code != WIDEN_MULT_EXPR)
3214 return false;
3215
3216 scalar_dest = gimple_assign_lhs (stmt);
3217 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3218
3219 /* Check the operands of the operation. */
3220 op0 = gimple_assign_rhs1 (stmt);
3221 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
3222 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3223 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
3224 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
3225 && CONVERT_EXPR_CODE_P (code))))
3226 return false;
3227 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
3228 &def_stmt, &def, &dt[0], &vectype_in))
3229 {
3230 if (vect_print_dump_info (REPORT_DETAILS))
3231 fprintf (vect_dump, "use not simple.");
3232 return false;
3233 }
3234   /* If op0 is an external or constant def, use a vector type with
3235 the same size as the output vector type. */
3236 if (!vectype_in)
3237 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
3238 if (vec_stmt)
3239 gcc_assert (vectype_in);
3240 if (!vectype_in)
3241 {
3242 if (vect_print_dump_info (REPORT_DETAILS))
3243 {
3244 fprintf (vect_dump, "no vectype for scalar type ");
3245 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
3246 }
3247
3248 return false;
3249 }
3250
3251 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3252 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3253 if (nunits_in <= nunits_out)
3254 return false;
3255
3256 /* Multiple types in SLP are handled by creating the appropriate number of
3257 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3258 case of SLP. */
3259 if (slp_node || PURE_SLP_STMT (stmt_info))
3260 ncopies = 1;
3261 else
3262 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3263
3264 gcc_assert (ncopies >= 1);
3265
3266 op_type = TREE_CODE_LENGTH (code);
3267 if (op_type == binary_op)
3268 {
3269 op1 = gimple_assign_rhs2 (stmt);
3270 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
3271 {
3272 if (vect_print_dump_info (REPORT_DETAILS))
3273 fprintf (vect_dump, "use not simple.");
3274 return false;
3275 }
3276 }
3277
3278 /* Supportable by target? */
3279 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3280 &decl1, &decl2, &code1, &code2,
3281 &multi_step_cvt, &interm_types))
3282 return false;
3283
3284   /* A binary widening operation can only be supported directly by the
3285 architecture. */
3286 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3287
3288 if (!vec_stmt) /* transformation not required. */
3289 {
3290 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3291 if (vect_print_dump_info (REPORT_DETAILS))
3292 fprintf (vect_dump, "=== vectorizable_promotion ===");
3293 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
3294 return true;
3295 }
3296
3297 /** Transform. **/
3298
3299 if (vect_print_dump_info (REPORT_DETAILS))
3300 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
3301 ncopies);
3302
3303 /* Handle def. */
3304   /* In case of multi-step promotion, we first generate promotion operations
3305      to the intermediate types, and then from those types to the final one.
3306      We store the vector destinations in VEC_DSTS in the correct order for
3307      recursive creation of promotion operations in
3308      vect_create_vectorized_promotion_stmts().  Vector destinations are created
3309      according to TYPES received from supportable_widening_operation().  */
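  /* As in the demotion case, the destination for the final type is pushed
     first and the intermediate-type destinations after it, so the recursion
     in vect_create_vectorized_promotion_stmts() can VEC_pop the destination
     matching the type produced at each step.  */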
3310 if (multi_step_cvt)
3311 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3312 else
3313 vec_dsts = VEC_alloc (tree, heap, 1);
3314
3315 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3316 VEC_quick_push (tree, vec_dsts, vec_dest);
3317
3318 if (multi_step_cvt)
3319 {
3320 for (i = VEC_length (tree, interm_types) - 1;
3321 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3322 {
3323 vec_dest = vect_create_destination_var (scalar_dest,
3324 intermediate_type);
3325 VEC_quick_push (tree, vec_dsts, vec_dest);
3326 }
3327 }
3328
3329 if (!slp_node)
3330 {
3331 vec_oprnds0 = VEC_alloc (tree, heap,
3332 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3333 if (op_type == binary_op)
3334 vec_oprnds1 = VEC_alloc (tree, heap, 1);
3335 }
3336
3337 /* In case the vectorization factor (VF) is bigger than the number
3338 of elements that we can fit in a vectype (nunits), we have to generate
3339 more than one vector stmt - i.e - we need to "unroll" the
3340 vector stmt by a factor VF/nunits. */
3341
3342 prev_stmt_info = NULL;
3343 for (j = 0; j < ncopies; j++)
3344 {
3345 /* Handle uses. */
3346 if (j == 0)
3347 {
3348 if (slp_node)
3349 vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
3350 &vec_oprnds1, -1);
3351 else
3352 {
3353 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3354 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
3355 if (op_type == binary_op)
3356 {
3357 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3358 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
3359 }
3360 }
3361 }
3362 else
3363 {
3364 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3365 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
3366 if (op_type == binary_op)
3367 {
3368 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3369 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3370 }
3371 }
3372
3373 /* Arguments are ready. Create the new vector stmts. */
3374 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3375 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
3376 multi_step_cvt, stmt,
3377 tmp_vec_dsts,
3378 gsi, slp_node, code1, code2,
3379 decl1, decl2, op_type,
3380 &prev_stmt_info);
3381 }
3382
3383 VEC_free (tree, heap, vec_dsts);
3384 VEC_free (tree, heap, tmp_vec_dsts);
3385 VEC_free (tree, heap, interm_types);
3386 VEC_free (tree, heap, vec_oprnds0);
3387 VEC_free (tree, heap, vec_oprnds1);
3388
3389 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3390 return true;
3391 }
3392
3393
3394 /* Function vectorizable_store.
3395
3396    Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
3397 can be vectorized.
3398 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3399 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3400 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3401
3402 static bool
3403 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3404 slp_tree slp_node)
3405 {
3406 tree scalar_dest;
3407 tree data_ref;
3408 tree op;
3409 tree vec_oprnd = NULL_TREE;
3410 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3411 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3412 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3413 tree elem_type;
3414 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3415 struct loop *loop = NULL;
3416 enum machine_mode vec_mode;
3417 tree dummy;
3418 enum dr_alignment_support alignment_support_scheme;
3419 tree def;
3420 gimple def_stmt;
3421 enum vect_def_type dt;
3422 stmt_vec_info prev_stmt_info = NULL;
3423 tree dataref_ptr = NULL_TREE;
3424 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3425 int ncopies;
3426 int j;
3427 gimple next_stmt, first_stmt = NULL;
3428 bool strided_store = false;
3429 bool store_lanes_p = false;
3430 unsigned int group_size, i;
3431 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3432 bool inv_p;
3433 VEC(tree,heap) *vec_oprnds = NULL;
3434 bool slp = (slp_node != NULL);
3435 unsigned int vec_num;
3436 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3437 tree aggr_type;
3438
3439 if (loop_vinfo)
3440 loop = LOOP_VINFO_LOOP (loop_vinfo);
3441
3442 /* Multiple types in SLP are handled by creating the appropriate number of
3443 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3444 case of SLP. */
3445 if (slp || PURE_SLP_STMT (stmt_info))
3446 ncopies = 1;
3447 else
3448 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3449
3450 gcc_assert (ncopies >= 1);
3451
3452 /* FORNOW. This restriction should be relaxed. */
3453 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3454 {
3455 if (vect_print_dump_info (REPORT_DETAILS))
3456 fprintf (vect_dump, "multiple types in nested loop.");
3457 return false;
3458 }
3459
3460 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3461 return false;
3462
3463 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3464 return false;
3465
3466 /* Is vectorizable store? */
3467
3468 if (!is_gimple_assign (stmt))
3469 return false;
3470
3471 scalar_dest = gimple_assign_lhs (stmt);
3472 if (TREE_CODE (scalar_dest) != ARRAY_REF
3473 && TREE_CODE (scalar_dest) != INDIRECT_REF
3474 && TREE_CODE (scalar_dest) != COMPONENT_REF
3475 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3476 && TREE_CODE (scalar_dest) != REALPART_EXPR
3477 && TREE_CODE (scalar_dest) != MEM_REF)
3478 return false;
3479
3480 gcc_assert (gimple_assign_single_p (stmt));
3481 op = gimple_assign_rhs1 (stmt);
3482 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3483 {
3484 if (vect_print_dump_info (REPORT_DETAILS))
3485 fprintf (vect_dump, "use not simple.");
3486 return false;
3487 }
3488
3489 /* The scalar rhs type needs to be trivially convertible to the vector
3490 component type. This should always be the case. */
3491 elem_type = TREE_TYPE (vectype);
3492 if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
3493 {
3494 if (vect_print_dump_info (REPORT_DETAILS))
3495 fprintf (vect_dump, "??? operands of different types");
3496 return false;
3497 }
3498
3499 vec_mode = TYPE_MODE (vectype);
3500 /* FORNOW. In some cases can vectorize even if data-type not supported
3501 (e.g. - array initialization with 0). */
3502 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3503 return false;
3504
3505 if (!STMT_VINFO_DATA_REF (stmt_info))
3506 return false;
3507
3508 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
3509 {
3510 if (vect_print_dump_info (REPORT_DETAILS))
3511 fprintf (vect_dump, "negative step for store.");
3512 return false;
3513 }
3514
3515 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3516 {
3517 strided_store = true;
3518 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3519 if (!slp && !PURE_SLP_STMT (stmt_info))
3520 {
3521 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3522 if (vect_store_lanes_supported (vectype, group_size))
3523 store_lanes_p = true;
3524 else if (!vect_strided_store_supported (vectype, group_size))
3525 return false;
3526 }
3527
3528 if (first_stmt == stmt)
3529 {
3530 /* STMT is the leader of the group. Check the operands of all the
3531 stmts of the group. */
3532 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3533 while (next_stmt)
3534 {
3535 gcc_assert (gimple_assign_single_p (next_stmt));
3536 op = gimple_assign_rhs1 (next_stmt);
3537 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3538 &def, &dt))
3539 {
3540 if (vect_print_dump_info (REPORT_DETAILS))
3541 fprintf (vect_dump, "use not simple.");
3542 return false;
3543 }
3544 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3545 }
3546 }
3547 }
3548
3549 if (!vec_stmt) /* transformation not required. */
3550 {
3551 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3552 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt, NULL);
3553 return true;
3554 }
3555
3556 /** Transform. **/
3557
3558 if (strided_store)
3559 {
3560 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3561 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3562
3563 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3564
3565 /* FORNOW */
3566 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3567
3568 /* We vectorize all the stmts of the interleaving group when we
3569 reach the last stmt in the group. */
3570 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3571 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3572 && !slp)
3573 {
3574 *vec_stmt = NULL;
3575 return true;
3576 }
3577
3578 if (slp)
3579 {
3580 strided_store = false;
3581 /* VEC_NUM is the number of vect stmts to be created for this
3582 group. */
3583 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3584 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3585 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3586 }
3587 else
3588 /* VEC_NUM is the number of vect stmts to be created for this
3589 group. */
3590 vec_num = group_size;
3591 }
3592 else
3593 {
3594 first_stmt = stmt;
3595 first_dr = dr;
3596 group_size = vec_num = 1;
3597 }
3598
3599 if (vect_print_dump_info (REPORT_DETAILS))
3600 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3601
3602 dr_chain = VEC_alloc (tree, heap, group_size);
3603 oprnds = VEC_alloc (tree, heap, group_size);
3604
3605 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3606 gcc_assert (alignment_support_scheme);
3607 /* Targets with store-lane instructions must not require explicit
3608 realignment. */
3609 gcc_assert (!store_lanes_p
3610 || alignment_support_scheme == dr_aligned
3611 || alignment_support_scheme == dr_unaligned_supported);
3612
3613 if (store_lanes_p)
3614 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
3615 else
3616 aggr_type = vectype;
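  /* AGGR_TYPE is the type of the data reference created below: for
     STORE_LANES it is an array covering the whole group (e.g. a group of
     two 4-element vector stores uses an 8-element array of ELEM_TYPE,
     written by a single STORE_LANES call), otherwise it is a single
     vector.  */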
3617
3618 /* In case the vectorization factor (VF) is bigger than the number
3619 of elements that we can fit in a vectype (nunits), we have to generate
3620    more than one vector stmt - i.e., we need to "unroll" the
3621 vector stmt by a factor VF/nunits. For more details see documentation in
3622 vect_get_vec_def_for_copy_stmt. */
3623
3624 /* In case of interleaving (non-unit strided access):
3625
3626 S1: &base + 2 = x2
3627 S2: &base = x0
3628 S3: &base + 1 = x1
3629 S4: &base + 3 = x3
3630
3631    We create vectorized stores starting from the base address (the access of
3632    the first stmt in the chain (S2 in the above example)), when the last store
3633    stmt of the chain (S4) is reached:
3634
3635 VS1: &base = vx2
3636 VS2: &base + vec_size*1 = vx0
3637 VS3: &base + vec_size*2 = vx1
3638 VS4: &base + vec_size*3 = vx3
3639
3640 Then permutation statements are generated:
3641
3642 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3643 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3644 ...
3645
3646 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3647 (the order of the data-refs in the output of vect_permute_store_chain
3648 corresponds to the order of scalar stmts in the interleaving chain - see
3649 the documentation of vect_permute_store_chain()).
3650
3651    In case of both multiple types and interleaving, the above vector stores and
3652 permutation stmts are created for every copy. The result vector stmts are
3653 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3654 STMT_VINFO_RELATED_STMT for the next copies.
3655 */
3656
3657 prev_stmt_info = NULL;
3658 for (j = 0; j < ncopies; j++)
3659 {
3660 gimple new_stmt;
3661 gimple ptr_incr;
3662
3663 if (j == 0)
3664 {
3665 if (slp)
3666 {
3667 /* Get vectorized arguments for SLP_NODE. */
3668 vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
3669 NULL, -1);
3670
3671 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3672 }
3673 else
3674 {
3675 /* For interleaved stores we collect vectorized defs for all the
3676 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3677 used as an input to vect_permute_store_chain(), and OPRNDS as
3678 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3679
3680 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3681 OPRNDS are of size 1. */
3682 next_stmt = first_stmt;
3683 for (i = 0; i < group_size; i++)
3684 {
3685 /* Since gaps are not supported for interleaved stores,
3686 GROUP_SIZE is the exact number of stmts in the chain.
3687 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3688 there is no interleaving, GROUP_SIZE is 1, and only one
3689 iteration of the loop will be executed. */
3690 gcc_assert (next_stmt
3691 && gimple_assign_single_p (next_stmt));
3692 op = gimple_assign_rhs1 (next_stmt);
3693
3694 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3695 NULL);
3696              VEC_quick_push (tree, dr_chain, vec_oprnd);
3697              VEC_quick_push (tree, oprnds, vec_oprnd);
3698 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3699 }
3700 }
3701
3702          /* We should have caught mismatched types earlier.  */
3703 gcc_assert (useless_type_conversion_p (vectype,
3704 TREE_TYPE (vec_oprnd)));
3705 dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, NULL,
3706 NULL_TREE, &dummy, gsi,
3707 &ptr_incr, false, &inv_p);
3708 gcc_assert (bb_vinfo || !inv_p);
3709 }
3710 else
3711 {
3712 /* For interleaved stores we created vectorized defs for all the
3713 defs stored in OPRNDS in the previous iteration (previous copy).
3714 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3715 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3716 next copy.
3717 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3718 OPRNDS are of size 1. */
3719 for (i = 0; i < group_size; i++)
3720 {
3721 op = VEC_index (tree, oprnds, i);
3722 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3723 &dt);
3724 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3725              VEC_replace (tree, dr_chain, i, vec_oprnd);
3726              VEC_replace (tree, oprnds, i, vec_oprnd);
3727 }
3728 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3729 TYPE_SIZE_UNIT (aggr_type));
3730 }
3731
3732 if (store_lanes_p)
3733 {
3734 tree vec_array;
3735
3736 /* Combine all the vectors into an array. */
3737 vec_array = create_vector_array (vectype, vec_num);
3738 for (i = 0; i < vec_num; i++)
3739 {
3740 vec_oprnd = VEC_index (tree, dr_chain, i);
3741 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
3742 }
3743
3744 /* Emit:
3745 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
3746 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
3747 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
3748 gimple_call_set_lhs (new_stmt, data_ref);
3749 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3750 mark_symbols_for_renaming (new_stmt);
3751 }
3752 else
3753 {
3754 new_stmt = NULL;
3755 if (strided_store)
3756 {
3757 result_chain = VEC_alloc (tree, heap, group_size);
3758 /* Permute. */
3759 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3760 &result_chain);
3761 }
3762
3763 next_stmt = first_stmt;
3764 for (i = 0; i < vec_num; i++)
3765 {
3766 struct ptr_info_def *pi;
3767
3768 if (i > 0)
3769 /* Bump the vector pointer. */
3770 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
3771 stmt, NULL_TREE);
3772
3773 if (slp)
3774 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3775 else if (strided_store)
3776 /* For strided stores vectorized defs are interleaved in
3777 vect_permute_store_chain(). */
3778 vec_oprnd = VEC_index (tree, result_chain, i);
3779
3780 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
3781 build_int_cst (reference_alias_ptr_type
3782 (DR_REF (first_dr)), 0));
3783 pi = get_ptr_info (dataref_ptr);
3784 pi->align = TYPE_ALIGN_UNIT (vectype);
3785 if (aligned_access_p (first_dr))
3786 pi->misalign = 0;
3787 else if (DR_MISALIGNMENT (first_dr) == -1)
3788 {
3789 TREE_TYPE (data_ref)
3790 = build_aligned_type (TREE_TYPE (data_ref),
3791 TYPE_ALIGN (elem_type));
3792 pi->align = TYPE_ALIGN_UNIT (elem_type);
3793 pi->misalign = 0;
3794 }
3795 else
3796 {
3797 TREE_TYPE (data_ref)
3798 = build_aligned_type (TREE_TYPE (data_ref),
3799 TYPE_ALIGN (elem_type));
3800 pi->misalign = DR_MISALIGNMENT (first_dr);
3801 }
3802
3803 /* Arguments are ready. Create the new vector stmt. */
3804 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3805 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3806 mark_symbols_for_renaming (new_stmt);
3807
3808 if (slp)
3809 continue;
3810
3811 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3812 if (!next_stmt)
3813 break;
3814 }
3815 }
3816 if (!slp)
3817 {
3818 if (j == 0)
3819 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3820 else
3821 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3822 prev_stmt_info = vinfo_for_stmt (new_stmt);
3823 }
3824 }
3825
3826 VEC_free (tree, heap, dr_chain);
3827 VEC_free (tree, heap, oprnds);
3828 if (result_chain)
3829 VEC_free (tree, heap, result_chain);
3830 if (vec_oprnds)
3831 VEC_free (tree, heap, vec_oprnds);
3832
3833 return true;
3834 }
3835
3836 /* Given a vector type VECTYPE returns a builtin DECL to be used
3837 for vector permutation and stores a mask into *MASK that implements
3838 reversal of the vector elements. If that is impossible to do
3839 returns NULL (and *MASK is unchanged). */
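/* Such a mask picks the vector elements in reverse order; for a 4-element
   vector, for example, it selects elements 3, 2, 1, 0.  */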
3840
3841 static tree
3842 perm_mask_for_reverse (tree vectype, tree *mask)
3843 {
3844 tree builtin_decl;
3845 tree mask_element_type, mask_type;
3846 tree mask_vec = NULL;
3847 int i;
3848 int nunits;
3849 if (!targetm.vectorize.builtin_vec_perm)
3850 return NULL;
3851
3852 builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
3853 &mask_element_type);
3854 if (!builtin_decl || !mask_element_type)
3855 return NULL;
3856
3857 mask_type = get_vectype_for_scalar_type (mask_element_type);
3858 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3859 if (!mask_type
3860 || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
3861 return NULL;
3862
3863 for (i = 0; i < nunits; i++)
3864 mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
3865 mask_vec = build_vector (mask_type, mask_vec);
3866
3867 if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
3868 return NULL;
3869 if (mask)
3870 *mask = mask_vec;
3871 return builtin_decl;
3872 }
3873
3874 /* Given a vector variable X that was generated for the scalar LHS of
3875    STMT, generate instructions to reverse the vector elements of X,
3876    insert them at *GSI and return the permuted vector variable.  */
3877
3878 static tree
3879 reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
3880 {
3881 tree vectype = TREE_TYPE (x);
3882 tree mask_vec, builtin_decl;
3883 tree perm_dest, data_ref;
3884 gimple perm_stmt;
3885
3886 builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);
3887
3888 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3889
3890 /* Generate the permute statement. */
3891 perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
3892 if (!useless_type_conversion_p (vectype,
3893 TREE_TYPE (TREE_TYPE (builtin_decl))))
3894 {
3895 tree tem = create_tmp_reg (TREE_TYPE (TREE_TYPE (builtin_decl)), NULL);
3896 tem = make_ssa_name (tem, perm_stmt);
3897 gimple_call_set_lhs (perm_stmt, tem);
3898 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3899 perm_stmt = gimple_build_assign (NULL_TREE,
3900 build1 (VIEW_CONVERT_EXPR,
3901 vectype, tem));
3902 }
3903 data_ref = make_ssa_name (perm_dest, perm_stmt);
3904 gimple_set_lhs (perm_stmt, data_ref);
3905 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3906
3907 return data_ref;
3908 }
3909
3910 /* vectorizable_load.
3911
3912    Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3913 can be vectorized.
3914 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3915    stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3916 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3917
3918 static bool
3919 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3920 slp_tree slp_node, slp_instance slp_node_instance)
3921 {
3922 tree scalar_dest;
3923 tree vec_dest = NULL;
3924 tree data_ref = NULL;
3925 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3926 stmt_vec_info prev_stmt_info;
3927 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3928 struct loop *loop = NULL;
3929 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3930 bool nested_in_vect_loop = false;
3931 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3932 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3933 tree elem_type;
3934 tree new_temp;
3935 enum machine_mode mode;
3936 gimple new_stmt = NULL;
3937 tree dummy;
3938 enum dr_alignment_support alignment_support_scheme;
3939 tree dataref_ptr = NULL_TREE;
3940 gimple ptr_incr;
3941 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3942 int ncopies;
3943 int i, j, group_size;
3944 tree msq = NULL_TREE, lsq;
3945 tree offset = NULL_TREE;
3946 tree realignment_token = NULL_TREE;
3947 gimple phi = NULL;
3948 VEC(tree,heap) *dr_chain = NULL;
3949 bool strided_load = false;
3950 bool load_lanes_p = false;
3951 gimple first_stmt;
3952 tree scalar_type;
3953 bool inv_p;
3954 bool negative;
3955 bool compute_in_loop = false;
3956 struct loop *at_loop;
3957 int vec_num;
3958 bool slp = (slp_node != NULL);
3959 bool slp_perm = false;
3960 enum tree_code code;
3961 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3962 int vf;
3963 tree aggr_type;
3964
3965 if (loop_vinfo)
3966 {
3967 loop = LOOP_VINFO_LOOP (loop_vinfo);
3968 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3969 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3970 }
3971 else
3972 vf = 1;
3973
3974 /* Multiple types in SLP are handled by creating the appropriate number of
3975 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3976 case of SLP. */
3977 if (slp || PURE_SLP_STMT (stmt_info))
3978 ncopies = 1;
3979 else
3980 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3981
3982 gcc_assert (ncopies >= 1);
3983
3984 /* FORNOW. This restriction should be relaxed. */
3985 if (nested_in_vect_loop && ncopies > 1)
3986 {
3987 if (vect_print_dump_info (REPORT_DETAILS))
3988 fprintf (vect_dump, "multiple types in nested loop.");
3989 return false;
3990 }
3991
3992 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3993 return false;
3994
3995 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3996 return false;
3997
3998 /* Is vectorizable load? */
3999 if (!is_gimple_assign (stmt))
4000 return false;
4001
4002 scalar_dest = gimple_assign_lhs (stmt);
4003 if (TREE_CODE (scalar_dest) != SSA_NAME)
4004 return false;
4005
4006 code = gimple_assign_rhs_code (stmt);
4007 if (code != ARRAY_REF
4008 && code != INDIRECT_REF
4009 && code != COMPONENT_REF
4010 && code != IMAGPART_EXPR
4011 && code != REALPART_EXPR
4012 && code != MEM_REF)
4013 return false;
4014
4015 if (!STMT_VINFO_DATA_REF (stmt_info))
4016 return false;
4017
4018 negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
4019 if (negative && ncopies > 1)
4020 {
4021 if (vect_print_dump_info (REPORT_DETAILS))
4022 fprintf (vect_dump, "multiple types with negative step.");
4023 return false;
4024 }
4025
4026 scalar_type = TREE_TYPE (DR_REF (dr));
4027 mode = TYPE_MODE (vectype);
4028
4029 /* FORNOW. In some cases can vectorize even if data-type not supported
4030 (e.g. - data copies). */
4031 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
4032 {
4033 if (vect_print_dump_info (REPORT_DETAILS))
4034 fprintf (vect_dump, "Aligned load, but unsupported type.");
4035 return false;
4036 }
4037
4038 /* The vector component type needs to be trivially convertible to the
4039 scalar lhs. This should always be the case. */
4040 elem_type = TREE_TYPE (vectype);
4041 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
4042 {
4043 if (vect_print_dump_info (REPORT_DETAILS))
4044 fprintf (vect_dump, "??? operands of different types");
4045 return false;
4046 }
4047
4048 /* Check if the load is a part of an interleaving chain. */
4049 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
4050 {
4051 strided_load = true;
4052 /* FORNOW */
4053 gcc_assert (! nested_in_vect_loop);
4054
4055 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
4056 if (!slp && !PURE_SLP_STMT (stmt_info))
4057 {
4058 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
4059 if (vect_load_lanes_supported (vectype, group_size))
4060 load_lanes_p = true;
4061 else if (!vect_strided_load_supported (vectype, group_size))
4062 return false;
4063 }
4064 }
4065
4066 if (negative)
4067 {
4068 gcc_assert (!strided_load);
4069 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
4070 if (alignment_support_scheme != dr_aligned
4071 && alignment_support_scheme != dr_unaligned_supported)
4072 {
4073 if (vect_print_dump_info (REPORT_DETAILS))
4074 fprintf (vect_dump, "negative step but alignment required.");
4075 return false;
4076 }
4077 if (!perm_mask_for_reverse (vectype, NULL))
4078 {
4079 if (vect_print_dump_info (REPORT_DETAILS))
4080 fprintf (vect_dump, "negative step and reversing not supported.");
4081 return false;
4082 }
4083 }
4084
4085 if (!vec_stmt) /* transformation not required. */
4086 {
4087 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
4088 vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL);
4089 return true;
4090 }
4091
4092 if (vect_print_dump_info (REPORT_DETAILS))
4093 fprintf (vect_dump, "transform load. ncopies = %d", ncopies);
4094
4095 /** Transform. **/
4096
4097 if (strided_load)
4098 {
4099 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
4100 /* Check if the chain of loads is already vectorized. */
4101 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
4102 {
4103 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4104 return true;
4105 }
4106 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
4107 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
4108
4109 /* VEC_NUM is the number of vect stmts to be created for this group. */
4110 if (slp)
4111 {
4112 strided_load = false;
4113 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4114 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
4115 slp_perm = true;
4116 }
4117 else
4118 vec_num = group_size;
4119 }
4120 else
4121 {
4122 first_stmt = stmt;
4123 first_dr = dr;
4124 group_size = vec_num = 1;
4125 }
4126
4127 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
4128 gcc_assert (alignment_support_scheme);
4129 /* Targets with load-lane instructions must not require explicit
4130 realignment. */
4131 gcc_assert (!load_lanes_p
4132 || alignment_support_scheme == dr_aligned
4133 || alignment_support_scheme == dr_unaligned_supported);
4134
4135 /* In case the vectorization factor (VF) is bigger than the number
4136 of elements that we can fit in a vectype (nunits), we have to generate
4137    more than one vector stmt - i.e., we need to "unroll" the
4138 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4139 from one copy of the vector stmt to the next, in the field
4140 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4141 stages to find the correct vector defs to be used when vectorizing
4142 stmts that use the defs of the current stmt. The example below
4143 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
4144 need to create 4 vectorized stmts):
4145
4146 before vectorization:
4147 RELATED_STMT VEC_STMT
4148 S1: x = memref - -
4149 S2: z = x + 1 - -
4150
4151 step 1: vectorize stmt S1:
4152 We first create the vector stmt VS1_0, and, as usual, record a
4153 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
4154 Next, we create the vector stmt VS1_1, and record a pointer to
4155 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
4156 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
4157 stmts and pointers:
4158 RELATED_STMT VEC_STMT
4159 VS1_0: vx0 = memref0 VS1_1 -
4160 VS1_1: vx1 = memref1 VS1_2 -
4161 VS1_2: vx2 = memref2 VS1_3 -
4162 VS1_3: vx3 = memref3 - -
4163 S1: x = load - VS1_0
4164 S2: z = x + 1 - -
4165
4166 See in documentation in vect_get_vec_def_for_stmt_copy for how the
4167 information we recorded in RELATED_STMT field is used to vectorize
4168 stmt S2. */
4169
4170 /* In case of interleaving (non-unit strided access):
4171
4172 S1: x2 = &base + 2
4173 S2: x0 = &base
4174 S3: x1 = &base + 1
4175 S4: x3 = &base + 3
4176
4177 Vectorized loads are created in the order of memory accesses
4178 starting from the access of the first stmt of the chain:
4179
4180 VS1: vx0 = &base
4181 VS2: vx1 = &base + vec_size*1
4182 VS3: vx3 = &base + vec_size*2
4183 VS4: vx4 = &base + vec_size*3
4184
4185 Then permutation statements are generated:
4186
4187 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
4188 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
4189 ...
4190
4191 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
4192 (the order of the data-refs in the output of vect_permute_load_chain
4193 corresponds to the order of scalar stmts in the interleaving chain - see
4194 the documentation of vect_permute_load_chain()).
4195 The generation of permutation stmts and recording them in
4196 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
4197
4198 In case of both multiple types and interleaving, the vector loads and
4199 permutation stmts above are created for every copy. The result vector
4200 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
4201 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
4202
4203 /* If the data reference is aligned (dr_aligned) or potentially unaligned
4204 on a target that supports unaligned accesses (dr_unaligned_supported)
4205 we generate the following code:
4206 p = initial_addr;
4207 indx = 0;
4208 loop {
4209 p = p + indx * vectype_size;
4210 vec_dest = *(p);
4211 indx = indx + 1;
4212 }
4213
4214 Otherwise, the data reference is potentially unaligned on a target that
4215 does not support unaligned accesses (dr_explicit_realign_optimized) -
4216 then generate the following code, in which the data in each iteration is
4217 obtained by two vector loads, one from the previous iteration, and one
4218 from the current iteration:
4219 p1 = initial_addr;
4220 msq_init = *(floor(p1))
4221 p2 = initial_addr + VS - 1;
4222 realignment_token = call target_builtin;
4223 indx = 0;
4224 loop {
4225 p2 = p2 + indx * vectype_size
4226 lsq = *(floor(p2))
4227 vec_dest = realign_load (msq, lsq, realignment_token)
4228 indx = indx + 1;
4229 msq = lsq;
4230 } */
4231
4232 /* If the misalignment remains the same throughout the execution of the
4233 loop, we can create the init_addr and permutation mask at the loop
4234 preheader. Otherwise, it needs to be created inside the loop.
4235 This can only occur when vectorizing memory accesses in the inner-loop
4236 nested within an outer-loop that is being vectorized. */
4237
4238 if (loop && nested_in_vect_loop_p (loop, stmt)
4239 && (TREE_INT_CST_LOW (DR_STEP (dr))
4240 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
4241 {
4242 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
4243 compute_in_loop = true;
4244 }
4245
4246 if ((alignment_support_scheme == dr_explicit_realign_optimized
4247 || alignment_support_scheme == dr_explicit_realign)
4248 && !compute_in_loop)
4249 {
4250 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
4251 alignment_support_scheme, NULL_TREE,
4252 &at_loop);
4253 if (alignment_support_scheme == dr_explicit_realign_optimized)
4254 {
4255 phi = SSA_NAME_DEF_STMT (msq);
4256 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4257 }
4258 }
4259 else
4260 at_loop = loop;
4261
4262 if (negative)
4263 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
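  /* For a negative step the data pointer is biased by -(nunits - 1)
     elements (e.g. -3 for a 4-element vector), so that a single vector
     load also covers the lower-addressed elements that the following
     scalar iterations would read; reverse_vec_elements below then puts
     the elements back into scalar order.  */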
4264
4265 if (load_lanes_p)
4266 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
4267 else
4268 aggr_type = vectype;
4269
4270 prev_stmt_info = NULL;
4271 for (j = 0; j < ncopies; j++)
4272 {
4273 /* 1. Create the vector or array pointer update chain. */
4274 if (j == 0)
4275 dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
4276 offset, &dummy, gsi,
4277 &ptr_incr, false, &inv_p);
4278 else
4279 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
4280 TYPE_SIZE_UNIT (aggr_type));
4281
4282 if (strided_load || slp_perm)
4283 dr_chain = VEC_alloc (tree, heap, vec_num);
4284
4285 if (load_lanes_p)
4286 {
4287 tree vec_array;
4288
4289 vec_array = create_vector_array (vectype, vec_num);
4290
4291 /* Emit:
4292 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
4293 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
4294 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
4295 gimple_call_set_lhs (new_stmt, vec_array);
4296 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4297 mark_symbols_for_renaming (new_stmt);
4298
4299 /* Extract each vector into an SSA_NAME. */
4300 for (i = 0; i < vec_num; i++)
4301 {
4302 new_temp = read_vector_array (stmt, gsi, scalar_dest,
4303 vec_array, i);
4304 VEC_quick_push (tree, dr_chain, new_temp);
4305 }
4306
4307 /* Record the mapping between SSA_NAMEs and statements. */
4308 vect_record_strided_load_vectors (stmt, dr_chain);
4309 }
4310 else
4311 {
4312 for (i = 0; i < vec_num; i++)
4313 {
4314 if (i > 0)
4315 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
4316 stmt, NULL_TREE);
4317
4318 /* 2. Create the vector-load in the loop. */
4319 switch (alignment_support_scheme)
4320 {
4321 case dr_aligned:
4322 case dr_unaligned_supported:
4323 {
4324 struct ptr_info_def *pi;
4325 data_ref
4326 = build2 (MEM_REF, vectype, dataref_ptr,
4327 build_int_cst (reference_alias_ptr_type
4328 (DR_REF (first_dr)), 0));
4329 pi = get_ptr_info (dataref_ptr);
4330 pi->align = TYPE_ALIGN_UNIT (vectype);
4331 if (alignment_support_scheme == dr_aligned)
4332 {
4333 gcc_assert (aligned_access_p (first_dr));
4334 pi->misalign = 0;
4335 }
4336 else if (DR_MISALIGNMENT (first_dr) == -1)
4337 {
4338 TREE_TYPE (data_ref)
4339 = build_aligned_type (TREE_TYPE (data_ref),
4340 TYPE_ALIGN (elem_type));
4341 pi->align = TYPE_ALIGN_UNIT (elem_type);
4342 pi->misalign = 0;
4343 }
4344 else
4345 {
4346 TREE_TYPE (data_ref)
4347 = build_aligned_type (TREE_TYPE (data_ref),
4348 TYPE_ALIGN (elem_type));
4349 pi->misalign = DR_MISALIGNMENT (first_dr);
4350 }
4351 break;
4352 }
4353 case dr_explicit_realign:
4354 {
4355 tree ptr, bump;
4356 tree vs_minus_1;
4357
4358 vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4359
4360 if (compute_in_loop)
4361 msq = vect_setup_realignment (first_stmt, gsi,
4362 &realignment_token,
4363 dr_explicit_realign,
4364 dataref_ptr, NULL);
4365
4366 new_stmt = gimple_build_assign_with_ops
4367 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4368 build_int_cst
4369 (TREE_TYPE (dataref_ptr),
4370 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4371 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4372 gimple_assign_set_lhs (new_stmt, ptr);
4373 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4374 data_ref
4375 = build2 (MEM_REF, vectype, ptr,
4376 build_int_cst (reference_alias_ptr_type
4377 (DR_REF (first_dr)), 0));
4378 vec_dest = vect_create_destination_var (scalar_dest,
4379 vectype);
4380 new_stmt = gimple_build_assign (vec_dest, data_ref);
4381 new_temp = make_ssa_name (vec_dest, new_stmt);
4382 gimple_assign_set_lhs (new_stmt, new_temp);
4383 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
4384 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
4385 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4386 msq = new_temp;
4387
4388 bump = size_binop (MULT_EXPR, vs_minus_1,
4389 TYPE_SIZE_UNIT (scalar_type));
4390 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
4391 new_stmt = gimple_build_assign_with_ops
4392 (BIT_AND_EXPR, NULL_TREE, ptr,
4393 build_int_cst
4394 (TREE_TYPE (ptr),
4395 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4396 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4397 gimple_assign_set_lhs (new_stmt, ptr);
4398 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4399 data_ref
4400 = build2 (MEM_REF, vectype, ptr,
4401 build_int_cst (reference_alias_ptr_type
4402 (DR_REF (first_dr)), 0));
4403 break;
4404 }
4405 case dr_explicit_realign_optimized:
4406 new_stmt = gimple_build_assign_with_ops
4407 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4408 build_int_cst
4409 (TREE_TYPE (dataref_ptr),
4410 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4411 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr),
4412 new_stmt);
4413 gimple_assign_set_lhs (new_stmt, new_temp);
4414 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4415 data_ref
4416 = build2 (MEM_REF, vectype, new_temp,
4417 build_int_cst (reference_alias_ptr_type
4418 (DR_REF (first_dr)), 0));
4419 break;
4420 default:
4421 gcc_unreachable ();
4422 }
4423 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4424 new_stmt = gimple_build_assign (vec_dest, data_ref);
4425 new_temp = make_ssa_name (vec_dest, new_stmt);
4426 gimple_assign_set_lhs (new_stmt, new_temp);
4427 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4428 mark_symbols_for_renaming (new_stmt);
4429
4430 /* 3. Handle explicit realignment if necessary/supported.
4431 Create in loop:
4432 vec_dest = realign_load (msq, lsq, realignment_token) */
4433 if (alignment_support_scheme == dr_explicit_realign_optimized
4434 || alignment_support_scheme == dr_explicit_realign)
4435 {
4436 lsq = gimple_assign_lhs (new_stmt);
4437 if (!realignment_token)
4438 realignment_token = dataref_ptr;
4439 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4440 new_stmt
4441 = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR,
4442 vec_dest, msq, lsq,
4443 realignment_token);
4444 new_temp = make_ssa_name (vec_dest, new_stmt);
4445 gimple_assign_set_lhs (new_stmt, new_temp);
4446 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4447
4448 if (alignment_support_scheme == dr_explicit_realign_optimized)
4449 {
4450 gcc_assert (phi);
4451 if (i == vec_num - 1 && j == ncopies - 1)
4452 add_phi_arg (phi, lsq,
4453 loop_latch_edge (containing_loop),
4454 UNKNOWN_LOCATION);
4455 msq = lsq;
4456 }
4457 }
4458
4459 /* 4. Handle invariant-load. */
4460 if (inv_p && !bb_vinfo)
4461 {
4462 gcc_assert (!strided_load);
4463 gcc_assert (nested_in_vect_loop_p (loop, stmt));
4464 if (j == 0)
4465 {
4466 int k;
4467 tree t = NULL_TREE;
4468 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
4469
4470                  /* CHECKME: bitpos depends on endianness?  */
4471 bitpos = bitsize_zero_node;
4472 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
4473 bitsize, bitpos);
4474 vec_dest = vect_create_destination_var (scalar_dest,
4475 NULL_TREE);
4476 new_stmt = gimple_build_assign (vec_dest, vec_inv);
4477 new_temp = make_ssa_name (vec_dest, new_stmt);
4478 gimple_assign_set_lhs (new_stmt, new_temp);
4479 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4480
4481 for (k = nunits - 1; k >= 0; --k)
4482 t = tree_cons (NULL_TREE, new_temp, t);
4483 /* FIXME: use build_constructor directly. */
4484 vec_inv = build_constructor_from_list (vectype, t);
4485 new_temp = vect_init_vector (stmt, vec_inv,
4486 vectype, gsi);
4487 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4488 }
4489 else
4490 gcc_unreachable (); /* FORNOW. */
4491 }
4492
4493 if (negative)
4494 {
4495 new_temp = reverse_vec_elements (new_temp, stmt, gsi);
4496 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4497 }
4498
4499 /* Collect vector loads and later create their permutation in
4500 vect_transform_strided_load (). */
4501 if (strided_load || slp_perm)
4502 VEC_quick_push (tree, dr_chain, new_temp);
4503
4504 /* Store vector loads in the corresponding SLP_NODE. */
4505 if (slp && !slp_perm)
4506 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
4507 new_stmt);
4508 }
4509 }
4510
4511 if (slp && !slp_perm)
4512 continue;
4513
4514 if (slp_perm)
4515 {
4516 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
4517 slp_node_instance, false))
4518 {
4519 VEC_free (tree, heap, dr_chain);
4520 return false;
4521 }
4522 }
4523 else
4524 {
4525 if (strided_load)
4526 {
4527 if (!load_lanes_p)
4528 vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
4529 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4530 }
4531 else
4532 {
4533 if (j == 0)
4534 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4535 else
4536 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4537 prev_stmt_info = vinfo_for_stmt (new_stmt);
4538 }
4539 }
4540 if (dr_chain)
4541 VEC_free (tree, heap, dr_chain);
4542 }
4543
4544 return true;
4545 }
4546
4547 /* Function vect_is_simple_cond.
4548
4549 Input:
4550    LOOP_VINFO - the vect info of the loop that is being vectorized.
4551 COND - Condition that is checked for simple use.
4552
4553    Returns whether COND can be vectorized.  Checks whether the
4554    condition operands are supportable using vect_is_simple_use.  */
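/* For example, a condition like `a_1 < 5' is simple when a_1 is either
   defined by a vectorizable stmt or is loop invariant; conditions whose
   operands cannot be analyzed by vect_is_simple_use are rejected.  */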
4555
4556 static bool
4557 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
4558 {
4559 tree lhs, rhs;
4560 tree def;
4561 enum vect_def_type dt;
4562
4563 if (!COMPARISON_CLASS_P (cond))
4564 return false;
4565
4566 lhs = TREE_OPERAND (cond, 0);
4567 rhs = TREE_OPERAND (cond, 1);
4568
4569 if (TREE_CODE (lhs) == SSA_NAME)
4570 {
4571 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
4572 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
4573 &dt))
4574 return false;
4575 }
4576 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
4577 && TREE_CODE (lhs) != FIXED_CST)
4578 return false;
4579
4580 if (TREE_CODE (rhs) == SSA_NAME)
4581 {
4582 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
4583 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
4584 &dt))
4585 return false;
4586 }
4587 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
4588 && TREE_CODE (rhs) != FIXED_CST)
4589 return false;
4590
4591 return true;
4592 }
4593
4594 /* vectorizable_condition.
4595
4596    Check if STMT is a conditional modify expression that can be vectorized.
4597 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4598 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4599 at GSI.
4600
4601    When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
4602    to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
4603    the else clause if it is 2).
4604
4605 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4606
4607 bool
4608 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4609 gimple *vec_stmt, tree reduc_def, int reduc_index)
4610 {
4611 tree scalar_dest = NULL_TREE;
4612 tree vec_dest = NULL_TREE;
4613 tree op = NULL_TREE;
4614 tree cond_expr, then_clause, else_clause;
4615 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4616 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4617 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
4618 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
4619 tree vec_compare, vec_cond_expr;
4620 tree new_temp;
4621 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4622 enum machine_mode vec_mode;
4623 tree def;
4624 enum vect_def_type dt, dts[4];
4625 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4626 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4627 enum tree_code code;
4628 stmt_vec_info prev_stmt_info = NULL;
4629 int j;
4630
4631 /* FORNOW: unsupported in basic block SLP. */
4632 gcc_assert (loop_vinfo);
4633
4634 /* FORNOW: SLP not supported. */
4635 if (STMT_SLP_TYPE (stmt_info))
4636 return false;
4637
4638 gcc_assert (ncopies >= 1);
4639 if (reduc_index && ncopies > 1)
4640 return false; /* FORNOW */
4641
4642 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4643 return false;
4644
4645 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4646 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4647 && reduc_def))
4648 return false;
4649
4650 /* FORNOW: not yet supported. */
4651 if (STMT_VINFO_LIVE_P (stmt_info))
4652 {
4653 if (vect_print_dump_info (REPORT_DETAILS))
4654 fprintf (vect_dump, "value used after loop.");
4655 return false;
4656 }
4657
4658 /* Is vectorizable conditional operation? */
4659 if (!is_gimple_assign (stmt))
4660 return false;
4661
4662 code = gimple_assign_rhs_code (stmt);
4663
4664 if (code != COND_EXPR)
4665 return false;
4666
4667 gcc_assert (gimple_assign_single_p (stmt));
4668 op = gimple_assign_rhs1 (stmt);
4669 cond_expr = TREE_OPERAND (op, 0);
4670 then_clause = TREE_OPERAND (op, 1);
4671 else_clause = TREE_OPERAND (op, 2);
4672
4673 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4674 return false;
4675
4676 /* We do not handle two different vector types for the condition
4677 and the values. */
4678 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4679 TREE_TYPE (vectype)))
4680 return false;
4681
4682 if (TREE_CODE (then_clause) == SSA_NAME)
4683 {
4684 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
4685 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
4686 &then_def_stmt, &def, &dt))
4687 return false;
4688 }
4689 else if (TREE_CODE (then_clause) != INTEGER_CST
4690 && TREE_CODE (then_clause) != REAL_CST
4691 && TREE_CODE (then_clause) != FIXED_CST)
4692 return false;
4693
4694 if (TREE_CODE (else_clause) == SSA_NAME)
4695 {
4696 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
4697 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
4698 &else_def_stmt, &def, &dt))
4699 return false;
4700 }
4701 else if (TREE_CODE (else_clause) != INTEGER_CST
4702 && TREE_CODE (else_clause) != REAL_CST
4703 && TREE_CODE (else_clause) != FIXED_CST)
4704 return false;
4705
4706
4707 vec_mode = TYPE_MODE (vectype);
4708
4709 if (!vec_stmt)
4710 {
4711 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
4712 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
4713 }
4714
4715 /* Transform */
4716
4717 /* Handle def. */
4718 scalar_dest = gimple_assign_lhs (stmt);
4719 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4720
4721 /* Handle cond expr. */
4722 for (j = 0; j < ncopies; j++)
4723 {
4724 gimple new_stmt;
4725 if (j == 0)
4726 {
4727 gimple gtemp;
4728 vec_cond_lhs =
4729 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
4730 stmt, NULL);
4731 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
4732 NULL, &gtemp, &def, &dts[0]);
4733 vec_cond_rhs =
4734 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
4735 stmt, NULL);
4736 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
4737 NULL, &gtemp, &def, &dts[1]);
4738 if (reduc_index == 1)
4739 vec_then_clause = reduc_def;
4740 else
4741 {
4742 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
4743 stmt, NULL);
4744 vect_is_simple_use (then_clause, loop_vinfo,
4745 NULL, &gtemp, &def, &dts[2]);
4746 }
4747 if (reduc_index == 2)
4748 vec_else_clause = reduc_def;
4749 else
4750 {
4751 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
4752 stmt, NULL);
4753 vect_is_simple_use (else_clause, loop_vinfo,
4754 NULL, &gtemp, &def, &dts[3]);
4755 }
4756 }
4757 else
4758 {
4759 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
4760 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
4761 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
4762 vec_then_clause);
4763 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
4764 vec_else_clause);
4765 }
4766
4767 /* Arguments are ready. Create the new vector stmt. */
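      /* For example, for the scalar stmt `x = a < b ? c : d' this builds
	 vx = VEC_COND_EXPR <va < vb, vc, vd>, where va, vb, vc and vd are
	 the vectorized forms of the operands obtained above.  */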
4768 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4769 vec_cond_lhs, vec_cond_rhs);
4770 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4771 vec_compare, vec_then_clause, vec_else_clause);
4772
4773 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4774 new_temp = make_ssa_name (vec_dest, new_stmt);
4775 gimple_assign_set_lhs (new_stmt, new_temp);
4776 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4777 if (j == 0)
4778 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4779 else
4780 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4781
4782 prev_stmt_info = vinfo_for_stmt (new_stmt);
4783 }
4784
4785 return true;
4786 }
4787
4788
4789 /* Make sure the statement is vectorizable. */
4790
4791 bool
4792 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4793 {
4794 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4795 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4796 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4797 bool ok;
4798 tree scalar_type, vectype;
4799
4800 if (vect_print_dump_info (REPORT_DETAILS))
4801 {
4802 fprintf (vect_dump, "==> examining statement: ");
4803 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4804 }
4805
4806 if (gimple_has_volatile_ops (stmt))
4807 {
4808 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4809 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4810
4811 return false;
4812 }
4813
4814 /* Skip stmts that do not need to be vectorized. In loops this is expected
4815 to include:
4816 - the COND_EXPR which is the loop exit condition
4817 - any LABEL_EXPRs in the loop
4818 - computations that are used only for array indexing or loop control.
4819 In basic blocks we only analyze statements that are a part of some SLP
4820     instance; therefore, all the statements are relevant.  */
4821
4822 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4823 && !STMT_VINFO_LIVE_P (stmt_info))
4824 {
4825 if (vect_print_dump_info (REPORT_DETAILS))
4826 fprintf (vect_dump, "irrelevant.");
4827
4828 return true;
4829 }
4830
4831 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4832 {
4833 case vect_internal_def:
4834 break;
4835
4836 case vect_reduction_def:
4837 case vect_nested_cycle:
4838 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4839 || relevance == vect_used_in_outer_by_reduction
4840 || relevance == vect_unused_in_scope));
4841 break;
4842
4843 case vect_induction_def:
4844 case vect_constant_def:
4845 case vect_external_def:
4846 case vect_unknown_def_type:
4847 default:
4848 gcc_unreachable ();
4849 }
4850
4851 if (bb_vinfo)
4852 {
4853 gcc_assert (PURE_SLP_STMT (stmt_info));
4854
4855 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4856 if (vect_print_dump_info (REPORT_DETAILS))
4857 {
4858 fprintf (vect_dump, "get vectype for scalar type: ");
4859 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4860 }
4861
4862 vectype = get_vectype_for_scalar_type (scalar_type);
4863 if (!vectype)
4864 {
4865 if (vect_print_dump_info (REPORT_DETAILS))
4866 {
4867 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4868 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4869 }
4870 return false;
4871 }
4872
4873 if (vect_print_dump_info (REPORT_DETAILS))
4874 {
4875 fprintf (vect_dump, "vectype: ");
4876 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4877 }
4878
4879 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4880 }
4881
4882 if (STMT_VINFO_RELEVANT_P (stmt_info))
4883 {
4884 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4885 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4886 *need_to_vectorize = true;
4887 }
4888
4889 ok = true;
4890 if (!bb_vinfo
4891 && (STMT_VINFO_RELEVANT_P (stmt_info)
4892 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4893 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4894 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4895 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4896 || vectorizable_shift (stmt, NULL, NULL, NULL)
4897 || vectorizable_operation (stmt, NULL, NULL, NULL)
4898 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4899 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4900 || vectorizable_call (stmt, NULL, NULL)
4901 || vectorizable_store (stmt, NULL, NULL, NULL)
4902 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4903 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4904 else
4905 {
4906 if (bb_vinfo)
4907 ok = (vectorizable_shift (stmt, NULL, NULL, node)
4908 || vectorizable_operation (stmt, NULL, NULL, node)
4909 || vectorizable_assignment (stmt, NULL, NULL, node)
4910 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4911 || vectorizable_store (stmt, NULL, NULL, node));
4912 }
4913
4914 if (!ok)
4915 {
4916 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4917 {
4918 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4919 fprintf (vect_dump, "supported: ");
4920 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4921 }
4922
4923 return false;
4924 }
4925
4926 if (bb_vinfo)
4927 return true;
4928
4929   /* Stmts that are (also) "live" (i.e. that are used outside of the loop)
4930 need extra handling, except for vectorizable reductions. */
4931 if (STMT_VINFO_LIVE_P (stmt_info)
4932 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4933 ok = vectorizable_live_operation (stmt, NULL, NULL);
4934
4935 if (!ok)
4936 {
4937 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4938 {
4939 fprintf (vect_dump, "not vectorized: live stmt not ");
4940 fprintf (vect_dump, "supported: ");
4941 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4942 }
4943
4944 return false;
4945 }
4946
4947 return true;
4948 }
4949
4950
4951 /* Function vect_transform_stmt.
4952
4953    Create a vectorized stmt to replace STMT, and insert it at GSI.  */
4954
4955 bool
4956 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4957 bool *strided_store, slp_tree slp_node,
4958 slp_instance slp_node_instance)
4959 {
4960 bool is_store = false;
4961 gimple vec_stmt = NULL;
4962 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4963 gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
4964 bool done;
4965
4966 switch (STMT_VINFO_TYPE (stmt_info))
4967 {
4968 case type_demotion_vec_info_type:
4969 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4970 gcc_assert (done);
4971 break;
4972
4973 case type_promotion_vec_info_type:
4974 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4975 gcc_assert (done);
4976 break;
4977
4978 case type_conversion_vec_info_type:
4979 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4980 gcc_assert (done);
4981 break;
4982
4983 case induc_vec_info_type:
4984 gcc_assert (!slp_node);
4985 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4986 gcc_assert (done);
4987 break;
4988
4989 case shift_vec_info_type:
4990 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
4991 gcc_assert (done);
4992 break;
4993
4994 case op_vec_info_type:
4995 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4996 gcc_assert (done);
4997 break;
4998
4999 case assignment_vec_info_type:
5000 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
5001 gcc_assert (done);
5002 break;
5003
5004 case load_vec_info_type:
5005 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
5006 slp_node_instance);
5007 gcc_assert (done);
5008 break;
5009
5010 case store_vec_info_type:
5011 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
5012 gcc_assert (done);
5013 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
5014 {
5015 /* In case of interleaving, the whole chain is vectorized when the
5016 last store in the chain is reached. Store stmts before the last
5017          one are skipped, and their vec_stmt_info shouldn't be freed
5018 meanwhile. */
5019 *strided_store = true;
5020 if (STMT_VINFO_VEC_STMT (stmt_info))
5021 is_store = true;
5022 }
5023 else
5024 is_store = true;
5025 break;
5026
5027 case condition_vec_info_type:
5028 gcc_assert (!slp_node);
5029 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
5030 gcc_assert (done);
5031 break;
5032
5033 case call_vec_info_type:
5034 gcc_assert (!slp_node);
5035 done = vectorizable_call (stmt, gsi, &vec_stmt);
5036 stmt = gsi_stmt (*gsi);
5037 break;
5038
5039 case reduc_vec_info_type:
5040 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
5041 gcc_assert (done);
5042 break;
5043
5044 default:
5045 if (!STMT_VINFO_LIVE_P (stmt_info))
5046 {
5047 if (vect_print_dump_info (REPORT_DETAILS))
5048 fprintf (vect_dump, "stmt not supported.");
5049 gcc_unreachable ();
5050 }
5051 }
5052
5053 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
5054 is being vectorized, but outside the immediately enclosing loop. */
5055 if (vec_stmt
5056 && STMT_VINFO_LOOP_VINFO (stmt_info)
5057 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
5058 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
5059 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
5060 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
5061 || STMT_VINFO_RELEVANT (stmt_info) ==
5062 vect_used_in_outer_by_reduction))
5063 {
5064 struct loop *innerloop = LOOP_VINFO_LOOP (
5065 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
5066 imm_use_iterator imm_iter;
5067 use_operand_p use_p;
5068 tree scalar_dest;
5069 gimple exit_phi;
5070
5071 if (vect_print_dump_info (REPORT_DETAILS))
5072 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
5073
5074       /* Find the relevant loop-exit phi-node, and record the vec_stmt there
5075 (to be used when vectorizing outer-loop stmts that use the DEF of
5076 STMT). */
5077 if (gimple_code (stmt) == GIMPLE_PHI)
5078 scalar_dest = PHI_RESULT (stmt);
5079 else
5080 scalar_dest = gimple_assign_lhs (stmt);
5081
5082 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5083 {
5084 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
5085 {
5086 exit_phi = USE_STMT (use_p);
5087 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
5088 }
5089 }
5090 }
5091
5092 /* Handle stmts whose DEF is used outside the loop-nest that is
5093 being vectorized. */
5094 if (STMT_VINFO_LIVE_P (stmt_info)
5095 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
5096 {
5097 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
5098 gcc_assert (done);
5099 }
5100
5101 if (vec_stmt)
5102 {
5103 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
5104 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
5105 if (orig_stmt_in_pattern)
5106 {
5107 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
5108 /* STMT was inserted by the vectorizer to replace a computation idiom.
5109 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
5110 computed this idiom. We need to record a pointer to VEC_STMT in
5111 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
5112 documentation of vect_pattern_recog. */
5113 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
5114 {
5115 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
5116 == orig_scalar_stmt);
5117 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
5118 }
5119 }
5120 }
5121
5122 return is_store;
5123 }
5124
5125
5126 /* Remove a group of stores (for SLP or interleaving) and free their
5127    stmt_vec_info.  */
5128
5129 void
5130 vect_remove_stores (gimple first_stmt)
5131 {
5132 gimple next = first_stmt;
5133 gimple tmp;
5134 gimple_stmt_iterator next_si;
5135
5136 while (next)
5137 {
5138 /* Free the attached stmt_vec_info and remove the stmt. */
5139 next_si = gsi_for_stmt (next);
5140 gsi_remove (&next_si, true);
5141 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
5142 free_stmt_vec_info (next);
5143 next = tmp;
5144 }
5145 }
5146
5147
5148 /* Function new_stmt_vec_info.
5149
5150 Create and initialize a new stmt_vec_info struct for STMT. */
5151
5152 stmt_vec_info
5153 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
5154 bb_vec_info bb_vinfo)
5155 {
5156 stmt_vec_info res;
5157 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
5158
5159 STMT_VINFO_TYPE (res) = undef_vec_info_type;
5160 STMT_VINFO_STMT (res) = stmt;
5161 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
5162 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
5163 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
5164 STMT_VINFO_LIVE_P (res) = false;
5165 STMT_VINFO_VECTYPE (res) = NULL;
5166 STMT_VINFO_VEC_STMT (res) = NULL;
5167 STMT_VINFO_VECTORIZABLE (res) = true;
5168 STMT_VINFO_IN_PATTERN_P (res) = false;
5169 STMT_VINFO_RELATED_STMT (res) = NULL;
5170 STMT_VINFO_DATA_REF (res) = NULL;
5171
5172 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
5173 STMT_VINFO_DR_OFFSET (res) = NULL;
5174 STMT_VINFO_DR_INIT (res) = NULL;
5175 STMT_VINFO_DR_STEP (res) = NULL;
5176 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
5177
5178 if (gimple_code (stmt) == GIMPLE_PHI
5179 && is_loop_header_bb_p (gimple_bb (stmt)))
5180 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
5181 else
5182 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
5183
5184 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
5185 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
5186 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
5187 STMT_SLP_TYPE (res) = loop_vect;
5188 DR_GROUP_FIRST_DR (res) = NULL;
5189 DR_GROUP_NEXT_DR (res) = NULL;
5190 DR_GROUP_SIZE (res) = 0;
5191 DR_GROUP_STORE_COUNT (res) = 0;
5192 DR_GROUP_GAP (res) = 0;
5193 DR_GROUP_SAME_DR_STMT (res) = NULL;
5194 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
5195
5196 return res;
5197 }
5198
5199
5200 /* Create the vector that maps stmts to their stmt_vec_info.  */
5201
5202 void
5203 init_stmt_vec_info_vec (void)
5204 {
5205 gcc_assert (!stmt_vec_info_vec);
5206 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
5207 }
5208
5209
5210 /* Free the stmt_vec_info vector.  */
5211
5212 void
5213 free_stmt_vec_info_vec (void)
5214 {
5215 gcc_assert (stmt_vec_info_vec);
5216 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
5217 }
5218
5219
5220 /* Free stmt vectorization related info. */
5221
5222 void
5223 free_stmt_vec_info (gimple stmt)
5224 {
5225 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5226
5227 if (!stmt_info)
5228 return;
5229
5230 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
5231 set_vinfo_for_stmt (stmt, NULL);
5232 free (stmt_info);
5233 }
5234
5235
5236 /* Function get_vectype_for_scalar_type_and_size.
5237
5238 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
5239 by the target. */
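/* For example, on a target where int is 4 bytes, an int SCALAR_TYPE with
   SIZE == 16 yields a vector type with 16 / 4 == 4 units (if the target
   supports such a vector mode); with SIZE == 0 the number of units is
   derived from the target's preferred SIMD mode.  */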
5240
5241 static tree
5242 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
5243 {
5244 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
5245 enum machine_mode simd_mode;
5246 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
5247 int nunits;
5248 tree vectype;
5249
5250 if (nbytes == 0)
5251 return NULL_TREE;
5252
5253 /* We can't build a vector type of elements with alignment bigger than
5254 their size. */
5255 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
5256 return NULL_TREE;
5257
5258 /* If we'd build a vector type of elements whose mode precision doesn't
5259 match their type's precision, we'll get mismatched types on vector
5260 extracts via BIT_FIELD_REFs. This effectively means we disable
5261 vectorization of bool and/or enum types in some languages. */
5262 if (INTEGRAL_TYPE_P (scalar_type)
5263 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
5264 return NULL_TREE;
5265
5266 if (GET_MODE_CLASS (inner_mode) != MODE_INT
5267 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
5268 return NULL_TREE;
5269
5270 /* If no size was supplied, use the mode the target prefers. Otherwise
5271 look up a vector mode of the specified size. */
5272 if (size == 0)
5273 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
5274 else
5275 simd_mode = mode_for_vector (inner_mode, size / nbytes);
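  /* The number of units is however many copies of the element mode fit in
     the chosen SIMD mode; a vector of a single element is not useful.  */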
5276 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
5277 if (nunits <= 1)
5278 return NULL_TREE;
5279
5280 vectype = build_vector_type (scalar_type, nunits);
5281 if (vect_print_dump_info (REPORT_DETAILS))
5282 {
5283 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
5284 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
5285 }
5286
5287 if (!vectype)
5288 return NULL_TREE;
5289
5290 if (vect_print_dump_info (REPORT_DETAILS))
5291 {
5292 fprintf (vect_dump, "vectype: ");
5293 print_generic_expr (vect_dump, vectype, TDF_SLIM);
5294 }
5295
5296 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
5297 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
5298 {
5299 if (vect_print_dump_info (REPORT_DETAILS))
5300 fprintf (vect_dump, "mode not supported by target.");
5301 return NULL_TREE;
5302 }
5303
5304 return vectype;
5305 }
5306
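/* Vector size (in bytes) currently used for vectorization.  Zero means the
   size has not been determined yet; in that case get_vectype_for_scalar_type
   below fixes it from the target's preferred SIMD mode.  */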
5307 unsigned int current_vector_size;
5308
5309 /* Function get_vectype_for_scalar_type.
5310
5311 Returns the vector type corresponding to SCALAR_TYPE as supported
5312 by the target. */
5313
5314 tree
5315 get_vectype_for_scalar_type (tree scalar_type)
5316 {
5317 tree vectype;
5318 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
5319 current_vector_size);
5320 if (vectype
5321 && current_vector_size == 0)
5322 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
5323 return vectype;
5324 }
5325
5326 /* Function get_same_sized_vectype
5327
5328 Returns a vector type corresponding to SCALAR_TYPE that has the same
5329 size as VECTOR_TYPE, if supported by the target. */
5330
5331 tree
5332 get_same_sized_vectype (tree scalar_type, tree vector_type)
5333 {
5334 return get_vectype_for_scalar_type_and_size
5335 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
5336 }
5337
5338 /* Function vect_is_simple_use.
5339
5340 Input:
5341 LOOP_VINFO - the vect info of the loop that is being vectorized.
5342 BB_VINFO - the vect info of the basic block that is being vectorized.
5343 OPERAND - operand of a stmt in the loop or bb.
5344 Output: DEF_STMT - the defining stmt (if OPERAND is an SSA_NAME), DEF - the definition, DT - the definition type.
5345
5346 Returns whether a stmt with OPERAND can be vectorized.
5347 For loops, supportable operands are constants, loop invariants, and operands
5348 that are defined by the current iteration of the loop. Unsupportable
5349 operands are those that are defined by a previous iteration of the loop (as
5350 is the case in reduction/induction computations).
5351 For basic blocks, supportable operands are constants and bb invariants.
5352 For now, operands defined outside the basic block are not supported. */
5353
5354 bool
5355 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
5356 bb_vec_info bb_vinfo, gimple *def_stmt,
5357 tree *def, enum vect_def_type *dt)
5358 {
5359 basic_block bb;
5360 stmt_vec_info stmt_vinfo;
5361 struct loop *loop = NULL;
5362
5363 if (loop_vinfo)
5364 loop = LOOP_VINFO_LOOP (loop_vinfo);
5365
5366 *def_stmt = NULL;
5367 *def = NULL_TREE;
5368
5369 if (vect_print_dump_info (REPORT_DETAILS))
5370 {
5371 fprintf (vect_dump, "vect_is_simple_use: operand ");
5372 print_generic_expr (vect_dump, operand, TDF_SLIM);
5373 }
5374
5375 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
5376 {
5377 *dt = vect_constant_def;
5378 return true;
5379 }
5380
5381 if (is_gimple_min_invariant (operand))
5382 {
5383 *def = operand;
5384 *dt = vect_external_def;
5385 return true;
5386 }
5387
5388 if (TREE_CODE (operand) == PAREN_EXPR)
5389 {
5390 if (vect_print_dump_info (REPORT_DETAILS))
5391 fprintf (vect_dump, "non-associatable copy.");
5392 operand = TREE_OPERAND (operand, 0);
5393 }
5394
5395 if (TREE_CODE (operand) != SSA_NAME)
5396 {
5397 if (vect_print_dump_info (REPORT_DETAILS))
5398 fprintf (vect_dump, "not ssa-name.");
5399 return false;
5400 }
5401
5402 *def_stmt = SSA_NAME_DEF_STMT (operand);
5403 if (*def_stmt == NULL)
5404 {
5405 if (vect_print_dump_info (REPORT_DETAILS))
5406 fprintf (vect_dump, "no def_stmt.");
5407 return false;
5408 }
5409
5410 if (vect_print_dump_info (REPORT_DETAILS))
5411 {
5412 fprintf (vect_dump, "def_stmt: ");
5413 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
5414 }
5415
5416 /* An empty stmt is expected only in the case of a function argument
5417 (otherwise we expect a phi_node or a GIMPLE_ASSIGN). */
5418 if (gimple_nop_p (*def_stmt))
5419 {
5420 *def = operand;
5421 *dt = vect_external_def;
5422 return true;
5423 }
5424
5425 bb = gimple_bb (*def_stmt);
5426
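  /* Defs from outside the region being vectorized (outside the loop,
     outside the basic block, or a PHI result when vectorizing a basic
     block) are treated as external; otherwise use the def type recorded
     in the stmt_vec_info.  */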
5427 if ((loop && !flow_bb_inside_loop_p (loop, bb))
5428 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
5429 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
5430 *dt = vect_external_def;
5431 else
5432 {
5433 stmt_vinfo = vinfo_for_stmt (*def_stmt);
5434 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
5435 }
5436
5437 if (*dt == vect_unknown_def_type)
5438 {
5439 if (vect_print_dump_info (REPORT_DETAILS))
5440 fprintf (vect_dump, "Unsupported pattern.");
5441 return false;
5442 }
5443
5444 if (vect_print_dump_info (REPORT_DETAILS))
5445 fprintf (vect_dump, "type of def: %d.", *dt);
5446
5447 switch (gimple_code (*def_stmt))
5448 {
5449 case GIMPLE_PHI:
5450 *def = gimple_phi_result (*def_stmt);
5451 break;
5452
5453 case GIMPLE_ASSIGN:
5454 *def = gimple_assign_lhs (*def_stmt);
5455 break;
5456
5457 case GIMPLE_CALL:
5458 *def = gimple_call_lhs (*def_stmt);
5459 if (*def != NULL)
5460 break;
5461 /* FALLTHRU */
5462 default:
5463 if (vect_print_dump_info (REPORT_DETAILS))
5464 fprintf (vect_dump, "unsupported defining stmt: ");
5465 return false;
5466 }
5467
5468 return true;
5469 }
5470
5471 /* Function vect_is_simple_use_1.
5472
5473 Same as vect_is_simple_use, but also determines the vector operand
5474 type of OPERAND and stores it to *VECTYPE. If the definition of
5475 OPERAND is vect_uninitialized_def, vect_constant_def or
5476 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
5477 is responsible for computing the best suited vector type for the
5478 scalar operand. */
5479
5480 bool
5481 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
5482 bb_vec_info bb_vinfo, gimple *def_stmt,
5483 tree *def, enum vect_def_type *dt, tree *vectype)
5484 {
5485 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
5486 return false;
5487
5488 /* Now get a vector type if the def is internal, otherwise supply
5489 NULL_TREE and leave it up to the caller to figure out a proper
5490 type for the use stmt. */
5491 if (*dt == vect_internal_def
5492 || *dt == vect_induction_def
5493 || *dt == vect_reduction_def
5494 || *dt == vect_double_reduction_def
5495 || *dt == vect_nested_cycle)
5496 {
5497 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
5498 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
5499 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
5500 *vectype = STMT_VINFO_VECTYPE (stmt_info);
5501 gcc_assert (*vectype != NULL_TREE);
5502 }
5503 else if (*dt == vect_uninitialized_def
5504 || *dt == vect_constant_def
5505 || *dt == vect_external_def)
5506 *vectype = NULL_TREE;
5507 else
5508 gcc_unreachable ();
5509
5510 return true;
5511 }
5512
5513
5514 /* Function supportable_widening_operation
5515
5516 Check whether an operation represented by the code CODE is a
5517 widening operation that is supported by the target platform in
5518 vector form (i.e., when operating on arguments of type VECTYPE_IN
5519 producing a result of type VECTYPE_OUT).
5520
5521 Widening operations we currently support are NOP (CONVERT), FLOAT
5522 and WIDEN_MULT. This function checks if these operations are supported
5523 by the target platform either directly (via vector tree-codes), or via
5524 target builtins.
5525
5526 Output:
5527 - CODE1 and CODE2 are codes of vector operations to be used when
5528 vectorizing the operation, if available.
5529 - DECL1 and DECL2 are decls of target builtin functions to be used
5530 when vectorizing the operation, if available. In this case,
5531 CODE1 and CODE2 are CALL_EXPR.
5532 - MULTI_STEP_CVT determines the number of required intermediate steps in
5533 case of multi-step conversion (like char->short->int - in that case
5534 MULTI_STEP_CVT will be 1).
5535 - INTERM_TYPES contains the intermediate type required to perform the
5536 widening operation (short in the above example). */
5537
5538 bool
5539 supportable_widening_operation (enum tree_code code, gimple stmt,
5540 tree vectype_out, tree vectype_in,
5541 tree *decl1, tree *decl2,
5542 enum tree_code *code1, enum tree_code *code2,
5543 int *multi_step_cvt,
5544 VEC (tree, heap) **interm_types)
5545 {
5546 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5547 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
5548 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
5549 bool ordered_p;
5550 enum machine_mode vec_mode;
5551 enum insn_code icode1, icode2;
5552 optab optab1, optab2;
5553 tree vectype = vectype_in;
5554 tree wide_vectype = vectype_out;
5555 enum tree_code c1, c2;
5556
5557 /* The result of a vectorized widening operation usually requires two vectors
5558 (because the widened results do not fit in one vector). The
5559 vector results would normally be expected to appear in the same
5560 order as in the original scalar computation, i.e. if 8 results are
5561 generated in each vector iteration, they are to be organized as follows:
5562 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
5563
5564 However, in the special case that the result of the widening operation is
5565 used in a reduction computation only, the order doesn't matter (because
5566 when vectorizing a reduction we change the order of the computation).
5567 Some targets can take advantage of this and generate more efficient code.
5568 For example, targets like Altivec, that support widen_mult using a sequence
5569 of {mult_even,mult_odd} generate the following vectors:
5570 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
5571
5572 When vectorizing outer-loops, we execute the inner-loop sequentially
5573 (each vectorized inner-loop iteration contributes to VF outer-loop
5574 iterations in parallel). We therefore don't allow the order of the
5575 computation in the inner-loop to be changed during outer-loop vectorization. */
5576
5577 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
5578 && !nested_in_vect_loop_p (vect_loop, stmt))
5579 ordered_p = false;
5580 else
5581 ordered_p = true;
5582
5583 if (!ordered_p
5584 && code == WIDEN_MULT_EXPR
5585 && targetm.vectorize.builtin_mul_widen_even
5586 && targetm.vectorize.builtin_mul_widen_even (vectype)
5587 && targetm.vectorize.builtin_mul_widen_odd
5588 && targetm.vectorize.builtin_mul_widen_odd (vectype))
5589 {
5590 if (vect_print_dump_info (REPORT_DETAILS))
5591 fprintf (vect_dump, "Unordered widening operation detected.");
5592
5593 *code1 = *code2 = CALL_EXPR;
5594 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
5595 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
5596 return true;
5597 }
5598
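  /* The HI/LO variants operate on the high and low halves of the input
     vector; which of the two produces the first half of the scalar results
     depends on the target's endianness, so the codes are swapped
     accordingly.  */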
5599 switch (code)
5600 {
5601 case WIDEN_MULT_EXPR:
5602 if (BYTES_BIG_ENDIAN)
5603 {
5604 c1 = VEC_WIDEN_MULT_HI_EXPR;
5605 c2 = VEC_WIDEN_MULT_LO_EXPR;
5606 }
5607 else
5608 {
5609 c2 = VEC_WIDEN_MULT_HI_EXPR;
5610 c1 = VEC_WIDEN_MULT_LO_EXPR;
5611 }
5612 break;
5613
5614 CASE_CONVERT:
5615 if (BYTES_BIG_ENDIAN)
5616 {
5617 c1 = VEC_UNPACK_HI_EXPR;
5618 c2 = VEC_UNPACK_LO_EXPR;
5619 }
5620 else
5621 {
5622 c2 = VEC_UNPACK_HI_EXPR;
5623 c1 = VEC_UNPACK_LO_EXPR;
5624 }
5625 break;
5626
5627 case FLOAT_EXPR:
5628 if (BYTES_BIG_ENDIAN)
5629 {
5630 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
5631 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
5632 }
5633 else
5634 {
5635 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
5636 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
5637 }
5638 break;
5639
5640 case FIX_TRUNC_EXPR:
5641 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
5642 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
5643 computing the operation. */
5644 return false;
5645
5646 default:
5647 gcc_unreachable ();
5648 }
5649
5650 if (code == FIX_TRUNC_EXPR)
5651 {
5652 /* The signedness is determined from the output operand. */
5653 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5654 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
5655 }
5656 else
5657 {
5658 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5659 optab2 = optab_for_tree_code (c2, vectype, optab_default);
5660 }
5661
5662 if (!optab1 || !optab2)
5663 return false;
5664
5665 vec_mode = TYPE_MODE (vectype);
5666 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
5667 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
5668 return false;
5669
5670 /* Check if it's a multi-step conversion that can be done using intermediate
5671 types. */
5672 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
5673 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
5674 {
5675 int i;
5676 tree prev_type = vectype, intermediate_type;
5677 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5678 optab optab3, optab4;
5679
5680 if (!CONVERT_EXPR_CODE_P (code))
5681 return false;
5682
5683 *code1 = c1;
5684 *code2 = c2;
5685
5686 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5687 intermediate steps in the promotion sequence. We try up to
5688 MAX_INTERM_CVT_STEPS steps to get to WIDE_VECTYPE, and fail if we do
5689 not. */
5690 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5691 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5692 {
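          /* Each step introduces one intermediate type, taken from the
             result mode of the instructions found so far; we succeed once
             that mode matches WIDE_VECTYPE.  */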
5693 intermediate_mode = insn_data[icode1].operand[0].mode;
5694 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5695 TYPE_UNSIGNED (prev_type));
5696 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
5697 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
5698
5699 if (!optab3 || !optab4
5700 || ((icode1 = optab_handler (optab1, prev_mode))
5701 == CODE_FOR_nothing)
5702 || insn_data[icode1].operand[0].mode != intermediate_mode
5703 || ((icode2 = optab_handler (optab2, prev_mode))
5704 == CODE_FOR_nothing)
5705 || insn_data[icode2].operand[0].mode != intermediate_mode
5706 || ((icode1 = optab_handler (optab3, intermediate_mode))
5707 == CODE_FOR_nothing)
5708 || ((icode2 = optab_handler (optab4, intermediate_mode))
5709 == CODE_FOR_nothing))
5710 return false;
5711
5712 VEC_quick_push (tree, *interm_types, intermediate_type);
5713 (*multi_step_cvt)++;
5714
5715 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
5716 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
5717 return true;
5718
5719 prev_type = intermediate_type;
5720 prev_mode = intermediate_mode;
5721 }
5722
5723 return false;
5724 }
5725
5726 *code1 = c1;
5727 *code2 = c2;
5728 return true;
5729 }
5730
5731
5732 /* Function supportable_narrowing_operation
5733
5734 Check whether an operation represented by the code CODE is a
5735 narrowing operation that is supported by the target platform in
5736 vector form (i.e., when operating on arguments of type VECTYPE_IN
5737 and producing a result of type VECTYPE_OUT).
5738
5739 Narrowing operations we currently support are NOP (CONVERT) and
5740 FIX_TRUNC. This function checks if these operations are supported by
5741 the target platform directly via vector tree-codes.
5742
5743 Output:
5744 - CODE1 is the code of a vector operation to be used when
5745 vectorizing the operation, if available.
5746 - MULTI_STEP_CVT determines the number of required intermediate steps in
5747 case of multi-step conversion (like int->short->char - in that case
5748 MULTI_STEP_CVT will be 1).
5749 - INTERM_TYPES contains the intermediate type required to perform the
5750 narrowing operation (short in the above example). */
5751
5752 bool
5753 supportable_narrowing_operation (enum tree_code code,
5754 tree vectype_out, tree vectype_in,
5755 enum tree_code *code1, int *multi_step_cvt,
5756 VEC (tree, heap) **interm_types)
5757 {
5758 enum machine_mode vec_mode;
5759 enum insn_code icode1;
5760 optab optab1, interm_optab;
5761 tree vectype = vectype_in;
5762 tree narrow_vectype = vectype_out;
5763 enum tree_code c1;
5764 tree intermediate_type, prev_type;
5765 int i;
5766
5767 switch (code)
5768 {
5769 CASE_CONVERT:
5770 c1 = VEC_PACK_TRUNC_EXPR;
5771 break;
5772
5773 case FIX_TRUNC_EXPR:
5774 c1 = VEC_PACK_FIX_TRUNC_EXPR;
5775 break;
5776
5777 case FLOAT_EXPR:
5778 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5779 tree code and optabs used for computing the operation. */
5780 return false;
5781
5782 default:
5783 gcc_unreachable ();
5784 }
5785
5786 if (code == FIX_TRUNC_EXPR)
5787 /* The signedness is determined from the output operand. */
5788 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5789 else
5790 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5791
5792 if (!optab1)
5793 return false;
5794
5795 vec_mode = TYPE_MODE (vectype);
5796 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
5797 return false;
5798
5799 /* Check if it's a multi-step conversion that can be done using intermediate
5800 types. */
5801 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5802 {
5803 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5804
5805 *code1 = c1;
5806 prev_type = vectype;
5807 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5808 intermediate steps in the narrowing sequence. We try up to
5809 MAX_INTERM_CVT_STEPS steps to get to NARROW_VECTYPE, and fail if we do
5810 not. */
5811 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5812 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5813 {
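          /* As in the widening case, each step adds one intermediate type,
             taken from the result mode of the previous pack instruction,
             until that mode matches NARROW_VECTYPE.  */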
5814 intermediate_mode = insn_data[icode1].operand[0].mode;
5815 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5816 TYPE_UNSIGNED (prev_type));
5817 interm_optab = optab_for_tree_code (c1, intermediate_type,
5818 optab_default);
5819 if (!interm_optab
5820 || ((icode1 = optab_handler (optab1, prev_mode))
5821 == CODE_FOR_nothing)
5822 || insn_data[icode1].operand[0].mode != intermediate_mode
5823 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
5824 == CODE_FOR_nothing))
5825 return false;
5826
5827 VEC_quick_push (tree, *interm_types, intermediate_type);
5828 (*multi_step_cvt)++;
5829
5830 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5831 return true;
5832
5833 prev_type = intermediate_type;
5834 prev_mode = intermediate_mode;
5835 }
5836
5837 return false;
5838 }
5839
5840 *code1 = c1;
5841 return true;
5842 }