1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "tree-pretty-print.h"
32 #include "gimple-pretty-print.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "cfglayout.h"
37 #include "expr.h"
38 #include "recog.h"
39 #include "optabs.h"
40 #include "diagnostic-core.h"
41 #include "tree-vectorizer.h"
42 #include "langhooks.h"
43
44
45 /* Return a variable of type ELEM_TYPE[NELEMS]. */
46
47 static tree
48 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
49 {
50 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
51 "vect_array");
52 }
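/* For illustration (hypothetical values, not part of the code above): with
   ELEM_TYPE = vector(4) float and NELEMS = 2, this returns a temporary
   roughly equivalent to

     vector(4) float vect_array[2];

   from which read_vector_array and write_vector_array below access one
   vector at a time, as used by the load/store-lanes code paths.  */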
53
54 /* ARRAY is an array of vectors created by create_vector_array.
55 Return an SSA_NAME for the vector in index N. The reference
56 is part of the vectorization of STMT and the vector is associated
57 with scalar destination SCALAR_DEST. */
58
59 static tree
60 read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
61 tree array, unsigned HOST_WIDE_INT n)
62 {
63 tree vect_type, vect, vect_name, array_ref;
64 gimple new_stmt;
65
66 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
67 vect_type = TREE_TYPE (TREE_TYPE (array));
68 vect = vect_create_destination_var (scalar_dest, vect_type);
69 array_ref = build4 (ARRAY_REF, vect_type, array,
70 build_int_cst (size_type_node, n),
71 NULL_TREE, NULL_TREE);
72
73 new_stmt = gimple_build_assign (vect, array_ref);
74 vect_name = make_ssa_name (vect, new_stmt);
75 gimple_assign_set_lhs (new_stmt, vect_name);
76 vect_finish_stmt_generation (stmt, new_stmt, gsi);
77 mark_symbols_for_renaming (new_stmt);
78
79 return vect_name;
80 }
81
82 /* ARRAY is an array of vectors created by create_vector_array.
83 Emit code to store SSA_NAME VECT in index N of the array.
84 The store is part of the vectorization of STMT. */
85
86 static void
87 write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
88 tree array, unsigned HOST_WIDE_INT n)
89 {
90 tree array_ref;
91 gimple new_stmt;
92
93 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
94 build_int_cst (size_type_node, n),
95 NULL_TREE, NULL_TREE);
96
97 new_stmt = gimple_build_assign (array_ref, vect);
98 vect_finish_stmt_generation (stmt, new_stmt, gsi);
99 mark_symbols_for_renaming (new_stmt);
100 }
101
102 /* PTR is a pointer to an array of type TYPE. Return a representation
103 of *PTR. The memory reference replaces those in FIRST_DR
104 (and its group). */
105
106 static tree
107 create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
108 {
109 struct ptr_info_def *pi;
110 tree mem_ref, alias_ptr_type;
111
112 alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
113 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
114 /* Arrays have the same alignment as their type. */
115 pi = get_ptr_info (ptr);
116 pi->align = TYPE_ALIGN_UNIT (type);
117 pi->misalign = 0;
118 return mem_ref;
119 }
120
121 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
122
123 /* Function vect_mark_relevant.
124
125 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
126
127 static void
128 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
129 enum vect_relevant relevant, bool live_p)
130 {
131 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
132 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
133 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
134
135 if (vect_print_dump_info (REPORT_DETAILS))
136 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
137
138 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
139 {
140 gimple pattern_stmt;
141
142 /* This is the last stmt in a sequence that was detected as a
143 pattern that can potentially be vectorized. Don't mark the stmt
144 as relevant/live because it's not going to be vectorized.
145 Instead mark the pattern-stmt that replaces it. */
146
147 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
148
149 if (vect_print_dump_info (REPORT_DETAILS))
150 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
151 stmt_info = vinfo_for_stmt (pattern_stmt);
152 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
153 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
154 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
155 stmt = pattern_stmt;
156 }
157
158 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
159 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
160 STMT_VINFO_RELEVANT (stmt_info) = relevant;
161
162 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
163 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
164 {
165 if (vect_print_dump_info (REPORT_DETAILS))
166 fprintf (vect_dump, "already marked relevant/live.");
167 return;
168 }
169
170 VEC_safe_push (gimple, heap, *worklist, stmt);
171 }
172
173
174 /* Function vect_stmt_relevant_p.
175
176    Return true if STMT, in the loop that is represented by LOOP_VINFO, is
177 "relevant for vectorization".
178
179 A stmt is considered "relevant for vectorization" if:
180 - it has uses outside the loop.
181 - it has vdefs (it alters memory).
182    - it is a control stmt in the loop (except for the exit condition).
183
184 CHECKME: what other side effects would the vectorizer allow? */
185
186 static bool
187 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
188 enum vect_relevant *relevant, bool *live_p)
189 {
190 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
191 ssa_op_iter op_iter;
192 imm_use_iterator imm_iter;
193 use_operand_p use_p;
194 def_operand_p def_p;
195
196 *relevant = vect_unused_in_scope;
197 *live_p = false;
198
199 /* cond stmt other than loop exit cond. */
200 if (is_ctrl_stmt (stmt)
201 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
202 != loop_exit_ctrl_vec_info_type)
203 *relevant = vect_used_in_scope;
204
205 /* changing memory. */
206 if (gimple_code (stmt) != GIMPLE_PHI)
207 if (gimple_vdef (stmt))
208 {
209 if (vect_print_dump_info (REPORT_DETAILS))
210 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
211 *relevant = vect_used_in_scope;
212 }
213
214 /* uses outside the loop. */
215 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
216 {
217 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
218 {
219 basic_block bb = gimple_bb (USE_STMT (use_p));
220 if (!flow_bb_inside_loop_p (loop, bb))
221 {
222 if (vect_print_dump_info (REPORT_DETAILS))
223 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
224
225 if (is_gimple_debug (USE_STMT (use_p)))
226 continue;
227
228 /* We expect all such uses to be in the loop exit phis
229 (because of loop closed form) */
230 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
231 gcc_assert (bb == single_exit (loop)->dest);
232
233 *live_p = true;
234 }
235 }
236 }
237
238 return (*live_p || *relevant);
239 }
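/* A minimal sketch of the criteria above (hypothetical scalar loop, for
   illustration only):

     for (i = 0; i < n; i++)
       {
         s = a[i] + b[i];     <-- defines s, no vdef
         c[i] = s;            <-- has a vdef, so it is relevant
       }
     ... = s;                 <-- use of s outside the loop, so the
                                  definition of s is also "live"

   The use of s outside the loop is expected to appear through the
   loop-exit phi, per the assertions above.  */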
240
241
242 /* Function exist_non_indexing_operands_for_use_p
243
244 USE is one of the uses attached to STMT. Check if USE is
245 used in STMT for anything other than indexing an array. */
246
247 static bool
248 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
249 {
250 tree operand;
251 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
252
253 /* USE corresponds to some operand in STMT. If there is no data
254 reference in STMT, then any operand that corresponds to USE
255 is not indexing an array. */
256 if (!STMT_VINFO_DATA_REF (stmt_info))
257 return true;
258
259   /* STMT has a data_ref.  FORNOW this means that it is in one of
260 the following forms:
261 -1- ARRAY_REF = var
262 -2- var = ARRAY_REF
263 (This should have been verified in analyze_data_refs).
264
265 'var' in the second case corresponds to a def, not a use,
266 so USE cannot correspond to any operands that are not used
267 for array indexing.
268
269 Therefore, all we need to check is if STMT falls into the
270 first case, and whether var corresponds to USE. */
271
272 if (!gimple_assign_copy_p (stmt))
273 return false;
274 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
275 return false;
276 operand = gimple_assign_rhs1 (stmt);
277 if (TREE_CODE (operand) != SSA_NAME)
278 return false;
279
280 if (operand == use)
281 return true;
282
283 return false;
284 }
285
286
287 /*
288 Function process_use.
289
290 Inputs:
291 - a USE in STMT in a loop represented by LOOP_VINFO
292 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
293 that defined USE. This is done by calling mark_relevant and passing it
294 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
295
296 Outputs:
297 Generally, LIVE_P and RELEVANT are used to define the liveness and
298 relevance info of the DEF_STMT of this USE:
299 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
300 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
301 Exceptions:
302 - case 1: If USE is used only for address computations (e.g. array indexing),
303 which does not need to be directly vectorized, then the liveness/relevance
304 of the respective DEF_STMT is left unchanged.
305 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
306      skip DEF_STMT because it has already been processed.
307 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
308 be modified accordingly.
309
310 Return true if everything is as expected. Return false otherwise. */
311
312 static bool
313 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
314 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
315 {
316 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
317 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
318 stmt_vec_info dstmt_vinfo;
319 basic_block bb, def_bb;
320 tree def;
321 gimple def_stmt;
322 enum vect_def_type dt;
323
324 /* case 1: we are only interested in uses that need to be vectorized. Uses
325 that are used for address computation are not considered relevant. */
326 if (!exist_non_indexing_operands_for_use_p (use, stmt))
327 return true;
328
329 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
330 {
331 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
332 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
333 return false;
334 }
335
336 if (!def_stmt || gimple_nop_p (def_stmt))
337 return true;
338
339 def_bb = gimple_bb (def_stmt);
340 if (!flow_bb_inside_loop_p (loop, def_bb))
341 {
342 if (vect_print_dump_info (REPORT_DETAILS))
343 fprintf (vect_dump, "def_stmt is out of loop.");
344 return true;
345 }
346
347 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
348 DEF_STMT must have already been processed, because this should be the
349 only way that STMT, which is a reduction-phi, was put in the worklist,
350 as there should be no other uses for DEF_STMT in the loop. So we just
351 check that everything is as expected, and we are done. */
352 dstmt_vinfo = vinfo_for_stmt (def_stmt);
353 bb = gimple_bb (stmt);
354 if (gimple_code (stmt) == GIMPLE_PHI
355 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
356 && gimple_code (def_stmt) != GIMPLE_PHI
357 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
358 && bb->loop_father == def_bb->loop_father)
359 {
360 if (vect_print_dump_info (REPORT_DETAILS))
361 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
362 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
363 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
364 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
365 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
366 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
367 return true;
368 }
369
370 /* case 3a: outer-loop stmt defining an inner-loop stmt:
371 outer-loop-header-bb:
372 d = def_stmt
373 inner-loop:
374 stmt # use (d)
375 outer-loop-tail-bb:
376 ... */
377 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
378 {
379 if (vect_print_dump_info (REPORT_DETAILS))
380 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
381
382 switch (relevant)
383 {
384 case vect_unused_in_scope:
385 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
386 vect_used_in_scope : vect_unused_in_scope;
387 break;
388
389 case vect_used_in_outer_by_reduction:
390 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
391 relevant = vect_used_by_reduction;
392 break;
393
394 case vect_used_in_outer:
395 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
396 relevant = vect_used_in_scope;
397 break;
398
399 case vect_used_in_scope:
400 break;
401
402 default:
403 gcc_unreachable ();
404 }
405 }
406
407 /* case 3b: inner-loop stmt defining an outer-loop stmt:
408 outer-loop-header-bb:
409 ...
410 inner-loop:
411 d = def_stmt
412 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
413 stmt # use (d) */
414 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
415 {
416 if (vect_print_dump_info (REPORT_DETAILS))
417 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
418
419 switch (relevant)
420 {
421 case vect_unused_in_scope:
422 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
423 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
424 vect_used_in_outer_by_reduction : vect_unused_in_scope;
425 break;
426
427 case vect_used_by_reduction:
428 relevant = vect_used_in_outer_by_reduction;
429 break;
430
431 case vect_used_in_scope:
432 relevant = vect_used_in_outer;
433 break;
434
435 default:
436 gcc_unreachable ();
437 }
438 }
439
440 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
441 return true;
442 }
443
444
445 /* Function vect_mark_stmts_to_be_vectorized.
446
447 Not all stmts in the loop need to be vectorized. For example:
448
449 for i...
450 for j...
451 1. T0 = i + j
452 2. T1 = a[T0]
453
454 3. j = j + 1
455
456    Stmts 1 and 3 do not need to be vectorized, because loop control and
457 addressing of vectorized data-refs are handled differently.
458
459 This pass detects such stmts. */
460
461 bool
462 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
463 {
464 VEC(gimple,heap) *worklist;
465 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
466 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
467 unsigned int nbbs = loop->num_nodes;
468 gimple_stmt_iterator si;
469 gimple stmt;
470 unsigned int i;
471 stmt_vec_info stmt_vinfo;
472 basic_block bb;
473 gimple phi;
474 bool live_p;
475 enum vect_relevant relevant, tmp_relevant;
476 enum vect_def_type def_type;
477
478 if (vect_print_dump_info (REPORT_DETAILS))
479 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
480
481 worklist = VEC_alloc (gimple, heap, 64);
482
483 /* 1. Init worklist. */
484 for (i = 0; i < nbbs; i++)
485 {
486 bb = bbs[i];
487 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
488 {
489 phi = gsi_stmt (si);
490 if (vect_print_dump_info (REPORT_DETAILS))
491 {
492 fprintf (vect_dump, "init: phi relevant? ");
493 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
494 }
495
496 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
497 vect_mark_relevant (&worklist, phi, relevant, live_p);
498 }
499 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
500 {
501 stmt = gsi_stmt (si);
502 if (vect_print_dump_info (REPORT_DETAILS))
503 {
504 fprintf (vect_dump, "init: stmt relevant? ");
505 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
506 }
507
508 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
509 vect_mark_relevant (&worklist, stmt, relevant, live_p);
510 }
511 }
512
513 /* 2. Process_worklist */
514 while (VEC_length (gimple, worklist) > 0)
515 {
516 use_operand_p use_p;
517 ssa_op_iter iter;
518
519 stmt = VEC_pop (gimple, worklist);
520 if (vect_print_dump_info (REPORT_DETAILS))
521 {
522 fprintf (vect_dump, "worklist: examine stmt: ");
523 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
524 }
525
526 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
527 (DEF_STMT) as relevant/irrelevant and live/dead according to the
528 liveness and relevance properties of STMT. */
529 stmt_vinfo = vinfo_for_stmt (stmt);
530 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
531 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
532
533 /* Generally, the liveness and relevance properties of STMT are
534 propagated as is to the DEF_STMTs of its USEs:
535 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
536 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
537
538 One exception is when STMT has been identified as defining a reduction
539 variable; in this case we set the liveness/relevance as follows:
540 live_p = false
541 relevant = vect_used_by_reduction
542 This is because we distinguish between two kinds of relevant stmts -
543 those that are used by a reduction computation, and those that are
544 (also) used by a regular computation. This allows us later on to
545 identify stmts that are used solely by a reduction, and therefore the
546 order of the results that they produce does not have to be kept. */
547
548 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
549 tmp_relevant = relevant;
550 switch (def_type)
551 {
552 case vect_reduction_def:
553 switch (tmp_relevant)
554 {
555 case vect_unused_in_scope:
556 relevant = vect_used_by_reduction;
557 break;
558
559 case vect_used_by_reduction:
560 if (gimple_code (stmt) == GIMPLE_PHI)
561 break;
562 /* fall through */
563
564 default:
565 if (vect_print_dump_info (REPORT_DETAILS))
566 fprintf (vect_dump, "unsupported use of reduction.");
567
568 VEC_free (gimple, heap, worklist);
569 return false;
570 }
571
572 live_p = false;
573 break;
574
575 case vect_nested_cycle:
576 if (tmp_relevant != vect_unused_in_scope
577 && tmp_relevant != vect_used_in_outer_by_reduction
578 && tmp_relevant != vect_used_in_outer)
579 {
580 if (vect_print_dump_info (REPORT_DETAILS))
581 fprintf (vect_dump, "unsupported use of nested cycle.");
582
583 VEC_free (gimple, heap, worklist);
584 return false;
585 }
586
587 live_p = false;
588 break;
589
590 case vect_double_reduction_def:
591 if (tmp_relevant != vect_unused_in_scope
592 && tmp_relevant != vect_used_by_reduction)
593 {
594 if (vect_print_dump_info (REPORT_DETAILS))
595 fprintf (vect_dump, "unsupported use of double reduction.");
596
597 VEC_free (gimple, heap, worklist);
598 return false;
599 }
600
601 live_p = false;
602 break;
603
604 default:
605 break;
606 }
607
608 if (is_pattern_stmt_p (vinfo_for_stmt (stmt)))
609 {
610 /* Pattern statements are not inserted into the code, so
611 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
612 have to scan the RHS or function arguments instead. */
613 if (is_gimple_assign (stmt))
614 {
615 for (i = 1; i < gimple_num_ops (stmt); i++)
616 {
617 tree op = gimple_op (stmt, i);
618 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
619 &worklist))
620 {
621 VEC_free (gimple, heap, worklist);
622 return false;
623 }
624 }
625 }
626 else if (is_gimple_call (stmt))
627 {
628 for (i = 0; i < gimple_call_num_args (stmt); i++)
629 {
630 tree arg = gimple_call_arg (stmt, i);
631 if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
632 &worklist))
633 {
634 VEC_free (gimple, heap, worklist);
635 return false;
636 }
637 }
638 }
639 }
640 else
641 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
642 {
643 tree op = USE_FROM_PTR (use_p);
644 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
645 &worklist))
646 {
647 VEC_free (gimple, heap, worklist);
648 return false;
649 }
650 }
651 } /* while worklist */
652
653 VEC_free (gimple, heap, worklist);
654 return true;
655 }
656
657
658 /* Get the cost of a statement kind by calling the target's vectorization cost hook.  */
659
660 static inline
661 int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
662 {
663 tree dummy_type = NULL;
664 int dummy = 0;
665
666 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
667 dummy_type, dummy);
668 }
669
670
671 /* Get cost for STMT. */
672
673 int
674 cost_for_stmt (gimple stmt)
675 {
676 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
677
678 switch (STMT_VINFO_TYPE (stmt_info))
679 {
680 case load_vec_info_type:
681 return vect_get_stmt_cost (scalar_load);
682 case store_vec_info_type:
683 return vect_get_stmt_cost (scalar_store);
684 case op_vec_info_type:
685 case condition_vec_info_type:
686 case assignment_vec_info_type:
687 case reduc_vec_info_type:
688 case induc_vec_info_type:
689 case type_promotion_vec_info_type:
690 case type_demotion_vec_info_type:
691 case type_conversion_vec_info_type:
692 case call_vec_info_type:
693 return vect_get_stmt_cost (scalar_stmt);
694 case undef_vec_info_type:
695 default:
696 gcc_unreachable ();
697 }
698 }
699
700 /* Function vect_model_simple_cost.
701
702 Models cost for simple operations, i.e. those that only emit ncopies of a
703 single op. Right now, this does not account for multiple insns that could
704 be generated for the single vector op. We will handle that shortly. */
705
706 void
707 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
708 enum vect_def_type *dt, slp_tree slp_node)
709 {
710 int i;
711 int inside_cost = 0, outside_cost = 0;
712
713 /* The SLP costs were already calculated during SLP tree build. */
714 if (PURE_SLP_STMT (stmt_info))
715 return;
716
717 inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);
718
719 /* FORNOW: Assuming maximum 2 args per stmts. */
720 for (i = 0; i < 2; i++)
721 {
722 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
723 outside_cost += vect_get_stmt_cost (vector_stmt);
724 }
725
726 if (vect_print_dump_info (REPORT_COST))
727 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
728 "outside_cost = %d .", inside_cost, outside_cost);
729
730 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
731 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
732 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
733 }
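/* Worked example (hypothetical numbers): for a simple vector operation with
   NCOPIES = 2 and one loop-invariant (external) operand, the code above
   records

     inside_cost  = 2 * vect_get_stmt_cost (vector_stmt)
     outside_cost = 1 * vect_get_stmt_cost (vector_stmt)

   i.e. two copies of the vector op inside the loop, plus one statement
   outside the loop to build the invariant vector operand.  */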
734
735
736 /* Function vect_cost_strided_group_size
737
738 For strided load or store, return the group_size only if it is the first
739 load or store of a group, else return 1. This ensures that group size is
740 only returned once per group. */
741
742 static int
743 vect_cost_strided_group_size (stmt_vec_info stmt_info)
744 {
745 gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
746
747 if (first_stmt == STMT_VINFO_STMT (stmt_info))
748 return GROUP_SIZE (stmt_info);
749
750 return 1;
751 }
752
753
754 /* Function vect_model_store_cost
755
756 Models cost for stores. In the case of strided accesses, one access
757 has the overhead of the strided access attributed to it. */
758
759 void
760 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
761 bool store_lanes_p, enum vect_def_type dt,
762 slp_tree slp_node)
763 {
764 int group_size;
765 unsigned int inside_cost = 0, outside_cost = 0;
766 struct data_reference *first_dr;
767 gimple first_stmt;
768
769 /* The SLP costs were already calculated during SLP tree build. */
770 if (PURE_SLP_STMT (stmt_info))
771 return;
772
773 if (dt == vect_constant_def || dt == vect_external_def)
774 outside_cost = vect_get_stmt_cost (scalar_to_vec);
775
776 /* Strided access? */
777 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
778 {
779 if (slp_node)
780 {
781 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
782 group_size = 1;
783 }
784 else
785 {
786 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
787 group_size = vect_cost_strided_group_size (stmt_info);
788 }
789
790 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
791 }
792 /* Not a strided access. */
793 else
794 {
795 group_size = 1;
796 first_dr = STMT_VINFO_DATA_REF (stmt_info);
797 }
798
799 /* We assume that the cost of a single store-lanes instruction is
800 equivalent to the cost of GROUP_SIZE separate stores. If a strided
801 access is instead being provided by a permute-and-store operation,
802 include the cost of the permutes. */
803 if (!store_lanes_p && group_size > 1)
804 {
805       /* Uses high and low interleave operations for each needed permute.  */
806 inside_cost = ncopies * exact_log2(group_size) * group_size
807 * vect_get_stmt_cost (vector_stmt);
808
809 if (vect_print_dump_info (REPORT_COST))
810 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
811 group_size);
812
813 }
814
815 /* Costs of the stores. */
816 vect_get_store_cost (first_dr, ncopies, &inside_cost);
817
818 if (vect_print_dump_info (REPORT_COST))
819 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
820 "outside_cost = %d .", inside_cost, outside_cost);
821
822 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
823 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
824 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
825 }
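/* Worked example (hypothetical numbers): an interleaved group of
   GROUP_SIZE = 4 stores with NCOPIES = 1, vectorized without a store-lanes
   instruction, gets a permute cost of

     1 * exact_log2 (4) * 4 * vect_get_stmt_cost (vector_stmt)
       = 8 * vect_get_stmt_cost (vector_stmt)

   in addition to the store cost added by vect_get_store_cost.  */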
826
827
828 /* Calculate cost of DR's memory access. */
829 void
830 vect_get_store_cost (struct data_reference *dr, int ncopies,
831 unsigned int *inside_cost)
832 {
833 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
834
835 switch (alignment_support_scheme)
836 {
837 case dr_aligned:
838 {
839 *inside_cost += ncopies * vect_get_stmt_cost (vector_store);
840
841 if (vect_print_dump_info (REPORT_COST))
842 fprintf (vect_dump, "vect_model_store_cost: aligned.");
843
844 break;
845 }
846
847 case dr_unaligned_supported:
848 {
849 gimple stmt = DR_STMT (dr);
850 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
851 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
852
853 /* Here, we assign an additional cost for the unaligned store. */
854 *inside_cost += ncopies
855 * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
856 vectype, DR_MISALIGNMENT (dr));
857
858 if (vect_print_dump_info (REPORT_COST))
859 fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
860 "hardware.");
861
862 break;
863 }
864
865 default:
866 gcc_unreachable ();
867 }
868 }
869
870
871 /* Function vect_model_load_cost
872
873    Models cost for loads.  In the case of strided accesses, one access in the
874    group has the overhead of the strided access attributed to it.  Since unaligned
875 accesses are supported for loads, we also account for the costs of the
876 access scheme chosen. */
877
878 void
879 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
880 slp_tree slp_node)
881 {
882 int group_size;
883 gimple first_stmt;
884 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
885 unsigned int inside_cost = 0, outside_cost = 0;
886
887 /* The SLP costs were already calculated during SLP tree build. */
888 if (PURE_SLP_STMT (stmt_info))
889 return;
890
891 /* Strided accesses? */
892 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
893 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && first_stmt && !slp_node)
894 {
895 group_size = vect_cost_strided_group_size (stmt_info);
896 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
897 }
898 /* Not a strided access. */
899 else
900 {
901 group_size = 1;
902 first_dr = dr;
903 }
904
905 /* We assume that the cost of a single load-lanes instruction is
906 equivalent to the cost of GROUP_SIZE separate loads. If a strided
907 access is instead being provided by a load-and-permute operation,
908 include the cost of the permutes. */
909 if (!load_lanes_p && group_size > 1)
910 {
911       /* Uses even and odd extract operations for each needed permute.  */
912 inside_cost = ncopies * exact_log2(group_size) * group_size
913 * vect_get_stmt_cost (vector_stmt);
914
915 if (vect_print_dump_info (REPORT_COST))
916 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
917 group_size);
918 }
919
920 /* The loads themselves. */
921 vect_get_load_cost (first_dr, ncopies,
922 ((!STMT_VINFO_STRIDED_ACCESS (stmt_info)) || group_size > 1
923 || slp_node),
924 &inside_cost, &outside_cost);
925
926 if (vect_print_dump_info (REPORT_COST))
927 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
928 "outside_cost = %d .", inside_cost, outside_cost);
929
930 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
931 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
932 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
933 }
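/* Worked example (hypothetical numbers): a strided group of GROUP_SIZE = 2
   loads with NCOPIES = 2, implemented with even/odd extraction rather than
   a load-lanes instruction, gets a permute cost of

     2 * exact_log2 (2) * 2 * vect_get_stmt_cost (vector_stmt)
       = 4 * vect_get_stmt_cost (vector_stmt)

   plus whatever vect_get_load_cost adds for the loads themselves, which
   depends on the alignment scheme chosen.  */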
934
935
936 /* Calculate cost of DR's memory access. */
937 void
938 vect_get_load_cost (struct data_reference *dr, int ncopies,
939 bool add_realign_cost, unsigned int *inside_cost,
940 unsigned int *outside_cost)
941 {
942 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
943
944 switch (alignment_support_scheme)
945 {
946 case dr_aligned:
947 {
948 *inside_cost += ncopies * vect_get_stmt_cost (vector_load);
949
950 if (vect_print_dump_info (REPORT_COST))
951 fprintf (vect_dump, "vect_model_load_cost: aligned.");
952
953 break;
954 }
955 case dr_unaligned_supported:
956 {
957 gimple stmt = DR_STMT (dr);
958 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
959 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
960
961 /* Here, we assign an additional cost for the unaligned load. */
962 *inside_cost += ncopies
963 * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
964 vectype, DR_MISALIGNMENT (dr));
965 if (vect_print_dump_info (REPORT_COST))
966 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
967 "hardware.");
968
969 break;
970 }
971 case dr_explicit_realign:
972 {
973 *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
974 + vect_get_stmt_cost (vector_stmt));
975
976 /* FIXME: If the misalignment remains fixed across the iterations of
977 the containing loop, the following cost should be added to the
978 outside costs. */
979 if (targetm.vectorize.builtin_mask_for_load)
980 *inside_cost += vect_get_stmt_cost (vector_stmt);
981
982 break;
983 }
984 case dr_explicit_realign_optimized:
985 {
986 if (vect_print_dump_info (REPORT_COST))
987 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
988 "pipelined.");
989
990 /* Unaligned software pipeline has a load of an address, an initial
991 load, and possibly a mask operation to "prime" the loop. However,
992 if this is an access in a group of loads, which provide strided
993 access, then the above cost should only be considered for one
994 access in the group. Inside the loop, there is a load op
995 and a realignment op. */
996
997 if (add_realign_cost)
998 {
999 *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
1000 if (targetm.vectorize.builtin_mask_for_load)
1001 *outside_cost += vect_get_stmt_cost (vector_stmt);
1002 }
1003
1004 *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
1005 + vect_get_stmt_cost (vector_stmt));
1006 break;
1007 }
1008
1009 default:
1010 gcc_unreachable ();
1011 }
1012 }
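/* A rough sketch of the code shape that the dr_explicit_realign_optimized
   case above is costing (simplified and hypothetical; the actual statements
   are generated elsewhere in the vectorizer):

     before the loop:  msq  = aligned load from the start address;
                       mask = builtin_mask_for_load (addr);   (if available)
     in the loop:      lsq  = next aligned load;
                       vec  = realign (msq, lsq, mask);       (one vector op)
                       msq  = lsq;

   which corresponds to the two (or three) statements counted as outside
   costs and the load plus one vector statement per copy counted as inside
   costs.  */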
1013
1014
1015 /* Function vect_init_vector.
1016
1017 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
1018 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
1019 is not NULL. Otherwise, place the initialization at the loop preheader.
1020 Return the DEF of INIT_STMT.
1021 It will be used in the vectorization of STMT. */
1022
1023 tree
1024 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
1025 gimple_stmt_iterator *gsi)
1026 {
1027 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1028 tree new_var;
1029 gimple init_stmt;
1030 tree vec_oprnd;
1031 edge pe;
1032 tree new_temp;
1033 basic_block new_bb;
1034
1035 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
1036 add_referenced_var (new_var);
1037 init_stmt = gimple_build_assign (new_var, vector_var);
1038 new_temp = make_ssa_name (new_var, init_stmt);
1039 gimple_assign_set_lhs (init_stmt, new_temp);
1040
1041 if (gsi)
1042 vect_finish_stmt_generation (stmt, init_stmt, gsi);
1043 else
1044 {
1045 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1046
1047 if (loop_vinfo)
1048 {
1049 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1050
1051 if (nested_in_vect_loop_p (loop, stmt))
1052 loop = loop->inner;
1053
1054 pe = loop_preheader_edge (loop);
1055 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
1056 gcc_assert (!new_bb);
1057 }
1058 else
1059 {
1060 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1061 basic_block bb;
1062 gimple_stmt_iterator gsi_bb_start;
1063
1064 gcc_assert (bb_vinfo);
1065 bb = BB_VINFO_BB (bb_vinfo);
1066 gsi_bb_start = gsi_after_labels (bb);
1067 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
1068 }
1069 }
1070
1071 if (vect_print_dump_info (REPORT_DETAILS))
1072 {
1073 fprintf (vect_dump, "created new init_stmt: ");
1074 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
1075 }
1076
1077 vec_oprnd = gimple_assign_lhs (init_stmt);
1078 return vec_oprnd;
1079 }
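/* For example (hypothetical scalar code and vector type): when vectorizing
   "x = y + 5" with a four-element integer vector type,
   vect_get_vec_def_for_operand below builds the constant vector
   {5, 5, 5, 5} and calls this function with a NULL GSI, so an init
   statement of the form

     cst_.N = { 5, 5, 5, 5 };

   is inserted on the loop-preheader edge and its result is used as the
   vector operand of every copy of the vectorized statement.  */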
1080
1081
1082 /* Function vect_get_vec_def_for_operand.
1083
1084 OP is an operand in STMT. This function returns a (vector) def that will be
1085 used in the vectorized stmt for STMT.
1086
1087 In the case that OP is an SSA_NAME which is defined in the loop, then
1088 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1089
1090 In case OP is an invariant or constant, a new stmt that creates a vector def
1091 needs to be introduced. */
1092
1093 tree
1094 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
1095 {
1096 tree vec_oprnd;
1097 gimple vec_stmt;
1098 gimple def_stmt;
1099 stmt_vec_info def_stmt_info = NULL;
1100 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1101 unsigned int nunits;
1102 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1103 tree vec_inv;
1104 tree vec_cst;
1105 tree t = NULL_TREE;
1106 tree def;
1107 int i;
1108 enum vect_def_type dt;
1109 bool is_simple_use;
1110 tree vector_type;
1111
1112 if (vect_print_dump_info (REPORT_DETAILS))
1113 {
1114 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
1115 print_generic_expr (vect_dump, op, TDF_SLIM);
1116 }
1117
1118 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
1119 &dt);
1120 gcc_assert (is_simple_use);
1121 if (vect_print_dump_info (REPORT_DETAILS))
1122 {
1123 if (def)
1124 {
1125 fprintf (vect_dump, "def = ");
1126 print_generic_expr (vect_dump, def, TDF_SLIM);
1127 }
1128 if (def_stmt)
1129 {
1130 fprintf (vect_dump, " def_stmt = ");
1131 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1132 }
1133 }
1134
1135 switch (dt)
1136 {
1137 /* Case 1: operand is a constant. */
1138 case vect_constant_def:
1139 {
1140 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1141 gcc_assert (vector_type);
1142 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1143
1144 if (scalar_def)
1145 *scalar_def = op;
1146
1147 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1148 if (vect_print_dump_info (REPORT_DETAILS))
1149 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
1150
1151 vec_cst = build_vector_from_val (vector_type, op);
1152 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
1153 }
1154
1155 /* Case 2: operand is defined outside the loop - loop invariant. */
1156 case vect_external_def:
1157 {
1158 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1159 gcc_assert (vector_type);
1160 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1161
1162 if (scalar_def)
1163 *scalar_def = def;
1164
1165 /* Create 'vec_inv = {inv,inv,..,inv}' */
1166 if (vect_print_dump_info (REPORT_DETAILS))
1167 fprintf (vect_dump, "Create vector_inv.");
1168
1169 for (i = nunits - 1; i >= 0; --i)
1170 {
1171 t = tree_cons (NULL_TREE, def, t);
1172 }
1173
1174 /* FIXME: use build_constructor directly. */
1175 vec_inv = build_constructor_from_list (vector_type, t);
1176 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
1177 }
1178
1179 /* Case 3: operand is defined inside the loop. */
1180 case vect_internal_def:
1181 {
1182 if (scalar_def)
1183 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1184
1185 /* Get the def from the vectorized stmt. */
1186 def_stmt_info = vinfo_for_stmt (def_stmt);
1187 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1188 gcc_assert (vec_stmt);
1189 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1190 vec_oprnd = PHI_RESULT (vec_stmt);
1191 else if (is_gimple_call (vec_stmt))
1192 vec_oprnd = gimple_call_lhs (vec_stmt);
1193 else
1194 vec_oprnd = gimple_assign_lhs (vec_stmt);
1195 return vec_oprnd;
1196 }
1197
1198 /* Case 4: operand is defined by a loop header phi - reduction */
1199 case vect_reduction_def:
1200 case vect_double_reduction_def:
1201 case vect_nested_cycle:
1202 {
1203 struct loop *loop;
1204
1205 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1206 loop = (gimple_bb (def_stmt))->loop_father;
1207
1208 /* Get the def before the loop */
1209 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1210 return get_initial_def_for_reduction (stmt, op, scalar_def);
1211 }
1212
1213 /* Case 5: operand is defined by loop-header phi - induction. */
1214 case vect_induction_def:
1215 {
1216 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1217
1218 /* Get the def from the vectorized stmt. */
1219 def_stmt_info = vinfo_for_stmt (def_stmt);
1220 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1221 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1222 vec_oprnd = PHI_RESULT (vec_stmt);
1223 else
1224 vec_oprnd = gimple_get_lhs (vec_stmt);
1225 return vec_oprnd;
1226 }
1227
1228 default:
1229 gcc_unreachable ();
1230 }
1231 }
1232
1233
1234 /* Function vect_get_vec_def_for_stmt_copy
1235
1236 Return a vector-def for an operand. This function is used when the
1237 vectorized stmt to be created (by the caller to this function) is a "copy"
1238 created in case the vectorized result cannot fit in one vector, and several
1239 copies of the vector-stmt are required. In this case the vector-def is
1240 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1241 of the stmt that defines VEC_OPRND.
1242 DT is the type of the vector def VEC_OPRND.
1243
1244 Context:
1245 In case the vectorization factor (VF) is bigger than the number
1246 of elements that can fit in a vectype (nunits), we have to generate
1247 more than one vector stmt to vectorize the scalar stmt. This situation
1248 arises when there are multiple data-types operated upon in the loop; the
1249 smallest data-type determines the VF, and as a result, when vectorizing
1250 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1251 vector stmt (each computing a vector of 'nunits' results, and together
1252 computing 'VF' results in each iteration). This function is called when
1253 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1254 which VF=16 and nunits=4, so the number of copies required is 4):
1255
1256 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1257
1258 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1259 VS1.1: vx.1 = memref1 VS1.2
1260 VS1.2: vx.2 = memref2 VS1.3
1261 VS1.3: vx.3 = memref3
1262
1263 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1264 VSnew.1: vz1 = vx.1 + ... VSnew.2
1265 VSnew.2: vz2 = vx.2 + ... VSnew.3
1266 VSnew.3: vz3 = vx.3 + ...
1267
1268 The vectorization of S1 is explained in vectorizable_load.
1269 The vectorization of S2:
1270 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1271 the function 'vect_get_vec_def_for_operand' is called to
1272 get the relevant vector-def for each operand of S2. For operand x it
1273 returns the vector-def 'vx.0'.
1274
1275 To create the remaining copies of the vector-stmt (VSnew.j), this
1276 function is called to get the relevant vector-def for each operand. It is
1277 obtained from the respective VS1.j stmt, which is recorded in the
1278 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1279
1280 For example, to obtain the vector-def 'vx.1' in order to create the
1281 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1282 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1283 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1284 and return its def ('vx.1').
1285 Overall, to create the above sequence this function will be called 3 times:
1286 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1287 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1288 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1289
1290 tree
1291 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1292 {
1293 gimple vec_stmt_for_operand;
1294 stmt_vec_info def_stmt_info;
1295
1296 /* Do nothing; can reuse same def. */
1297 if (dt == vect_external_def || dt == vect_constant_def )
1298 return vec_oprnd;
1299
1300 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1301 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1302 gcc_assert (def_stmt_info);
1303 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1304 gcc_assert (vec_stmt_for_operand);
1305 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1306 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1307 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1308 else
1309 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1310 return vec_oprnd;
1311 }
1312
1313
1314 /* Get vectorized definitions for the operands to create a copy of an original
1315 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1316
1317 static void
1318 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1319 VEC(tree,heap) **vec_oprnds0,
1320 VEC(tree,heap) **vec_oprnds1)
1321 {
1322 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1323
1324 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1325 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1326
1327 if (vec_oprnds1 && *vec_oprnds1)
1328 {
1329 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1330 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1331 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1332 }
1333 }
1334
1335
1336 /* Get vectorized definitions for OP0 and OP1, taking them from SLP_NODE
1337    if it is not NULL.  */
1338
1339 static void
1340 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1341 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1342 slp_tree slp_node)
1343 {
1344 if (slp_node)
1345 vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
1346 else
1347 {
1348 tree vec_oprnd;
1349
1350 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1351 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1352 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1353
1354 if (op1)
1355 {
1356 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1357 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1358 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1359 }
1360 }
1361 }
1362
1363
1364 /* Function vect_finish_stmt_generation.
1365
1366 Insert a new stmt. */
1367
1368 void
1369 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1370 gimple_stmt_iterator *gsi)
1371 {
1372 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1373 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1374 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1375
1376 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1377
1378 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1379
1380 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1381 bb_vinfo));
1382
1383 if (vect_print_dump_info (REPORT_DETAILS))
1384 {
1385 fprintf (vect_dump, "add new stmt: ");
1386 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1387 }
1388
1389 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1390 }
1391
1392 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1393 a function declaration if the target has a vectorized version
1394 of the function, or NULL_TREE if the function cannot be vectorized. */
1395
1396 tree
1397 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1398 {
1399 tree fndecl = gimple_call_fndecl (call);
1400
1401 /* We only handle functions that do not read or clobber memory -- i.e.
1402 const or novops ones. */
1403 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1404 return NULL_TREE;
1405
1406 if (!fndecl
1407 || TREE_CODE (fndecl) != FUNCTION_DECL
1408 || !DECL_BUILT_IN (fndecl))
1409 return NULL_TREE;
1410
1411 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1412 vectype_in);
1413 }
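/* For example (target-dependent, illustration only): for a call to the
   const builtin sqrtf in a loop vectorized with a four-float vector type,
   a target whose builtin_vectorized_function hook provides a vector square
   root returns the declaration of its vector sqrt builtin here, and
   vectorizable_call below emits a call to that declaration; if the hook
   returns NULL_TREE the call is simply not vectorized.  */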
1414
1415 /* Function vectorizable_call.
1416
1417 Check if STMT performs a function call that can be vectorized.
1418 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1419 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1420 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1421
1422 static bool
1423 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1424 {
1425 tree vec_dest;
1426 tree scalar_dest;
1427 tree op, type;
1428 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1429 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1430 tree vectype_out, vectype_in;
1431 int nunits_in;
1432 int nunits_out;
1433 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1434 tree fndecl, new_temp, def, rhs_type;
1435 gimple def_stmt;
1436 enum vect_def_type dt[3]
1437 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
1438 gimple new_stmt = NULL;
1439 int ncopies, j;
1440 VEC(tree, heap) *vargs = NULL;
1441 enum { NARROW, NONE, WIDEN } modifier;
1442 size_t i, nargs;
1443 tree lhs;
1444
1445 /* FORNOW: unsupported in basic block SLP. */
1446 gcc_assert (loop_vinfo);
1447
1448 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1449 return false;
1450
1451 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1452 return false;
1453
1454 /* FORNOW: SLP not supported. */
1455 if (STMT_SLP_TYPE (stmt_info))
1456 return false;
1457
1458 /* Is STMT a vectorizable call? */
1459 if (!is_gimple_call (stmt))
1460 return false;
1461
1462 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1463 return false;
1464
1465 if (stmt_can_throw_internal (stmt))
1466 return false;
1467
1468 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1469
1470 /* Process function arguments. */
1471 rhs_type = NULL_TREE;
1472 vectype_in = NULL_TREE;
1473 nargs = gimple_call_num_args (stmt);
1474
1475   /* Bail out if the function has more than three arguments; we do not have
1476      interesting builtin functions to vectorize with more than two arguments,
1477      except for fma.  Zero arguments is not supported either.  */
1478 if (nargs == 0 || nargs > 3)
1479 return false;
1480
1481 for (i = 0; i < nargs; i++)
1482 {
1483 tree opvectype;
1484
1485 op = gimple_call_arg (stmt, i);
1486
1487 /* We can only handle calls with arguments of the same type. */
1488 if (rhs_type
1489 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
1490 {
1491 if (vect_print_dump_info (REPORT_DETAILS))
1492 fprintf (vect_dump, "argument types differ.");
1493 return false;
1494 }
1495 if (!rhs_type)
1496 rhs_type = TREE_TYPE (op);
1497
1498 if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
1499 &def_stmt, &def, &dt[i], &opvectype))
1500 {
1501 if (vect_print_dump_info (REPORT_DETAILS))
1502 fprintf (vect_dump, "use not simple.");
1503 return false;
1504 }
1505
1506 if (!vectype_in)
1507 vectype_in = opvectype;
1508 else if (opvectype
1509 && opvectype != vectype_in)
1510 {
1511 if (vect_print_dump_info (REPORT_DETAILS))
1512 fprintf (vect_dump, "argument vector types differ.");
1513 return false;
1514 }
1515 }
1516   /* If all arguments are external or constant defs, use a vector type with
1517      the same size as the output vector type.  */
1518 if (!vectype_in)
1519 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1520 if (vec_stmt)
1521 gcc_assert (vectype_in);
1522 if (!vectype_in)
1523 {
1524 if (vect_print_dump_info (REPORT_DETAILS))
1525 {
1526 fprintf (vect_dump, "no vectype for scalar type ");
1527 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1528 }
1529
1530 return false;
1531 }
1532
1533 /* FORNOW */
1534 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1535 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1536 if (nunits_in == nunits_out / 2)
1537 modifier = NARROW;
1538 else if (nunits_out == nunits_in)
1539 modifier = NONE;
1540 else if (nunits_out == nunits_in / 2)
1541 modifier = WIDEN;
1542 else
1543 return false;
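  /* Worked example (hypothetical vector types): with vectype_in having 8
     subparts and vectype_out having 4, nunits_out == nunits_in / 2 and the
     call is a WIDEN operation; equal subparts give NONE, and
     nunits_in == nunits_out / 2 gives NARROW.  NCOPIES below is the
     vectorization factor divided by nunits_out for NARROW and by nunits_in
     otherwise.  */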
1544
1545 /* For now, we only vectorize functions if a target specific builtin
1546 is available. TODO -- in some cases, it might be profitable to
1547 insert the calls for pieces of the vector, in order to be able
1548 to vectorize other operations in the loop. */
1549 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1550 if (fndecl == NULL_TREE)
1551 {
1552 if (vect_print_dump_info (REPORT_DETAILS))
1553 fprintf (vect_dump, "function is not vectorizable.");
1554
1555 return false;
1556 }
1557
1558 gcc_assert (!gimple_vuse (stmt));
1559
1560 if (modifier == NARROW)
1561 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1562 else
1563 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1564
1565 /* Sanity check: make sure that at least one copy of the vectorized stmt
1566 needs to be generated. */
1567 gcc_assert (ncopies >= 1);
1568
1569 if (!vec_stmt) /* transformation not required. */
1570 {
1571 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1572 if (vect_print_dump_info (REPORT_DETAILS))
1573 fprintf (vect_dump, "=== vectorizable_call ===");
1574 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1575 return true;
1576 }
1577
1578 /** Transform. **/
1579
1580 if (vect_print_dump_info (REPORT_DETAILS))
1581 fprintf (vect_dump, "transform call.");
1582
1583 /* Handle def. */
1584 scalar_dest = gimple_call_lhs (stmt);
1585 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1586
1587 prev_stmt_info = NULL;
1588 switch (modifier)
1589 {
1590 case NONE:
1591 for (j = 0; j < ncopies; ++j)
1592 {
1593 /* Build argument list for the vectorized call. */
1594 if (j == 0)
1595 vargs = VEC_alloc (tree, heap, nargs);
1596 else
1597 VEC_truncate (tree, vargs, 0);
1598
1599 for (i = 0; i < nargs; i++)
1600 {
1601 op = gimple_call_arg (stmt, i);
1602 if (j == 0)
1603 vec_oprnd0
1604 = vect_get_vec_def_for_operand (op, stmt, NULL);
1605 else
1606 {
1607 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1608 vec_oprnd0
1609 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1610 }
1611
1612 VEC_quick_push (tree, vargs, vec_oprnd0);
1613 }
1614
1615 new_stmt = gimple_build_call_vec (fndecl, vargs);
1616 new_temp = make_ssa_name (vec_dest, new_stmt);
1617 gimple_call_set_lhs (new_stmt, new_temp);
1618
1619 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1620 mark_symbols_for_renaming (new_stmt);
1621
1622 if (j == 0)
1623 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1624 else
1625 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1626
1627 prev_stmt_info = vinfo_for_stmt (new_stmt);
1628 }
1629
1630 break;
1631
1632 case NARROW:
1633 for (j = 0; j < ncopies; ++j)
1634 {
1635 /* Build argument list for the vectorized call. */
1636 if (j == 0)
1637 vargs = VEC_alloc (tree, heap, nargs * 2);
1638 else
1639 VEC_truncate (tree, vargs, 0);
1640
1641 for (i = 0; i < nargs; i++)
1642 {
1643 op = gimple_call_arg (stmt, i);
1644 if (j == 0)
1645 {
1646 vec_oprnd0
1647 = vect_get_vec_def_for_operand (op, stmt, NULL);
1648 vec_oprnd1
1649 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1650 }
1651 else
1652 {
1653 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1654 vec_oprnd0
1655 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1656 vec_oprnd1
1657 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1658 }
1659
1660 VEC_quick_push (tree, vargs, vec_oprnd0);
1661 VEC_quick_push (tree, vargs, vec_oprnd1);
1662 }
1663
1664 new_stmt = gimple_build_call_vec (fndecl, vargs);
1665 new_temp = make_ssa_name (vec_dest, new_stmt);
1666 gimple_call_set_lhs (new_stmt, new_temp);
1667
1668 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1669 mark_symbols_for_renaming (new_stmt);
1670
1671 if (j == 0)
1672 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1673 else
1674 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1675
1676 prev_stmt_info = vinfo_for_stmt (new_stmt);
1677 }
1678
1679 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1680
1681 break;
1682
1683 case WIDEN:
1684 /* No current target implements this case. */
1685 return false;
1686 }
1687
1688 VEC_free (tree, heap, vargs);
1689
1690 /* Update the exception handling table with the vector stmt if necessary. */
1691 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1692 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1693
1694 /* The call in STMT might prevent it from being removed in dce.
1695 We however cannot remove it here, due to the way the ssa name
1696 it defines is mapped to the new definition. So just replace
1697 rhs of the statement with something harmless. */
1698
1699 type = TREE_TYPE (scalar_dest);
1700 if (is_pattern_stmt_p (stmt_info))
1701 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
1702 else
1703 lhs = gimple_call_lhs (stmt);
1704 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
1705 set_vinfo_for_stmt (new_stmt, stmt_info);
1706 set_vinfo_for_stmt (stmt, NULL);
1707 STMT_VINFO_STMT (stmt_info) = new_stmt;
1708 gsi_replace (gsi, new_stmt, false);
1709 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1710
1711 return true;
1712 }
1713
1714
1715 /* Function vect_gen_widened_results_half
1716
1717    Create a vector stmt whose code, operand type, and result variable are
1718    CODE, OP_TYPE, and VEC_DEST, and whose arguments are VEC_OPRND0 and
1719    VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
1720 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1721 needs to be created (DECL is a function-decl of a target-builtin).
1722 STMT is the original scalar stmt that we are vectorizing. */
1723
1724 static gimple
1725 vect_gen_widened_results_half (enum tree_code code,
1726 tree decl,
1727 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1728 tree vec_dest, gimple_stmt_iterator *gsi,
1729 gimple stmt)
1730 {
1731 gimple new_stmt;
1732 tree new_temp;
1733
1734 /* Generate half of the widened result: */
1735 if (code == CALL_EXPR)
1736 {
1737 /* Target specific support */
1738 if (op_type == binary_op)
1739 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1740 else
1741 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1742 new_temp = make_ssa_name (vec_dest, new_stmt);
1743 gimple_call_set_lhs (new_stmt, new_temp);
1744 }
1745 else
1746 {
1747 /* Generic support */
1748 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1749 if (op_type != binary_op)
1750 vec_oprnd1 = NULL;
1751 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1752 vec_oprnd1);
1753 new_temp = make_ssa_name (vec_dest, new_stmt);
1754 gimple_assign_set_lhs (new_stmt, new_temp);
1755 }
1756 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1757
1758 return new_stmt;
1759 }
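/* For illustration (hypothetical codes and types): widening one vector of
   narrow elements into two vectors of wide elements is done with two calls
   to this function, one for each half, e.g.

     vect_lo = VEC_UNPACK_LO_EXPR <vx>;
     vect_hi = VEC_UNPACK_HI_EXPR <vx>;

   or, when the target exposes the operation only as builtins, with two
   calls to the DECLs returned by supportable_widening_operation.  */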
1760
1761
1762 /* Check if STMT performs a conversion operation that can be vectorized.
1763 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1764 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1765 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1766
1767 static bool
1768 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1769 gimple *vec_stmt, slp_tree slp_node)
1770 {
1771 tree vec_dest;
1772 tree scalar_dest;
1773 tree op0;
1774 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1775 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1776 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1777 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1778 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1779 tree new_temp;
1780 tree def;
1781 gimple def_stmt;
1782 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1783 gimple new_stmt = NULL;
1784 stmt_vec_info prev_stmt_info;
1785 int nunits_in;
1786 int nunits_out;
1787 tree vectype_out, vectype_in;
1788 int ncopies, j;
1789 tree rhs_type;
1790 tree builtin_decl;
1791 enum { NARROW, NONE, WIDEN } modifier;
1792 int i;
1793 VEC(tree,heap) *vec_oprnds0 = NULL;
1794 tree vop0;
1795 VEC(tree,heap) *dummy = NULL;
1796 int dummy_int;
1797
1798 /* Is STMT a vectorizable conversion? */
1799
1800 /* FORNOW: unsupported in basic block SLP. */
1801 gcc_assert (loop_vinfo);
1802
1803 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1804 return false;
1805
1806 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1807 return false;
1808
1809 if (!is_gimple_assign (stmt))
1810 return false;
1811
1812 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1813 return false;
1814
1815 code = gimple_assign_rhs_code (stmt);
1816 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1817 return false;
1818
1819 /* Check types of lhs and rhs. */
1820 scalar_dest = gimple_assign_lhs (stmt);
1821 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1822
1823 op0 = gimple_assign_rhs1 (stmt);
1824 rhs_type = TREE_TYPE (op0);
1825 /* Check the operands of the operation. */
1826 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1827 &def_stmt, &def, &dt[0], &vectype_in))
1828 {
1829 if (vect_print_dump_info (REPORT_DETAILS))
1830 fprintf (vect_dump, "use not simple.");
1831 return false;
1832 }
1833 /* If op0 is an external or constant def use a vector type of
1834 the same size as the output vector type. */
1835 if (!vectype_in)
1836 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1837 if (vec_stmt)
1838 gcc_assert (vectype_in);
1839 if (!vectype_in)
1840 {
1841 if (vect_print_dump_info (REPORT_DETAILS))
1842 {
1843 fprintf (vect_dump, "no vectype for scalar type ");
1844 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1845 }
1846
1847 return false;
1848 }
1849
1850 /* FORNOW */
1851 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1852 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1853 if (nunits_in == nunits_out / 2)
1854 modifier = NARROW;
1855 else if (nunits_out == nunits_in)
1856 modifier = NONE;
1857 else if (nunits_out == nunits_in / 2)
1858 modifier = WIDEN;
1859 else
1860 return false;
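/* For example (a sketch): with V4SI and V2DF vectors (e.g. SSE2), a
FLOAT_EXPR from int to double has nunits_in == 4 and nunits_out == 2,
so it is classified as WIDEN; the reverse FIX_TRUNC_EXPR from double
to int is classified as NARROW.  */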
1861
1862 if (modifier == NARROW)
1863 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1864 else
1865 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1866
1867 /* Multiple types in SLP are handled by creating the appropriate number of
1868 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1869 case of SLP. */
1870 if (slp_node || PURE_SLP_STMT (stmt_info))
1871 ncopies = 1;
1872
1873 /* Sanity check: make sure that at least one copy of the vectorized stmt
1874 needs to be generated. */
1875 gcc_assert (ncopies >= 1);
1876
1877 /* Supportable by target? */
1878 if ((modifier == NONE
1879 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1880 || (modifier == WIDEN
1881 && !supportable_widening_operation (code, stmt,
1882 vectype_out, vectype_in,
1883 &decl1, &decl2,
1884 &code1, &code2,
1885 &dummy_int, &dummy))
1886 || (modifier == NARROW
1887 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1888 &code1, &dummy_int, &dummy)))
1889 {
1890 if (vect_print_dump_info (REPORT_DETAILS))
1891 fprintf (vect_dump, "conversion not supported by target.");
1892 return false;
1893 }
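/* A NONE conversion is handled only through the target's
builtin_conversion hook (which returns a builtin decl used below);
WIDEN and NARROW conversions instead get tree codes (and, for the
widening case, possibly target builtin decls) from the supportable_*
checks above.  */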
1894
1895 if (modifier != NONE)
1896 {
1897 /* FORNOW: SLP not supported. */
1898 if (STMT_SLP_TYPE (stmt_info))
1899 return false;
1900 }
1901
1902 if (!vec_stmt) /* transformation not required. */
1903 {
1904 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1905 return true;
1906 }
1907
1908 /** Transform. **/
1909 if (vect_print_dump_info (REPORT_DETAILS))
1910 fprintf (vect_dump, "transform conversion.");
1911
1912 /* Handle def. */
1913 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1914
1915 if (modifier == NONE && !slp_node)
1916 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1917
1918 prev_stmt_info = NULL;
1919 switch (modifier)
1920 {
1921 case NONE:
1922 for (j = 0; j < ncopies; j++)
1923 {
1924 if (j == 0)
1925 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1926 else
1927 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1928
1929 builtin_decl =
1930 targetm.vectorize.builtin_conversion (code,
1931 vectype_out, vectype_in);
1932 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
1933 {
1934 /* Arguments are ready. Create the new vector stmt. */
1935 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1936 new_temp = make_ssa_name (vec_dest, new_stmt);
1937 gimple_call_set_lhs (new_stmt, new_temp);
1938 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1939 if (slp_node)
1940 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1941 }
1942
1943 if (j == 0)
1944 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1945 else
1946 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1947 prev_stmt_info = vinfo_for_stmt (new_stmt);
1948 }
1949 break;
1950
1951 case WIDEN:
1952 /* In case the vectorization factor (VF) is bigger than the number
1953 of elements that we can fit in a vectype (nunits), we have to
1954 generate more than one vector stmt - i.e - we need to "unroll"
1955 the vector stmt by a factor VF/nunits. */
1956 for (j = 0; j < ncopies; j++)
1957 {
1958 if (j == 0)
1959 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1960 else
1961 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1962
1963 /* Generate first half of the widened result: */
1964 new_stmt
1965 = vect_gen_widened_results_half (code1, decl1,
1966 vec_oprnd0, vec_oprnd1,
1967 unary_op, vec_dest, gsi, stmt);
1968 if (j == 0)
1969 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1970 else
1971 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1972 prev_stmt_info = vinfo_for_stmt (new_stmt);
1973
1974 /* Generate second half of the widened result: */
1975 new_stmt
1976 = vect_gen_widened_results_half (code2, decl2,
1977 vec_oprnd0, vec_oprnd1,
1978 unary_op, vec_dest, gsi, stmt);
1979 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1980 prev_stmt_info = vinfo_for_stmt (new_stmt);
1981 }
1982 break;
1983
1984 case NARROW:
1985 /* In case the vectorization factor (VF) is bigger than the number
1986 of elements that we can fit in a vectype (nunits), we have to
1987 generate more than one vector stmt - i.e - we need to "unroll"
1988 the vector stmt by a factor VF/nunits. */
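/* Each narrowed vector packs the elements of two input vectors, so every
iteration below consumes a pair of vector defs (VEC_OPRND0 and VEC_OPRND1)
and combines them using CODE1.  */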
1989 for (j = 0; j < ncopies; j++)
1990 {
1991 /* Handle uses. */
1992 if (j == 0)
1993 {
1994 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1995 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1996 }
1997 else
1998 {
1999 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
2000 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2001 }
2002
2003 /* Arguments are ready. Create the new vector stmt. */
2004 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
2005 vec_oprnd1);
2006 new_temp = make_ssa_name (vec_dest, new_stmt);
2007 gimple_assign_set_lhs (new_stmt, new_temp);
2008 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2009
2010 if (j == 0)
2011 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2012 else
2013 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2014
2015 prev_stmt_info = vinfo_for_stmt (new_stmt);
2016 }
2017
2018 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2019 }
2020
2021 if (vec_oprnds0)
2022 VEC_free (tree, heap, vec_oprnds0);
2023
2024 return true;
2025 }
2026
2027
2028 /* Function vectorizable_assignment.
2029
2030 Check if STMT performs an assignment (copy) that can be vectorized.
2031 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2032 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2033 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2034
2035 static bool
2036 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
2037 gimple *vec_stmt, slp_tree slp_node)
2038 {
2039 tree vec_dest;
2040 tree scalar_dest;
2041 tree op;
2042 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2043 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2044 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2045 tree new_temp;
2046 tree def;
2047 gimple def_stmt;
2048 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2049 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2050 int ncopies;
2051 int i, j;
2052 VEC(tree,heap) *vec_oprnds = NULL;
2053 tree vop;
2054 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2055 gimple new_stmt = NULL;
2056 stmt_vec_info prev_stmt_info = NULL;
2057 enum tree_code code;
2058 tree vectype_in;
2059
2060 /* Multiple types in SLP are handled by creating the appropriate number of
2061 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2062 case of SLP. */
2063 if (slp_node || PURE_SLP_STMT (stmt_info))
2064 ncopies = 1;
2065 else
2066 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2067
2068 gcc_assert (ncopies >= 1);
2069
2070 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2071 return false;
2072
2073 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2074 return false;
2075
2076 /* Is vectorizable assignment? */
2077 if (!is_gimple_assign (stmt))
2078 return false;
2079
2080 scalar_dest = gimple_assign_lhs (stmt);
2081 if (TREE_CODE (scalar_dest) != SSA_NAME)
2082 return false;
2083
2084 code = gimple_assign_rhs_code (stmt);
2085 if (gimple_assign_single_p (stmt)
2086 || code == PAREN_EXPR
2087 || CONVERT_EXPR_CODE_P (code))
2088 op = gimple_assign_rhs1 (stmt);
2089 else
2090 return false;
2091
2092 if (code == VIEW_CONVERT_EXPR)
2093 op = TREE_OPERAND (op, 0);
2094
2095 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
2096 &def_stmt, &def, &dt[0], &vectype_in))
2097 {
2098 if (vect_print_dump_info (REPORT_DETAILS))
2099 fprintf (vect_dump, "use not simple.");
2100 return false;
2101 }
2102
2103 /* We can handle NOP_EXPR and VIEW_CONVERT_EXPR conversions that do not
2104 change the number of elements or the vector size. */
2105 if ((CONVERT_EXPR_CODE_P (code)
2106 || code == VIEW_CONVERT_EXPR)
2107 && (!vectype_in
2108 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
2109 || (GET_MODE_SIZE (TYPE_MODE (vectype))
2110 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
2111 return false;
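/* For example (a sketch): a cast between int and unsigned int elements,
or a VIEW_CONVERT_EXPR between same-sized vector types, passes the check
above and is emitted below as a plain vector copy through a
VIEW_CONVERT_EXPR on the vector operand.  */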
2112
2113 if (!vec_stmt) /* transformation not required. */
2114 {
2115 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
2116 if (vect_print_dump_info (REPORT_DETAILS))
2117 fprintf (vect_dump, "=== vectorizable_assignment ===");
2118 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2119 return true;
2120 }
2121
2122 /** Transform. **/
2123 if (vect_print_dump_info (REPORT_DETAILS))
2124 fprintf (vect_dump, "transform assignment.");
2125
2126 /* Handle def. */
2127 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2128
2129 /* Handle use. */
2130 for (j = 0; j < ncopies; j++)
2131 {
2132 /* Handle uses. */
2133 if (j == 0)
2134 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2135 else
2136 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2137
2138 /* Arguments are ready. Create the new vector stmt. */
2139 FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
2140 {
2141 if (CONVERT_EXPR_CODE_P (code)
2142 || code == VIEW_CONVERT_EXPR)
2143 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2144 new_stmt = gimple_build_assign (vec_dest, vop);
2145 new_temp = make_ssa_name (vec_dest, new_stmt);
2146 gimple_assign_set_lhs (new_stmt, new_temp);
2147 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2148 if (slp_node)
2149 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2150 }
2151
2152 if (slp_node)
2153 continue;
2154
2155 if (j == 0)
2156 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2157 else
2158 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2159
2160 prev_stmt_info = vinfo_for_stmt (new_stmt);
2161 }
2162
2163 VEC_free (tree, heap, vec_oprnds);
2164 return true;
2165 }
2166
2167
2168 /* Function vectorizable_shift.
2169
2170 Check if STMT performs a shift operation that can be vectorized.
2171 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2172 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2173 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2174
2175 static bool
2176 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
2177 gimple *vec_stmt, slp_tree slp_node)
2178 {
2179 tree vec_dest;
2180 tree scalar_dest;
2181 tree op0, op1 = NULL;
2182 tree vec_oprnd1 = NULL_TREE;
2183 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2184 tree vectype;
2185 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2186 enum tree_code code;
2187 enum machine_mode vec_mode;
2188 tree new_temp;
2189 optab optab;
2190 int icode;
2191 enum machine_mode optab_op2_mode;
2192 tree def;
2193 gimple def_stmt;
2194 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2195 gimple new_stmt = NULL;
2196 stmt_vec_info prev_stmt_info;
2197 int nunits_in;
2198 int nunits_out;
2199 tree vectype_out;
2200 int ncopies;
2201 int j, i;
2202 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2203 tree vop0, vop1;
2204 unsigned int k;
2205 bool scalar_shift_arg = true;
2206 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2207 int vf;
2208
2209 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2210 return false;
2211
2212 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2213 return false;
2214
2215 /* Is STMT a vectorizable binary/unary operation? */
2216 if (!is_gimple_assign (stmt))
2217 return false;
2218
2219 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2220 return false;
2221
2222 code = gimple_assign_rhs_code (stmt);
2223
2224 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2225 || code == RROTATE_EXPR))
2226 return false;
2227
2228 scalar_dest = gimple_assign_lhs (stmt);
2229 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2230
2231 op0 = gimple_assign_rhs1 (stmt);
2232 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2233 &def_stmt, &def, &dt[0], &vectype))
2234 {
2235 if (vect_print_dump_info (REPORT_DETAILS))
2236 fprintf (vect_dump, "use not simple.");
2237 return false;
2238 }
2239 /* If op0 is an external or constant def use a vector type with
2240 the same size as the output vector type. */
2241 if (!vectype)
2242 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2243 if (vec_stmt)
2244 gcc_assert (vectype);
2245 if (!vectype)
2246 {
2247 if (vect_print_dump_info (REPORT_DETAILS))
2248 {
2249 fprintf (vect_dump, "no vectype for scalar type ");
2250 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2251 }
2252
2253 return false;
2254 }
2255
2256 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2257 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2258 if (nunits_out != nunits_in)
2259 return false;
2260
2261 op1 = gimple_assign_rhs2 (stmt);
2262 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
2263 {
2264 if (vect_print_dump_info (REPORT_DETAILS))
2265 fprintf (vect_dump, "use not simple.");
2266 return false;
2267 }
2268
2269 if (loop_vinfo)
2270 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2271 else
2272 vf = 1;
2273
2274 /* Multiple types in SLP are handled by creating the appropriate number of
2275 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2276 case of SLP. */
2277 if (slp_node || PURE_SLP_STMT (stmt_info))
2278 ncopies = 1;
2279 else
2280 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2281
2282 gcc_assert (ncopies >= 1);
2283
2284 /* Determine whether the shift amount is a vector or a scalar. If the
2285 shift/rotate amount is a vector, use the vector/vector shift optabs. */
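/* For example (a sketch): in  a[i] = b[i] << n  with loop-invariant N the
shift amount is a scalar and the vector-by-scalar form can be used, whereas
a[i] = b[i] << c[i]  requires the vector-by-vector form.  */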
2286
2287 if (dt[1] == vect_internal_def && !slp_node)
2288 scalar_shift_arg = false;
2289 else if (dt[1] == vect_constant_def
2290 || dt[1] == vect_external_def
2291 || dt[1] == vect_internal_def)
2292 {
2293 /* In SLP, we need to check whether the shift count is the same for
2294 all the statements of the node; in loops, a constant or invariant
2295 shift count is always a scalar shift. */
2296 if (slp_node)
2297 {
2298 VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2299 gimple slpstmt;
2300
2301 FOR_EACH_VEC_ELT (gimple, stmts, k, slpstmt)
2302 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
2303 scalar_shift_arg = false;
2304 }
2305 }
2306 else
2307 {
2308 if (vect_print_dump_info (REPORT_DETAILS))
2309 fprintf (vect_dump, "operand mode requires invariant argument.");
2310 return false;
2311 }
2312
2313 /* Vector shifted by vector. */
2314 if (!scalar_shift_arg)
2315 {
2316 optab = optab_for_tree_code (code, vectype, optab_vector);
2317 if (vect_print_dump_info (REPORT_DETAILS))
2318 fprintf (vect_dump, "vector/vector shift/rotate found.");
2319 }
2320 /* See if the machine has a vector shifted by scalar insn and if not
2321 then see if it has a vector shifted by vector insn. */
2322 else
2323 {
2324 optab = optab_for_tree_code (code, vectype, optab_scalar);
2325 if (optab
2326 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2327 {
2328 if (vect_print_dump_info (REPORT_DETAILS))
2329 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2330 }
2331 else
2332 {
2333 optab = optab_for_tree_code (code, vectype, optab_vector);
2334 if (optab
2335 && (optab_handler (optab, TYPE_MODE (vectype))
2336 != CODE_FOR_nothing))
2337 {
2338 scalar_shift_arg = false;
2339
2340 if (vect_print_dump_info (REPORT_DETAILS))
2341 fprintf (vect_dump, "vector/vector shift/rotate found.");
2342
2343 /* Unlike the other binary operators, shifts/rotates have
2344 an rhs of type int rather than the same type as the lhs,
2345 so make sure the scalar has the right type if we are
2346 dealing with vectors of short/char. */
2347 if (dt[1] == vect_constant_def)
2348 op1 = fold_convert (TREE_TYPE (vectype), op1);
2349 }
2350 }
2351 }
2352
2353 /* Supportable by target? */
2354 if (!optab)
2355 {
2356 if (vect_print_dump_info (REPORT_DETAILS))
2357 fprintf (vect_dump, "no optab.");
2358 return false;
2359 }
2360 vec_mode = TYPE_MODE (vectype);
2361 icode = (int) optab_handler (optab, vec_mode);
2362 if (icode == CODE_FOR_nothing)
2363 {
2364 if (vect_print_dump_info (REPORT_DETAILS))
2365 fprintf (vect_dump, "op not supported by target.");
2366 /* Check only during analysis. */
2367 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2368 || (vf < vect_min_worthwhile_factor (code)
2369 && !vec_stmt))
2370 return false;
2371 if (vect_print_dump_info (REPORT_DETAILS))
2372 fprintf (vect_dump, "proceeding using word mode.");
2373 }
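/* When there is no vector insn for this operation we may still proceed
using word mode, but the check above only allows that when a whole vector
fits in a single word (GET_MODE_SIZE (vec_mode) == UNITS_PER_WORD); the
worthwhile-factor test is applied only during analysis.  */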
2374
2375 /* Worthwhile without SIMD support? Check only during analysis. */
2376 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2377 && vf < vect_min_worthwhile_factor (code)
2378 && !vec_stmt)
2379 {
2380 if (vect_print_dump_info (REPORT_DETAILS))
2381 fprintf (vect_dump, "not worthwhile without SIMD support.");
2382 return false;
2383 }
2384
2385 if (!vec_stmt) /* transformation not required. */
2386 {
2387 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
2388 if (vect_print_dump_info (REPORT_DETAILS))
2389 fprintf (vect_dump, "=== vectorizable_shift ===");
2390 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2391 return true;
2392 }
2393
2394 /** Transform. **/
2395
2396 if (vect_print_dump_info (REPORT_DETAILS))
2397 fprintf (vect_dump, "transform binary/unary operation.");
2398
2399 /* Handle def. */
2400 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2401
2402 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2403 created in the previous stages of the recursion, so no allocation is
2404 needed, except for the case of shift with scalar shift argument. In that
2405 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2406 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2407 In case of loop-based vectorization we allocate VECs of size 1 for
2408 both operands. */
2409 if (!slp_node)
2410 {
2411 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2412 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2413 }
2414 else if (scalar_shift_arg)
2415 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2416
2417 prev_stmt_info = NULL;
2418 for (j = 0; j < ncopies; j++)
2419 {
2420 /* Handle uses. */
2421 if (j == 0)
2422 {
2423 if (scalar_shift_arg)
2424 {
2425 /* Vector shl and shr insn patterns can be defined with scalar
2426 operand 2 (shift operand). In this case, use constant or loop
2427 invariant op1 directly, without extending it to vector mode
2428 first. */
2429 optab_op2_mode = insn_data[icode].operand[2].mode;
2430 if (!VECTOR_MODE_P (optab_op2_mode))
2431 {
2432 if (vect_print_dump_info (REPORT_DETAILS))
2433 fprintf (vect_dump, "operand 1 using scalar mode.");
2434 vec_oprnd1 = op1;
2435 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2436 if (slp_node)
2437 {
2438 /* Store vec_oprnd1 for every vector stmt to be created
2439 for SLP_NODE. We check during the analysis that all
2440 the shift arguments are the same.
2441 TODO: Allow different constants for different vector
2442 stmts generated for an SLP instance. */
2443 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2444 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2445 }
2446 }
2447 }
2448
2449 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2450 (a special case for certain kind of vector shifts); otherwise,
2451 operand 1 should be of a vector type (the usual case). */
2452 if (vec_oprnd1)
2453 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2454 slp_node);
2455 else
2456 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2457 slp_node);
2458 }
2459 else
2460 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2461
2462 /* Arguments are ready. Create the new vector stmt. */
2463 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2464 {
2465 vop1 = VEC_index (tree, vec_oprnds1, i);
2466 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2467 new_temp = make_ssa_name (vec_dest, new_stmt);
2468 gimple_assign_set_lhs (new_stmt, new_temp);
2469 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2470 if (slp_node)
2471 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2472 }
2473
2474 if (slp_node)
2475 continue;
2476
2477 if (j == 0)
2478 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2479 else
2480 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2481 prev_stmt_info = vinfo_for_stmt (new_stmt);
2482 }
2483
2484 VEC_free (tree, heap, vec_oprnds0);
2485 VEC_free (tree, heap, vec_oprnds1);
2486
2487 return true;
2488 }
2489
2490
2491 /* Function vectorizable_operation.
2492
2493 Check if STMT performs a binary, unary or ternary operation that can
2494 be vectorized.
2495 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2496 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2497 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2498
2499 static bool
2500 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2501 gimple *vec_stmt, slp_tree slp_node)
2502 {
2503 tree vec_dest;
2504 tree scalar_dest;
2505 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
2506 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2507 tree vectype;
2508 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2509 enum tree_code code;
2510 enum machine_mode vec_mode;
2511 tree new_temp;
2512 int op_type;
2513 optab optab;
2514 int icode;
2515 tree def;
2516 gimple def_stmt;
2517 enum vect_def_type dt[3]
2518 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2519 gimple new_stmt = NULL;
2520 stmt_vec_info prev_stmt_info;
2521 int nunits_in;
2522 int nunits_out;
2523 tree vectype_out;
2524 int ncopies;
2525 int j, i;
2526 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vec_oprnds2 = NULL;
2527 tree vop0, vop1, vop2;
2528 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2529 int vf;
2530
2531 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2532 return false;
2533
2534 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2535 return false;
2536
2537 /* Is STMT a vectorizable binary/unary operation? */
2538 if (!is_gimple_assign (stmt))
2539 return false;
2540
2541 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2542 return false;
2543
2544 code = gimple_assign_rhs_code (stmt);
2545
2546 /* For pointer addition, we should use the normal plus for
2547 the vector addition. */
2548 if (code == POINTER_PLUS_EXPR)
2549 code = PLUS_EXPR;
2550
2551 /* Support only unary, binary and ternary operations. */
2552 op_type = TREE_CODE_LENGTH (code);
2553 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
2554 {
2555 if (vect_print_dump_info (REPORT_DETAILS))
2556 fprintf (vect_dump, "num. args = %d (not unary/binary/ternary op).",
2557 op_type);
2558 return false;
2559 }
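/* For example: NEGATE_EXPR is a unary_op, PLUS_EXPR a binary_op and
FMA_EXPR a ternary_op; any other arity is rejected here.  */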
2560
2561 scalar_dest = gimple_assign_lhs (stmt);
2562 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2563
2564 op0 = gimple_assign_rhs1 (stmt);
2565 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2566 &def_stmt, &def, &dt[0], &vectype))
2567 {
2568 if (vect_print_dump_info (REPORT_DETAILS))
2569 fprintf (vect_dump, "use not simple.");
2570 return false;
2571 }
2572 /* If op0 is an external or constant def use a vector type with
2573 the same size as the output vector type. */
2574 if (!vectype)
2575 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2576 if (vec_stmt)
2577 gcc_assert (vectype);
2578 if (!vectype)
2579 {
2580 if (vect_print_dump_info (REPORT_DETAILS))
2581 {
2582 fprintf (vect_dump, "no vectype for scalar type ");
2583 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2584 }
2585
2586 return false;
2587 }
2588
2589 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2590 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2591 if (nunits_out != nunits_in)
2592 return false;
2593
2594 if (op_type == binary_op || op_type == ternary_op)
2595 {
2596 op1 = gimple_assign_rhs2 (stmt);
2597 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2598 &dt[1]))
2599 {
2600 if (vect_print_dump_info (REPORT_DETAILS))
2601 fprintf (vect_dump, "use not simple.");
2602 return false;
2603 }
2604 }
2605 if (op_type == ternary_op)
2606 {
2607 op2 = gimple_assign_rhs3 (stmt);
2608 if (!vect_is_simple_use (op2, loop_vinfo, bb_vinfo, &def_stmt, &def,
2609 &dt[2]))
2610 {
2611 if (vect_print_dump_info (REPORT_DETAILS))
2612 fprintf (vect_dump, "use not simple.");
2613 return false;
2614 }
2615 }
2616
2617 if (loop_vinfo)
2618 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2619 else
2620 vf = 1;
2621
2622 /* Multiple types in SLP are handled by creating the appropriate number of
2623 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2624 case of SLP. */
2625 if (slp_node || PURE_SLP_STMT (stmt_info))
2626 ncopies = 1;
2627 else
2628 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2629
2630 gcc_assert (ncopies >= 1);
2631
2632 /* Shifts are handled in vectorizable_shift (). */
2633 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2634 || code == RROTATE_EXPR)
2635 return false;
2636
2637 optab = optab_for_tree_code (code, vectype, optab_default);
2638
2639 /* Supportable by target? */
2640 if (!optab)
2641 {
2642 if (vect_print_dump_info (REPORT_DETAILS))
2643 fprintf (vect_dump, "no optab.");
2644 return false;
2645 }
2646 vec_mode = TYPE_MODE (vectype);
2647 icode = (int) optab_handler (optab, vec_mode);
2648 if (icode == CODE_FOR_nothing)
2649 {
2650 if (vect_print_dump_info (REPORT_DETAILS))
2651 fprintf (vect_dump, "op not supported by target.");
2652 /* Check only during analysis. */
2653 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2654 || (vf < vect_min_worthwhile_factor (code)
2655 && !vec_stmt))
2656 return false;
2657 if (vect_print_dump_info (REPORT_DETAILS))
2658 fprintf (vect_dump, "proceeding using word mode.");
2659 }
2660
2661 /* Worthwhile without SIMD support? Check only during analysis. */
2662 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2663 && vf < vect_min_worthwhile_factor (code)
2664 && !vec_stmt)
2665 {
2666 if (vect_print_dump_info (REPORT_DETAILS))
2667 fprintf (vect_dump, "not worthwhile without SIMD support.");
2668 return false;
2669 }
2670
2671 if (!vec_stmt) /* transformation not required. */
2672 {
2673 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2674 if (vect_print_dump_info (REPORT_DETAILS))
2675 fprintf (vect_dump, "=== vectorizable_operation ===");
2676 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2677 return true;
2678 }
2679
2680 /** Transform. **/
2681
2682 if (vect_print_dump_info (REPORT_DETAILS))
2683 fprintf (vect_dump, "transform binary/unary operation.");
2684
2685 /* Handle def. */
2686 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2687
2688 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2689 created in the previous stages of the recursion, so no allocation is
2690 needed here.
2691 In case of loop-based vectorization we allocate VECs of size 1:
2692 VEC_OPRNDS0 is always needed, VEC_OPRNDS1 is allocated only for binary
2693 and ternary operations, and VEC_OPRNDS2 only for ternary
2694 operations. */
2695 if (!slp_node)
2696 {
2697 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2698 if (op_type == binary_op || op_type == ternary_op)
2699 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2700 if (op_type == ternary_op)
2701 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2702 }
2703
2704 /* In case the vectorization factor (VF) is bigger than the number
2705 of elements that we can fit in a vectype (nunits), we have to generate
2706 more than one vector stmt - i.e - we need to "unroll" the
2707 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2708 from one copy of the vector stmt to the next, in the field
2709 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2710 stages to find the correct vector defs to be used when vectorizing
2711 stmts that use the defs of the current stmt. The example below
2712 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
2713 we need to create 4 vectorized stmts):
2714
2715 before vectorization:
2716 RELATED_STMT VEC_STMT
2717 S1: x = memref - -
2718 S2: z = x + 1 - -
2719
2720 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2721 there):
2722 RELATED_STMT VEC_STMT
2723 VS1_0: vx0 = memref0 VS1_1 -
2724 VS1_1: vx1 = memref1 VS1_2 -
2725 VS1_2: vx2 = memref2 VS1_3 -
2726 VS1_3: vx3 = memref3 - -
2727 S1: x = load - VS1_0
2728 S2: z = x + 1 - -
2729
2730 step2: vectorize stmt S2 (done here):
2731 To vectorize stmt S2 we first need to find the relevant vector
2732 def for the first operand 'x'. This is, as usual, obtained from
2733 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2734 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2735 relevant vector def 'vx0'. Having found 'vx0' we can generate
2736 the vector stmt VS2_0, and as usual, record it in the
2737 STMT_VINFO_VEC_STMT of stmt S2.
2738 When creating the second copy (VS2_1), we obtain the relevant vector
2739 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2740 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2741 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2742 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2743 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2744 chain of stmts and pointers:
2745 RELATED_STMT VEC_STMT
2746 VS1_0: vx0 = memref0 VS1_1 -
2747 VS1_1: vx1 = memref1 VS1_2 -
2748 VS1_2: vx2 = memref2 VS1_3 -
2749 VS1_3: vx3 = memref3 - -
2750 S1: x = load - VS1_0
2751 VS2_0: vz0 = vx0 + v1 VS2_1 -
2752 VS2_1: vz1 = vx1 + v1 VS2_2 -
2753 VS2_2: vz2 = vx2 + v1 VS2_3 -
2754 VS2_3: vz3 = vx3 + v1 - -
2755 S2: z = x + 1 - VS2_0 */
2756
2757 prev_stmt_info = NULL;
2758 for (j = 0; j < ncopies; j++)
2759 {
2760 /* Handle uses. */
2761 if (j == 0)
2762 {
2763 if (op_type == binary_op || op_type == ternary_op)
2764 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2765 slp_node);
2766 else
2767 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2768 slp_node);
2769 if (op_type == ternary_op)
2770 {
2771 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2772 VEC_quick_push (tree, vec_oprnds2,
2773 vect_get_vec_def_for_operand (op2, stmt, NULL));
2774 }
2775 }
2776 else
2777 {
2778 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2779 if (op_type == ternary_op)
2780 {
2781 tree vec_oprnd = VEC_pop (tree, vec_oprnds2);
2782 VEC_quick_push (tree, vec_oprnds2,
2783 vect_get_vec_def_for_stmt_copy (dt[2],
2784 vec_oprnd));
2785 }
2786 }
2787
2788 /* Arguments are ready. Create the new vector stmt. */
2789 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2790 {
2791 vop1 = ((op_type == binary_op || op_type == ternary_op)
2792 ? VEC_index (tree, vec_oprnds1, i) : NULL_TREE);
2793 vop2 = ((op_type == ternary_op)
2794 ? VEC_index (tree, vec_oprnds2, i) : NULL_TREE);
2795 new_stmt = gimple_build_assign_with_ops3 (code, vec_dest,
2796 vop0, vop1, vop2);
2797 new_temp = make_ssa_name (vec_dest, new_stmt);
2798 gimple_assign_set_lhs (new_stmt, new_temp);
2799 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2800 if (slp_node)
2801 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2802 }
2803
2804 if (slp_node)
2805 continue;
2806
2807 if (j == 0)
2808 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2809 else
2810 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2811 prev_stmt_info = vinfo_for_stmt (new_stmt);
2812 }
2813
2814 VEC_free (tree, heap, vec_oprnds0);
2815 if (vec_oprnds1)
2816 VEC_free (tree, heap, vec_oprnds1);
2817 if (vec_oprnds2)
2818 VEC_free (tree, heap, vec_oprnds2);
2819
2820 return true;
2821 }
2822
2823
2824 /* Get vectorized definitions for loop-based vectorization. For the first
2825 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2826 scalar operand), and for the rest we get a copy with
2827 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2828 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2829 The vectors are collected into VEC_OPRNDS. */
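/* For instance, a demotion with one intermediate type is called with
MULTI_STEP_CVT == vect_pow2 (1) - 1 == 1 (see vectorizable_type_demotion),
so two levels of recursion run and four vector defs are pushed onto
VEC_OPRNDS per copy.  */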
2830
2831 static void
2832 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2833 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2834 {
2835 tree vec_oprnd;
2836
2837 /* Get first vector operand. */
2838 /* All the vector operands except the very first one (that is the scalar
2839 oprnd) are stmt copies. */
2840 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2841 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2842 else
2843 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2844
2845 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2846
2847 /* Get second vector operand. */
2848 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2849 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2850
2851 *oprnd = vec_oprnd;
2852
2853 /* For conversion in multiple steps, continue to get operands
2854 recursively. */
2855 if (multi_step_cvt)
2856 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2857 }
2858
2859
2860 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2861 For multi-step conversions store the resulting vectors and call the function
2862 recursively. */
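/* For example (a sketch): demoting int elements to char via short with
128-bit vectors combines four V4SI operands pairwise into two V8HI vectors,
and then combines those two into a single V16QI vector; each recursion
level halves the number of operand vectors.  */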
2863
2864 static void
2865 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2866 int multi_step_cvt, gimple stmt,
2867 VEC (tree, heap) *vec_dsts,
2868 gimple_stmt_iterator *gsi,
2869 slp_tree slp_node, enum tree_code code,
2870 stmt_vec_info *prev_stmt_info)
2871 {
2872 unsigned int i;
2873 tree vop0, vop1, new_tmp, vec_dest;
2874 gimple new_stmt;
2875 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2876
2877 vec_dest = VEC_pop (tree, vec_dsts);
2878
2879 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2880 {
2881 /* Create demotion operation. */
2882 vop0 = VEC_index (tree, *vec_oprnds, i);
2883 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2884 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2885 new_tmp = make_ssa_name (vec_dest, new_stmt);
2886 gimple_assign_set_lhs (new_stmt, new_tmp);
2887 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2888
2889 if (multi_step_cvt)
2890 /* Store the resulting vector for next recursive call. */
2891 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2892 else
2893 {
2894 /* This is the last step of the conversion sequence. Store the
2895 vectors in SLP_NODE or in vector info of the scalar statement
2896 (or in STMT_VINFO_RELATED_STMT chain). */
2897 if (slp_node)
2898 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2899 else
2900 {
2901 if (!*prev_stmt_info)
2902 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2903 else
2904 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2905
2906 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2907 }
2908 }
2909 }
2910
2911 /* For multi-step demotion operations we first generate demotion operations
2912 from the source type to the intermediate types, and then combine the
2913 results (stored in VEC_OPRNDS) with a demotion operation to the
2914 destination type. */
2915 if (multi_step_cvt)
2916 {
2917 /* At each level of recursion we have half of the operands we had at the
2918 previous level. */
2919 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2920 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2921 stmt, vec_dsts, gsi, slp_node,
2922 code, prev_stmt_info);
2923 }
2924 }
2925
2926
2927 /* Function vectorizable_type_demotion
2928
2929 Check if STMT performs a binary or unary operation that involves
2930 type demotion, and if it can be vectorized.
2931 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2932 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2933 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2934
2935 static bool
2936 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2937 gimple *vec_stmt, slp_tree slp_node)
2938 {
2939 tree vec_dest;
2940 tree scalar_dest;
2941 tree op0;
2942 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2943 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2944 enum tree_code code, code1 = ERROR_MARK;
2945 tree def;
2946 gimple def_stmt;
2947 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2948 stmt_vec_info prev_stmt_info;
2949 int nunits_in;
2950 int nunits_out;
2951 tree vectype_out;
2952 int ncopies;
2953 int j, i;
2954 tree vectype_in;
2955 int multi_step_cvt = 0;
2956 VEC (tree, heap) *vec_oprnds0 = NULL;
2957 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2958 tree last_oprnd, intermediate_type;
2959
2960 /* FORNOW: not supported by basic block SLP vectorization. */
2961 gcc_assert (loop_vinfo);
2962
2963 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2964 return false;
2965
2966 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2967 return false;
2968
2969 /* Is STMT a vectorizable type-demotion operation? */
2970 if (!is_gimple_assign (stmt))
2971 return false;
2972
2973 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2974 return false;
2975
2976 code = gimple_assign_rhs_code (stmt);
2977 if (!CONVERT_EXPR_CODE_P (code))
2978 return false;
2979
2980 scalar_dest = gimple_assign_lhs (stmt);
2981 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2982
2983 /* Check the operands of the operation. */
2984 op0 = gimple_assign_rhs1 (stmt);
2985 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2986 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2987 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2988 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2989 && CONVERT_EXPR_CODE_P (code))))
2990 return false;
2991 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2992 &def_stmt, &def, &dt[0], &vectype_in))
2993 {
2994 if (vect_print_dump_info (REPORT_DETAILS))
2995 fprintf (vect_dump, "use not simple.");
2996 return false;
2997 }
2998 /* If op0 is an external def use a vector type with the
2999 same size as the output vector type if possible. */
3000 if (!vectype_in)
3001 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
3002 if (vec_stmt)
3003 gcc_assert (vectype_in);
3004 if (!vectype_in)
3005 {
3006 if (vect_print_dump_info (REPORT_DETAILS))
3007 {
3008 fprintf (vect_dump, "no vectype for scalar type ");
3009 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
3010 }
3011
3012 return false;
3013 }
3014
3015 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3016 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3017 if (nunits_in >= nunits_out)
3018 return false;
3019
3020 /* Multiple types in SLP are handled by creating the appropriate number of
3021 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3022 case of SLP. */
3023 if (slp_node || PURE_SLP_STMT (stmt_info))
3024 ncopies = 1;
3025 else
3026 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3027 gcc_assert (ncopies >= 1);
3028
3029 /* Supportable by target? */
3030 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
3031 &code1, &multi_step_cvt, &interm_types))
3032 return false;
3033
3034 if (!vec_stmt) /* transformation not required. */
3035 {
3036 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3037 if (vect_print_dump_info (REPORT_DETAILS))
3038 fprintf (vect_dump, "=== vectorizable_demotion ===");
3039 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
3040 return true;
3041 }
3042
3043 /** Transform. **/
3044 if (vect_print_dump_info (REPORT_DETAILS))
3045 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
3046 ncopies);
3047
3048 /* In case of multi-step demotion, we first generate demotion operations to
3049 the intermediate types, and then from those types to the final one.
3050 We create vector destinations for the intermediate types (TYPES) received
3051 from supportable_narrowing_operation, and store them in the correct order
3052 for future use in vect_create_vectorized_demotion_stmts(). */
3053 if (multi_step_cvt)
3054 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3055 else
3056 vec_dsts = VEC_alloc (tree, heap, 1);
3057
3058 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3059 VEC_quick_push (tree, vec_dsts, vec_dest);
3060
3061 if (multi_step_cvt)
3062 {
3063 for (i = VEC_length (tree, interm_types) - 1;
3064 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3065 {
3066 vec_dest = vect_create_destination_var (scalar_dest,
3067 intermediate_type);
3068 VEC_quick_push (tree, vec_dsts, vec_dest);
3069 }
3070 }
3071
3072 /* In case the vectorization factor (VF) is bigger than the number
3073 of elements that we can fit in a vectype (nunits), we have to generate
3074 more than one vector stmt - i.e - we need to "unroll" the
3075 vector stmt by a factor VF/nunits. */
3076 last_oprnd = op0;
3077 prev_stmt_info = NULL;
3078 for (j = 0; j < ncopies; j++)
3079 {
3080 /* Handle uses. */
3081 if (slp_node)
3082 vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
3083 else
3084 {
3085 VEC_free (tree, heap, vec_oprnds0);
3086 vec_oprnds0 = VEC_alloc (tree, heap,
3087 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
3088 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
3089 vect_pow2 (multi_step_cvt) - 1);
3090 }
3091
3092 /* Arguments are ready. Create the new vector stmts. */
3093 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3094 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
3095 multi_step_cvt, stmt, tmp_vec_dsts,
3096 gsi, slp_node, code1,
3097 &prev_stmt_info);
3098 }
3099
3100 VEC_free (tree, heap, vec_oprnds0);
3101 VEC_free (tree, heap, vec_dsts);
3102 VEC_free (tree, heap, tmp_vec_dsts);
3103 VEC_free (tree, heap, interm_types);
3104
3105 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3106 return true;
3107 }
3108
3109
3110 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3111 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3112 the resulting vectors and call the function recursively. */
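/* For example (a sketch): promoting char elements to int via short with
128-bit vectors widens each V16QI operand into two V8HI vectors (the two
halves of the widened result), and then widens each of those into two V4SI
vectors; each recursion level doubles the number of result vectors.  */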
3113
3114 static void
3115 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
3116 VEC (tree, heap) **vec_oprnds1,
3117 int multi_step_cvt, gimple stmt,
3118 VEC (tree, heap) *vec_dsts,
3119 gimple_stmt_iterator *gsi,
3120 slp_tree slp_node, enum tree_code code1,
3121 enum tree_code code2, tree decl1,
3122 tree decl2, int op_type,
3123 stmt_vec_info *prev_stmt_info)
3124 {
3125 int i;
3126 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
3127 gimple new_stmt1, new_stmt2;
3128 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3129 VEC (tree, heap) *vec_tmp;
3130
3131 vec_dest = VEC_pop (tree, vec_dsts);
3132 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
3133
3134 FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
3135 {
3136 if (op_type == binary_op)
3137 vop1 = VEC_index (tree, *vec_oprnds1, i);
3138 else
3139 vop1 = NULL_TREE;
3140
3141 /* Generate the two halves of promotion operation. */
3142 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3143 op_type, vec_dest, gsi, stmt);
3144 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3145 op_type, vec_dest, gsi, stmt);
3146 if (is_gimple_call (new_stmt1))
3147 {
3148 new_tmp1 = gimple_call_lhs (new_stmt1);
3149 new_tmp2 = gimple_call_lhs (new_stmt2);
3150 }
3151 else
3152 {
3153 new_tmp1 = gimple_assign_lhs (new_stmt1);
3154 new_tmp2 = gimple_assign_lhs (new_stmt2);
3155 }
3156
3157 if (multi_step_cvt)
3158 {
3159 /* Store the results for the recursive call. */
3160 VEC_quick_push (tree, vec_tmp, new_tmp1);
3161 VEC_quick_push (tree, vec_tmp, new_tmp2);
3162 }
3163 else
3164 {
3165 /* Last step of the promotion sequence - store the results. */
3166 if (slp_node)
3167 {
3168 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
3169 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
3170 }
3171 else
3172 {
3173 if (!*prev_stmt_info)
3174 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
3175 else
3176 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
3177
3178 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
3179 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
3180 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
3181 }
3182 }
3183 }
3184
3185 if (multi_step_cvt)
3186 {
3187 /* For a multi-step promotion operation we call the function
3188 recursively for every stage. We start from the input type,
3189 create promotion operations to the intermediate types, and then
3190 create promotions to the output type. */
3191 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
3192 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
3193 multi_step_cvt - 1, stmt,
3194 vec_dsts, gsi, slp_node, code1,
3195 code2, decl1, decl2, op_type,
3196 prev_stmt_info);
3197 }
3198
3199 VEC_free (tree, heap, vec_tmp);
3200 }
3201
3202
3203 /* Function vectorizable_type_promotion
3204
3205 Check if STMT performs a binary or unary operation that involves
3206 type promotion, and if it can be vectorized.
3207 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3208 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3209 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3210
3211 static bool
3212 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
3213 gimple *vec_stmt, slp_tree slp_node)
3214 {
3215 tree vec_dest;
3216 tree scalar_dest;
3217 tree op0, op1 = NULL;
3218 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
3219 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3220 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3221 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3222 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3223 int op_type;
3224 tree def;
3225 gimple def_stmt;
3226 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3227 stmt_vec_info prev_stmt_info;
3228 int nunits_in;
3229 int nunits_out;
3230 tree vectype_out;
3231 int ncopies;
3232 int j, i;
3233 tree vectype_in;
3234 tree intermediate_type = NULL_TREE;
3235 int multi_step_cvt = 0;
3236 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
3237 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
3238
3239 /* FORNOW: not supported by basic block SLP vectorization. */
3240 gcc_assert (loop_vinfo);
3241
3242 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3243 return false;
3244
3245 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3246 return false;
3247
3248 /* Is STMT a vectorizable type-promotion operation? */
3249 if (!is_gimple_assign (stmt))
3250 return false;
3251
3252 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3253 return false;
3254
3255 code = gimple_assign_rhs_code (stmt);
3256 if (!CONVERT_EXPR_CODE_P (code)
3257 && code != WIDEN_MULT_EXPR)
3258 return false;
3259
3260 scalar_dest = gimple_assign_lhs (stmt);
3261 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3262
3263 /* Check the operands of the operation. */
3264 op0 = gimple_assign_rhs1 (stmt);
3265 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
3266 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3267 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
3268 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
3269 && CONVERT_EXPR_CODE_P (code))))
3270 return false;
3271 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
3272 &def_stmt, &def, &dt[0], &vectype_in))
3273 {
3274 if (vect_print_dump_info (REPORT_DETAILS))
3275 fprintf (vect_dump, "use not simple.");
3276 return false;
3277 }
3278
3279 op_type = TREE_CODE_LENGTH (code);
3280 if (op_type == binary_op)
3281 {
3282 bool ok;
3283
3284 op1 = gimple_assign_rhs2 (stmt);
3285 if (code == WIDEN_MULT_EXPR)
3286 {
3287 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3288 OP1. */
3289 if (CONSTANT_CLASS_P (op0))
3290 ok = vect_is_simple_use_1 (op1, loop_vinfo, NULL,
3291 &def_stmt, &def, &dt[1], &vectype_in);
3292 else
3293 ok = vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def,
3294 &dt[1]);
3295
3296 if (!ok)
3297 {
3298 if (vect_print_dump_info (REPORT_DETAILS))
3299 fprintf (vect_dump, "use not simple.");
3300 return false;
3301 }
3302 }
3303 }
3304
3305 /* If op0 is an external or constant def use a vector type with
3306 the same size as the output vector type. */
3307 if (!vectype_in)
3308 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
3309 if (vec_stmt)
3310 gcc_assert (vectype_in);
3311 if (!vectype_in)
3312 {
3313 if (vect_print_dump_info (REPORT_DETAILS))
3314 {
3315 fprintf (vect_dump, "no vectype for scalar type ");
3316 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
3317 }
3318
3319 return false;
3320 }
3321
3322 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3323 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3324 if (nunits_in <= nunits_out)
3325 return false;
3326
3327 /* Multiple types in SLP are handled by creating the appropriate number of
3328 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3329 case of SLP. */
3330 if (slp_node || PURE_SLP_STMT (stmt_info))
3331 ncopies = 1;
3332 else
3333 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3334
3335 gcc_assert (ncopies >= 1);
3336
3337 /* Supportable by target? */
3338 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3339 &decl1, &decl2, &code1, &code2,
3340 &multi_step_cvt, &interm_types))
3341 return false;
3342
3343 /* Binary widening operation can only be supported directly by the
3344 architecture. */
3345 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3346
3347 if (!vec_stmt) /* transformation not required. */
3348 {
3349 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3350 if (vect_print_dump_info (REPORT_DETAILS))
3351 fprintf (vect_dump, "=== vectorizable_promotion ===");
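/* Each scalar stmt is widened into two vector stmts (the low and high
halves of the result), hence the cost of 2*ncopies.  */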
3352 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
3353 return true;
3354 }
3355
3356 /** Transform. **/
3357
3358 if (vect_print_dump_info (REPORT_DETAILS))
3359 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
3360 ncopies);
3361
3362 if (code == WIDEN_MULT_EXPR)
3363 {
3364 if (CONSTANT_CLASS_P (op0))
3365 op0 = fold_convert (TREE_TYPE (op1), op0);
3366 else if (CONSTANT_CLASS_P (op1))
3367 op1 = fold_convert (TREE_TYPE (op0), op1);
3368 }
3369
3370 /* Handle def. */
3371 /* In case of multi-step promotion, we first generate promotion operations
3372 to the intermediate types, and then from those types to the final one.
3373 We store the vector destinations in VEC_DSTS in the correct order for
3374 recursive creation of promotion operations in
3375 vect_create_vectorized_promotion_stmts(). Vector destinations are created
3376 according to TYPES received from supportable_widening_operation(). */
3377 if (multi_step_cvt)
3378 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3379 else
3380 vec_dsts = VEC_alloc (tree, heap, 1);
3381
3382 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3383 VEC_quick_push (tree, vec_dsts, vec_dest);
3384
3385 if (multi_step_cvt)
3386 {
3387 for (i = VEC_length (tree, interm_types) - 1;
3388 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3389 {
3390 vec_dest = vect_create_destination_var (scalar_dest,
3391 intermediate_type);
3392 VEC_quick_push (tree, vec_dsts, vec_dest);
3393 }
3394 }
3395
3396 if (!slp_node)
3397 {
3398 vec_oprnds0 = VEC_alloc (tree, heap,
3399 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3400 if (op_type == binary_op)
3401 vec_oprnds1 = VEC_alloc (tree, heap, 1);
3402 }
3403
3404 /* In case the vectorization factor (VF) is bigger than the number
3405 of elements that we can fit in a vectype (nunits), we have to generate
3406 more than one vector stmt - i.e - we need to "unroll" the
3407 vector stmt by a factor VF/nunits. */
3408
3409 prev_stmt_info = NULL;
3410 for (j = 0; j < ncopies; j++)
3411 {
3412 /* Handle uses. */
3413 if (j == 0)
3414 {
3415 if (slp_node)
3416 vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
3417 &vec_oprnds1, -1);
3418 else
3419 {
3420 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3421 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
3422 if (op_type == binary_op)
3423 {
3424 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3425 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
3426 }
3427 }
3428 }
3429 else
3430 {
3431 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3432 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
3433 if (op_type == binary_op)
3434 {
3435 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3436 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3437 }
3438 }
3439
3440 /* Arguments are ready. Create the new vector stmts. */
3441 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3442 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
3443 multi_step_cvt, stmt,
3444 tmp_vec_dsts,
3445 gsi, slp_node, code1, code2,
3446 decl1, decl2, op_type,
3447 &prev_stmt_info);
3448 }
3449
3450 VEC_free (tree, heap, vec_dsts);
3451 VEC_free (tree, heap, tmp_vec_dsts);
3452 VEC_free (tree, heap, interm_types);
3453 VEC_free (tree, heap, vec_oprnds0);
3454 VEC_free (tree, heap, vec_oprnds1);
3455
3456 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3457 return true;
3458 }
3459
3460
3461 /* Function vectorizable_store.
3462
3463 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
3464 can be vectorized.
3465 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3466 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3467 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3468
3469 static bool
3470 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3471 slp_tree slp_node)
3472 {
3473 tree scalar_dest;
3474 tree data_ref;
3475 tree op;
3476 tree vec_oprnd = NULL_TREE;
3477 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3478 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3479 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3480 tree elem_type;
3481 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3482 struct loop *loop = NULL;
3483 enum machine_mode vec_mode;
3484 tree dummy;
3485 enum dr_alignment_support alignment_support_scheme;
3486 tree def;
3487 gimple def_stmt;
3488 enum vect_def_type dt;
3489 stmt_vec_info prev_stmt_info = NULL;
3490 tree dataref_ptr = NULL_TREE;
3491 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3492 int ncopies;
3493 int j;
3494 gimple next_stmt, first_stmt = NULL;
3495 bool strided_store = false;
3496 bool store_lanes_p = false;
3497 unsigned int group_size, i;
3498 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3499 bool inv_p;
3500 VEC(tree,heap) *vec_oprnds = NULL;
3501 bool slp = (slp_node != NULL);
3502 unsigned int vec_num;
3503 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3504 tree aggr_type;
3505
3506 if (loop_vinfo)
3507 loop = LOOP_VINFO_LOOP (loop_vinfo);
3508
3509 /* Multiple types in SLP are handled by creating the appropriate number of
3510 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3511 case of SLP. */
3512 if (slp || PURE_SLP_STMT (stmt_info))
3513 ncopies = 1;
3514 else
3515 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3516
3517 gcc_assert (ncopies >= 1);
3518
3519 /* FORNOW. This restriction should be relaxed. */
3520 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3521 {
3522 if (vect_print_dump_info (REPORT_DETAILS))
3523 fprintf (vect_dump, "multiple types in nested loop.");
3524 return false;
3525 }
3526
3527 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3528 return false;
3529
3530 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3531 return false;
3532
3533 /* Is vectorizable store? */
3534
3535 if (!is_gimple_assign (stmt))
3536 return false;
3537
3538 scalar_dest = gimple_assign_lhs (stmt);
3539 if (TREE_CODE (scalar_dest) != ARRAY_REF
3540 && TREE_CODE (scalar_dest) != INDIRECT_REF
3541 && TREE_CODE (scalar_dest) != COMPONENT_REF
3542 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3543 && TREE_CODE (scalar_dest) != REALPART_EXPR
3544 && TREE_CODE (scalar_dest) != MEM_REF)
3545 return false;
3546
3547 gcc_assert (gimple_assign_single_p (stmt));
3548 op = gimple_assign_rhs1 (stmt);
3549 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3550 {
3551 if (vect_print_dump_info (REPORT_DETAILS))
3552 fprintf (vect_dump, "use not simple.");
3553 return false;
3554 }
3555
3556 /* The scalar rhs type needs to be trivially convertible to the vector
3557 component type. This should always be the case. */
3558 elem_type = TREE_TYPE (vectype);
3559 if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
3560 {
3561 if (vect_print_dump_info (REPORT_DETAILS))
3562 fprintf (vect_dump, "??? operands of different types");
3563 return false;
3564 }
3565
3566 vec_mode = TYPE_MODE (vectype);
3567 /* FORNOW. In some cases can vectorize even if data-type not supported
3568 (e.g. - array initialization with 0). */
3569 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3570 return false;
3571
3572 if (!STMT_VINFO_DATA_REF (stmt_info))
3573 return false;
3574
3575 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
3576 {
3577 if (vect_print_dump_info (REPORT_DETAILS))
3578 fprintf (vect_dump, "negative step for store.");
3579 return false;
3580 }
3581
3582 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3583 {
3584 strided_store = true;
3585 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
3586 if (!slp && !PURE_SLP_STMT (stmt_info))
3587 {
3588 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
3589 if (vect_store_lanes_supported (vectype, group_size))
3590 store_lanes_p = true;
3591 else if (!vect_strided_store_supported (vectype, group_size))
3592 return false;
3593 }
3594
3595 if (first_stmt == stmt)
3596 {
3597 /* STMT is the leader of the group. Check the operands of all the
3598 stmts of the group. */
3599 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
3600 while (next_stmt)
3601 {
3602 gcc_assert (gimple_assign_single_p (next_stmt));
3603 op = gimple_assign_rhs1 (next_stmt);
3604 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3605 &def, &dt))
3606 {
3607 if (vect_print_dump_info (REPORT_DETAILS))
3608 fprintf (vect_dump, "use not simple.");
3609 return false;
3610 }
3611 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
3612 }
3613 }
3614 }
3615
3616 if (!vec_stmt) /* transformation not required. */
3617 {
3618 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3619 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt, NULL);
3620 return true;
3621 }
3622
3623 /** Transform. **/
3624
3625 if (strided_store)
3626 {
3627 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3628 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
3629
3630 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3631
3632 /* FORNOW */
3633 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3634
3635 /* We vectorize all the stmts of the interleaving group when we
3636 reach the last stmt in the group. */
3637 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3638 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
3639 && !slp)
3640 {
3641 *vec_stmt = NULL;
3642 return true;
3643 }
3644
3645 if (slp)
3646 {
3647 strided_store = false;
3648 /* VEC_NUM is the number of vect stmts to be created for this
3649 group. */
3650 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3651 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3652 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3653 }
3654 else
3655 /* VEC_NUM is the number of vect stmts to be created for this
3656 group. */
3657 vec_num = group_size;
3658 }
3659 else
3660 {
3661 first_stmt = stmt;
3662 first_dr = dr;
3663 group_size = vec_num = 1;
3664 }
3665
3666 if (vect_print_dump_info (REPORT_DETAILS))
3667 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3668
3669 dr_chain = VEC_alloc (tree, heap, group_size);
3670 oprnds = VEC_alloc (tree, heap, group_size);
3671
3672 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3673 gcc_assert (alignment_support_scheme);
3674 /* Targets with store-lane instructions must not require explicit
3675 realignment. */
3676 gcc_assert (!store_lanes_p
3677 || alignment_support_scheme == dr_aligned
3678 || alignment_support_scheme == dr_unaligned_supported);
3679
3680 if (store_lanes_p)
3681 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
3682 else
3683 aggr_type = vectype;
3684
3685 /* In case the vectorization factor (VF) is bigger than the number
3686 of elements that we can fit in a vectype (nunits), we have to generate
3687 more than one vector stmt - i.e - we need to "unroll" the
3688 vector stmt by a factor VF/nunits. For more details see documentation in
3689 vect_get_vec_def_for_copy_stmt. */
3690
3691 /* In case of interleaving (non-unit strided access):
3692
3693 S1: &base + 2 = x2
3694 S2: &base = x0
3695 S3: &base + 1 = x1
3696 S4: &base + 3 = x3
3697
3698     We create vectorized stores starting from the base address (the access of
3699     the first stmt in the chain - S2 in the above example) when the last store
3700     stmt of the chain (S4) is reached:
3701
3702 VS1: &base = vx2
3703 VS2: &base + vec_size*1 = vx0
3704 VS3: &base + vec_size*2 = vx1
3705 VS4: &base + vec_size*3 = vx3
3706
3707 Then permutation statements are generated:
3708
3709 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3710 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3711 ...
3712
3713 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3714 (the order of the data-refs in the output of vect_permute_store_chain
3715 corresponds to the order of scalar stmts in the interleaving chain - see
3716 the documentation of vect_permute_store_chain()).
3717
3718     In case of both multiple types and interleaving, the above vector stores and
3719 permutation stmts are created for every copy. The result vector stmts are
3720 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3721 STMT_VINFO_RELATED_STMT for the next copies.
3722 */
3723
3724 prev_stmt_info = NULL;
3725 for (j = 0; j < ncopies; j++)
3726 {
3727 gimple new_stmt;
3728 gimple ptr_incr;
3729
3730 if (j == 0)
3731 {
3732 if (slp)
3733 {
3734 /* Get vectorized arguments for SLP_NODE. */
3735 vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
3736 NULL, -1);
3737
3738 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3739 }
3740 else
3741 {
3742 /* For interleaved stores we collect vectorized defs for all the
3743 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3744 used as an input to vect_permute_store_chain(), and OPRNDS as
3745 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3746
3747 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3748 OPRNDS are of size 1. */
3749 next_stmt = first_stmt;
3750 for (i = 0; i < group_size; i++)
3751 {
3752 /* Since gaps are not supported for interleaved stores,
3753 GROUP_SIZE is the exact number of stmts in the chain.
3754 Therefore, NEXT_STMT can't be NULL.  In case that
3755 there is no interleaving, GROUP_SIZE is 1, and only one
3756 iteration of the loop will be executed. */
3757 gcc_assert (next_stmt
3758 && gimple_assign_single_p (next_stmt));
3759 op = gimple_assign_rhs1 (next_stmt);
3760
3761 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3762 NULL);
3763 VEC_quick_push(tree, dr_chain, vec_oprnd);
3764 VEC_quick_push(tree, oprnds, vec_oprnd);
3765 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
3766 }
3767 }
3768
3769 /* We should have caught mismatched types earlier. */
3770 gcc_assert (useless_type_conversion_p (vectype,
3771 TREE_TYPE (vec_oprnd)));
3772 dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, NULL,
3773 NULL_TREE, &dummy, gsi,
3774 &ptr_incr, false, &inv_p);
3775 gcc_assert (bb_vinfo || !inv_p);
3776 }
3777 else
3778 {
3779 /* For interleaved stores we created vectorized defs for all the
3780 defs stored in OPRNDS in the previous iteration (previous copy).
3781 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3782 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3783 next copy.
3784 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3785 OPRNDS are of size 1. */
3786 for (i = 0; i < group_size; i++)
3787 {
3788 op = VEC_index (tree, oprnds, i);
3789 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3790 &dt);
3791 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3792 VEC_replace(tree, dr_chain, i, vec_oprnd);
3793 VEC_replace(tree, oprnds, i, vec_oprnd);
3794 }
3795 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3796 TYPE_SIZE_UNIT (aggr_type));
3797 }
3798
3799 if (store_lanes_p)
3800 {
3801 tree vec_array;
3802
3803 /* Combine all the vectors into an array. */
3804 vec_array = create_vector_array (vectype, vec_num);
3805 for (i = 0; i < vec_num; i++)
3806 {
3807 vec_oprnd = VEC_index (tree, dr_chain, i);
3808 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
3809 }
3810
3811 /* Emit:
3812 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
3813 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
3814 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
3815 gimple_call_set_lhs (new_stmt, data_ref);
3816 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3817 mark_symbols_for_renaming (new_stmt);
3818 }
3819 else
3820 {
3821 new_stmt = NULL;
3822 if (strided_store)
3823 {
3824 result_chain = VEC_alloc (tree, heap, group_size);
3825 /* Permute. */
3826 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3827 &result_chain);
3828 }
3829
3830 next_stmt = first_stmt;
3831 for (i = 0; i < vec_num; i++)
3832 {
3833 struct ptr_info_def *pi;
3834
3835 if (i > 0)
3836 /* Bump the vector pointer. */
3837 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
3838 stmt, NULL_TREE);
3839
3840 if (slp)
3841 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3842 else if (strided_store)
3843 /* For strided stores vectorized defs are interleaved in
3844 vect_permute_store_chain(). */
3845 vec_oprnd = VEC_index (tree, result_chain, i);
3846
3847 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
3848 build_int_cst (reference_alias_ptr_type
3849 (DR_REF (first_dr)), 0));
3850 pi = get_ptr_info (dataref_ptr);
3851 pi->align = TYPE_ALIGN_UNIT (vectype);
3852 if (aligned_access_p (first_dr))
3853 pi->misalign = 0;
3854 else if (DR_MISALIGNMENT (first_dr) == -1)
3855 {
3856 TREE_TYPE (data_ref)
3857 = build_aligned_type (TREE_TYPE (data_ref),
3858 TYPE_ALIGN (elem_type));
3859 pi->align = TYPE_ALIGN_UNIT (elem_type);
3860 pi->misalign = 0;
3861 }
3862 else
3863 {
3864 TREE_TYPE (data_ref)
3865 = build_aligned_type (TREE_TYPE (data_ref),
3866 TYPE_ALIGN (elem_type));
3867 pi->misalign = DR_MISALIGNMENT (first_dr);
3868 }
3869
3870 /* Arguments are ready. Create the new vector stmt. */
3871 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3872 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3873 mark_symbols_for_renaming (new_stmt);
3874
3875 if (slp)
3876 continue;
3877
3878 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
3879 if (!next_stmt)
3880 break;
3881 }
3882 }
3883 if (!slp)
3884 {
3885 if (j == 0)
3886 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3887 else
3888 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3889 prev_stmt_info = vinfo_for_stmt (new_stmt);
3890 }
3891 }
3892
3893 VEC_free (tree, heap, dr_chain);
3894 VEC_free (tree, heap, oprnds);
3895 if (result_chain)
3896 VEC_free (tree, heap, result_chain);
3897 if (vec_oprnds)
3898 VEC_free (tree, heap, vec_oprnds);
3899
3900 return true;
3901 }
3902
3903 /* Given a vector type VECTYPE, return a builtin DECL to be used
3904 for vector permutation, and store into *MASK a mask that implements
3905 reversal of the vector elements. If that is impossible to do,
3906 return NULL (and leave *MASK unchanged). */
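   /* For a four-element vector, for instance, the permutation selects the
      elements in the order 3, 2, 1, 0 (purely illustrative; the actual mask
      representation depends on the target's builtin).  */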
3907
3908 static tree
3909 perm_mask_for_reverse (tree vectype, tree *mask)
3910 {
3911 tree builtin_decl;
3912 tree mask_element_type, mask_type;
3913 tree mask_vec = NULL;
3914 int i;
3915 int nunits;
3916 if (!targetm.vectorize.builtin_vec_perm)
3917 return NULL;
3918
3919 builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
3920 &mask_element_type);
3921 if (!builtin_decl || !mask_element_type)
3922 return NULL;
3923
3924 mask_type = get_vectype_for_scalar_type (mask_element_type);
3925 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3926 if (!mask_type
3927 || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
3928 return NULL;
3929
3930 for (i = 0; i < nunits; i++)
3931 mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
3932 mask_vec = build_vector (mask_type, mask_vec);
3933
3934 if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
3935 return NULL;
3936 if (mask)
3937 *mask = mask_vec;
3938 return builtin_decl;
3939 }
3940
3941 /* Given a vector variable X that was generated for the scalar LHS of
3942 STMT, generate instructions to reverse the vector elements of X,
3943 insert them at *GSI, and return the permuted vector variable. */
3944
3945 static tree
3946 reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
3947 {
3948 tree vectype = TREE_TYPE (x);
3949 tree mask_vec, builtin_decl;
3950 tree perm_dest, data_ref;
3951 gimple perm_stmt;
3952
3953 builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);
3954
3955 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3956
3957 /* Generate the permute statement. */
3958 perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
3959 if (!useless_type_conversion_p (vectype,
3960 TREE_TYPE (TREE_TYPE (builtin_decl))))
3961 {
3962 tree tem = create_tmp_reg (TREE_TYPE (TREE_TYPE (builtin_decl)), NULL);
3963 tem = make_ssa_name (tem, perm_stmt);
3964 gimple_call_set_lhs (perm_stmt, tem);
3965 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3966 perm_stmt = gimple_build_assign (NULL_TREE,
3967 build1 (VIEW_CONVERT_EXPR,
3968 vectype, tem));
3969 }
3970 data_ref = make_ssa_name (perm_dest, perm_stmt);
3971 gimple_set_lhs (perm_stmt, data_ref);
3972 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3973
3974 return data_ref;
3975 }
3976
3977 /* vectorizable_load.
3978
3979    Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3980 can be vectorized.
3981 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3982 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3983 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
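   /* A typical candidate (illustrative scalar GIMPLE only, names made up for
      the example) is a load such as
          x_3 = a[i_7];
      i.e. an assignment whose rhs is a memory reference and whose lhs is an
      SSA name.  */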
3984
3985 static bool
3986 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3987 slp_tree slp_node, slp_instance slp_node_instance)
3988 {
3989 tree scalar_dest;
3990 tree vec_dest = NULL;
3991 tree data_ref = NULL;
3992 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3993 stmt_vec_info prev_stmt_info;
3994 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3995 struct loop *loop = NULL;
3996 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3997 bool nested_in_vect_loop = false;
3998 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3999 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4000 tree elem_type;
4001 tree new_temp;
4002 enum machine_mode mode;
4003 gimple new_stmt = NULL;
4004 tree dummy;
4005 enum dr_alignment_support alignment_support_scheme;
4006 tree dataref_ptr = NULL_TREE;
4007 gimple ptr_incr;
4008 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4009 int ncopies;
4010 int i, j, group_size;
4011 tree msq = NULL_TREE, lsq;
4012 tree offset = NULL_TREE;
4013 tree realignment_token = NULL_TREE;
4014 gimple phi = NULL;
4015 VEC(tree,heap) *dr_chain = NULL;
4016 bool strided_load = false;
4017 bool load_lanes_p = false;
4018 gimple first_stmt;
4019 tree scalar_type;
4020 bool inv_p;
4021 bool negative;
4022 bool compute_in_loop = false;
4023 struct loop *at_loop;
4024 int vec_num;
4025 bool slp = (slp_node != NULL);
4026 bool slp_perm = false;
4027 enum tree_code code;
4028 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4029 int vf;
4030 tree aggr_type;
4031
4032 if (loop_vinfo)
4033 {
4034 loop = LOOP_VINFO_LOOP (loop_vinfo);
4035 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4036 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4037 }
4038 else
4039 vf = 1;
4040
4041 /* Multiple types in SLP are handled by creating the appropriate number of
4042 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4043 case of SLP. */
4044 if (slp || PURE_SLP_STMT (stmt_info))
4045 ncopies = 1;
4046 else
4047 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4048
4049 gcc_assert (ncopies >= 1);
4050
4051 /* FORNOW. This restriction should be relaxed. */
4052 if (nested_in_vect_loop && ncopies > 1)
4053 {
4054 if (vect_print_dump_info (REPORT_DETAILS))
4055 fprintf (vect_dump, "multiple types in nested loop.");
4056 return false;
4057 }
4058
4059 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4060 return false;
4061
4062 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4063 return false;
4064
4065 /* Is vectorizable load? */
4066 if (!is_gimple_assign (stmt))
4067 return false;
4068
4069 scalar_dest = gimple_assign_lhs (stmt);
4070 if (TREE_CODE (scalar_dest) != SSA_NAME)
4071 return false;
4072
4073 code = gimple_assign_rhs_code (stmt);
4074 if (code != ARRAY_REF
4075 && code != INDIRECT_REF
4076 && code != COMPONENT_REF
4077 && code != IMAGPART_EXPR
4078 && code != REALPART_EXPR
4079 && code != MEM_REF)
4080 return false;
4081
4082 if (!STMT_VINFO_DATA_REF (stmt_info))
4083 return false;
4084
4085 negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
4086 if (negative && ncopies > 1)
4087 {
4088 if (vect_print_dump_info (REPORT_DETAILS))
4089 fprintf (vect_dump, "multiple types with negative step.");
4090 return false;
4091 }
4092
4093 scalar_type = TREE_TYPE (DR_REF (dr));
4094 mode = TYPE_MODE (vectype);
4095
4096 /* FORNOW. In some cases can vectorize even if data-type not supported
4097 (e.g. - data copies). */
4098 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
4099 {
4100 if (vect_print_dump_info (REPORT_DETAILS))
4101 fprintf (vect_dump, "Aligned load, but unsupported type.");
4102 return false;
4103 }
4104
4105 /* The vector component type needs to be trivially convertible to the
4106 scalar lhs. This should always be the case. */
4107 elem_type = TREE_TYPE (vectype);
4108 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
4109 {
4110 if (vect_print_dump_info (REPORT_DETAILS))
4111 fprintf (vect_dump, "??? operands of different types");
4112 return false;
4113 }
4114
4115 /* Check if the load is a part of an interleaving chain. */
4116 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
4117 {
4118 strided_load = true;
4119 /* FORNOW */
4120 gcc_assert (! nested_in_vect_loop);
4121
4122 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
4123 if (!slp && !PURE_SLP_STMT (stmt_info))
4124 {
4125 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
4126 if (vect_load_lanes_supported (vectype, group_size))
4127 load_lanes_p = true;
4128 else if (!vect_strided_load_supported (vectype, group_size))
4129 return false;
4130 }
4131 }
4132
4133 if (negative)
4134 {
4135 gcc_assert (!strided_load);
4136 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
4137 if (alignment_support_scheme != dr_aligned
4138 && alignment_support_scheme != dr_unaligned_supported)
4139 {
4140 if (vect_print_dump_info (REPORT_DETAILS))
4141 fprintf (vect_dump, "negative step but alignment required.");
4142 return false;
4143 }
4144 if (!perm_mask_for_reverse (vectype, NULL))
4145 {
4146 if (vect_print_dump_info (REPORT_DETAILS))
4147 fprintf (vect_dump, "negative step and reversing not supported.");
4148 return false;
4149 }
4150 }
4151
4152 if (!vec_stmt) /* transformation not required. */
4153 {
4154 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
4155 vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL);
4156 return true;
4157 }
4158
4159 if (vect_print_dump_info (REPORT_DETAILS))
4160 fprintf (vect_dump, "transform load. ncopies = %d", ncopies);
4161
4162 /** Transform. **/
4163
4164 if (strided_load)
4165 {
4166 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
4167 /* Check if the chain of loads is already vectorized. */
4168 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
4169 {
4170 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4171 return true;
4172 }
4173 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
4174 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
4175
4176 /* VEC_NUM is the number of vect stmts to be created for this group. */
4177 if (slp)
4178 {
4179 strided_load = false;
4180 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4181 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
4182 slp_perm = true;
4183 }
4184 else
4185 vec_num = group_size;
4186 }
4187 else
4188 {
4189 first_stmt = stmt;
4190 first_dr = dr;
4191 group_size = vec_num = 1;
4192 }
4193
4194 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
4195 gcc_assert (alignment_support_scheme);
4196 /* Targets with load-lane instructions must not require explicit
4197 realignment. */
4198 gcc_assert (!load_lanes_p
4199 || alignment_support_scheme == dr_aligned
4200 || alignment_support_scheme == dr_unaligned_supported);
4201
4202 /* In case the vectorization factor (VF) is bigger than the number
4203 of elements that we can fit in a vectype (nunits), we have to generate
4204 more than one vector stmt - i.e - we need to "unroll" the
4205 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4206 from one copy of the vector stmt to the next, in the field
4207 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4208 stages to find the correct vector defs to be used when vectorizing
4209 stmts that use the defs of the current stmt. The example below
4210 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
4211 need to create 4 vectorized stmts):
4212
4213 before vectorization:
4214 RELATED_STMT VEC_STMT
4215 S1: x = memref - -
4216 S2: z = x + 1 - -
4217
4218 step 1: vectorize stmt S1:
4219 We first create the vector stmt VS1_0, and, as usual, record a
4220 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
4221 Next, we create the vector stmt VS1_1, and record a pointer to
4222 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
4223 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
4224 stmts and pointers:
4225 RELATED_STMT VEC_STMT
4226 VS1_0: vx0 = memref0 VS1_1 -
4227 VS1_1: vx1 = memref1 VS1_2 -
4228 VS1_2: vx2 = memref2 VS1_3 -
4229 VS1_3: vx3 = memref3 - -
4230 S1: x = load - VS1_0
4231 S2: z = x + 1 - -
4232
4233      See the documentation of vect_get_vec_def_for_stmt_copy for how the
4234      information we recorded in the RELATED_STMT field is used to vectorize
4235 stmt S2. */
4236
4237 /* In case of interleaving (non-unit strided access):
4238
4239 S1: x2 = &base + 2
4240 S2: x0 = &base
4241 S3: x1 = &base + 1
4242 S4: x3 = &base + 3
4243
4244 Vectorized loads are created in the order of memory accesses
4245 starting from the access of the first stmt of the chain:
4246
4247 VS1: vx0 = &base
4248 VS2: vx1 = &base + vec_size*1
4249 VS3: vx3 = &base + vec_size*2
4250 VS4: vx4 = &base + vec_size*3
4251
4252 Then permutation statements are generated:
4253
4254 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
4255 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
4256 ...
4257
4258 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
4259 (the order of the data-refs in the output of vect_permute_load_chain
4260 corresponds to the order of scalar stmts in the interleaving chain - see
4261 the documentation of vect_permute_load_chain()).
4262 The generation of permutation stmts and recording them in
4263 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
4264
4265 In case of both multiple types and interleaving, the vector loads and
4266 permutation stmts above are created for every copy. The result vector
4267 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
4268 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
4269
4270 /* If the data reference is aligned (dr_aligned) or potentially unaligned
4271 on a target that supports unaligned accesses (dr_unaligned_supported)
4272 we generate the following code:
4273 p = initial_addr;
4274 indx = 0;
4275 loop {
4276 p = p + indx * vectype_size;
4277 vec_dest = *(p);
4278 indx = indx + 1;
4279 }
4280
4281 Otherwise, the data reference is potentially unaligned on a target that
4282 does not support unaligned accesses (dr_explicit_realign_optimized) -
4283 then generate the following code, in which the data in each iteration is
4284 obtained by two vector loads, one from the previous iteration, and one
4285 from the current iteration:
4286 p1 = initial_addr;
4287 msq_init = *(floor(p1))
4288 p2 = initial_addr + VS - 1;
4289 realignment_token = call target_builtin;
4290 indx = 0;
4291 loop {
4292 p2 = p2 + indx * vectype_size
4293 lsq = *(floor(p2))
4294 vec_dest = realign_load (msq, lsq, realignment_token)
4295 indx = indx + 1;
4296 msq = lsq;
4297 } */
4298
4299 /* If the misalignment remains the same throughout the execution of the
4300 loop, we can create the init_addr and permutation mask at the loop
4301      preheader.  Otherwise, they need to be created inside the loop.
4302 This can only occur when vectorizing memory accesses in the inner-loop
4303 nested within an outer-loop that is being vectorized. */
4304
4305 if (loop && nested_in_vect_loop_p (loop, stmt)
4306 && (TREE_INT_CST_LOW (DR_STEP (dr))
4307 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
4308 {
4309 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
4310 compute_in_loop = true;
4311 }
4312
4313 if ((alignment_support_scheme == dr_explicit_realign_optimized
4314 || alignment_support_scheme == dr_explicit_realign)
4315 && !compute_in_loop)
4316 {
4317 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
4318 alignment_support_scheme, NULL_TREE,
4319 &at_loop);
4320 if (alignment_support_scheme == dr_explicit_realign_optimized)
4321 {
4322 phi = SSA_NAME_DEF_STMT (msq);
4323 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4324 }
4325 }
4326 else
4327 at_loop = loop;
4328
4329 if (negative)
4330 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
4331
4332 if (load_lanes_p)
4333 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
4334 else
4335 aggr_type = vectype;
4336
4337 prev_stmt_info = NULL;
4338 for (j = 0; j < ncopies; j++)
4339 {
4340 /* 1. Create the vector or array pointer update chain. */
4341 if (j == 0)
4342 dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
4343 offset, &dummy, gsi,
4344 &ptr_incr, false, &inv_p);
4345 else
4346 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
4347 TYPE_SIZE_UNIT (aggr_type));
4348
4349 if (strided_load || slp_perm)
4350 dr_chain = VEC_alloc (tree, heap, vec_num);
4351
4352 if (load_lanes_p)
4353 {
4354 tree vec_array;
4355
4356 vec_array = create_vector_array (vectype, vec_num);
4357
4358 /* Emit:
4359 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
4360 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
4361 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
4362 gimple_call_set_lhs (new_stmt, vec_array);
4363 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4364 mark_symbols_for_renaming (new_stmt);
4365
4366 /* Extract each vector into an SSA_NAME. */
4367 for (i = 0; i < vec_num; i++)
4368 {
4369 new_temp = read_vector_array (stmt, gsi, scalar_dest,
4370 vec_array, i);
4371 VEC_quick_push (tree, dr_chain, new_temp);
4372 }
4373
4374 /* Record the mapping between SSA_NAMEs and statements. */
4375 vect_record_strided_load_vectors (stmt, dr_chain);
4376 }
4377 else
4378 {
4379 for (i = 0; i < vec_num; i++)
4380 {
4381 if (i > 0)
4382 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
4383 stmt, NULL_TREE);
4384
4385 /* 2. Create the vector-load in the loop. */
4386 switch (alignment_support_scheme)
4387 {
4388 case dr_aligned:
4389 case dr_unaligned_supported:
4390 {
4391 struct ptr_info_def *pi;
4392 data_ref
4393 = build2 (MEM_REF, vectype, dataref_ptr,
4394 build_int_cst (reference_alias_ptr_type
4395 (DR_REF (first_dr)), 0));
4396 pi = get_ptr_info (dataref_ptr);
4397 pi->align = TYPE_ALIGN_UNIT (vectype);
4398 if (alignment_support_scheme == dr_aligned)
4399 {
4400 gcc_assert (aligned_access_p (first_dr));
4401 pi->misalign = 0;
4402 }
4403 else if (DR_MISALIGNMENT (first_dr) == -1)
4404 {
4405 TREE_TYPE (data_ref)
4406 = build_aligned_type (TREE_TYPE (data_ref),
4407 TYPE_ALIGN (elem_type));
4408 pi->align = TYPE_ALIGN_UNIT (elem_type);
4409 pi->misalign = 0;
4410 }
4411 else
4412 {
4413 TREE_TYPE (data_ref)
4414 = build_aligned_type (TREE_TYPE (data_ref),
4415 TYPE_ALIGN (elem_type));
4416 pi->misalign = DR_MISALIGNMENT (first_dr);
4417 }
4418 break;
4419 }
4420 case dr_explicit_realign:
4421 {
4422 tree ptr, bump;
4423 tree vs_minus_1;
4424
4425 vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4426
4427 if (compute_in_loop)
4428 msq = vect_setup_realignment (first_stmt, gsi,
4429 &realignment_token,
4430 dr_explicit_realign,
4431 dataref_ptr, NULL);
4432
4433 new_stmt = gimple_build_assign_with_ops
4434 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4435 build_int_cst
4436 (TREE_TYPE (dataref_ptr),
4437 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4438 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4439 gimple_assign_set_lhs (new_stmt, ptr);
4440 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4441 data_ref
4442 = build2 (MEM_REF, vectype, ptr,
4443 build_int_cst (reference_alias_ptr_type
4444 (DR_REF (first_dr)), 0));
4445 vec_dest = vect_create_destination_var (scalar_dest,
4446 vectype);
4447 new_stmt = gimple_build_assign (vec_dest, data_ref);
4448 new_temp = make_ssa_name (vec_dest, new_stmt);
4449 gimple_assign_set_lhs (new_stmt, new_temp);
4450 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
4451 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
4452 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4453 msq = new_temp;
4454
4455 bump = size_binop (MULT_EXPR, vs_minus_1,
4456 TYPE_SIZE_UNIT (scalar_type));
4457 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
4458 new_stmt = gimple_build_assign_with_ops
4459 (BIT_AND_EXPR, NULL_TREE, ptr,
4460 build_int_cst
4461 (TREE_TYPE (ptr),
4462 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4463 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4464 gimple_assign_set_lhs (new_stmt, ptr);
4465 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4466 data_ref
4467 = build2 (MEM_REF, vectype, ptr,
4468 build_int_cst (reference_alias_ptr_type
4469 (DR_REF (first_dr)), 0));
4470 break;
4471 }
4472 case dr_explicit_realign_optimized:
4473 new_stmt = gimple_build_assign_with_ops
4474 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4475 build_int_cst
4476 (TREE_TYPE (dataref_ptr),
4477 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4478 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr),
4479 new_stmt);
4480 gimple_assign_set_lhs (new_stmt, new_temp);
4481 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4482 data_ref
4483 = build2 (MEM_REF, vectype, new_temp,
4484 build_int_cst (reference_alias_ptr_type
4485 (DR_REF (first_dr)), 0));
4486 break;
4487 default:
4488 gcc_unreachable ();
4489 }
4490 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4491 new_stmt = gimple_build_assign (vec_dest, data_ref);
4492 new_temp = make_ssa_name (vec_dest, new_stmt);
4493 gimple_assign_set_lhs (new_stmt, new_temp);
4494 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4495 mark_symbols_for_renaming (new_stmt);
4496
4497 /* 3. Handle explicit realignment if necessary/supported.
4498 Create in loop:
4499 vec_dest = realign_load (msq, lsq, realignment_token) */
4500 if (alignment_support_scheme == dr_explicit_realign_optimized
4501 || alignment_support_scheme == dr_explicit_realign)
4502 {
4503 lsq = gimple_assign_lhs (new_stmt);
4504 if (!realignment_token)
4505 realignment_token = dataref_ptr;
4506 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4507 new_stmt
4508 = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR,
4509 vec_dest, msq, lsq,
4510 realignment_token);
4511 new_temp = make_ssa_name (vec_dest, new_stmt);
4512 gimple_assign_set_lhs (new_stmt, new_temp);
4513 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4514
4515 if (alignment_support_scheme == dr_explicit_realign_optimized)
4516 {
4517 gcc_assert (phi);
4518 if (i == vec_num - 1 && j == ncopies - 1)
4519 add_phi_arg (phi, lsq,
4520 loop_latch_edge (containing_loop),
4521 UNKNOWN_LOCATION);
4522 msq = lsq;
4523 }
4524 }
4525
4526 /* 4. Handle invariant-load. */
4527 if (inv_p && !bb_vinfo)
4528 {
4529 gcc_assert (!strided_load);
4530 gcc_assert (nested_in_vect_loop_p (loop, stmt));
4531 if (j == 0)
4532 {
4533 int k;
4534 tree t = NULL_TREE;
4535 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
4536
4537 /* CHECKME: bitpos depends on endianness? */
4538 bitpos = bitsize_zero_node;
4539 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
4540 bitsize, bitpos);
4541 vec_dest = vect_create_destination_var (scalar_dest,
4542 NULL_TREE);
4543 new_stmt = gimple_build_assign (vec_dest, vec_inv);
4544 new_temp = make_ssa_name (vec_dest, new_stmt);
4545 gimple_assign_set_lhs (new_stmt, new_temp);
4546 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4547
4548 for (k = nunits - 1; k >= 0; --k)
4549 t = tree_cons (NULL_TREE, new_temp, t);
4550 /* FIXME: use build_constructor directly. */
4551 vec_inv = build_constructor_from_list (vectype, t);
4552 new_temp = vect_init_vector (stmt, vec_inv,
4553 vectype, gsi);
4554 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4555 }
4556 else
4557 gcc_unreachable (); /* FORNOW. */
4558 }
4559
4560 if (negative)
4561 {
4562 new_temp = reverse_vec_elements (new_temp, stmt, gsi);
4563 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4564 }
4565
4566 /* Collect vector loads and later create their permutation in
4567 vect_transform_strided_load (). */
4568 if (strided_load || slp_perm)
4569 VEC_quick_push (tree, dr_chain, new_temp);
4570
4571 /* Store vector loads in the corresponding SLP_NODE. */
4572 if (slp && !slp_perm)
4573 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
4574 new_stmt);
4575 }
4576 }
4577
4578 if (slp && !slp_perm)
4579 continue;
4580
4581 if (slp_perm)
4582 {
4583 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
4584 slp_node_instance, false))
4585 {
4586 VEC_free (tree, heap, dr_chain);
4587 return false;
4588 }
4589 }
4590 else
4591 {
4592 if (strided_load)
4593 {
4594 if (!load_lanes_p)
4595 vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
4596 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4597 }
4598 else
4599 {
4600 if (j == 0)
4601 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4602 else
4603 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4604 prev_stmt_info = vinfo_for_stmt (new_stmt);
4605 }
4606 }
4607 if (dr_chain)
4608 VEC_free (tree, heap, dr_chain);
4609 }
4610
4611 return true;
4612 }
4613
4614 /* Function vect_is_simple_cond.
4615
4616 Input:
4617 LOOP - the loop that is being vectorized.
4618 COND - Condition that is checked for simple use.
4619
4620 Returns whether a COND can be vectorized. Checks whether
4621    condition operands are supportable using vect_is_simple_use. */
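   /* For example (illustrative only), conditions such as
          a_5 < b_7      or      x_3 != 0
      are simple: each operand is either an SSA name with a vectorizable
      definition or an integer/real/fixed-point constant.  */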
4622
4623 static bool
4624 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
4625 {
4626 tree lhs, rhs;
4627 tree def;
4628 enum vect_def_type dt;
4629
4630 if (!COMPARISON_CLASS_P (cond))
4631 return false;
4632
4633 lhs = TREE_OPERAND (cond, 0);
4634 rhs = TREE_OPERAND (cond, 1);
4635
4636 if (TREE_CODE (lhs) == SSA_NAME)
4637 {
4638 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
4639 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
4640 &dt))
4641 return false;
4642 }
4643 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
4644 && TREE_CODE (lhs) != FIXED_CST)
4645 return false;
4646
4647 if (TREE_CODE (rhs) == SSA_NAME)
4648 {
4649 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
4650 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
4651 &dt))
4652 return false;
4653 }
4654 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
4655 && TREE_CODE (rhs) != FIXED_CST)
4656 return false;
4657
4658 return true;
4659 }
4660
4661 /* vectorizable_condition.
4662
4663    Check if STMT is a conditional modify expression that can be vectorized.
4664 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4665 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4666 at GSI.
4667
4668    When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
4669    to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
4670    the else clause if it is 2).
4671
4672 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
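   /* A typical candidate (illustrative GIMPLE only, names made up for the
      example) is a statement such as
          x_5 = a_2 < b_3 ? c_4 : d_6;
      which is vectorized below into
          vect_x = VEC_COND_EXPR <va < vb, vc, vd>;  */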
4673
4674 bool
4675 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4676 gimple *vec_stmt, tree reduc_def, int reduc_index)
4677 {
4678 tree scalar_dest = NULL_TREE;
4679 tree vec_dest = NULL_TREE;
4680 tree op = NULL_TREE;
4681 tree cond_expr, then_clause, else_clause;
4682 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4683 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4684 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
4685 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
4686 tree vec_compare, vec_cond_expr;
4687 tree new_temp;
4688 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4689 enum machine_mode vec_mode;
4690 tree def;
4691 enum vect_def_type dt, dts[4];
4692 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4693 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4694 enum tree_code code;
4695 stmt_vec_info prev_stmt_info = NULL;
4696 int j;
4697
4698 /* FORNOW: unsupported in basic block SLP. */
4699 gcc_assert (loop_vinfo);
4700
4701 /* FORNOW: SLP not supported. */
4702 if (STMT_SLP_TYPE (stmt_info))
4703 return false;
4704
4705 gcc_assert (ncopies >= 1);
4706 if (reduc_index && ncopies > 1)
4707 return false; /* FORNOW */
4708
4709 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4710 return false;
4711
4712 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4713 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4714 && reduc_def))
4715 return false;
4716
4717 /* FORNOW: not yet supported. */
4718 if (STMT_VINFO_LIVE_P (stmt_info))
4719 {
4720 if (vect_print_dump_info (REPORT_DETAILS))
4721 fprintf (vect_dump, "value used after loop.");
4722 return false;
4723 }
4724
4725 /* Is vectorizable conditional operation? */
4726 if (!is_gimple_assign (stmt))
4727 return false;
4728
4729 code = gimple_assign_rhs_code (stmt);
4730
4731 if (code != COND_EXPR)
4732 return false;
4733
4734 gcc_assert (gimple_assign_single_p (stmt));
4735 op = gimple_assign_rhs1 (stmt);
4736 cond_expr = TREE_OPERAND (op, 0);
4737 then_clause = TREE_OPERAND (op, 1);
4738 else_clause = TREE_OPERAND (op, 2);
4739
4740 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4741 return false;
4742
4743 /* We do not handle two different vector types for the condition
4744 and the values. */
4745 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4746 TREE_TYPE (vectype)))
4747 return false;
4748
4749 if (TREE_CODE (then_clause) == SSA_NAME)
4750 {
4751 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
4752 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
4753 &then_def_stmt, &def, &dt))
4754 return false;
4755 }
4756 else if (TREE_CODE (then_clause) != INTEGER_CST
4757 && TREE_CODE (then_clause) != REAL_CST
4758 && TREE_CODE (then_clause) != FIXED_CST)
4759 return false;
4760
4761 if (TREE_CODE (else_clause) == SSA_NAME)
4762 {
4763 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
4764 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
4765 &else_def_stmt, &def, &dt))
4766 return false;
4767 }
4768 else if (TREE_CODE (else_clause) != INTEGER_CST
4769 && TREE_CODE (else_clause) != REAL_CST
4770 && TREE_CODE (else_clause) != FIXED_CST)
4771 return false;
4772
4773
4774 vec_mode = TYPE_MODE (vectype);
4775
4776 if (!vec_stmt)
4777 {
4778 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
4779 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
4780 }
4781
4782 /* Transform */
4783
4784 /* Handle def. */
4785 scalar_dest = gimple_assign_lhs (stmt);
4786 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4787
4788 /* Handle cond expr. */
4789 for (j = 0; j < ncopies; j++)
4790 {
4791 gimple new_stmt;
4792 if (j == 0)
4793 {
4794 gimple gtemp;
4795 vec_cond_lhs =
4796 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
4797 stmt, NULL);
4798 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
4799 NULL, &gtemp, &def, &dts[0]);
4800 vec_cond_rhs =
4801 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
4802 stmt, NULL);
4803 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
4804 NULL, &gtemp, &def, &dts[1]);
4805 if (reduc_index == 1)
4806 vec_then_clause = reduc_def;
4807 else
4808 {
4809 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
4810 stmt, NULL);
4811 vect_is_simple_use (then_clause, loop_vinfo,
4812 NULL, &gtemp, &def, &dts[2]);
4813 }
4814 if (reduc_index == 2)
4815 vec_else_clause = reduc_def;
4816 else
4817 {
4818 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
4819 stmt, NULL);
4820 vect_is_simple_use (else_clause, loop_vinfo,
4821 NULL, &gtemp, &def, &dts[3]);
4822 }
4823 }
4824 else
4825 {
4826 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
4827 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
4828 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
4829 vec_then_clause);
4830 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
4831 vec_else_clause);
4832 }
4833
4834 /* Arguments are ready. Create the new vector stmt. */
4835 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4836 vec_cond_lhs, vec_cond_rhs);
4837 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4838 vec_compare, vec_then_clause, vec_else_clause);
4839
4840 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4841 new_temp = make_ssa_name (vec_dest, new_stmt);
4842 gimple_assign_set_lhs (new_stmt, new_temp);
4843 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4844 if (j == 0)
4845 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4846 else
4847 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4848
4849 prev_stmt_info = vinfo_for_stmt (new_stmt);
4850 }
4851
4852 return true;
4853 }
4854
4855
4856 /* Make sure the statement is vectorizable. */
4857
4858 bool
4859 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4860 {
4861 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4862 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4863 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4864 bool ok;
4865 tree scalar_type, vectype;
4866
4867 if (vect_print_dump_info (REPORT_DETAILS))
4868 {
4869 fprintf (vect_dump, "==> examining statement: ");
4870 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4871 }
4872
4873 if (gimple_has_volatile_ops (stmt))
4874 {
4875 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4876 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4877
4878 return false;
4879 }
4880
4881 /* Skip stmts that do not need to be vectorized. In loops this is expected
4882 to include:
4883 - the COND_EXPR which is the loop exit condition
4884 - any LABEL_EXPRs in the loop
4885 - computations that are used only for array indexing or loop control.
4886 In basic blocks we only analyze statements that are a part of some SLP
4887 instance, therefore, all the statements are relevant. */
4888
4889 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4890 && !STMT_VINFO_LIVE_P (stmt_info))
4891 {
4892 gimple pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4893 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
4894 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
4895 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
4896 {
4897 stmt = pattern_stmt;
4898 stmt_info = vinfo_for_stmt (pattern_stmt);
4899 if (vect_print_dump_info (REPORT_DETAILS))
4900 {
4901 fprintf (vect_dump, "==> examining pattern statement: ");
4902 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4903 }
4904 }
4905 else
4906 {
4907 if (vect_print_dump_info (REPORT_DETAILS))
4908 fprintf (vect_dump, "irrelevant.");
4909
4910 return true;
4911 }
4912 }
4913
4914 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4915 {
4916 case vect_internal_def:
4917 break;
4918
4919 case vect_reduction_def:
4920 case vect_nested_cycle:
4921 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4922 || relevance == vect_used_in_outer_by_reduction
4923 || relevance == vect_unused_in_scope));
4924 break;
4925
4926 case vect_induction_def:
4927 case vect_constant_def:
4928 case vect_external_def:
4929 case vect_unknown_def_type:
4930 default:
4931 gcc_unreachable ();
4932 }
4933
4934 if (bb_vinfo)
4935 {
4936 gcc_assert (PURE_SLP_STMT (stmt_info));
4937
4938 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4939 if (vect_print_dump_info (REPORT_DETAILS))
4940 {
4941 fprintf (vect_dump, "get vectype for scalar type: ");
4942 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4943 }
4944
4945 vectype = get_vectype_for_scalar_type (scalar_type);
4946 if (!vectype)
4947 {
4948 if (vect_print_dump_info (REPORT_DETAILS))
4949 {
4950 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4951 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4952 }
4953 return false;
4954 }
4955
4956 if (vect_print_dump_info (REPORT_DETAILS))
4957 {
4958 fprintf (vect_dump, "vectype: ");
4959 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4960 }
4961
4962 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4963 }
4964
4965 if (STMT_VINFO_RELEVANT_P (stmt_info))
4966 {
4967 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4968 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4969 *need_to_vectorize = true;
4970 }
4971
4972 ok = true;
4973 if (!bb_vinfo
4974 && (STMT_VINFO_RELEVANT_P (stmt_info)
4975 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4976 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4977 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4978 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4979 || vectorizable_shift (stmt, NULL, NULL, NULL)
4980 || vectorizable_operation (stmt, NULL, NULL, NULL)
4981 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4982 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4983 || vectorizable_call (stmt, NULL, NULL)
4984 || vectorizable_store (stmt, NULL, NULL, NULL)
4985 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4986 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4987 else
4988 {
4989 if (bb_vinfo)
4990 ok = (vectorizable_shift (stmt, NULL, NULL, node)
4991 || vectorizable_operation (stmt, NULL, NULL, node)
4992 || vectorizable_assignment (stmt, NULL, NULL, node)
4993 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4994 || vectorizable_store (stmt, NULL, NULL, node));
4995 }
4996
4997 if (!ok)
4998 {
4999 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
5000 {
5001 fprintf (vect_dump, "not vectorized: relevant stmt not ");
5002 fprintf (vect_dump, "supported: ");
5003 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
5004 }
5005
5006 return false;
5007 }
5008
5009 if (bb_vinfo)
5010 return true;
5011
5012 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
5013 need extra handling, except for vectorizable reductions. */
5014 if (STMT_VINFO_LIVE_P (stmt_info)
5015 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
5016 ok = vectorizable_live_operation (stmt, NULL, NULL);
5017
5018 if (!ok)
5019 {
5020 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
5021 {
5022 fprintf (vect_dump, "not vectorized: live stmt not ");
5023 fprintf (vect_dump, "supported: ");
5024 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
5025 }
5026
5027 return false;
5028 }
5029
5030 return true;
5031 }
5032
5033
5034 /* Function vect_transform_stmt.
5035
5036 Create a vectorized stmt to replace STMT, and insert it at BSI. */
5037
5038 bool
5039 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
5040 bool *strided_store, slp_tree slp_node,
5041 slp_instance slp_node_instance)
5042 {
5043 bool is_store = false;
5044 gimple vec_stmt = NULL;
5045 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5046 gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
5047 bool done;
5048
5049 switch (STMT_VINFO_TYPE (stmt_info))
5050 {
5051 case type_demotion_vec_info_type:
5052 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
5053 gcc_assert (done);
5054 break;
5055
5056 case type_promotion_vec_info_type:
5057 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
5058 gcc_assert (done);
5059 break;
5060
5061 case type_conversion_vec_info_type:
5062 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
5063 gcc_assert (done);
5064 break;
5065
5066 case induc_vec_info_type:
5067 gcc_assert (!slp_node);
5068 done = vectorizable_induction (stmt, gsi, &vec_stmt);
5069 gcc_assert (done);
5070 break;
5071
5072 case shift_vec_info_type:
5073 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
5074 gcc_assert (done);
5075 break;
5076
5077 case op_vec_info_type:
5078 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
5079 gcc_assert (done);
5080 break;
5081
5082 case assignment_vec_info_type:
5083 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
5084 gcc_assert (done);
5085 break;
5086
5087 case load_vec_info_type:
5088 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
5089 slp_node_instance);
5090 gcc_assert (done);
5091 break;
5092
5093 case store_vec_info_type:
5094 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
5095 gcc_assert (done);
5096 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
5097 {
5098 /* In case of interleaving, the whole chain is vectorized when the
5099 last store in the chain is reached. Store stmts before the last
5100 one are skipped, and their stmt_vec_info shouldn't be freed
5101 meanwhile. */
5102 *strided_store = true;
5103 if (STMT_VINFO_VEC_STMT (stmt_info))
5104 is_store = true;
5105 }
5106 else
5107 is_store = true;
5108 break;
5109
5110 case condition_vec_info_type:
5111 gcc_assert (!slp_node);
5112 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
5113 gcc_assert (done);
5114 break;
5115
5116 case call_vec_info_type:
5117 gcc_assert (!slp_node);
5118 done = vectorizable_call (stmt, gsi, &vec_stmt);
5119 stmt = gsi_stmt (*gsi);
5120 break;
5121
5122 case reduc_vec_info_type:
5123 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
5124 gcc_assert (done);
5125 break;
5126
5127 default:
5128 if (!STMT_VINFO_LIVE_P (stmt_info))
5129 {
5130 if (vect_print_dump_info (REPORT_DETAILS))
5131 fprintf (vect_dump, "stmt not supported.");
5132 gcc_unreachable ();
5133 }
5134 }
5135
5136 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
5137 is being vectorized, but outside the immediately enclosing loop. */
5138 if (vec_stmt
5139 && STMT_VINFO_LOOP_VINFO (stmt_info)
5140 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
5141 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
5142 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
5143 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
5144 || STMT_VINFO_RELEVANT (stmt_info) ==
5145 vect_used_in_outer_by_reduction))
5146 {
5147 struct loop *innerloop = LOOP_VINFO_LOOP (
5148 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
5149 imm_use_iterator imm_iter;
5150 use_operand_p use_p;
5151 tree scalar_dest;
5152 gimple exit_phi;
5153
5154 if (vect_print_dump_info (REPORT_DETAILS))
5155 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
5156
5157      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
5158 (to be used when vectorizing outer-loop stmts that use the DEF of
5159 STMT). */
5160 if (gimple_code (stmt) == GIMPLE_PHI)
5161 scalar_dest = PHI_RESULT (stmt);
5162 else
5163 scalar_dest = gimple_assign_lhs (stmt);
5164
5165 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5166 {
5167 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
5168 {
5169 exit_phi = USE_STMT (use_p);
5170 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
5171 }
5172 }
5173 }
5174
5175 /* Handle stmts whose DEF is used outside the loop-nest that is
5176 being vectorized. */
5177 if (STMT_VINFO_LIVE_P (stmt_info)
5178 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
5179 {
5180 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
5181 gcc_assert (done);
5182 }
5183
5184 if (vec_stmt)
5185 {
5186 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
5187 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
5188 if (orig_stmt_in_pattern)
5189 {
5190 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
5191 /* STMT was inserted by the vectorizer to replace a computation idiom.
5192 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
5193 computed this idiom. We need to record a pointer to VEC_STMT in
5194 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
5195 documentation of vect_pattern_recog. */
5196 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
5197 {
5198 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
5199 == orig_scalar_stmt);
5200 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
5201 }
5202 }
5203 }
5204
5205 return is_store;
5206 }
5207
5208
5209 /* Remove a group of stores (for SLP or interleaving), free their
5210 stmt_vec_info. */
5211
5212 void
5213 vect_remove_stores (gimple first_stmt)
5214 {
5215 gimple next = first_stmt;
5216 gimple tmp;
5217 gimple_stmt_iterator next_si;
5218
5219 while (next)
5220 {
5221 /* Free the attached stmt_vec_info and remove the stmt. */
5222 next_si = gsi_for_stmt (next);
5223 gsi_remove (&next_si, true);
5224 tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
5225 free_stmt_vec_info (next);
5226 next = tmp;
5227 }
5228 }
5229
5230
5231 /* Function new_stmt_vec_info.
5232
5233 Create and initialize a new stmt_vec_info struct for STMT. */
5234
5235 stmt_vec_info
5236 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
5237 bb_vec_info bb_vinfo)
5238 {
5239 stmt_vec_info res;
5240 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
5241
5242 STMT_VINFO_TYPE (res) = undef_vec_info_type;
5243 STMT_VINFO_STMT (res) = stmt;
5244 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
5245 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
5246 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
5247 STMT_VINFO_LIVE_P (res) = false;
5248 STMT_VINFO_VECTYPE (res) = NULL;
5249 STMT_VINFO_VEC_STMT (res) = NULL;
5250 STMT_VINFO_VECTORIZABLE (res) = true;
5251 STMT_VINFO_IN_PATTERN_P (res) = false;
5252 STMT_VINFO_RELATED_STMT (res) = NULL;
5253 STMT_VINFO_DATA_REF (res) = NULL;
5254
5255 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
5256 STMT_VINFO_DR_OFFSET (res) = NULL;
5257 STMT_VINFO_DR_INIT (res) = NULL;
5258 STMT_VINFO_DR_STEP (res) = NULL;
5259 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
5260
5261 if (gimple_code (stmt) == GIMPLE_PHI
5262 && is_loop_header_bb_p (gimple_bb (stmt)))
5263 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
5264 else
5265 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
5266
5267 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
5268 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
5269 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
5270 STMT_SLP_TYPE (res) = loop_vect;
5271 GROUP_FIRST_ELEMENT (res) = NULL;
5272 GROUP_NEXT_ELEMENT (res) = NULL;
5273 GROUP_SIZE (res) = 0;
5274 GROUP_STORE_COUNT (res) = 0;
5275 GROUP_GAP (res) = 0;
5276 GROUP_SAME_DR_STMT (res) = NULL;
5277 GROUP_READ_WRITE_DEPENDENCE (res) = false;
5278
5279 return res;
5280 }
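
/* Illustrative note (an assumption about typical use, not taken from the
   sources): callers are expected to register the freshly allocated info
   with the statement right away, roughly:

       set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, loop_vinfo, NULL));

   so that later lookups through vinfo_for_stmt (stmt) find it.  The exact
   call sites live in the loop/basic-block analysis code and may differ.  */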
5281
5282
5283 /* Create the vector that holds the stmt_vec_info structs. */
5284
5285 void
5286 init_stmt_vec_info_vec (void)
5287 {
5288 gcc_assert (!stmt_vec_info_vec);
5289 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
5290 }
5291
5292
5293 /* Free the stmt_vec_info vector. */
5294
5295 void
5296 free_stmt_vec_info_vec (void)
5297 {
5298 gcc_assert (stmt_vec_info_vec);
5299 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
5300 }
5301
5302
5303 /* Free stmt vectorization related info. */
5304
5305 void
5306 free_stmt_vec_info (gimple stmt)
5307 {
5308 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5309
5310 if (!stmt_info)
5311 return;
5312
5313 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
5314 set_vinfo_for_stmt (stmt, NULL);
5315 free (stmt_info);
5316 }
5317
5318
5319 /* Function get_vectype_for_scalar_type_and_size.
5320
5321 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
5322 by the target. */
5323
5324 static tree
5325 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
5326 {
5327 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
5328 enum machine_mode simd_mode;
5329 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
5330 int nunits;
5331 tree vectype;
5332
5333 if (nbytes == 0)
5334 return NULL_TREE;
5335
5336 /* We can't build a vector type of elements with alignment bigger than
5337 their size. */
5338 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
5339 return NULL_TREE;
5340
5341 /* If we'd build a vector type of elements whose mode precision doesn't
5342 match their type's precision we'll get mismatched types on vector
5343 extracts via BIT_FIELD_REFs. This effectively means we disable
5344 vectorization of bool and/or enum types in some languages. */
5345 if (INTEGRAL_TYPE_P (scalar_type)
5346 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
5347 return NULL_TREE;
5348
5349 if (GET_MODE_CLASS (inner_mode) != MODE_INT
5350 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
5351 return NULL_TREE;
5352
5353 /* If no size was supplied use the mode the target prefers. Otherwise
5354 look up a vector mode of the specified size. */
5355 if (size == 0)
5356 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
5357 else
5358 simd_mode = mode_for_vector (inner_mode, size / nbytes);
5359 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
5360 if (nunits <= 1)
5361 return NULL_TREE;
5362
5363 vectype = build_vector_type (scalar_type, nunits);
5364 if (vect_print_dump_info (REPORT_DETAILS))
5365 {
5366 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
5367 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
5368 }
5369
5370 if (!vectype)
5371 return NULL_TREE;
5372
5373 if (vect_print_dump_info (REPORT_DETAILS))
5374 {
5375 fprintf (vect_dump, "vectype: ");
5376 print_generic_expr (vect_dump, vectype, TDF_SLIM);
5377 }
5378
5379 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
5380 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
5381 {
5382 if (vect_print_dump_info (REPORT_DETAILS))
5383 fprintf (vect_dump, "mode not supported by target.");
5384 return NULL_TREE;
5385 }
5386
5387 return vectype;
5388 }
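
/* Worked example (illustration only; the result is target-dependent): for
   SCALAR_TYPE == int (4 bytes) and SIZE == 16, mode_for_vector is asked
   for a 4-element vector mode; on a target providing V4SImode this
   returns build_vector_type (scalar_type, 4), i.e. a vector of 4 ints.
   With SIZE == 0 the choice of mode is instead delegated to
   targetm.vectorize.preferred_simd_mode.  */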
5389
5390 unsigned int current_vector_size;
5391
5392 /* Function get_vectype_for_scalar_type.
5393
5394 Returns the vector type corresponding to SCALAR_TYPE as supported
5395 by the target. */
5396
5397 tree
5398 get_vectype_for_scalar_type (tree scalar_type)
5399 {
5400 tree vectype;
5401 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
5402 current_vector_size);
5403 if (vectype
5404 && current_vector_size == 0)
5405 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
5406 return vectype;
5407 }
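
/* Illustration (hypothetical sizes): the first successful call latches the
   vector size, so a first query that yields a 16-byte vector sets
   current_vector_size to 16, and every later query is answered through
   get_vectype_for_scalar_type_and_size (type, 16), keeping all vector
   types chosen for the loop at one common size.  */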
5408
5409 /* Function get_same_sized_vectype
5410
5411 Returns a vector type corresponding to SCALAR_TYPE with the same
5412 size as VECTOR_TYPE, if supported by the target. */
5413
5414 tree
5415 get_same_sized_vectype (tree scalar_type, tree vector_type)
5416 {
5417 return get_vectype_for_scalar_type_and_size
5418 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
5419 }
5420
5421 /* Function vect_is_simple_use.
5422
5423 Input:
5424 LOOP_VINFO - the vect info of the loop that is being vectorized.
5425 BB_VINFO - the vect info of the basic block that is being vectorized.
5426 OPERAND - operand of a stmt in the loop or bb.
5427 DEF_STMT, DEF, DT (outputs) - the defining stmt, defined value and def type of OPERAND.
5428
5429 Returns whether a stmt with OPERAND can be vectorized.
5430 For loops, supportable operands are constants, loop invariants, and operands
5431 that are defined by the current iteration of the loop. Unsupportable
5432 operands are those that are defined by a previous iteration of the loop (as
5433 is the case in reduction/induction computations).
5434 For basic blocks, supportable operands are constants and bb invariants.
5435 For now, operands defined outside the basic block are not supported. */
5436
5437 bool
5438 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
5439 bb_vec_info bb_vinfo, gimple *def_stmt,
5440 tree *def, enum vect_def_type *dt)
5441 {
5442 basic_block bb;
5443 stmt_vec_info stmt_vinfo;
5444 struct loop *loop = NULL;
5445
5446 if (loop_vinfo)
5447 loop = LOOP_VINFO_LOOP (loop_vinfo);
5448
5449 *def_stmt = NULL;
5450 *def = NULL_TREE;
5451
5452 if (vect_print_dump_info (REPORT_DETAILS))
5453 {
5454 fprintf (vect_dump, "vect_is_simple_use: operand ");
5455 print_generic_expr (vect_dump, operand, TDF_SLIM);
5456 }
5457
5458 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
5459 {
5460 *dt = vect_constant_def;
5461 return true;
5462 }
5463
5464 if (is_gimple_min_invariant (operand))
5465 {
5466 *def = operand;
5467 *dt = vect_external_def;
5468 return true;
5469 }
5470
5471 if (TREE_CODE (operand) == PAREN_EXPR)
5472 {
5473 if (vect_print_dump_info (REPORT_DETAILS))
5474 fprintf (vect_dump, "non-associatable copy.");
5475 operand = TREE_OPERAND (operand, 0);
5476 }
5477
5478 if (TREE_CODE (operand) != SSA_NAME)
5479 {
5480 if (vect_print_dump_info (REPORT_DETAILS))
5481 fprintf (vect_dump, "not ssa-name.");
5482 return false;
5483 }
5484
5485 *def_stmt = SSA_NAME_DEF_STMT (operand);
5486 if (*def_stmt == NULL)
5487 {
5488 if (vect_print_dump_info (REPORT_DETAILS))
5489 fprintf (vect_dump, "no def_stmt.");
5490 return false;
5491 }
5492
5493 if (vect_print_dump_info (REPORT_DETAILS))
5494 {
5495 fprintf (vect_dump, "def_stmt: ");
5496 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
5497 }
5498
5499 /* Empty stmt is expected only in case of a function argument.
5500 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
5501 if (gimple_nop_p (*def_stmt))
5502 {
5503 *def = operand;
5504 *dt = vect_external_def;
5505 return true;
5506 }
5507
5508 bb = gimple_bb (*def_stmt);
5509
5510 if ((loop && !flow_bb_inside_loop_p (loop, bb))
5511 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
5512 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
5513 *dt = vect_external_def;
5514 else
5515 {
5516 stmt_vinfo = vinfo_for_stmt (*def_stmt);
5517 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
5518 }
5519
5520 if (*dt == vect_unknown_def_type)
5521 {
5522 if (vect_print_dump_info (REPORT_DETAILS))
5523 fprintf (vect_dump, "Unsupported pattern.");
5524 return false;
5525 }
5526
5527 if (vect_print_dump_info (REPORT_DETAILS))
5528 fprintf (vect_dump, "type of def: %d.", *dt);
5529
5530 switch (gimple_code (*def_stmt))
5531 {
5532 case GIMPLE_PHI:
5533 *def = gimple_phi_result (*def_stmt);
5534 break;
5535
5536 case GIMPLE_ASSIGN:
5537 *def = gimple_assign_lhs (*def_stmt);
5538 break;
5539
5540 case GIMPLE_CALL:
5541 *def = gimple_call_lhs (*def_stmt);
5542 if (*def != NULL)
5543 break;
5544 /* FALLTHRU */
5545 default:
5546 if (vect_print_dump_info (REPORT_DETAILS))
5547 fprintf (vect_dump, "unsupported defining stmt: ");
5548 return false;
5549 }
5550
5551 return true;
5552 }
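
/* Illustration (hypothetical loop, not from the sources): in

       for (i = 0; i < n; i++)
         a[i] = b[i] * c + 5;

   the value loaded from b[i] is defined by a stmt inside the loop and is
   classified as vect_internal_def, the loop-invariant C defined before
   the loop as vect_external_def, and the literal 5 as vect_constant_def.
   Reduction and induction PHIs get their def type from the stmt_vec_info
   filled in during loop analysis.  */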
5553
5554 /* Function vect_is_simple_use_1.
5555
5556 Same as vect_is_simple_use but also determines the vector operand
5557 type of OPERAND and stores it to *VECTYPE. If the definition of
5558 OPERAND is vect_uninitialized_def, vect_constant_def or
5559 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
5560 is responsible for computing the best suited vector type for the
5561 scalar operand. */
5562
5563 bool
5564 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
5565 bb_vec_info bb_vinfo, gimple *def_stmt,
5566 tree *def, enum vect_def_type *dt, tree *vectype)
5567 {
5568 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
5569 return false;
5570
5571 /* Now get a vector type if the def is internal, otherwise supply
5572 NULL_TREE and leave it up to the caller to figure out a proper
5573 type for the use stmt. */
5574 if (*dt == vect_internal_def
5575 || *dt == vect_induction_def
5576 || *dt == vect_reduction_def
5577 || *dt == vect_double_reduction_def
5578 || *dt == vect_nested_cycle)
5579 {
5580 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
5581 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
5582 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
5583 *vectype = STMT_VINFO_VECTYPE (stmt_info);
5584 gcc_assert (*vectype != NULL_TREE);
5585 }
5586 else if (*dt == vect_uninitialized_def
5587 || *dt == vect_constant_def
5588 || *dt == vect_external_def)
5589 *vectype = NULL_TREE;
5590 else
5591 gcc_unreachable ();
5592
5593 return true;
5594 }
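
/* Usage sketch (illustration): statement analysis routines use this
   variant when they also need the operand's vector type, roughly:

       if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo, &def_stmt,
                                  &def, &dt, &vectype))
         return false;

   falling back to get_vectype_for_scalar_type (TREE_TYPE (op)) when
   VECTYPE comes back NULL_TREE for constant or external defs.  */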
5595
5596
5597 /* Function supportable_widening_operation
5598
5599 Check whether an operation represented by the code CODE is a
5600 widening operation that is supported by the target platform in
5601 vector form (i.e., when operating on arguments of type VECTYPE_IN
5602 producing a result of type VECTYPE_OUT).
5603
5604 Widening operations we currently support are NOP (CONVERT), FLOAT
5605 and WIDEN_MULT. This function checks if these operations are supported
5606 by the target platform either directly (via vector tree-codes), or via
5607 target builtins.
5608
5609 Output:
5610 - CODE1 and CODE2 are codes of vector operations to be used when
5611 vectorizing the operation, if available.
5612 - DECL1 and DECL2 are decls of target builtin functions to be used
5613 when vectorizing the operation, if available. In this case,
5614 CODE1 and CODE2 are CALL_EXPR.
5615 - MULTI_STEP_CVT determines the number of required intermediate steps in
5616 case of multi-step conversion (like char->short->int - in that case
5617 MULTI_STEP_CVT will be 1).
5618 - INTERM_TYPES contains the intermediate type required to perform the
5619 widening operation (short in the above example). */
5620
5621 bool
5622 supportable_widening_operation (enum tree_code code, gimple stmt,
5623 tree vectype_out, tree vectype_in,
5624 tree *decl1, tree *decl2,
5625 enum tree_code *code1, enum tree_code *code2,
5626 int *multi_step_cvt,
5627 VEC (tree, heap) **interm_types)
5628 {
5629 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5630 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
5631 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
5632 bool ordered_p;
5633 enum machine_mode vec_mode;
5634 enum insn_code icode1, icode2;
5635 optab optab1, optab2;
5636 tree vectype = vectype_in;
5637 tree wide_vectype = vectype_out;
5638 enum tree_code c1, c2;
5639
5640 /* The result of a vectorized widening operation usually requires two vectors
5641 (because the widened results do not fit in one vector). The generated
5642 vector results would normally be expected to appear in the same
5643 order as in the original scalar computation, i.e. if 8 results are
5644 generated in each vector iteration, they are to be organized as follows:
5645 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
5646
5647 However, in the special case that the result of the widening operation is
5648 used in a reduction computation only, the order doesn't matter (because
5649 when vectorizing a reduction we change the order of the computation).
5650 Some targets can take advantage of this and generate more efficient code.
5651 For example, targets like Altivec, that support widen_mult using a sequence
5652 of {mult_even,mult_odd} generate the following vectors:
5653 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
5654
5655 When vectorizing outer-loops, we execute the inner-loop sequentially
5656 (each vectorized inner-loop iteration contributes to VF outer-loop
5657 iterations in parallel). We therefore don't allow changing the order
5658 of the computation in the inner-loop during outer-loop vectorization. */
5659
5660 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
5661 && !nested_in_vect_loop_p (vect_loop, stmt))
5662 ordered_p = false;
5663 else
5664 ordered_p = true;
5665
5666 if (!ordered_p
5667 && code == WIDEN_MULT_EXPR
5668 && targetm.vectorize.builtin_mul_widen_even
5669 && targetm.vectorize.builtin_mul_widen_even (vectype)
5670 && targetm.vectorize.builtin_mul_widen_odd
5671 && targetm.vectorize.builtin_mul_widen_odd (vectype))
5672 {
5673 if (vect_print_dump_info (REPORT_DETAILS))
5674 fprintf (vect_dump, "Unordered widening operation detected.");
5675
5676 *code1 = *code2 = CALL_EXPR;
5677 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
5678 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
5679 return true;
5680 }
5681
5682 switch (code)
5683 {
5684 case WIDEN_MULT_EXPR:
5685 if (BYTES_BIG_ENDIAN)
5686 {
5687 c1 = VEC_WIDEN_MULT_HI_EXPR;
5688 c2 = VEC_WIDEN_MULT_LO_EXPR;
5689 }
5690 else
5691 {
5692 c2 = VEC_WIDEN_MULT_HI_EXPR;
5693 c1 = VEC_WIDEN_MULT_LO_EXPR;
5694 }
5695 break;
5696
5697 CASE_CONVERT:
5698 if (BYTES_BIG_ENDIAN)
5699 {
5700 c1 = VEC_UNPACK_HI_EXPR;
5701 c2 = VEC_UNPACK_LO_EXPR;
5702 }
5703 else
5704 {
5705 c2 = VEC_UNPACK_HI_EXPR;
5706 c1 = VEC_UNPACK_LO_EXPR;
5707 }
5708 break;
5709
5710 case FLOAT_EXPR:
5711 if (BYTES_BIG_ENDIAN)
5712 {
5713 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
5714 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
5715 }
5716 else
5717 {
5718 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
5719 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
5720 }
5721 break;
5722
5723 case FIX_TRUNC_EXPR:
5724 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
5725 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
5726 computing the operation. */
5727 return false;
5728
5729 default:
5730 gcc_unreachable ();
5731 }
5732
5733 if (code == FIX_TRUNC_EXPR)
5734 {
5735 /* The signedness is determined from the output operand. */
5736 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5737 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
5738 }
5739 else
5740 {
5741 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5742 optab2 = optab_for_tree_code (c2, vectype, optab_default);
5743 }
5744
5745 if (!optab1 || !optab2)
5746 return false;
5747
5748 vec_mode = TYPE_MODE (vectype);
5749 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
5750 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
5751 return false;
5752
5753 /* Check if it's a multi-step conversion that can be done using intermediate
5754 types. */
5755 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
5756 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
5757 {
5758 int i;
5759 tree prev_type = vectype, intermediate_type;
5760 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5761 optab optab3, optab4;
5762
5763 if (!CONVERT_EXPR_CODE_P (code))
5764 return false;
5765
5766 *code1 = c1;
5767 *code2 = c2;
5768
5769 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5770 intermediate steps in the promotion sequence. We try
5771 MAX_INTERM_CVT_STEPS steps to get to WIDE_VECTYPE, and fail if we do
5772 not. */
5773 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5774 for (i = 0; i < 3; i++)
5775 {
5776 intermediate_mode = insn_data[icode1].operand[0].mode;
5777 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5778 TYPE_UNSIGNED (prev_type));
5779 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
5780 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
5781
5782 if (!optab3 || !optab4
5783 || ((icode1 = optab_handler (optab1, prev_mode))
5784 == CODE_FOR_nothing)
5785 || insn_data[icode1].operand[0].mode != intermediate_mode
5786 || ((icode2 = optab_handler (optab2, prev_mode))
5787 == CODE_FOR_nothing)
5788 || insn_data[icode2].operand[0].mode != intermediate_mode
5789 || ((icode1 = optab_handler (optab3, intermediate_mode))
5790 == CODE_FOR_nothing)
5791 || ((icode2 = optab_handler (optab4, intermediate_mode))
5792 == CODE_FOR_nothing))
5793 return false;
5794
5795 VEC_quick_push (tree, *interm_types, intermediate_type);
5796 (*multi_step_cvt)++;
5797
5798 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
5799 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
5800 return true;
5801
5802 prev_type = intermediate_type;
5803 prev_mode = intermediate_mode;
5804 }
5805
5806 return false;
5807 }
5808
5809 *code1 = c1;
5810 *code2 = c2;
5811 return true;
5812 }
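
/* Worked example (illustration, assuming the target provides the needed
   optabs): widening char to int, i.e. VECTYPE_IN a vector of 16 chars
   (V16QI) and VECTYPE_OUT a vector of 4 ints (V4SI), is a two-step
   promotion: the first step unpacks V16QI into two V8HI vectors and the
   second unpacks each V8HI into two V4SI vectors.  On success *CODE1 and
   *CODE2 are the VEC_UNPACK_LO/HI codes, *INTERM_TYPES holds the short
   vector type and *MULTI_STEP_CVT is 1.  */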
5813
5814
5815 /* Function supportable_narrowing_operation
5816
5817 Check whether an operation represented by the code CODE is a
5818 narrowing operation that is supported by the target platform in
5819 vector form (i.e., when operating on arguments of type VECTYPE_IN
5820 and producing a result of type VECTYPE_OUT).
5821
5822 Narrowing operations we currently support are NOP (CONVERT) and
5823 FIX_TRUNC. This function checks if these operations are supported by
5824 the target platform directly via vector tree-codes.
5825
5826 Output:
5827 - CODE1 is the code of a vector operation to be used when
5828 vectorizing the operation, if available.
5829 - MULTI_STEP_CVT determines the number of required intermediate steps in
5830 case of multi-step conversion (like int->short->char - in that case
5831 MULTI_STEP_CVT will be 1).
5832 - INTERM_TYPES contains the intermediate type required to perform the
5833 narrowing operation (short in the above example). */
5834
5835 bool
5836 supportable_narrowing_operation (enum tree_code code,
5837 tree vectype_out, tree vectype_in,
5838 enum tree_code *code1, int *multi_step_cvt,
5839 VEC (tree, heap) **interm_types)
5840 {
5841 enum machine_mode vec_mode;
5842 enum insn_code icode1;
5843 optab optab1, interm_optab;
5844 tree vectype = vectype_in;
5845 tree narrow_vectype = vectype_out;
5846 enum tree_code c1;
5847 tree intermediate_type, prev_type;
5848 int i;
5849
5850 switch (code)
5851 {
5852 CASE_CONVERT:
5853 c1 = VEC_PACK_TRUNC_EXPR;
5854 break;
5855
5856 case FIX_TRUNC_EXPR:
5857 c1 = VEC_PACK_FIX_TRUNC_EXPR;
5858 break;
5859
5860 case FLOAT_EXPR:
5861 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5862 tree code and optabs used for computing the operation. */
5863 return false;
5864
5865 default:
5866 gcc_unreachable ();
5867 }
5868
5869 if (code == FIX_TRUNC_EXPR)
5870 /* The signedness is determined from the output operand. */
5871 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5872 else
5873 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5874
5875 if (!optab1)
5876 return false;
5877
5878 vec_mode = TYPE_MODE (vectype);
5879 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
5880 return false;
5881
5882 /* Check if it's a multi-step conversion that can be done using intermediate
5883 types. */
5884 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5885 {
5886 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5887
5888 *code1 = c1;
5889 prev_type = vectype;
5890 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5891 intermediate steps in the narrowing sequence. We try
5892 MAX_INTERM_CVT_STEPS steps to get to NARROW_VECTYPE, and fail if we do
5893 not. */
5894 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5895 for (i = 0; i < 3; i++)
5896 {
5897 intermediate_mode = insn_data[icode1].operand[0].mode;
5898 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5899 TYPE_UNSIGNED (prev_type));
5900 interm_optab = optab_for_tree_code (c1, intermediate_type,
5901 optab_default);
5902 if (!interm_optab
5903 || ((icode1 = optab_handler (optab1, prev_mode))
5904 == CODE_FOR_nothing)
5905 || insn_data[icode1].operand[0].mode != intermediate_mode
5906 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
5907 == CODE_FOR_nothing))
5908 return false;
5909
5910 VEC_quick_push (tree, *interm_types, intermediate_type);
5911 (*multi_step_cvt)++;
5912
5913 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5914 return true;
5915
5916 prev_type = intermediate_type;
5917 prev_mode = intermediate_mode;
5918 }
5919
5920 return false;
5921 }
5922
5923 *code1 = c1;
5924 return true;
5925 }
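
/* Worked example (illustration, assuming the target provides the needed
   optabs): narrowing int to char, i.e. VECTYPE_IN a vector of 4 ints
   (V4SI) and VECTYPE_OUT a vector of 16 chars (V16QI), needs one
   intermediate step: pairs of V4SI are packed into V8HI with
   VEC_PACK_TRUNC_EXPR, and pairs of V8HI are then packed into V16QI.  On
   success *CODE1 is VEC_PACK_TRUNC_EXPR, *INTERM_TYPES holds the short
   vector type and *MULTI_STEP_CVT is 1.  */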