gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "tree-pretty-print.h"
32 #include "gimple-pretty-print.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "cfglayout.h"
37 #include "expr.h"
38 #include "recog.h"
39 #include "optabs.h"
40 #include "toplev.h"
41 #include "tree-vectorizer.h"
42 #include "langhooks.h"
43
44
45 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
46
47 /* Function vect_mark_relevant.
48
49 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
50
51 static void
52 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
53 enum vect_relevant relevant, bool live_p)
54 {
55 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
56 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
57 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
58
59 if (vect_print_dump_info (REPORT_DETAILS))
60 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
61
62 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
63 {
64 gimple pattern_stmt;
65
66 /* This is the last stmt in a sequence that was detected as a
67 pattern that can potentially be vectorized. Don't mark the stmt
68 as relevant/live because it's not going to be vectorized.
69 Instead mark the pattern-stmt that replaces it. */
70
71 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
72
73 if (vect_print_dump_info (REPORT_DETAILS))
74 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
75 stmt_info = vinfo_for_stmt (pattern_stmt);
76 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
77 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
78 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
79 stmt = pattern_stmt;
80 }
81
82 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
83 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
84 STMT_VINFO_RELEVANT (stmt_info) = relevant;
85
86 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
87 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
88 {
89 if (vect_print_dump_info (REPORT_DETAILS))
90 fprintf (vect_dump, "already marked relevant/live.");
91 return;
92 }
93
94 VEC_safe_push (gimple, heap, *worklist, stmt);
95 }
96
97
98 /* Function vect_stmt_relevant_p.
99
100 Return true if STMT in loop that is represented by LOOP_VINFO is
101 "relevant for vectorization".
102
103 A stmt is considered "relevant for vectorization" if:
104 - it has uses outside the loop.
105 - it has vdefs (it alters memory).
106 - control stmts in the loop (except for the exit condition).
107
108 CHECKME: what other side effects would the vectorizer allow? */
109
110 static bool
111 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
112 enum vect_relevant *relevant, bool *live_p)
113 {
114 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
115 ssa_op_iter op_iter;
116 imm_use_iterator imm_iter;
117 use_operand_p use_p;
118 def_operand_p def_p;
119
120 *relevant = vect_unused_in_scope;
121 *live_p = false;
122
123 /* cond stmt other than loop exit cond. */
124 if (is_ctrl_stmt (stmt)
125 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
126 != loop_exit_ctrl_vec_info_type)
127 *relevant = vect_used_in_scope;
128
129 /* changing memory. */
130 if (gimple_code (stmt) != GIMPLE_PHI)
131 if (gimple_vdef (stmt))
132 {
133 if (vect_print_dump_info (REPORT_DETAILS))
134       fprintf (vect_dump, "vect_stmt_relevant_p: stmt has vdefs.");
135 *relevant = vect_used_in_scope;
136 }
137
138 /* uses outside the loop. */
139 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
140 {
141 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
142 {
143 basic_block bb = gimple_bb (USE_STMT (use_p));
144 if (!flow_bb_inside_loop_p (loop, bb))
145 {
146 if (vect_print_dump_info (REPORT_DETAILS))
147               fprintf (vect_dump, "vect_stmt_relevant_p: used out of loop.");
148
149 if (is_gimple_debug (USE_STMT (use_p)))
150 continue;
151
152 /* We expect all such uses to be in the loop exit phis
153 (because of loop closed form) */
154 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
155 gcc_assert (bb == single_exit (loop)->dest);
156
157 *live_p = true;
158 }
159 }
160 }
161
162 return (*live_p || *relevant);
163 }
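/* A small worked example (illustrative only; variable names are assumed).
   In the loop below, assuming 'sum' is also used after the loop:

     for (i = 0; i < n; i++)
       {
         b[i] = a[i] + 1;    <-- has a vdef (stores to memory): relevant
         sum = sum + a[i];   <-- def of 'sum' reaches the exit phi: live
       }

   The statements computing the addresses of a[i] and b[i] are not marked
   here; whether an operand is used only for address computation is
   checked by exist_non_indexing_operands_for_use_p below.  */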
164
165
166 /* Function exist_non_indexing_operands_for_use_p
167
168 USE is one of the uses attached to STMT. Check if USE is
169 used in STMT for anything other than indexing an array. */
170
171 static bool
172 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
173 {
174 tree operand;
175 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
176
177 /* USE corresponds to some operand in STMT. If there is no data
178 reference in STMT, then any operand that corresponds to USE
179 is not indexing an array. */
180 if (!STMT_VINFO_DATA_REF (stmt_info))
181 return true;
182
183   /* STMT has a data_ref. FORNOW this means that it is of one of
184 the following forms:
185 -1- ARRAY_REF = var
186 -2- var = ARRAY_REF
187 (This should have been verified in analyze_data_refs).
188
189 'var' in the second case corresponds to a def, not a use,
190 so USE cannot correspond to any operands that are not used
191 for array indexing.
192
193 Therefore, all we need to check is if STMT falls into the
194 first case, and whether var corresponds to USE. */
195
196 if (!gimple_assign_copy_p (stmt))
197 return false;
198 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
199 return false;
200 operand = gimple_assign_rhs1 (stmt);
201 if (TREE_CODE (operand) != SSA_NAME)
202 return false;
203
204 if (operand == use)
205 return true;
206
207 return false;
208 }
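/* Worked example (illustrative only; SSA names are assumed).  For the
   statement

     a[i_3] = x_5;

   the data reference is the store to a[i_3].  A call with USE == i_3
   returns false: i_3 only feeds the array index, so its defining stmt
   need not be vectorized on behalf of this stmt.  A call with
   USE == x_5 returns true, because x_5 is the value actually stored
   and therefore needs a vector definition.  */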
209
210
211 /*
212 Function process_use.
213
214 Inputs:
215 - a USE in STMT in a loop represented by LOOP_VINFO
216 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
217 that defined USE. This is done by calling mark_relevant and passing it
218 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
219
220 Outputs:
221 Generally, LIVE_P and RELEVANT are used to define the liveness and
222 relevance info of the DEF_STMT of this USE:
223 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
224 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
225 Exceptions:
226 - case 1: If USE is used only for address computations (e.g. array indexing),
227 which does not need to be directly vectorized, then the liveness/relevance
228 of the respective DEF_STMT is left unchanged.
229 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
230     skip DEF_STMT because it has already been processed.
231 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
232 be modified accordingly.
233
234 Return true if everything is as expected. Return false otherwise. */
235
236 static bool
237 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
238 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
239 {
240 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
241 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
242 stmt_vec_info dstmt_vinfo;
243 basic_block bb, def_bb;
244 tree def;
245 gimple def_stmt;
246 enum vect_def_type dt;
247
248 /* case 1: we are only interested in uses that need to be vectorized. Uses
249 that are used for address computation are not considered relevant. */
250 if (!exist_non_indexing_operands_for_use_p (use, stmt))
251 return true;
252
253 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
254 {
255 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
256 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
257 return false;
258 }
259
260 if (!def_stmt || gimple_nop_p (def_stmt))
261 return true;
262
263 def_bb = gimple_bb (def_stmt);
264 if (!flow_bb_inside_loop_p (loop, def_bb))
265 {
266 if (vect_print_dump_info (REPORT_DETAILS))
267 fprintf (vect_dump, "def_stmt is out of loop.");
268 return true;
269 }
270
271 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
272 DEF_STMT must have already been processed, because this should be the
273 only way that STMT, which is a reduction-phi, was put in the worklist,
274 as there should be no other uses for DEF_STMT in the loop. So we just
275 check that everything is as expected, and we are done. */
276 dstmt_vinfo = vinfo_for_stmt (def_stmt);
277 bb = gimple_bb (stmt);
278 if (gimple_code (stmt) == GIMPLE_PHI
279 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
280 && gimple_code (def_stmt) != GIMPLE_PHI
281 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
282 && bb->loop_father == def_bb->loop_father)
283 {
284 if (vect_print_dump_info (REPORT_DETAILS))
285 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
286 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
287 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
288 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
289 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
290 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
291 return true;
292 }
293
294 /* case 3a: outer-loop stmt defining an inner-loop stmt:
295 outer-loop-header-bb:
296 d = def_stmt
297 inner-loop:
298 stmt # use (d)
299 outer-loop-tail-bb:
300 ... */
301 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
302 {
303 if (vect_print_dump_info (REPORT_DETAILS))
304 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
305
306 switch (relevant)
307 {
308 case vect_unused_in_scope:
309 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
310 vect_used_in_scope : vect_unused_in_scope;
311 break;
312
313 case vect_used_in_outer_by_reduction:
314 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
315 relevant = vect_used_by_reduction;
316 break;
317
318 case vect_used_in_outer:
319 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
320 relevant = vect_used_in_scope;
321 break;
322
323 case vect_used_in_scope:
324 break;
325
326 default:
327 gcc_unreachable ();
328 }
329 }
330
331 /* case 3b: inner-loop stmt defining an outer-loop stmt:
332 outer-loop-header-bb:
333 ...
334 inner-loop:
335 d = def_stmt
336 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
337 stmt # use (d) */
338 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
339 {
340 if (vect_print_dump_info (REPORT_DETAILS))
341 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
342
343 switch (relevant)
344 {
345 case vect_unused_in_scope:
346 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
347 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
348 vect_used_in_outer_by_reduction : vect_unused_in_scope;
349 break;
350
351 case vect_used_by_reduction:
352 relevant = vect_used_in_outer_by_reduction;
353 break;
354
355 case vect_used_in_scope:
356 relevant = vect_used_in_outer;
357 break;
358
359 default:
360 gcc_unreachable ();
361 }
362 }
363
364 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
365 return true;
366 }
367
368
369 /* Function vect_mark_stmts_to_be_vectorized.
370
371 Not all stmts in the loop need to be vectorized. For example:
372
373 for i...
374 for j...
375 1. T0 = i + j
376 2. T1 = a[T0]
377
378 3. j = j + 1
379
380    Stmts 1 and 3 do not need to be vectorized, because loop control and
381 addressing of vectorized data-refs are handled differently.
382
383 This pass detects such stmts. */
384
385 bool
386 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
387 {
388 VEC(gimple,heap) *worklist;
389 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
390 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
391 unsigned int nbbs = loop->num_nodes;
392 gimple_stmt_iterator si;
393 gimple stmt;
394 unsigned int i;
395 stmt_vec_info stmt_vinfo;
396 basic_block bb;
397 gimple phi;
398 bool live_p;
399 enum vect_relevant relevant, tmp_relevant;
400 enum vect_def_type def_type;
401
402 if (vect_print_dump_info (REPORT_DETAILS))
403 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
404
405 worklist = VEC_alloc (gimple, heap, 64);
406
407 /* 1. Init worklist. */
408 for (i = 0; i < nbbs; i++)
409 {
410 bb = bbs[i];
411 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
412 {
413 phi = gsi_stmt (si);
414 if (vect_print_dump_info (REPORT_DETAILS))
415 {
416 fprintf (vect_dump, "init: phi relevant? ");
417 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
418 }
419
420 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
421 vect_mark_relevant (&worklist, phi, relevant, live_p);
422 }
423 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
424 {
425 stmt = gsi_stmt (si);
426 if (vect_print_dump_info (REPORT_DETAILS))
427 {
428 fprintf (vect_dump, "init: stmt relevant? ");
429 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
430 }
431
432 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
433 vect_mark_relevant (&worklist, stmt, relevant, live_p);
434 }
435 }
436
437 /* 2. Process_worklist */
438 while (VEC_length (gimple, worklist) > 0)
439 {
440 use_operand_p use_p;
441 ssa_op_iter iter;
442
443 stmt = VEC_pop (gimple, worklist);
444 if (vect_print_dump_info (REPORT_DETAILS))
445 {
446 fprintf (vect_dump, "worklist: examine stmt: ");
447 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
448 }
449
450 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
451 (DEF_STMT) as relevant/irrelevant and live/dead according to the
452 liveness and relevance properties of STMT. */
453 stmt_vinfo = vinfo_for_stmt (stmt);
454 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
455 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
456
457 /* Generally, the liveness and relevance properties of STMT are
458 propagated as is to the DEF_STMTs of its USEs:
459 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
460 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
461
462 One exception is when STMT has been identified as defining a reduction
463 variable; in this case we set the liveness/relevance as follows:
464 live_p = false
465 relevant = vect_used_by_reduction
466 This is because we distinguish between two kinds of relevant stmts -
467 those that are used by a reduction computation, and those that are
468 (also) used by a regular computation. This allows us later on to
469 identify stmts that are used solely by a reduction, and therefore the
470 order of the results that they produce does not have to be kept. */
471
472 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
473 tmp_relevant = relevant;
474 switch (def_type)
475 {
476 case vect_reduction_def:
477 switch (tmp_relevant)
478 {
479 case vect_unused_in_scope:
480 relevant = vect_used_by_reduction;
481 break;
482
483 case vect_used_by_reduction:
484 if (gimple_code (stmt) == GIMPLE_PHI)
485 break;
486 /* fall through */
487
488 default:
489 if (vect_print_dump_info (REPORT_DETAILS))
490 fprintf (vect_dump, "unsupported use of reduction.");
491
492 VEC_free (gimple, heap, worklist);
493 return false;
494 }
495
496 live_p = false;
497 break;
498
499 case vect_nested_cycle:
500 if (tmp_relevant != vect_unused_in_scope
501 && tmp_relevant != vect_used_in_outer_by_reduction
502 && tmp_relevant != vect_used_in_outer)
503 {
504 if (vect_print_dump_info (REPORT_DETAILS))
505 fprintf (vect_dump, "unsupported use of nested cycle.");
506
507 VEC_free (gimple, heap, worklist);
508 return false;
509 }
510
511 live_p = false;
512 break;
513
514 case vect_double_reduction_def:
515 if (tmp_relevant != vect_unused_in_scope
516 && tmp_relevant != vect_used_by_reduction)
517 {
518 if (vect_print_dump_info (REPORT_DETAILS))
519 fprintf (vect_dump, "unsupported use of double reduction.");
520
521 VEC_free (gimple, heap, worklist);
522 return false;
523 }
524
525 live_p = false;
526 break;
527
528 default:
529 break;
530 }
531
532 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
533 {
534 tree op = USE_FROM_PTR (use_p);
535 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
536 {
537 VEC_free (gimple, heap, worklist);
538 return false;
539 }
540 }
541 } /* while worklist */
542
543 VEC_free (gimple, heap, worklist);
544 return true;
545 }
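/* Worked example of the propagation above (illustrative only; names
   assumed).  For a reduction

     loop:
       sum_1 = PHI <sum_0, sum_2>
       x_3 = a[i];
       sum_2 = sum_1 + x_3;

   the exit use of sum_2 makes the add live, and since its def type is
   vect_reduction_def the worklist processing switches it to
   relevant = vect_used_by_reduction with live_p = false.  process_use
   then marks the load of a[i] (and the phi) as used-by-reduction as
   well, which later allows the vectorizer to reorder statements whose
   results feed only the reduction.  */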
546
547
548 int
549 cost_for_stmt (gimple stmt)
550 {
551 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
552
553 switch (STMT_VINFO_TYPE (stmt_info))
554 {
555 case load_vec_info_type:
556 return targetm.vectorize.builtin_vectorization_cost (scalar_load);
557 case store_vec_info_type:
558 return targetm.vectorize.builtin_vectorization_cost (scalar_store);
559 case op_vec_info_type:
560 case condition_vec_info_type:
561 case assignment_vec_info_type:
562 case reduc_vec_info_type:
563 case induc_vec_info_type:
564 case type_promotion_vec_info_type:
565 case type_demotion_vec_info_type:
566 case type_conversion_vec_info_type:
567 case call_vec_info_type:
568 return targetm.vectorize.builtin_vectorization_cost (scalar_stmt);
569 case undef_vec_info_type:
570 default:
571 gcc_unreachable ();
572 }
573 }
574
575 /* Function vect_model_simple_cost.
576
577 Models cost for simple operations, i.e. those that only emit ncopies of a
578 single op. Right now, this does not account for multiple insns that could
579 be generated for the single vector op. We will handle that shortly. */
580
581 void
582 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
583 enum vect_def_type *dt, slp_tree slp_node)
584 {
585 int i;
586 int inside_cost = 0, outside_cost = 0;
587
588 /* The SLP costs were already calculated during SLP tree build. */
589 if (PURE_SLP_STMT (stmt_info))
590 return;
591
592 inside_cost = ncopies
593 * targetm.vectorize.builtin_vectorization_cost (vector_stmt);
594
595   /* FORNOW: Assuming maximum 2 args per stmt.  */
596 for (i = 0; i < 2; i++)
597 {
598 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
599 outside_cost
600 += targetm.vectorize.builtin_vectorization_cost (vector_stmt);
601 }
602
603 if (vect_print_dump_info (REPORT_COST))
604 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
605 "outside_cost = %d .", inside_cost, outside_cost);
606
607   /* Set the costs either in STMT_INFO or SLP_NODE (if it exists).  */
608 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
609 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
610 }
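/* Worked example (illustrative only; the actual numbers come from the
   target's builtin_vectorization_cost hook).  Assume that hook returns 1
   for vector_stmt.  For a statement vectorized with ncopies == 2 whose
   second operand is loop-invariant (vect_external_def):

     inside_cost  = 2 * 1 = 2   (two copies of the vector operation)
     outside_cost = 1           (one stmt to build the invariant vector,
                                 emitted once outside the loop)  */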
611
612
613 /* Function vect_cost_strided_group_size
614
615 For strided load or store, return the group_size only if it is the first
616 load or store of a group, else return 1. This ensures that group size is
617 only returned once per group. */
618
619 static int
620 vect_cost_strided_group_size (stmt_vec_info stmt_info)
621 {
622 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
623
624 if (first_stmt == STMT_VINFO_STMT (stmt_info))
625 return DR_GROUP_SIZE (stmt_info);
626
627 return 1;
628 }
629
630
631 /* Function vect_model_store_cost
632
633 Models cost for stores. In the case of strided accesses, one access
634 has the overhead of the strided access attributed to it. */
635
636 void
637 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
638 enum vect_def_type dt, slp_tree slp_node)
639 {
640 int group_size;
641 int inside_cost = 0, outside_cost = 0;
642
643 /* The SLP costs were already calculated during SLP tree build. */
644 if (PURE_SLP_STMT (stmt_info))
645 return;
646
647 if (dt == vect_constant_def || dt == vect_external_def)
648 outside_cost
649 = targetm.vectorize.builtin_vectorization_cost (scalar_to_vec);
650
651 /* Strided access? */
652 if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
653 group_size = vect_cost_strided_group_size (stmt_info);
654 /* Not a strided access. */
655 else
656 group_size = 1;
657
658 /* Is this an access in a group of stores, which provide strided access?
659 If so, add in the cost of the permutes. */
660 if (group_size > 1)
661 {
662       /* Uses high and low interleave operations for each needed permute.  */
663       inside_cost = ncopies * exact_log2 (group_size) * group_size
664 * targetm.vectorize.builtin_vectorization_cost (vector_stmt);
665
666 if (vect_print_dump_info (REPORT_COST))
667 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
668 group_size);
669
670 }
671
672 /* Costs of the stores. */
673 inside_cost += ncopies
674 * targetm.vectorize.builtin_vectorization_cost (vector_store);
675
676 if (vect_print_dump_info (REPORT_COST))
677 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
678 "outside_cost = %d .", inside_cost, outside_cost);
679
680   /* Set the costs either in STMT_INFO or SLP_NODE (if it exists).  */
681 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
682 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
683 }
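/* Worked example (illustrative only; assumes the cost hook returns 1 for
   both vector_stmt and vector_store).  For an interleaved group of 4
   stores vectorized with ncopies == 1, the first store of the group is
   charged:

     permute cost = 1 * exact_log2 (4) * 4 * 1 = 8   (interleave ops)
     store cost   = 1 * 1                       = 1
     inside_cost  = 8 + 1                       = 9

   The other three stores of the group see group_size == 1 and are
   charged only their own vector_store.  */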
684
685
686 /* Function vect_model_load_cost
687
688 Models cost for loads. In the case of strided accesses, the last access
689 has the overhead of the strided access attributed to it. Since unaligned
690 accesses are supported for loads, we also account for the costs of the
691 access scheme chosen. */
692
693 void
694 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
695
696 {
697 int group_size;
698   int alignment_support_scheme;
699 gimple first_stmt;
700 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
701 int inside_cost = 0, outside_cost = 0;
702
703 /* The SLP costs were already calculated during SLP tree build. */
704 if (PURE_SLP_STMT (stmt_info))
705 return;
706
707 /* Strided accesses? */
708 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
709 if (first_stmt && !slp_node)
710 {
711 group_size = vect_cost_strided_group_size (stmt_info);
712 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
713 }
714 /* Not a strided access. */
715 else
716 {
717 group_size = 1;
718 first_dr = dr;
719 }
720
721   alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
722
723 /* Is this an access in a group of loads providing strided access?
724 If so, add in the cost of the permutes. */
725 if (group_size > 1)
726 {
727       /* Uses even and odd extract operations for each needed permute.  */
728       inside_cost = ncopies * exact_log2 (group_size) * group_size
729 * targetm.vectorize.builtin_vectorization_cost (vector_stmt);
730
731 if (vect_print_dump_info (REPORT_COST))
732 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
733 group_size);
734
735 }
736
737 /* The loads themselves. */
738   switch (alignment_support_scheme)
739 {
740 case dr_aligned:
741 {
742 inside_cost += ncopies
743 * targetm.vectorize.builtin_vectorization_cost (vector_load);
744
745 if (vect_print_dump_info (REPORT_COST))
746 fprintf (vect_dump, "vect_model_load_cost: aligned.");
747
748 break;
749 }
750 case dr_unaligned_supported:
751 {
752 /* Here, we assign an additional cost for the unaligned load. */
753 inside_cost += ncopies
754 * targetm.vectorize.builtin_vectorization_cost (unaligned_load);
755
756 if (vect_print_dump_info (REPORT_COST))
757 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
758 "hardware.");
759
760 break;
761 }
762 case dr_explicit_realign:
763 {
764 inside_cost += ncopies * (2
765 * targetm.vectorize.builtin_vectorization_cost (vector_load)
766 + targetm.vectorize.builtin_vectorization_cost (vector_stmt));
767
768 /* FIXME: If the misalignment remains fixed across the iterations of
769 the containing loop, the following cost should be added to the
770 outside costs. */
771 if (targetm.vectorize.builtin_mask_for_load)
772 inside_cost
773 += targetm.vectorize.builtin_vectorization_cost (vector_stmt);
774
775 break;
776 }
777 case dr_explicit_realign_optimized:
778 {
779 if (vect_print_dump_info (REPORT_COST))
780 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
781 "pipelined.");
782
783 /* Unaligned software pipeline has a load of an address, an initial
784 load, and possibly a mask operation to "prime" the loop. However,
785 if this is an access in a group of loads, which provide strided
786 access, then the above cost should only be considered for one
787 access in the group. Inside the loop, there is a load op
788 and a realignment op. */
789
790 if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
791 {
792 outside_cost = 2
793 * targetm.vectorize.builtin_vectorization_cost (vector_stmt);
794 if (targetm.vectorize.builtin_mask_for_load)
795 outside_cost
796 += targetm.vectorize.builtin_vectorization_cost (vector_stmt);
797 }
798
799 inside_cost += ncopies
800 * (targetm.vectorize.builtin_vectorization_cost (vector_load)
801 + targetm.vectorize.builtin_vectorization_cost (vector_stmt));
802 break;
803 }
804
805 default:
806 gcc_unreachable ();
807 }
808
809 if (vect_print_dump_info (REPORT_COST))
810 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
811 "outside_cost = %d .", inside_cost, outside_cost);
812
813   /* Set the costs either in STMT_INFO or SLP_NODE (if it exists).  */
814 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
815 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
816 }
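/* Worked example (illustrative only; assumes the cost hook returns 1 for
   vector_stmt and vector_load and 2 for unaligned_load).  For a load
   vectorized with ncopies == 2, the per-scheme inside cost would be:

     dr_aligned:              2 * 1 = 2
     dr_unaligned_supported:  2 * 2 = 4
     dr_explicit_realign:     2 * (2*1 + 1) = 6
                              (two loads plus a realign stmt per copy,
                               plus one mask stmt if the target provides
                               builtin_mask_for_load)  */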
817
818
819 /* Function vect_init_vector.
820
821 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
822 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
823 is not NULL. Otherwise, place the initialization at the loop preheader.
824 Return the DEF of INIT_STMT.
825 It will be used in the vectorization of STMT. */
826
827 tree
828 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
829 gimple_stmt_iterator *gsi)
830 {
831 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
832 tree new_var;
833 gimple init_stmt;
834 tree vec_oprnd;
835 edge pe;
836 tree new_temp;
837 basic_block new_bb;
838
839 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
840 add_referenced_var (new_var);
841 init_stmt = gimple_build_assign (new_var, vector_var);
842 new_temp = make_ssa_name (new_var, init_stmt);
843 gimple_assign_set_lhs (init_stmt, new_temp);
844
845 if (gsi)
846 vect_finish_stmt_generation (stmt, init_stmt, gsi);
847 else
848 {
849 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
850
851 if (loop_vinfo)
852 {
853 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
854
855 if (nested_in_vect_loop_p (loop, stmt))
856 loop = loop->inner;
857
858 pe = loop_preheader_edge (loop);
859 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
860 gcc_assert (!new_bb);
861 }
862 else
863 {
864 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
865 basic_block bb;
866 gimple_stmt_iterator gsi_bb_start;
867
868 gcc_assert (bb_vinfo);
869 bb = BB_VINFO_BB (bb_vinfo);
870 gsi_bb_start = gsi_after_labels (bb);
871 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
872 }
873 }
874
875 if (vect_print_dump_info (REPORT_DETAILS))
876 {
877 fprintf (vect_dump, "created new init_stmt: ");
878 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
879 }
880
881 vec_oprnd = gimple_assign_lhs (init_stmt);
882 return vec_oprnd;
883 }
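/* Sketch of the code this generates (illustrative only; the SSA name is
   assumed).  For a constant operand 3 and vector type V4SI, the caller
   builds the vector constant and this function emits, in the loop
   preheader (or at GSI when one is passed):

     cst_.7 = { 3, 3, 3, 3 };

   and returns the SSA name cst_.7 for use in the vectorized stmt.  */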
884
885
886 /* Function vect_get_vec_def_for_operand.
887
888 OP is an operand in STMT. This function returns a (vector) def that will be
889 used in the vectorized stmt for STMT.
890
891 In the case that OP is an SSA_NAME which is defined in the loop, then
892 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
893
894 In case OP is an invariant or constant, a new stmt that creates a vector def
895 needs to be introduced. */
896
897 tree
898 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
899 {
900 tree vec_oprnd;
901 gimple vec_stmt;
902 gimple def_stmt;
903 stmt_vec_info def_stmt_info = NULL;
904 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
905 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
906 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
907 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
908 tree vec_inv;
909 tree vec_cst;
910 tree t = NULL_TREE;
911 tree def;
912 int i;
913 enum vect_def_type dt;
914 bool is_simple_use;
915 tree vector_type;
916
917 if (vect_print_dump_info (REPORT_DETAILS))
918 {
919 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
920 print_generic_expr (vect_dump, op, TDF_SLIM);
921 }
922
923 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
924 &dt);
925 gcc_assert (is_simple_use);
926 if (vect_print_dump_info (REPORT_DETAILS))
927 {
928 if (def)
929 {
930 fprintf (vect_dump, "def = ");
931 print_generic_expr (vect_dump, def, TDF_SLIM);
932 }
933 if (def_stmt)
934 {
935 fprintf (vect_dump, " def_stmt = ");
936 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
937 }
938 }
939
940 switch (dt)
941 {
942 /* Case 1: operand is a constant. */
943 case vect_constant_def:
944 {
945 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
946 gcc_assert (vector_type);
947
948 if (scalar_def)
949 *scalar_def = op;
950
951 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
952 if (vect_print_dump_info (REPORT_DETAILS))
953 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
954
955 for (i = nunits - 1; i >= 0; --i)
956 {
957 t = tree_cons (NULL_TREE, op, t);
958 }
959 vec_cst = build_vector (vector_type, t);
960 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
961 }
962
963 /* Case 2: operand is defined outside the loop - loop invariant. */
964 case vect_external_def:
965 {
966 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
967 gcc_assert (vector_type);
968 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
969
970 if (scalar_def)
971 *scalar_def = def;
972
973 /* Create 'vec_inv = {inv,inv,..,inv}' */
974 if (vect_print_dump_info (REPORT_DETAILS))
975 fprintf (vect_dump, "Create vector_inv.");
976
977 for (i = nunits - 1; i >= 0; --i)
978 {
979 t = tree_cons (NULL_TREE, def, t);
980 }
981
982 /* FIXME: use build_constructor directly. */
983 vec_inv = build_constructor_from_list (vector_type, t);
984 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
985 }
986
987 /* Case 3: operand is defined inside the loop. */
988 case vect_internal_def:
989 {
990 if (scalar_def)
991 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
992
993 /* Get the def from the vectorized stmt. */
994 def_stmt_info = vinfo_for_stmt (def_stmt);
995 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
996 gcc_assert (vec_stmt);
997 if (gimple_code (vec_stmt) == GIMPLE_PHI)
998 vec_oprnd = PHI_RESULT (vec_stmt);
999 else if (is_gimple_call (vec_stmt))
1000 vec_oprnd = gimple_call_lhs (vec_stmt);
1001 else
1002 vec_oprnd = gimple_assign_lhs (vec_stmt);
1003 return vec_oprnd;
1004 }
1005
1006 /* Case 4: operand is defined by a loop header phi - reduction */
1007 case vect_reduction_def:
1008 case vect_double_reduction_def:
1009 case vect_nested_cycle:
1010 {
1011 struct loop *loop;
1012
1013 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1014 loop = (gimple_bb (def_stmt))->loop_father;
1015
1016 /* Get the def before the loop */
1017 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1018 return get_initial_def_for_reduction (stmt, op, scalar_def);
1019 }
1020
1021 /* Case 5: operand is defined by loop-header phi - induction. */
1022 case vect_induction_def:
1023 {
1024 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1025
1026 /* Get the def from the vectorized stmt. */
1027 def_stmt_info = vinfo_for_stmt (def_stmt);
1028 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1029 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1030 vec_oprnd = PHI_RESULT (vec_stmt);
1031 return vec_oprnd;
1032 }
1033
1034 default:
1035 gcc_unreachable ();
1036 }
1037 }
1038
1039
1040 /* Function vect_get_vec_def_for_stmt_copy
1041
1042 Return a vector-def for an operand. This function is used when the
1043 vectorized stmt to be created (by the caller to this function) is a "copy"
1044 created in case the vectorized result cannot fit in one vector, and several
1045 copies of the vector-stmt are required. In this case the vector-def is
1046 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1047 of the stmt that defines VEC_OPRND.
1048 DT is the type of the vector def VEC_OPRND.
1049
1050 Context:
1051 In case the vectorization factor (VF) is bigger than the number
1052 of elements that can fit in a vectype (nunits), we have to generate
1053 more than one vector stmt to vectorize the scalar stmt. This situation
1054 arises when there are multiple data-types operated upon in the loop; the
1055 smallest data-type determines the VF, and as a result, when vectorizing
1056 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1057 vector stmt (each computing a vector of 'nunits' results, and together
1058 computing 'VF' results in each iteration). This function is called when
1059 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1060 which VF=16 and nunits=4, so the number of copies required is 4):
1061
1062 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1063
1064 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1065 VS1.1: vx.1 = memref1 VS1.2
1066 VS1.2: vx.2 = memref2 VS1.3
1067 VS1.3: vx.3 = memref3
1068
1069 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1070 VSnew.1: vz1 = vx.1 + ... VSnew.2
1071 VSnew.2: vz2 = vx.2 + ... VSnew.3
1072 VSnew.3: vz3 = vx.3 + ...
1073
1074 The vectorization of S1 is explained in vectorizable_load.
1075 The vectorization of S2:
1076 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1077 the function 'vect_get_vec_def_for_operand' is called to
1078 get the relevant vector-def for each operand of S2. For operand x it
1079 returns the vector-def 'vx.0'.
1080
1081 To create the remaining copies of the vector-stmt (VSnew.j), this
1082 function is called to get the relevant vector-def for each operand. It is
1083 obtained from the respective VS1.j stmt, which is recorded in the
1084 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1085
1086 For example, to obtain the vector-def 'vx.1' in order to create the
1087 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1088    Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
1089 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1090 and return its def ('vx.1').
1091 Overall, to create the above sequence this function will be called 3 times:
1092 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1093 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1094 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1095
1096 tree
1097 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1098 {
1099 gimple vec_stmt_for_operand;
1100 stmt_vec_info def_stmt_info;
1101
1102 /* Do nothing; can reuse same def. */
1103   if (dt == vect_external_def || dt == vect_constant_def)
1104 return vec_oprnd;
1105
1106 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1107 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1108 gcc_assert (def_stmt_info);
1109 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1110 gcc_assert (vec_stmt_for_operand);
1111 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1112 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1113 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1114 else
1115 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1116 return vec_oprnd;
1117 }
1118
1119
1120 /* Get vectorized definitions for the operands to create a copy of an original
1121 stmt. See vect_get_vec_def_for_stmt_copy() for details. */
1122
1123 static void
1124 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1125 VEC(tree,heap) **vec_oprnds0,
1126 VEC(tree,heap) **vec_oprnds1)
1127 {
1128 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1129
1130 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1131 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1132
1133 if (vec_oprnds1 && *vec_oprnds1)
1134 {
1135 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1136 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1137 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1138 }
1139 }
1140
1141
1142 /* Get vectorized definitions for OP0 and OP1 (from SLP_NODE if it is not NULL).  */
1143
1144 static void
1145 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1146 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1147 slp_tree slp_node)
1148 {
1149 if (slp_node)
1150 vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1, -1);
1151 else
1152 {
1153 tree vec_oprnd;
1154
1155 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1156 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1157 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1158
1159 if (op1)
1160 {
1161 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1162 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1163 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1164 }
1165 }
1166 }
1167
1168
1169 /* Function vect_finish_stmt_generation.
1170
1171 Insert a new stmt. */
1172
1173 void
1174 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1175 gimple_stmt_iterator *gsi)
1176 {
1177 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1178 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1179 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1180
1181 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1182
1183 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1184
1185 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1186 bb_vinfo));
1187
1188 if (vect_print_dump_info (REPORT_DETAILS))
1189 {
1190 fprintf (vect_dump, "add new stmt: ");
1191 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1192 }
1193
1194 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1195 }
1196
1197 /* Checks if CALL can be vectorized with input vector type VECTYPE_IN and
1198    output vector type VECTYPE_OUT.  Returns a function declaration if the
1199    target has a vectorized version of the function, or NULL_TREE otherwise.  */
1200
1201 tree
1202 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1203 {
1204 tree fndecl = gimple_call_fndecl (call);
1205
1206 /* We only handle functions that do not read or clobber memory -- i.e.
1207 const or novops ones. */
1208 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1209 return NULL_TREE;
1210
1211 if (!fndecl
1212 || TREE_CODE (fndecl) != FUNCTION_DECL
1213 || !DECL_BUILT_IN (fndecl))
1214 return NULL_TREE;
1215
1216 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1217 vectype_in);
1218 }
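/* Example (illustrative only).  For a loop such as

     for (i = 0; i < n; i++)
       b[i] = sqrt (a[i]);

   and provided the call is const or novops (e.g. sqrt compiled with
   -fno-math-errno), this function asks the target hook
   builtin_vectorized_function whether a vector counterpart exists for
   the given input/output vector types; if the hook returns NULL_TREE,
   vectorizable_call below rejects the statement.  */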
1219
1220 /* Function vectorizable_call.
1221
1222 Check if STMT performs a function call that can be vectorized.
1223 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1224    stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1225 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1226
1227 static bool
1228 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1229 {
1230 tree vec_dest;
1231 tree scalar_dest;
1232 tree op, type;
1233 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1234 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1235 tree vectype_out, vectype_in;
1236 int nunits_in;
1237 int nunits_out;
1238 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1239 tree fndecl, new_temp, def, rhs_type;
1240 gimple def_stmt;
1241 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1242 gimple new_stmt = NULL;
1243 int ncopies, j;
1244 VEC(tree, heap) *vargs = NULL;
1245 enum { NARROW, NONE, WIDEN } modifier;
1246 size_t i, nargs;
1247
1248 /* FORNOW: unsupported in basic block SLP. */
1249 gcc_assert (loop_vinfo);
1250
1251 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1252 return false;
1253
1254 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1255 return false;
1256
1257 /* FORNOW: SLP not supported. */
1258 if (STMT_SLP_TYPE (stmt_info))
1259 return false;
1260
1261 /* Is STMT a vectorizable call? */
1262 if (!is_gimple_call (stmt))
1263 return false;
1264
1265 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1266 return false;
1267
1268 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1269
1270 /* Process function arguments. */
1271 rhs_type = NULL_TREE;
1272 vectype_in = NULL_TREE;
1273 nargs = gimple_call_num_args (stmt);
1274
1275   /* Bail out if the function has more than two arguments; we do not
1276      have interesting builtin functions to vectorize with more than two
1277      arguments.  A call with no arguments is not interesting either.  */
1278 if (nargs == 0 || nargs > 2)
1279 return false;
1280
1281 for (i = 0; i < nargs; i++)
1282 {
1283 tree opvectype;
1284
1285 op = gimple_call_arg (stmt, i);
1286
1287 /* We can only handle calls with arguments of the same type. */
1288 if (rhs_type
1289 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
1290 {
1291 if (vect_print_dump_info (REPORT_DETAILS))
1292 fprintf (vect_dump, "argument types differ.");
1293 return false;
1294 }
1295 if (!rhs_type)
1296 rhs_type = TREE_TYPE (op);
1297
1298 if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
1299 &def_stmt, &def, &dt[i], &opvectype))
1300 {
1301 if (vect_print_dump_info (REPORT_DETAILS))
1302 fprintf (vect_dump, "use not simple.");
1303 return false;
1304 }
1305
1306 if (!vectype_in)
1307 vectype_in = opvectype;
1308 else if (opvectype
1309 && opvectype != vectype_in)
1310 {
1311 if (vect_print_dump_info (REPORT_DETAILS))
1312 fprintf (vect_dump, "argument vector types differ.");
1313 return false;
1314 }
1315 }
1316 /* If all arguments are external or constant defs use a vector type with
1317 the same size as the output vector type. */
1318 if (!vectype_in)
1319 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1320 if (vec_stmt)
1321 gcc_assert (vectype_in);
1322 if (!vectype_in)
1323 {
1324 if (vect_print_dump_info (REPORT_DETAILS))
1325 {
1326 fprintf (vect_dump, "no vectype for scalar type ");
1327 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1328 }
1329
1330 return false;
1331 }
1332
1333 /* FORNOW */
1334 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1335 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1336 if (nunits_in == nunits_out / 2)
1337 modifier = NARROW;
1338 else if (nunits_out == nunits_in)
1339 modifier = NONE;
1340 else if (nunits_out == nunits_in / 2)
1341 modifier = WIDEN;
1342 else
1343 return false;
1344
1345 /* For now, we only vectorize functions if a target specific builtin
1346 is available. TODO -- in some cases, it might be profitable to
1347 insert the calls for pieces of the vector, in order to be able
1348 to vectorize other operations in the loop. */
1349 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1350 if (fndecl == NULL_TREE)
1351 {
1352 if (vect_print_dump_info (REPORT_DETAILS))
1353 fprintf (vect_dump, "function is not vectorizable.");
1354
1355 return false;
1356 }
1357
1358 gcc_assert (!gimple_vuse (stmt));
1359
1360 if (modifier == NARROW)
1361 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1362 else
1363 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1364
1365 /* Sanity check: make sure that at least one copy of the vectorized stmt
1366 needs to be generated. */
1367 gcc_assert (ncopies >= 1);
1368
1369 if (!vec_stmt) /* transformation not required. */
1370 {
1371 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1372 if (vect_print_dump_info (REPORT_DETAILS))
1373 fprintf (vect_dump, "=== vectorizable_call ===");
1374 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1375 return true;
1376 }
1377
1378 /** Transform. **/
1379
1380 if (vect_print_dump_info (REPORT_DETAILS))
1381 fprintf (vect_dump, "transform operation.");
1382
1383 /* Handle def. */
1384 scalar_dest = gimple_call_lhs (stmt);
1385 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1386
1387 prev_stmt_info = NULL;
1388 switch (modifier)
1389 {
1390 case NONE:
1391 for (j = 0; j < ncopies; ++j)
1392 {
1393 /* Build argument list for the vectorized call. */
1394 if (j == 0)
1395 vargs = VEC_alloc (tree, heap, nargs);
1396 else
1397 VEC_truncate (tree, vargs, 0);
1398
1399 for (i = 0; i < nargs; i++)
1400 {
1401 op = gimple_call_arg (stmt, i);
1402 if (j == 0)
1403 vec_oprnd0
1404 = vect_get_vec_def_for_operand (op, stmt, NULL);
1405 else
1406 {
1407 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1408 vec_oprnd0
1409 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1410 }
1411
1412 VEC_quick_push (tree, vargs, vec_oprnd0);
1413 }
1414
1415 new_stmt = gimple_build_call_vec (fndecl, vargs);
1416 new_temp = make_ssa_name (vec_dest, new_stmt);
1417 gimple_call_set_lhs (new_stmt, new_temp);
1418
1419 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1420 mark_symbols_for_renaming (new_stmt);
1421
1422 if (j == 0)
1423 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1424 else
1425 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1426
1427 prev_stmt_info = vinfo_for_stmt (new_stmt);
1428 }
1429
1430 break;
1431
1432 case NARROW:
1433 for (j = 0; j < ncopies; ++j)
1434 {
1435 /* Build argument list for the vectorized call. */
1436 if (j == 0)
1437 vargs = VEC_alloc (tree, heap, nargs * 2);
1438 else
1439 VEC_truncate (tree, vargs, 0);
1440
1441 for (i = 0; i < nargs; i++)
1442 {
1443 op = gimple_call_arg (stmt, i);
1444 if (j == 0)
1445 {
1446 vec_oprnd0
1447 = vect_get_vec_def_for_operand (op, stmt, NULL);
1448 vec_oprnd1
1449 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1450 }
1451 else
1452 {
1453 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1454 vec_oprnd0
1455 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1456 vec_oprnd1
1457 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1458 }
1459
1460 VEC_quick_push (tree, vargs, vec_oprnd0);
1461 VEC_quick_push (tree, vargs, vec_oprnd1);
1462 }
1463
1464 new_stmt = gimple_build_call_vec (fndecl, vargs);
1465 new_temp = make_ssa_name (vec_dest, new_stmt);
1466 gimple_call_set_lhs (new_stmt, new_temp);
1467
1468 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1469 mark_symbols_for_renaming (new_stmt);
1470
1471 if (j == 0)
1472 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1473 else
1474 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1475
1476 prev_stmt_info = vinfo_for_stmt (new_stmt);
1477 }
1478
1479 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1480
1481 break;
1482
1483 case WIDEN:
1484 /* No current target implements this case. */
1485 return false;
1486 }
1487
1488 VEC_free (tree, heap, vargs);
1489
1490 /* Update the exception handling table with the vector stmt if necessary. */
1491 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1492 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1493
1494 /* The call in STMT might prevent it from being removed in dce.
1495 We however cannot remove it here, due to the way the ssa name
1496 it defines is mapped to the new definition. So just replace
1497 rhs of the statement with something harmless. */
1498
1499 type = TREE_TYPE (scalar_dest);
1500 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1501 fold_convert (type, integer_zero_node));
1502 set_vinfo_for_stmt (new_stmt, stmt_info);
1503 set_vinfo_for_stmt (stmt, NULL);
1504 STMT_VINFO_STMT (stmt_info) = new_stmt;
1505 gsi_replace (gsi, new_stmt, false);
1506 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1507
1508 return true;
1509 }
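/* Worked example of the ncopies/modifier computation above (illustrative
   only).  With a vectorization factor of 8 and a call whose argument and
   result vector types are both V4SF (nunits_in == nunits_out == 4):

     modifier = NONE
     ncopies  = VF / nunits_in = 8 / 4 = 2

   so two vector calls are emitted per scalar call.  If instead the result
   vector had twice as many elements as the input vector
   (nunits_in == nunits_out / 2), the NARROW path is taken:
   ncopies = VF / nunits_out, and each emitted call consumes two vector
   defs per scalar argument.  */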
1510
1511
1512 /* Function vect_gen_widened_results_half
1513
1514    Create a vector stmt whose code, number of arguments, and result
1515    variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
1516    VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at GSI.
1517 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1518 needs to be created (DECL is a function-decl of a target-builtin).
1519 STMT is the original scalar stmt that we are vectorizing. */
1520
1521 static gimple
1522 vect_gen_widened_results_half (enum tree_code code,
1523 tree decl,
1524 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1525 tree vec_dest, gimple_stmt_iterator *gsi,
1526 gimple stmt)
1527 {
1528 gimple new_stmt;
1529 tree new_temp;
1530
1531 /* Generate half of the widened result: */
1532 if (code == CALL_EXPR)
1533 {
1534 /* Target specific support */
1535 if (op_type == binary_op)
1536 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1537 else
1538 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1539 new_temp = make_ssa_name (vec_dest, new_stmt);
1540 gimple_call_set_lhs (new_stmt, new_temp);
1541 }
1542 else
1543 {
1544 /* Generic support */
1545 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1546 if (op_type != binary_op)
1547 vec_oprnd1 = NULL;
1548 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1549 vec_oprnd1);
1550 new_temp = make_ssa_name (vec_dest, new_stmt);
1551 gimple_assign_set_lhs (new_stmt, new_temp);
1552 }
1553 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1554
1555 return new_stmt;
1556 }
1557
1558
1559 /* Check if STMT performs a conversion operation that can be vectorized.
1560    If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1561    stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1562 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1563
1564 static bool
1565 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1566 gimple *vec_stmt, slp_tree slp_node)
1567 {
1568 tree vec_dest;
1569 tree scalar_dest;
1570 tree op0;
1571 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1572 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1573 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1574 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1575 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1576 tree new_temp;
1577 tree def;
1578 gimple def_stmt;
1579 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1580 gimple new_stmt = NULL;
1581 stmt_vec_info prev_stmt_info;
1582 int nunits_in;
1583 int nunits_out;
1584 tree vectype_out, vectype_in;
1585 int ncopies, j;
1586 tree rhs_type;
1587 tree builtin_decl;
1588 enum { NARROW, NONE, WIDEN } modifier;
1589 int i;
1590 VEC(tree,heap) *vec_oprnds0 = NULL;
1591 tree vop0;
1592 VEC(tree,heap) *dummy = NULL;
1593 int dummy_int;
1594
1595 /* Is STMT a vectorizable conversion? */
1596
1597 /* FORNOW: unsupported in basic block SLP. */
1598 gcc_assert (loop_vinfo);
1599
1600 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1601 return false;
1602
1603 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1604 return false;
1605
1606 if (!is_gimple_assign (stmt))
1607 return false;
1608
1609 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1610 return false;
1611
1612 code = gimple_assign_rhs_code (stmt);
1613 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1614 return false;
1615
1616 /* Check types of lhs and rhs. */
1617 scalar_dest = gimple_assign_lhs (stmt);
1618 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1619
1620 op0 = gimple_assign_rhs1 (stmt);
1621 rhs_type = TREE_TYPE (op0);
1622 /* Check the operands of the operation. */
1623 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1624 &def_stmt, &def, &dt[0], &vectype_in))
1625 {
1626 if (vect_print_dump_info (REPORT_DETAILS))
1627 fprintf (vect_dump, "use not simple.");
1628 return false;
1629 }
1630 /* If op0 is an external or constant defs use a vector type of
1631 the same size as the output vector type. */
1632 if (!vectype_in)
1633 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1634 if (vec_stmt)
1635 gcc_assert (vectype_in);
1636 if (!vectype_in)
1637 {
1638 if (vect_print_dump_info (REPORT_DETAILS))
1639 {
1640 fprintf (vect_dump, "no vectype for scalar type ");
1641 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1642 }
1643
1644 return false;
1645 }
1646
1647 /* FORNOW */
1648 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1649 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1650 if (nunits_in == nunits_out / 2)
1651 modifier = NARROW;
1652 else if (nunits_out == nunits_in)
1653 modifier = NONE;
1654 else if (nunits_out == nunits_in / 2)
1655 modifier = WIDEN;
1656 else
1657 return false;
1658
1659 if (modifier == NARROW)
1660 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1661 else
1662 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1663
1664 /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
1665 this, so we can safely override NCOPIES with 1 here. */
1666 if (slp_node)
1667 ncopies = 1;
1668
1669 /* Sanity check: make sure that at least one copy of the vectorized stmt
1670 needs to be generated. */
1671 gcc_assert (ncopies >= 1);
1672
1673 /* Supportable by target? */
1674 if ((modifier == NONE
1675 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1676 || (modifier == WIDEN
1677 && !supportable_widening_operation (code, stmt,
1678 vectype_out, vectype_in,
1679 &decl1, &decl2,
1680 &code1, &code2,
1681 &dummy_int, &dummy))
1682 || (modifier == NARROW
1683 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1684 &code1, &dummy_int, &dummy)))
1685 {
1686 if (vect_print_dump_info (REPORT_DETAILS))
1687 fprintf (vect_dump, "conversion not supported by target.");
1688 return false;
1689 }
1690
1691 if (modifier != NONE)
1692 {
1693 /* FORNOW: SLP not supported. */
1694 if (STMT_SLP_TYPE (stmt_info))
1695 return false;
1696 }
1697
1698 if (!vec_stmt) /* transformation not required. */
1699 {
1700 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1701 return true;
1702 }
1703
1704 /** Transform. **/
1705 if (vect_print_dump_info (REPORT_DETAILS))
1706 fprintf (vect_dump, "transform conversion.");
1707
1708 /* Handle def. */
1709 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1710
1711 if (modifier == NONE && !slp_node)
1712 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1713
1714 prev_stmt_info = NULL;
1715 switch (modifier)
1716 {
1717 case NONE:
1718 for (j = 0; j < ncopies; j++)
1719 {
1720 if (j == 0)
1721 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1722 else
1723 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1724
1725 builtin_decl =
1726 targetm.vectorize.builtin_conversion (code,
1727 vectype_out, vectype_in);
1728 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
1729 {
1730           /* Arguments are ready.  Create the new vector stmt.  */
1731 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1732 new_temp = make_ssa_name (vec_dest, new_stmt);
1733 gimple_call_set_lhs (new_stmt, new_temp);
1734 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1735 if (slp_node)
1736 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1737 }
1738
1739 if (j == 0)
1740 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1741 else
1742 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1743 prev_stmt_info = vinfo_for_stmt (new_stmt);
1744 }
1745 break;
1746
1747 case WIDEN:
1748 /* In case the vectorization factor (VF) is bigger than the number
1749 of elements that we can fit in a vectype (nunits), we have to
1750 generate more than one vector stmt - i.e - we need to "unroll"
1751 the vector stmt by a factor VF/nunits. */
1752 for (j = 0; j < ncopies; j++)
1753 {
1754 if (j == 0)
1755 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1756 else
1757 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1758
1759 /* Generate first half of the widened result: */
1760 new_stmt
1761 = vect_gen_widened_results_half (code1, decl1,
1762 vec_oprnd0, vec_oprnd1,
1763 unary_op, vec_dest, gsi, stmt);
1764 if (j == 0)
1765 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1766 else
1767 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1768 prev_stmt_info = vinfo_for_stmt (new_stmt);
1769
1770 /* Generate second half of the widened result: */
1771 new_stmt
1772 = vect_gen_widened_results_half (code2, decl2,
1773 vec_oprnd0, vec_oprnd1,
1774 unary_op, vec_dest, gsi, stmt);
1775 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1776 prev_stmt_info = vinfo_for_stmt (new_stmt);
1777 }
1778 break;
1779
1780 case NARROW:
1781 /* In case the vectorization factor (VF) is bigger than the number
1782 of elements that we can fit in a vectype (nunits), we have to
1783 generate more than one vector stmt - i.e - we need to "unroll"
1784 the vector stmt by a factor VF/nunits. */
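/* For illustration (assumed types): each narrowed output vector consumes
   two input vectors. E.g. truncating V2DF doubles to V4SI ints packs two
   consecutive defs (VEC_OPRND0, VEC_OPRND1) into one V4SI result with
   CODE1 in every copy J. */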
1785 for (j = 0; j < ncopies; j++)
1786 {
1787 /* Handle uses. */
1788 if (j == 0)
1789 {
1790 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1791 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1792 }
1793 else
1794 {
1795 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1796 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1797 }
1798
1799 /* Arguments are ready. Create the new vector stmt. */
1800 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1801 vec_oprnd1);
1802 new_temp = make_ssa_name (vec_dest, new_stmt);
1803 gimple_assign_set_lhs (new_stmt, new_temp);
1804 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1805
1806 if (j == 0)
1807 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1808 else
1809 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1810
1811 prev_stmt_info = vinfo_for_stmt (new_stmt);
1812 }
1813
1814 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1815 }
1816
1817 if (vec_oprnds0)
1818 VEC_free (tree, heap, vec_oprnds0);
1819
1820 return true;
1821 }
1822 /* Function vectorizable_assignment.
1823
1824 Check if STMT performs an assignment (copy) that can be vectorized.
1825 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1826 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1827 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1828
1829 static bool
1830 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1831 gimple *vec_stmt, slp_tree slp_node)
1832 {
1833 tree vec_dest;
1834 tree scalar_dest;
1835 tree op;
1836 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1837 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1838 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1839 tree new_temp;
1840 tree def;
1841 gimple def_stmt;
1842 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1843 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1844 int ncopies;
1845 int i, j;
1846 VEC(tree,heap) *vec_oprnds = NULL;
1847 tree vop;
1848 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1849 gimple new_stmt = NULL;
1850 stmt_vec_info prev_stmt_info = NULL;
1851 enum tree_code code;
1852 tree vectype_in;
1853
1854 /* Multiple types in SLP are handled by creating the appropriate number of
1855 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1856 case of SLP. */
1857 if (slp_node)
1858 ncopies = 1;
1859 else
1860 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1861
1862 gcc_assert (ncopies >= 1);
1863
1864 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1865 return false;
1866
1867 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1868 return false;
1869
1870 /* Is vectorizable assignment? */
1871 if (!is_gimple_assign (stmt))
1872 return false;
1873
1874 scalar_dest = gimple_assign_lhs (stmt);
1875 if (TREE_CODE (scalar_dest) != SSA_NAME)
1876 return false;
1877
1878 code = gimple_assign_rhs_code (stmt);
1879 if (gimple_assign_single_p (stmt)
1880 || code == PAREN_EXPR
1881 || CONVERT_EXPR_CODE_P (code))
1882 op = gimple_assign_rhs1 (stmt);
1883 else
1884 return false;
1885
1886 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
1887 &def_stmt, &def, &dt[0], &vectype_in))
1888 {
1889 if (vect_print_dump_info (REPORT_DETAILS))
1890 fprintf (vect_dump, "use not simple.");
1891 return false;
1892 }
1893
1894 /* We can handle NOP_EXPR conversions that do not change the number
1895 of elements or the vector size. */
1896 if (CONVERT_EXPR_CODE_P (code)
1897 && (!vectype_in
1898 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
1899 || (GET_MODE_SIZE (TYPE_MODE (vectype))
1900 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
1901 return false;
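/* E.g. a cast between int and unsigned int vectors (same number of
   elements, same vector mode size) passes this check and is handled
   below as a plain copy through a VIEW_CONVERT_EXPR; a widening cast
   such as short -> int fails it and is left to
   vectorizable_type_promotion. */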
1902
1903 if (!vec_stmt) /* transformation not required. */
1904 {
1905 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1906 if (vect_print_dump_info (REPORT_DETAILS))
1907 fprintf (vect_dump, "=== vectorizable_assignment ===");
1908 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1909 return true;
1910 }
1911
1912 /** Transform. **/
1913 if (vect_print_dump_info (REPORT_DETAILS))
1914 fprintf (vect_dump, "transform assignment.");
1915
1916 /* Handle def. */
1917 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1918
1919 /* Handle use. */
1920 for (j = 0; j < ncopies; j++)
1921 {
1922 /* Handle uses. */
1923 if (j == 0)
1924 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
1925 else
1926 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
1927
1928 /* Arguments are ready. Create the new vector stmt. */
1929 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
1930 {
1931 if (CONVERT_EXPR_CODE_P (code))
1932 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
1933 new_stmt = gimple_build_assign (vec_dest, vop);
1934 new_temp = make_ssa_name (vec_dest, new_stmt);
1935 gimple_assign_set_lhs (new_stmt, new_temp);
1936 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1937 if (slp_node)
1938 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1939 }
1940
1941 if (slp_node)
1942 continue;
1943
1944 if (j == 0)
1945 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1946 else
1947 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1948
1949 prev_stmt_info = vinfo_for_stmt (new_stmt);
1950 }
1951
1952 VEC_free (tree, heap, vec_oprnds);
1953 return true;
1954 }
1955
1956 /* Function vectorizable_operation.
1957
1958 Check if STMT performs a binary or unary operation that can be vectorized.
1959 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1960 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1961 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1962
1963 static bool
1964 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
1965 gimple *vec_stmt, slp_tree slp_node)
1966 {
1967 tree vec_dest;
1968 tree scalar_dest;
1969 tree op0, op1 = NULL;
1970 tree vec_oprnd1 = NULL_TREE;
1971 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1972 tree vectype;
1973 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1974 enum tree_code code;
1975 enum machine_mode vec_mode;
1976 tree new_temp;
1977 int op_type;
1978 optab optab;
1979 int icode;
1980 enum machine_mode optab_op2_mode;
1981 tree def;
1982 gimple def_stmt;
1983 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1984 gimple new_stmt = NULL;
1985 stmt_vec_info prev_stmt_info;
1986 int nunits_in;
1987 int nunits_out;
1988 tree vectype_out;
1989 int ncopies;
1990 int j, i;
1991 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
1992 tree vop0, vop1;
1993 unsigned int k;
1994 bool scalar_shift_arg = false;
1995 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1996 int vf;
1997
1998 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1999 return false;
2000
2001 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2002 return false;
2003
2004 /* Is STMT a vectorizable binary/unary operation? */
2005 if (!is_gimple_assign (stmt))
2006 return false;
2007
2008 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2009 return false;
2010
2011 code = gimple_assign_rhs_code (stmt);
2012
2013 /* For pointer addition, we should use the normal plus for
2014 the vector addition. */
2015 if (code == POINTER_PLUS_EXPR)
2016 code = PLUS_EXPR;
2017
2018 /* Support only unary or binary operations. */
2019 op_type = TREE_CODE_LENGTH (code);
2020 if (op_type != unary_op && op_type != binary_op)
2021 {
2022 if (vect_print_dump_info (REPORT_DETAILS))
2023 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
2024 return false;
2025 }
2026
2027 scalar_dest = gimple_assign_lhs (stmt);
2028 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2029
2030 op0 = gimple_assign_rhs1 (stmt);
2031 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2032 &def_stmt, &def, &dt[0], &vectype))
2033 {
2034 if (vect_print_dump_info (REPORT_DETAILS))
2035 fprintf (vect_dump, "use not simple.");
2036 return false;
2037 }
2038 /* If op0 is an external or constant def use a vector type with
2039 the same size as the output vector type. */
2040 if (!vectype)
2041 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2042 if (vec_stmt)
2043 gcc_assert (vectype);
2044 if (!vectype)
2045 {
2046 if (vect_print_dump_info (REPORT_DETAILS))
2047 {
2048 fprintf (vect_dump, "no vectype for scalar type ");
2049 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2050 }
2051
2052 return false;
2053 }
2054
2055 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2056 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2057 if (nunits_out != nunits_in)
2058 return false;
2059
2060 if (op_type == binary_op)
2061 {
2062 op1 = gimple_assign_rhs2 (stmt);
2063 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2064 &dt[1]))
2065 {
2066 if (vect_print_dump_info (REPORT_DETAILS))
2067 fprintf (vect_dump, "use not simple.");
2068 return false;
2069 }
2070 }
2071
2072 if (loop_vinfo)
2073 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2074 else
2075 vf = 1;
2076
2077 /* Multiple types in SLP are handled by creating the appropriate number of
2078 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2079 case of SLP. */
2080 if (slp_node)
2081 ncopies = 1;
2082 else
2083 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2084
2085 gcc_assert (ncopies >= 1);
2086
2087 /* If this is a shift/rotate, determine whether the shift amount is a vector,
2088 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2089 shift optabs. */
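/* E.g. a[i] << b[i] (shift amount defined inside the loop) needs the
   vector/vector optab, while a[i] << 3 or a[i] << n with invariant n
   first tries the vector/scalar optab and, failing that, falls back to
   the vector/vector form with the invariant shift amount later expanded
   into a full vector. */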
2090 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2091 || code == RROTATE_EXPR)
2092 {
2093 /* vector shifted by vector */
2094 if (dt[1] == vect_internal_def)
2095 {
2096 optab = optab_for_tree_code (code, vectype, optab_vector);
2097 if (vect_print_dump_info (REPORT_DETAILS))
2098 fprintf (vect_dump, "vector/vector shift/rotate found.");
2099 }
2100
2101 /* See if the machine has a vector shifted by scalar insn and if not
2102 then see if it has a vector shifted by vector insn. */
2103 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2104 {
2105 optab = optab_for_tree_code (code, vectype, optab_scalar);
2106 if (optab
2107 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2108 != CODE_FOR_nothing))
2109 {
2110 scalar_shift_arg = true;
2111 if (vect_print_dump_info (REPORT_DETAILS))
2112 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2113 }
2114 else
2115 {
2116 optab = optab_for_tree_code (code, vectype, optab_vector);
2117 if (optab
2118 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2119 != CODE_FOR_nothing))
2120 {
2121 if (vect_print_dump_info (REPORT_DETAILS))
2122 fprintf (vect_dump, "vector/vector shift/rotate found.");
2123
2124 /* Unlike the other binary operators, shifts/rotates have
2125 the rhs being int, instead of the same type as the lhs,
2126 so make sure the scalar is the right type if we are
2127 dealing with vectors of short/char. */
2128 if (dt[1] == vect_constant_def)
2129 op1 = fold_convert (TREE_TYPE (vectype), op1);
2130 }
2131 }
2132 }
2133
2134 else
2135 {
2136 if (vect_print_dump_info (REPORT_DETAILS))
2137 fprintf (vect_dump, "operand mode requires invariant argument.");
2138 return false;
2139 }
2140 }
2141 else
2142 optab = optab_for_tree_code (code, vectype, optab_default);
2143
2144 /* Supportable by target? */
2145 if (!optab)
2146 {
2147 if (vect_print_dump_info (REPORT_DETAILS))
2148 fprintf (vect_dump, "no optab.");
2149 return false;
2150 }
2151 vec_mode = TYPE_MODE (vectype);
2152 icode = (int) optab_handler (optab, vec_mode)->insn_code;
2153 if (icode == CODE_FOR_nothing)
2154 {
2155 if (vect_print_dump_info (REPORT_DETAILS))
2156 fprintf (vect_dump, "op not supported by target.");
2157 /* Check only during analysis. */
2158 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2159 || (vf < vect_min_worthwhile_factor (code)
2160 && !vec_stmt))
2161 return false;
2162 if (vect_print_dump_info (REPORT_DETAILS))
2163 fprintf (vect_dump, "proceeding using word mode.");
2164 }
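/* Word-mode fallback, e.g.: a bitwise AND on a vector type with no
   target instruction can still be vectorized when the whole vector fits
   in one machine word, since such an operation can later be carried out
   on the word as a whole. */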
2165
2166 /* Worthwhile without SIMD support? Check only during analysis. */
2167 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2168 && vf < vect_min_worthwhile_factor (code)
2169 && !vec_stmt)
2170 {
2171 if (vect_print_dump_info (REPORT_DETAILS))
2172 fprintf (vect_dump, "not worthwhile without SIMD support.");
2173 return false;
2174 }
2175
2176 if (!vec_stmt) /* transformation not required. */
2177 {
2178 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2179 if (vect_print_dump_info (REPORT_DETAILS))
2180 fprintf (vect_dump, "=== vectorizable_operation ===");
2181 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2182 return true;
2183 }
2184
2185 /** Transform. **/
2186
2187 if (vect_print_dump_info (REPORT_DETAILS))
2188 fprintf (vect_dump, "transform binary/unary operation.");
2189
2190 /* Handle def. */
2191 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2192
2193 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2194 created in the previous stages of the recursion, so no allocation is
2195 needed, except for the case of shift with scalar shift argument. In that
2196 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2197 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2198 In case of loop-based vectorization we allocate VECs of size 1. We
2199 allocate VEC_OPRNDS1 only in case of binary operation. */
2200 if (!slp_node)
2201 {
2202 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2203 if (op_type == binary_op)
2204 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2205 }
2206 else if (scalar_shift_arg)
2207 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2208
2209 /* In case the vectorization factor (VF) is bigger than the number
2210 of elements that we can fit in a vectype (nunits), we have to generate
2211 more than one vector stmt - i.e - we need to "unroll" the
2212 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2213 from one copy of the vector stmt to the next, in the field
2214 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2215 stages to find the correct vector defs to be used when vectorizing
2216 stmts that use the defs of the current stmt. The example below illustrates
2217 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2218 4 vectorized stmts):
2219
2220 before vectorization:
2221 RELATED_STMT VEC_STMT
2222 S1: x = memref - -
2223 S2: z = x + 1 - -
2224
2225 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2226 there):
2227 RELATED_STMT VEC_STMT
2228 VS1_0: vx0 = memref0 VS1_1 -
2229 VS1_1: vx1 = memref1 VS1_2 -
2230 VS1_2: vx2 = memref2 VS1_3 -
2231 VS1_3: vx3 = memref3 - -
2232 S1: x = load - VS1_0
2233 S2: z = x + 1 - -
2234
2235 step2: vectorize stmt S2 (done here):
2236 To vectorize stmt S2 we first need to find the relevant vector
2237 def for the first operand 'x'. This is, as usual, obtained from
2238 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2239 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2240 relevant vector def 'vx0'. Having found 'vx0' we can generate
2241 the vector stmt VS2_0, and as usual, record it in the
2242 STMT_VINFO_VEC_STMT of stmt S2.
2243 When creating the second copy (VS2_1), we obtain the relevant vector
2244 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2245 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2246 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2247 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2248 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2249 chain of stmts and pointers:
2250 RELATED_STMT VEC_STMT
2251 VS1_0: vx0 = memref0 VS1_1 -
2252 VS1_1: vx1 = memref1 VS1_2 -
2253 VS1_2: vx2 = memref2 VS1_3 -
2254 VS1_3: vx3 = memref3 - -
2255 S1: x = load - VS1_0
2256 VS2_0: vz0 = vx0 + v1 VS2_1 -
2257 VS2_1: vz1 = vx1 + v1 VS2_2 -
2258 VS2_2: vz2 = vx2 + v1 VS2_3 -
2259 VS2_3: vz3 = vx3 + v1 - -
2260 S2: z = x + 1 - VS2_0 */
2261
2262 prev_stmt_info = NULL;
2263 for (j = 0; j < ncopies; j++)
2264 {
2265 /* Handle uses. */
2266 if (j == 0)
2267 {
2268 if (op_type == binary_op && scalar_shift_arg)
2269 {
2270 /* Vector shl and shr insn patterns can be defined with scalar
2271 operand 2 (shift operand). In this case, use constant or loop
2272 invariant op1 directly, without extending it to vector mode
2273 first. */
2274 optab_op2_mode = insn_data[icode].operand[2].mode;
2275 if (!VECTOR_MODE_P (optab_op2_mode))
2276 {
2277 if (vect_print_dump_info (REPORT_DETAILS))
2278 fprintf (vect_dump, "operand 1 using scalar mode.");
2279 vec_oprnd1 = op1;
2280 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2281 if (slp_node)
2282 {
2283 /* Store vec_oprnd1 for every vector stmt to be created
2284 for SLP_NODE. We check during the analysis that all the
2285 shift arguments are the same.
2286 TODO: Allow different constants for different vector
2287 stmts generated for an SLP instance. */
2288 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2289 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2290 }
2291 }
2292 }
2293
2294 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2295 (a special case for certain kinds of vector shifts); otherwise,
2296 operand 1 should be of a vector type (the usual case). */
2297 if (op_type == binary_op && !vec_oprnd1)
2298 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2299 slp_node);
2300 else
2301 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2302 slp_node);
2303 }
2304 else
2305 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2306
2307 /* Arguments are ready. Create the new vector stmt. */
2308 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
2309 {
2310 vop1 = ((op_type == binary_op)
2311 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2312 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2313 new_temp = make_ssa_name (vec_dest, new_stmt);
2314 gimple_assign_set_lhs (new_stmt, new_temp);
2315 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2316 if (slp_node)
2317 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2318 }
2319
2320 if (slp_node)
2321 continue;
2322
2323 if (j == 0)
2324 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2325 else
2326 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2327 prev_stmt_info = vinfo_for_stmt (new_stmt);
2328 }
2329
2330 VEC_free (tree, heap, vec_oprnds0);
2331 if (vec_oprnds1)
2332 VEC_free (tree, heap, vec_oprnds1);
2333
2334 return true;
2335 }
2336
2337
2338 /* Get vectorized definitions for loop-based vectorization. For the first
2339 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2340 scalar operand), and for the rest we get a copy with
2341 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2342 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2343 The vectors are collected into VEC_OPRNDS. */
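/* For illustration: each invocation pushes two vector defs; with
   MULTI_STEP_CVT = 1 the function recurses once more, so four
   consecutive defs of the operand are collected for the demotion code
   to combine pairwise. */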
2344
2345 static void
2346 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2347 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2348 {
2349 tree vec_oprnd;
2350
2351 /* Get first vector operand. */
2352 /* All the vector operands except the very first one (that is the scalar oprnd)
2353 are stmt copies. */
2354 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2355 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2356 else
2357 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2358
2359 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2360
2361 /* Get second vector operand. */
2362 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2363 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2364
2365 *oprnd = vec_oprnd;
2366
2367 /* For conversion in multiple steps, continue to get operands
2368 recursively. */
2369 if (multi_step_cvt)
2370 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2371 }
2372
2373
2374 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2375 For multi-step conversions store the resulting vectors and call the function
2376 recursively. */
2377
2378 static void
2379 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2380 int multi_step_cvt, gimple stmt,
2381 VEC (tree, heap) *vec_dsts,
2382 gimple_stmt_iterator *gsi,
2383 slp_tree slp_node, enum tree_code code,
2384 stmt_vec_info *prev_stmt_info)
2385 {
2386 unsigned int i;
2387 tree vop0, vop1, new_tmp, vec_dest;
2388 gimple new_stmt;
2389 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2390
2391 vec_dest = VEC_pop (tree, vec_dsts);
2392
2393 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2394 {
2395 /* Create demotion operation. */
2396 vop0 = VEC_index (tree, *vec_oprnds, i);
2397 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2398 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2399 new_tmp = make_ssa_name (vec_dest, new_stmt);
2400 gimple_assign_set_lhs (new_stmt, new_tmp);
2401 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2402
2403 if (multi_step_cvt)
2404 /* Store the resulting vector for the next recursive call. */
2405 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2406 else
2407 {
2408 /* This is the last step of the conversion sequence. Store the
2409 vectors in SLP_NODE or in vector info of the scalar statement
2410 (or in STMT_VINFO_RELATED_STMT chain). */
2411 if (slp_node)
2412 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2413 else
2414 {
2415 if (!*prev_stmt_info)
2416 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2417 else
2418 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2419
2420 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2421 }
2422 }
2423 }
2424
2425 /* For multi-step demotion operations we first generate demotion operations
2426 from the source type to the intermediate types, and then combine the
2427 results (stored in VEC_OPRNDS) in a demotion operation to the destination
2428 type. */
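/* For illustration (assumed types): narrowing int to char,
   V4SI -> V16QI via V8HI, takes two steps: four V4SI operands are first
   packed pairwise into two V8HI vectors, and those two are then packed
   into the single V16QI result. */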
2429 if (multi_step_cvt)
2430 {
2431 /* At each level of recursion we have half of the operands we had at the
2432 previous level. */
2433 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2434 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2435 stmt, vec_dsts, gsi, slp_node,
2436 code, prev_stmt_info);
2437 }
2438 }
2439
2440
2441 /* Function vectorizable_type_demotion
2442
2443 Check if STMT performs a binary or unary operation that involves
2444 type demotion, and if it can be vectorized.
2445 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2446 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2447 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2448
2449 static bool
2450 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2451 gimple *vec_stmt, slp_tree slp_node)
2452 {
2453 tree vec_dest;
2454 tree scalar_dest;
2455 tree op0;
2456 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2457 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2458 enum tree_code code, code1 = ERROR_MARK;
2459 tree def;
2460 gimple def_stmt;
2461 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2462 stmt_vec_info prev_stmt_info;
2463 int nunits_in;
2464 int nunits_out;
2465 tree vectype_out;
2466 int ncopies;
2467 int j, i;
2468 tree vectype_in;
2469 int multi_step_cvt = 0;
2470 VEC (tree, heap) *vec_oprnds0 = NULL;
2471 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2472 tree last_oprnd, intermediate_type;
2473
2474 /* FORNOW: not supported by basic block SLP vectorization. */
2475 gcc_assert (loop_vinfo);
2476
2477 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2478 return false;
2479
2480 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2481 return false;
2482
2483 /* Is STMT a vectorizable type-demotion operation? */
2484 if (!is_gimple_assign (stmt))
2485 return false;
2486
2487 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2488 return false;
2489
2490 code = gimple_assign_rhs_code (stmt);
2491 if (!CONVERT_EXPR_CODE_P (code))
2492 return false;
2493
2494 scalar_dest = gimple_assign_lhs (stmt);
2495 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2496
2497 /* Check the operands of the operation. */
2498 op0 = gimple_assign_rhs1 (stmt);
2499 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2500 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2501 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2502 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2503 && CONVERT_EXPR_CODE_P (code))))
2504 return false;
2505 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2506 &def_stmt, &def, &dt[0], &vectype_in))
2507 {
2508 if (vect_print_dump_info (REPORT_DETAILS))
2509 fprintf (vect_dump, "use not simple.");
2510 return false;
2511 }
2512 /* If op0 is an external def use a vector type with the
2513 same size as the output vector type if possible. */
2514 if (!vectype_in)
2515 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2516 if (vec_stmt)
2517 gcc_assert (vectype_in);
2518 if (!vectype_in)
2519 {
2520 if (vect_print_dump_info (REPORT_DETAILS))
2521 {
2522 fprintf (vect_dump, "no vectype for scalar type ");
2523 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2524 }
2525
2526 return false;
2527 }
2528
2529 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2530 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2531 if (nunits_in >= nunits_out)
2532 return false;
2533
2534 /* Multiple types in SLP are handled by creating the appropriate number of
2535 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2536 case of SLP. */
2537 if (slp_node)
2538 ncopies = 1;
2539 else
2540 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2541 gcc_assert (ncopies >= 1);
2542
2543 /* Supportable by target? */
2544 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2545 &code1, &multi_step_cvt, &interm_types))
2546 return false;
2547
2548 if (!vec_stmt) /* transformation not required. */
2549 {
2550 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2551 if (vect_print_dump_info (REPORT_DETAILS))
2552 fprintf (vect_dump, "=== vectorizable_demotion ===");
2553 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2554 return true;
2555 }
2556
2557 /** Transform. **/
2558 if (vect_print_dump_info (REPORT_DETAILS))
2559 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2560 ncopies);
2561
2562 /* In case of multi-step demotion, we first generate demotion operations to
2563 the intermediate types, and then from those types to the final one.
2564 We create vector destinations for the intermediate types (TYPES) received
2565 from supportable_narrowing_operation, and store them in the correct order
2566 for future use in vect_create_vectorized_demotion_stmts(). */
2567 if (multi_step_cvt)
2568 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2569 else
2570 vec_dsts = VEC_alloc (tree, heap, 1);
2571
2572 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2573 VEC_quick_push (tree, vec_dsts, vec_dest);
2574
2575 if (multi_step_cvt)
2576 {
2577 for (i = VEC_length (tree, interm_types) - 1;
2578 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2579 {
2580 vec_dest = vect_create_destination_var (scalar_dest,
2581 intermediate_type);
2582 VEC_quick_push (tree, vec_dsts, vec_dest);
2583 }
2584 }
2585
2586 /* In case the vectorization factor (VF) is bigger than the number
2587 of elements that we can fit in a vectype (nunits), we have to generate
2588 more than one vector stmt - i.e - we need to "unroll" the
2589 vector stmt by a factor VF/nunits. */
2590 last_oprnd = op0;
2591 prev_stmt_info = NULL;
2592 for (j = 0; j < ncopies; j++)
2593 {
2594 /* Handle uses. */
2595 if (slp_node)
2596 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL, -1);
2597 else
2598 {
2599 VEC_free (tree, heap, vec_oprnds0);
2600 vec_oprnds0 = VEC_alloc (tree, heap,
2601 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2602 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2603 vect_pow2 (multi_step_cvt) - 1);
2604 }
2605
2606 /* Arguments are ready. Create the new vector stmts. */
2607 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2608 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2609 multi_step_cvt, stmt, tmp_vec_dsts,
2610 gsi, slp_node, code1,
2611 &prev_stmt_info);
2612 }
2613
2614 VEC_free (tree, heap, vec_oprnds0);
2615 VEC_free (tree, heap, vec_dsts);
2616 VEC_free (tree, heap, tmp_vec_dsts);
2617 VEC_free (tree, heap, interm_types);
2618
2619 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2620 return true;
2621 }
2622
2623
2624 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2625 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2626 the resulting vectors and call the function recursively. */
2627
2628 static void
2629 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2630 VEC (tree, heap) **vec_oprnds1,
2631 int multi_step_cvt, gimple stmt,
2632 VEC (tree, heap) *vec_dsts,
2633 gimple_stmt_iterator *gsi,
2634 slp_tree slp_node, enum tree_code code1,
2635 enum tree_code code2, tree decl1,
2636 tree decl2, int op_type,
2637 stmt_vec_info *prev_stmt_info)
2638 {
2639 int i;
2640 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2641 gimple new_stmt1, new_stmt2;
2642 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2643 VEC (tree, heap) *vec_tmp;
2644
2645 vec_dest = VEC_pop (tree, vec_dsts);
2646 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2647
2648 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
2649 {
2650 if (op_type == binary_op)
2651 vop1 = VEC_index (tree, *vec_oprnds1, i);
2652 else
2653 vop1 = NULL_TREE;
2654
2655 /* Generate the two halves of promotion operation. */
2656 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2657 op_type, vec_dest, gsi, stmt);
2658 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2659 op_type, vec_dest, gsi, stmt);
2660 if (is_gimple_call (new_stmt1))
2661 {
2662 new_tmp1 = gimple_call_lhs (new_stmt1);
2663 new_tmp2 = gimple_call_lhs (new_stmt2);
2664 }
2665 else
2666 {
2667 new_tmp1 = gimple_assign_lhs (new_stmt1);
2668 new_tmp2 = gimple_assign_lhs (new_stmt2);
2669 }
2670
2671 if (multi_step_cvt)
2672 {
2673 /* Store the results for the recursive call. */
2674 VEC_quick_push (tree, vec_tmp, new_tmp1);
2675 VEC_quick_push (tree, vec_tmp, new_tmp2);
2676 }
2677 else
2678 {
2679 /* Last step of the promotion sequence - store the results. */
2680 if (slp_node)
2681 {
2682 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2683 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2684 }
2685 else
2686 {
2687 if (!*prev_stmt_info)
2688 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2689 else
2690 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2691
2692 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2693 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2694 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2695 }
2696 }
2697 }
2698
2699 if (multi_step_cvt)
2700 {
2701 /* For a multi-step promotion operation we call the function recursively
2702 for every stage. We start from the input type, create promotion
2703 operations to the intermediate types, and then create promotions to
2704 the output type. */
2705 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2706 VEC_free (tree, heap, vec_tmp);
2707 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2708 multi_step_cvt - 1, stmt,
2709 vec_dsts, gsi, slp_node, code1,
2710 code2, decl2, decl2, op_type,
2711 prev_stmt_info);
2712 }
2713 }
2714
2715
2716 /* Function vectorizable_type_promotion
2717
2718 Check if STMT performs a binary or unary operation that involves
2719 type promotion, and if it can be vectorized.
2720 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2721 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2722 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
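/* For illustration (assumed types): short -> int is a single-step
   promotion in which each V8HI input is unpacked into a low and a high
   V4SI; char -> int needs an intermediate V8HI step, so one V16QI input
   expands into four V4SI results. */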
2723
2724 static bool
2725 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2726 gimple *vec_stmt, slp_tree slp_node)
2727 {
2728 tree vec_dest;
2729 tree scalar_dest;
2730 tree op0, op1 = NULL;
2731 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
2732 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2733 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2734 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2735 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2736 int op_type;
2737 tree def;
2738 gimple def_stmt;
2739 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2740 stmt_vec_info prev_stmt_info;
2741 int nunits_in;
2742 int nunits_out;
2743 tree vectype_out;
2744 int ncopies;
2745 int j, i;
2746 tree vectype_in;
2747 tree intermediate_type = NULL_TREE;
2748 int multi_step_cvt = 0;
2749 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2750 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2751
2752 /* FORNOW: not supported by basic block SLP vectorization. */
2753 gcc_assert (loop_vinfo);
2754
2755 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2756 return false;
2757
2758 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2759 return false;
2760
2761 /* Is STMT a vectorizable type-promotion operation? */
2762 if (!is_gimple_assign (stmt))
2763 return false;
2764
2765 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2766 return false;
2767
2768 code = gimple_assign_rhs_code (stmt);
2769 if (!CONVERT_EXPR_CODE_P (code)
2770 && code != WIDEN_MULT_EXPR)
2771 return false;
2772
2773 scalar_dest = gimple_assign_lhs (stmt);
2774 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2775
2776 /* Check the operands of the operation. */
2777 op0 = gimple_assign_rhs1 (stmt);
2778 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2779 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2780 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2781 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2782 && CONVERT_EXPR_CODE_P (code))))
2783 return false;
2784 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2785 &def_stmt, &def, &dt[0], &vectype_in))
2786 {
2787 if (vect_print_dump_info (REPORT_DETAILS))
2788 fprintf (vect_dump, "use not simple.");
2789 return false;
2790 }
2791 /* If op0 is an external or constant def use a vector type with
2792 the same size as the output vector type. */
2793 if (!vectype_in)
2794 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2795 if (vec_stmt)
2796 gcc_assert (vectype_in);
2797 if (!vectype_in)
2798 {
2799 if (vect_print_dump_info (REPORT_DETAILS))
2800 {
2801 fprintf (vect_dump, "no vectype for scalar type ");
2802 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2803 }
2804
2805 return false;
2806 }
2807
2808 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2809 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2810 if (nunits_in <= nunits_out)
2811 return false;
2812
2813 /* Multiple types in SLP are handled by creating the appropriate number of
2814 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2815 case of SLP. */
2816 if (slp_node)
2817 ncopies = 1;
2818 else
2819 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2820
2821 gcc_assert (ncopies >= 1);
2822
2823 op_type = TREE_CODE_LENGTH (code);
2824 if (op_type == binary_op)
2825 {
2826 op1 = gimple_assign_rhs2 (stmt);
2827 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2828 {
2829 if (vect_print_dump_info (REPORT_DETAILS))
2830 fprintf (vect_dump, "use not simple.");
2831 return false;
2832 }
2833 }
2834
2835 /* Supportable by target? */
2836 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
2837 &decl1, &decl2, &code1, &code2,
2838 &multi_step_cvt, &interm_types))
2839 return false;
2840
2841 /* A binary widening operation can only be supported directly by the
2842 architecture. */
2843 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2844
2845 if (!vec_stmt) /* transformation not required. */
2846 {
2847 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2848 if (vect_print_dump_info (REPORT_DETAILS))
2849 fprintf (vect_dump, "=== vectorizable_promotion ===");
2850 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2851 return true;
2852 }
2853
2854 /** Transform. **/
2855
2856 if (vect_print_dump_info (REPORT_DETAILS))
2857 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2858 ncopies);
2859
2860 /* Handle def. */
2861 /* In case of multi-step promotion, we first generate promotion operations
2862 to the intermediate types, and then from those types to the final one.
2863 We store the vector destinations in VEC_DSTS in the correct order for
2864 recursive creation of promotion operations in
2865 vect_create_vectorized_promotion_stmts(). Vector destinations are created
2866 according to TYPES received from supportable_widening_operation(). */
2867 if (multi_step_cvt)
2868 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2869 else
2870 vec_dsts = VEC_alloc (tree, heap, 1);
2871
2872 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2873 VEC_quick_push (tree, vec_dsts, vec_dest);
2874
2875 if (multi_step_cvt)
2876 {
2877 for (i = VEC_length (tree, interm_types) - 1;
2878 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2879 {
2880 vec_dest = vect_create_destination_var (scalar_dest,
2881 intermediate_type);
2882 VEC_quick_push (tree, vec_dsts, vec_dest);
2883 }
2884 }
2885
2886 if (!slp_node)
2887 {
2888 vec_oprnds0 = VEC_alloc (tree, heap,
2889 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2890 if (op_type == binary_op)
2891 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2892 }
2893
2894 /* In case the vectorization factor (VF) is bigger than the number
2895 of elements that we can fit in a vectype (nunits), we have to generate
2896 more than one vector stmt - i.e - we need to "unroll" the
2897 vector stmt by a factor VF/nunits. */
2898
2899 prev_stmt_info = NULL;
2900 for (j = 0; j < ncopies; j++)
2901 {
2902 /* Handle uses. */
2903 if (j == 0)
2904 {
2905 if (slp_node)
2906 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
2907 else
2908 {
2909 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2910 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2911 if (op_type == binary_op)
2912 {
2913 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2914 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2915 }
2916 }
2917 }
2918 else
2919 {
2920 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2921 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2922 if (op_type == binary_op)
2923 {
2924 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
2925 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
2926 }
2927 }
2928
2929 /* Arguments are ready. Create the new vector stmts. */
2930 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2931 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
2932 multi_step_cvt, stmt,
2933 tmp_vec_dsts,
2934 gsi, slp_node, code1, code2,
2935 decl1, decl2, op_type,
2936 &prev_stmt_info);
2937 }
2938
2939 VEC_free (tree, heap, vec_dsts);
2940 VEC_free (tree, heap, tmp_vec_dsts);
2941 VEC_free (tree, heap, interm_types);
2942 VEC_free (tree, heap, vec_oprnds0);
2943 VEC_free (tree, heap, vec_oprnds1);
2944
2945 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2946 return true;
2947 }
2948
2949
2950 /* Function vectorizable_store.
2951
2952 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
2953 can be vectorized.
2954 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2955 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2956 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2957
2958 static bool
2959 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2960 slp_tree slp_node)
2961 {
2962 tree scalar_dest;
2963 tree data_ref;
2964 tree op;
2965 tree vec_oprnd = NULL_TREE;
2966 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2967 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
2968 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2969 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2970 struct loop *loop = NULL;
2971 enum machine_mode vec_mode;
2972 tree dummy;
2973 enum dr_alignment_support alignment_support_scheme;
2974 tree def;
2975 gimple def_stmt;
2976 enum vect_def_type dt;
2977 stmt_vec_info prev_stmt_info = NULL;
2978 tree dataref_ptr = NULL_TREE;
2979 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2980 int ncopies;
2981 int j;
2982 gimple next_stmt, first_stmt = NULL;
2983 bool strided_store = false;
2984 unsigned int group_size, i;
2985 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
2986 bool inv_p;
2987 VEC(tree,heap) *vec_oprnds = NULL;
2988 bool slp = (slp_node != NULL);
2989 unsigned int vec_num;
2990 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2991
2992 if (loop_vinfo)
2993 loop = LOOP_VINFO_LOOP (loop_vinfo);
2994
2995 /* Multiple types in SLP are handled by creating the appropriate number of
2996 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2997 case of SLP. */
2998 if (slp)
2999 ncopies = 1;
3000 else
3001 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3002
3003 gcc_assert (ncopies >= 1);
3004
3005 /* FORNOW. This restriction should be relaxed. */
3006 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3007 {
3008 if (vect_print_dump_info (REPORT_DETAILS))
3009 fprintf (vect_dump, "multiple types in nested loop.");
3010 return false;
3011 }
3012
3013 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3014 return false;
3015
3016 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3017 return false;
3018
3019 /* Is vectorizable store? */
3020
3021 if (!is_gimple_assign (stmt))
3022 return false;
3023
3024 scalar_dest = gimple_assign_lhs (stmt);
3025 if (TREE_CODE (scalar_dest) != ARRAY_REF
3026 && TREE_CODE (scalar_dest) != INDIRECT_REF
3027 && TREE_CODE (scalar_dest) != COMPONENT_REF
3028 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3029 && TREE_CODE (scalar_dest) != REALPART_EXPR)
3030 return false;
3031
3032 gcc_assert (gimple_assign_single_p (stmt));
3033 op = gimple_assign_rhs1 (stmt);
3034 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3035 {
3036 if (vect_print_dump_info (REPORT_DETAILS))
3037 fprintf (vect_dump, "use not simple.");
3038 return false;
3039 }
3040
3041 /* The scalar rhs type needs to be trivially convertible to the vector
3042 component type. This should always be the case. */
3043 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
3044 {
3045 if (vect_print_dump_info (REPORT_DETAILS))
3046 fprintf (vect_dump, "??? operands of different types");
3047 return false;
3048 }
3049
3050 vec_mode = TYPE_MODE (vectype);
3051 /* FORNOW. In some cases we can vectorize even if the data-type is not
3052 supported (e.g. - array initialization with 0). */
3053 if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
3054 return false;
3055
3056 if (!STMT_VINFO_DATA_REF (stmt_info))
3057 return false;
3058
3059 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3060 {
3061 strided_store = true;
3062 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3063 if (!vect_strided_store_supported (vectype)
3064 && !PURE_SLP_STMT (stmt_info) && !slp)
3065 return false;
3066
3067 if (first_stmt == stmt)
3068 {
3069 /* STMT is the leader of the group. Check the operands of all the
3070 stmts of the group. */
3071 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3072 while (next_stmt)
3073 {
3074 gcc_assert (gimple_assign_single_p (next_stmt));
3075 op = gimple_assign_rhs1 (next_stmt);
3076 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3077 &def, &dt))
3078 {
3079 if (vect_print_dump_info (REPORT_DETAILS))
3080 fprintf (vect_dump, "use not simple.");
3081 return false;
3082 }
3083 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3084 }
3085 }
3086 }
3087
3088 if (!vec_stmt) /* transformation not required. */
3089 {
3090 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3091 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3092 return true;
3093 }
3094
3095 /** Transform. **/
3096
3097 if (strided_store)
3098 {
3099 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3100 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3101
3102 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3103
3104 /* FORNOW */
3105 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3106
3107 /* We vectorize all the stmts of the interleaving group when we
3108 reach the last stmt in the group. */
3109 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3110 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3111 && !slp)
3112 {
3113 *vec_stmt = NULL;
3114 return true;
3115 }
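/* E.g. for an interleaving group of four stores, the first three calls
   into this function only bump DR_GROUP_STORE_COUNT and return with
   *VEC_STMT set to NULL; all the vector stores for the group are
   emitted only when the last member of the group is reached. */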
3116
3117 if (slp)
3118 {
3119 strided_store = false;
3120 /* VEC_NUM is the number of vect stmts to be created for this
3121 group. */
3122 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3123 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3124 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3125 }
3126 else
3127 /* VEC_NUM is the number of vect stmts to be created for this
3128 group. */
3129 vec_num = group_size;
3130 }
3131 else
3132 {
3133 first_stmt = stmt;
3134 first_dr = dr;
3135 group_size = vec_num = 1;
3136 }
3137
3138 if (vect_print_dump_info (REPORT_DETAILS))
3139 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3140
3141 dr_chain = VEC_alloc (tree, heap, group_size);
3142 oprnds = VEC_alloc (tree, heap, group_size);
3143
3144 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3145 gcc_assert (alignment_support_scheme);
3146
3147 /* In case the vectorization factor (VF) is bigger than the number
3148 of elements that we can fit in a vectype (nunits), we have to generate
3149 more than one vector stmt - i.e - we need to "unroll" the
3150 vector stmt by a factor VF/nunits. For more details see documentation in
3151 vect_get_vec_def_for_copy_stmt. */
3152
3153 /* In case of interleaving (non-unit strided access):
3154
3155 S1: &base + 2 = x2
3156 S2: &base = x0
3157 S3: &base + 1 = x1
3158 S4: &base + 3 = x3
3159
3160 We create vectorized stores starting from the base address (the access of
3161 the first stmt in the chain, S2 in the above example), when the last store
3162 stmt of the chain (S4) is reached:
3163
3164 VS1: &base = vx2
3165 VS2: &base + vec_size*1 = vx0
3166 VS3: &base + vec_size*2 = vx1
3167 VS4: &base + vec_size*3 = vx3
3168
3169 Then permutation statements are generated:
3170
3171 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3172 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3173 ...
3174
3175 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3176 (the order of the data-refs in the output of vect_permute_store_chain
3177 corresponds to the order of scalar stmts in the interleaving chain - see
3178 the documentation of vect_permute_store_chain()).
3179
3180 In case of both multiple types and interleaving, above vector stores and
3181 permutation stmts are created for every copy. The result vector stmts are
3182 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3183 STMT_VINFO_RELATED_STMT for the next copies.
3184 */
3185
3186 prev_stmt_info = NULL;
3187 for (j = 0; j < ncopies; j++)
3188 {
3189 gimple new_stmt;
3190 gimple ptr_incr;
3191
3192 if (j == 0)
3193 {
3194 if (slp)
3195 {
3196 /* Get vectorized arguments for SLP_NODE. */
3197 vect_get_slp_defs (slp_node, &vec_oprnds, NULL, -1);
3198
3199 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3200 }
3201 else
3202 {
3203 /* For interleaved stores we collect vectorized defs for all the
3204 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3205 used as an input to vect_permute_store_chain(), and OPRNDS as
3206 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3207
3208 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3209 OPRNDS are of size 1. */
3210 next_stmt = first_stmt;
3211 for (i = 0; i < group_size; i++)
3212 {
3213 /* Since gaps are not supported for interleaved stores,
3214 GROUP_SIZE is the exact number of stmts in the chain.
3215 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3216 there is no interleaving, GROUP_SIZE is 1, and only one
3217 iteration of the loop will be executed. */
3218 gcc_assert (next_stmt
3219 && gimple_assign_single_p (next_stmt));
3220 op = gimple_assign_rhs1 (next_stmt);
3221
3222 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3223 NULL);
3224 VEC_quick_push(tree, dr_chain, vec_oprnd);
3225 VEC_quick_push(tree, oprnds, vec_oprnd);
3226 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3227 }
3228 }
3229
3230 /* We should have caught mismatched types earlier. */
3231 gcc_assert (useless_type_conversion_p (vectype,
3232 TREE_TYPE (vec_oprnd)));
3233 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3234 &dummy, &ptr_incr, false,
3235 &inv_p);
3236 gcc_assert (bb_vinfo || !inv_p);
3237 }
3238 else
3239 {
3240 /* For interleaved stores we created vectorized defs for all the
3241 defs stored in OPRNDS in the previous iteration (previous copy).
3242 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3243 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3244 next copy.
3245 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3246 OPRNDS are of size 1. */
3247 for (i = 0; i < group_size; i++)
3248 {
3249 op = VEC_index (tree, oprnds, i);
3250 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3251 &dt);
3252 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3253 VEC_replace(tree, dr_chain, i, vec_oprnd);
3254 VEC_replace(tree, oprnds, i, vec_oprnd);
3255 }
3256 dataref_ptr =
3257 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3258 }
3259
3260 if (strided_store)
3261 {
3262 result_chain = VEC_alloc (tree, heap, group_size);
3263 /* Permute. */
3264 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3265 &result_chain))
3266 return false;
3267 }
3268
3269 next_stmt = first_stmt;
3270 for (i = 0; i < vec_num; i++)
3271 {
3272 if (i > 0)
3273 /* Bump the vector pointer. */
3274 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3275 NULL_TREE);
3276
3277 if (slp)
3278 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3279 else if (strided_store)
3280 /* For strided stores vectorized defs are interleaved in
3281 vect_permute_store_chain(). */
3282 vec_oprnd = VEC_index (tree, result_chain, i);
3283
3284 if (aligned_access_p (first_dr))
3285 data_ref = build_fold_indirect_ref (dataref_ptr);
3286 else
3287 {
3288 int mis = DR_MISALIGNMENT (first_dr);
3289 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3290 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3291 data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3292 }
3293
3294 /* If accesses through a pointer to vectype do not alias the original
3295 memory reference we have a problem. This should never happen. */
3296 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3297 get_alias_set (gimple_assign_lhs (stmt))));
3298
3299 /* Arguments are ready. Create the new vector stmt. */
3300 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3301 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3302 mark_symbols_for_renaming (new_stmt);
3303
3304 if (slp)
3305 continue;
3306
3307 if (j == 0)
3308 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3309 else
3310 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3311
3312 prev_stmt_info = vinfo_for_stmt (new_stmt);
3313 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3314 if (!next_stmt)
3315 break;
3316 }
3317 }
3318
3319 VEC_free (tree, heap, dr_chain);
3320 VEC_free (tree, heap, oprnds);
3321 if (result_chain)
3322 VEC_free (tree, heap, result_chain);
3323
3324 return true;
3325 }
3326
3327 /* vectorizable_load.
3328
3329 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
3330 can be vectorized.
3331 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3332 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3333 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3334
3335 static bool
3336 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3337 slp_tree slp_node, slp_instance slp_node_instance)
3338 {
3339 tree scalar_dest;
3340 tree vec_dest = NULL;
3341 tree data_ref = NULL;
3342 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3343 stmt_vec_info prev_stmt_info;
3344 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3345 struct loop *loop = NULL;
3346 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3347 bool nested_in_vect_loop = false;
3348 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3349 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3350 tree new_temp;
3351 int mode;
3352 gimple new_stmt = NULL;
3353 tree dummy;
3354 enum dr_alignment_support alignment_support_scheme;
3355 tree dataref_ptr = NULL_TREE;
3356 gimple ptr_incr;
3357 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3358 int ncopies;
3359 int i, j, group_size;
3360 tree msq = NULL_TREE, lsq;
3361 tree offset = NULL_TREE;
3362 tree realignment_token = NULL_TREE;
3363 gimple phi = NULL;
3364 VEC(tree,heap) *dr_chain = NULL;
3365 bool strided_load = false;
3366 gimple first_stmt;
3367 tree scalar_type;
3368 bool inv_p;
3369 bool compute_in_loop = false;
3370 struct loop *at_loop;
3371 int vec_num;
3372 bool slp = (slp_node != NULL);
3373 bool slp_perm = false;
3374 enum tree_code code;
3375 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3376 int vf;
3377
3378 if (loop_vinfo)
3379 {
3380 loop = LOOP_VINFO_LOOP (loop_vinfo);
3381 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3382 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3383 }
3384 else
3385 vf = 1;
3386
3387 /* Multiple types in SLP are handled by creating the appropriate number of
3388 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3389 case of SLP. */
3390 if (slp)
3391 ncopies = 1;
3392 else
3393 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3394
3395 gcc_assert (ncopies >= 1);
3396
3397 /* FORNOW. This restriction should be relaxed. */
3398 if (nested_in_vect_loop && ncopies > 1)
3399 {
3400 if (vect_print_dump_info (REPORT_DETAILS))
3401 fprintf (vect_dump, "multiple types in nested loop.");
3402 return false;
3403 }
3404
3405 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3406 return false;
3407
3408 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3409 return false;
3410
3411 /* Is vectorizable load? */
3412 if (!is_gimple_assign (stmt))
3413 return false;
3414
3415 scalar_dest = gimple_assign_lhs (stmt);
3416 if (TREE_CODE (scalar_dest) != SSA_NAME)
3417 return false;
3418
3419 code = gimple_assign_rhs_code (stmt);
3420 if (code != ARRAY_REF
3421 && code != INDIRECT_REF
3422 && code != COMPONENT_REF
3423 && code != IMAGPART_EXPR
3424 && code != REALPART_EXPR)
3425 return false;
3426
3427 if (!STMT_VINFO_DATA_REF (stmt_info))
3428 return false;
3429
3430 scalar_type = TREE_TYPE (DR_REF (dr));
3431 mode = (int) TYPE_MODE (vectype);
3432
3433 /* FORNOW. In some cases we can vectorize even if the data-type is not
3434 supported (e.g. - data copies). */
3435 if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
3436 {
3437 if (vect_print_dump_info (REPORT_DETAILS))
3438 fprintf (vect_dump, "Aligned load, but unsupported type.");
3439 return false;
3440 }
3441
3442 /* The vector component type needs to be trivially convertible to the
3443 scalar lhs. This should always be the case. */
3444 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3445 {
3446 if (vect_print_dump_info (REPORT_DETAILS))
3447 fprintf (vect_dump, "??? operands of different types");
3448 return false;
3449 }
3450
3451 /* Check if the load is a part of an interleaving chain. */
3452 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3453 {
3454 strided_load = true;
3455 /* FORNOW */
3456 gcc_assert (! nested_in_vect_loop);
3457
3458 /* Check if interleaving is supported. */
3459 if (!vect_strided_load_supported (vectype)
3460 && !PURE_SLP_STMT (stmt_info) && !slp)
3461 return false;
3462 }
3463
3464 if (!vec_stmt) /* transformation not required. */
3465 {
3466 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3467 vect_model_load_cost (stmt_info, ncopies, NULL);
3468 return true;
3469 }
3470
3471 if (vect_print_dump_info (REPORT_DETAILS))
3472 fprintf (vect_dump, "transform load.");
3473
3474 /** Transform. **/
3475
3476 if (strided_load)
3477 {
3478 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3479 /* Check if the chain of loads is already vectorized. */
3480 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3481 {
3482 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3483 return true;
3484 }
3485 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3486 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3487
3488 /* VEC_NUM is the number of vect stmts to be created for this group. */
3489 if (slp)
3490 {
3491 strided_load = false;
3492 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3493 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3494 slp_perm = true;
3495 }
3496 else
3497 vec_num = group_size;
3498
3499 dr_chain = VEC_alloc (tree, heap, vec_num);
3500 }
3501 else
3502 {
3503 first_stmt = stmt;
3504 first_dr = dr;
3505 group_size = vec_num = 1;
3506 }
3507
3508 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3509 gcc_assert (alignment_support_scheme);
3510
3511 /* In case the vectorization factor (VF) is bigger than the number
3512 of elements that we can fit in a vectype (nunits), we have to generate
3513 more than one vector stmt - i.e - we need to "unroll" the
3514 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3515 from one copy of the vector stmt to the next, in the field
3516 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3517 stages to find the correct vector defs to be used when vectorizing
3518 stmts that use the defs of the current stmt. The example below illustrates
3519 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3520 4 vectorized stmts):
3521
3522 before vectorization:
3523 RELATED_STMT VEC_STMT
3524 S1: x = memref - -
3525 S2: z = x + 1 - -
3526
3527 step 1: vectorize stmt S1:
3528 We first create the vector stmt VS1_0, and, as usual, record a
3529 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3530 Next, we create the vector stmt VS1_1, and record a pointer to
3531 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3532 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3533 stmts and pointers:
3534 RELATED_STMT VEC_STMT
3535 VS1_0: vx0 = memref0 VS1_1 -
3536 VS1_1: vx1 = memref1 VS1_2 -
3537 VS1_2: vx2 = memref2 VS1_3 -
3538 VS1_3: vx3 = memref3 - -
3539 S1: x = load - VS1_0
3540 S2: z = x + 1 - -
3541
3542         See the documentation of vect_get_vec_def_for_stmt_copy for how the
3543         information we recorded in the RELATED_STMT field is used to vectorize
3544         stmt S2.  */
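      /* A sketch for illustration only: starting from the vector stmt recorded
         in STMT_VINFO_VEC_STMT of S1, the J-th copy can be reached by following
         the STMT_VINFO_RELATED_STMT field J times, roughly:

           vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (S1));
           for (copy = 0; copy < j; copy++)
             vec_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (vec_stmt));

         (vect_get_vec_def_for_stmt_copy performs the actual per-copy step from
         one vector def to the next.)  */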
3545
3546 /* In case of interleaving (non-unit strided access):
3547
3548 S1: x2 = &base + 2
3549 S2: x0 = &base
3550 S3: x1 = &base + 1
3551 S4: x3 = &base + 3
3552
3553 Vectorized loads are created in the order of memory accesses
3554 starting from the access of the first stmt of the chain:
3555
3556 VS1: vx0 = &base
3557 VS2: vx1 = &base + vec_size*1
3558         VS3: vx2 = &base + vec_size*2
3559         VS4: vx3 = &base + vec_size*3
3560
3561 Then permutation statements are generated:
3562
3563         VS5: vx4 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3564         VS6: vx5 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3565 ...
3566
3567 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3568 (the order of the data-refs in the output of vect_permute_load_chain
3569 corresponds to the order of scalar stmts in the interleaving chain - see
3570 the documentation of vect_permute_load_chain()).
3571 The generation of permutation stmts and recording them in
3572 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3573
3574 In case of both multiple types and interleaving, the vector loads and
3575 permutation stmts above are created for every copy. The result vector stmts
3576 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3577 STMT_VINFO_RELATED_STMT for the next copies. */
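      /* A concrete illustration (example values only): assume 4-element vectors
         and a group of two interleaved accesses a and b, laid out in memory as
         a0 b0 a1 b1 a2 b2 a3 b3.  Then
           vx0 = { a0, b0, a1, b1 }      vx1 = { a2, b2, a3, b3 }
           VEC_EXTRACT_EVEN_EXPR <vx0, vx1> = { a0, a1, a2, a3 }
           VEC_EXTRACT_ODD_EXPR  <vx0, vx1> = { b0, b1, b2, b3 }  */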
3578
3579 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3580 on a target that supports unaligned accesses (dr_unaligned_supported)
3581 we generate the following code:
3582 p = initial_addr;
3583 indx = 0;
3584 loop {
3585 p = p + indx * vectype_size;
3586 vec_dest = *(p);
3587 indx = indx + 1;
3588 }
3589
3590 Otherwise, the data reference is potentially unaligned on a target that
3591 does not support unaligned accesses (dr_explicit_realign_optimized) -
3592 then generate the following code, in which the data in each iteration is
3593 obtained by two vector loads, one from the previous iteration, and one
3594 from the current iteration:
3595 p1 = initial_addr;
3596 msq_init = *(floor(p1))
3597 p2 = initial_addr + VS - 1;
3598 realignment_token = call target_builtin;
3599 indx = 0;
3600 loop {
3601 p2 = p2 + indx * vectype_size
3602 lsq = *(floor(p2))
3603 vec_dest = realign_load (msq, lsq, realignment_token)
3604 indx = indx + 1;
3605 msq = lsq;
3606 } */
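      /* A concrete illustration (example values only): with 4-element vectors
         and an access starting one element past an aligned boundary, msq is the
         aligned vector { a0, a1, a2, a3 } loaded from below the address, lsq is
         the next aligned vector { a4, a5, a6, a7 }, and realign_load combines
         them (steered by realignment_token) into the desired { a1, a2, a3, a4 }.  */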
3607
3608 /* If the misalignment remains the same throughout the execution of the
3609 loop, we can create the init_addr and permutation mask at the loop
3610      preheader.  Otherwise, they need to be created inside the loop.
3611 This can only occur when vectorizing memory accesses in the inner-loop
3612 nested within an outer-loop that is being vectorized. */
3613
3614 if (loop && nested_in_vect_loop_p (loop, stmt)
3615 && (TREE_INT_CST_LOW (DR_STEP (dr))
3616 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3617 {
3618 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3619 compute_in_loop = true;
3620 }
3621
3622 if ((alignment_support_scheme == dr_explicit_realign_optimized
3623 || alignment_support_scheme == dr_explicit_realign)
3624 && !compute_in_loop)
3625 {
3626 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3627 alignment_support_scheme, NULL_TREE,
3628 &at_loop);
3629 if (alignment_support_scheme == dr_explicit_realign_optimized)
3630 {
3631 phi = SSA_NAME_DEF_STMT (msq);
3632 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3633 }
3634 }
3635 else
3636 at_loop = loop;
3637
3638 prev_stmt_info = NULL;
3639 for (j = 0; j < ncopies; j++)
3640 {
3641 /* 1. Create the vector pointer update chain. */
3642 if (j == 0)
3643 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
3644 at_loop, offset,
3645 &dummy, &ptr_incr, false,
3646 &inv_p);
3647 else
3648 dataref_ptr =
3649 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3650
3651 for (i = 0; i < vec_num; i++)
3652 {
3653 if (i > 0)
3654 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3655 NULL_TREE);
3656
3657 /* 2. Create the vector-load in the loop. */
3658 switch (alignment_support_scheme)
3659 {
3660 case dr_aligned:
3661 gcc_assert (aligned_access_p (first_dr));
3662 data_ref = build_fold_indirect_ref (dataref_ptr);
3663 break;
3664 case dr_unaligned_supported:
3665 {
3666 int mis = DR_MISALIGNMENT (first_dr);
3667 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3668
3669 tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
3670 data_ref =
3671 build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3672 break;
3673 }
3674 case dr_explicit_realign:
3675 {
3676 tree ptr, bump;
3677 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3678
3679 if (compute_in_loop)
3680 msq = vect_setup_realignment (first_stmt, gsi,
3681 &realignment_token,
3682 dr_explicit_realign,
3683 dataref_ptr, NULL);
3684
3685 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3686 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3687 new_stmt = gimple_build_assign (vec_dest, data_ref);
3688 new_temp = make_ssa_name (vec_dest, new_stmt);
3689 gimple_assign_set_lhs (new_stmt, new_temp);
3690 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3691 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
3692 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3693 msq = new_temp;
3694
3695 bump = size_binop (MULT_EXPR, vs_minus_1,
3696 TYPE_SIZE_UNIT (scalar_type));
3697 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3698 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
3699 break;
3700 }
3701 case dr_explicit_realign_optimized:
3702 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3703 break;
3704 default:
3705 gcc_unreachable ();
3706 }
3707 /* If accesses through a pointer to vectype do not alias the original
3708 memory reference we have a problem. This should never happen. */
3709 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3710 get_alias_set (gimple_assign_rhs1 (stmt))));
3711 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3712 new_stmt = gimple_build_assign (vec_dest, data_ref);
3713 new_temp = make_ssa_name (vec_dest, new_stmt);
3714 gimple_assign_set_lhs (new_stmt, new_temp);
3715 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3716 mark_symbols_for_renaming (new_stmt);
3717
3718 /* 3. Handle explicit realignment if necessary/supported. Create in
3719 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3720 if (alignment_support_scheme == dr_explicit_realign_optimized
3721 || alignment_support_scheme == dr_explicit_realign)
3722 {
3723 tree tmp;
3724
3725 lsq = gimple_assign_lhs (new_stmt);
3726 if (!realignment_token)
3727 realignment_token = dataref_ptr;
3728 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3729 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3730 realignment_token);
3731 new_stmt = gimple_build_assign (vec_dest, tmp);
3732 new_temp = make_ssa_name (vec_dest, new_stmt);
3733 gimple_assign_set_lhs (new_stmt, new_temp);
3734 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3735
3736 if (alignment_support_scheme == dr_explicit_realign_optimized)
3737 {
3738 gcc_assert (phi);
3739 if (i == vec_num - 1 && j == ncopies - 1)
3740 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
3741 UNKNOWN_LOCATION);
3742 msq = lsq;
3743 }
3744 }
3745
3746 /* 4. Handle invariant-load. */
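          /* The loaded value is invariant in the inner-loop, so its scalar
             component is extracted once (via the BIT_FIELD_REF below) and then
             replicated nunits times into a vector CONSTRUCTOR, i.e. a
             broadcast of the form { t, t, ..., t }.  */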
3747 if (inv_p && !bb_vinfo)
3748 {
3749 gcc_assert (!strided_load);
3750 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3751 if (j == 0)
3752 {
3753 int k;
3754 tree t = NULL_TREE;
3755 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3756
3757               /* CHECKME: bitpos depends on endianness? */
3758 bitpos = bitsize_zero_node;
3759 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
3760 bitsize, bitpos);
3761 vec_dest =
3762 vect_create_destination_var (scalar_dest, NULL_TREE);
3763 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3764 new_temp = make_ssa_name (vec_dest, new_stmt);
3765 gimple_assign_set_lhs (new_stmt, new_temp);
3766 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3767
3768 for (k = nunits - 1; k >= 0; --k)
3769 t = tree_cons (NULL_TREE, new_temp, t);
3770 /* FIXME: use build_constructor directly. */
3771 vec_inv = build_constructor_from_list (vectype, t);
3772 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3773 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3774 }
3775 else
3776 gcc_unreachable (); /* FORNOW. */
3777 }
3778
3779 /* Collect vector loads and later create their permutation in
3780 vect_transform_strided_load (). */
3781 if (strided_load || slp_perm)
3782 VEC_quick_push (tree, dr_chain, new_temp);
3783
3784 /* Store vector loads in the corresponding SLP_NODE. */
3785 if (slp && !slp_perm)
3786 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3787 }
3788
3789 if (slp && !slp_perm)
3790 continue;
3791
3792 if (slp_perm)
3793 {
3794 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
3795 slp_node_instance, false))
3796 {
3797 VEC_free (tree, heap, dr_chain);
3798 return false;
3799 }
3800 }
3801 else
3802 {
3803 if (strided_load)
3804 {
3805 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
3806 return false;
3807
3808 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3809 VEC_free (tree, heap, dr_chain);
3810 dr_chain = VEC_alloc (tree, heap, group_size);
3811 }
3812 else
3813 {
3814 if (j == 0)
3815 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3816 else
3817 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3818 prev_stmt_info = vinfo_for_stmt (new_stmt);
3819 }
3820 }
3821 }
3822
3823 if (dr_chain)
3824 VEC_free (tree, heap, dr_chain);
3825
3826 return true;
3827 }
3828
3829 /* Function vect_is_simple_cond.
3830
3831 Input:
3832    LOOP_VINFO - the vect info of the loop that is being vectorized.
3833 COND - Condition that is checked for simple use.
3834
3835 Returns whether a COND can be vectorized. Checks whether
3836    condition operands are supportable using vect_is_simple_use.  */
3837
3838 static bool
3839 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3840 {
3841 tree lhs, rhs;
3842 tree def;
3843 enum vect_def_type dt;
3844
3845 if (!COMPARISON_CLASS_P (cond))
3846 return false;
3847
3848 lhs = TREE_OPERAND (cond, 0);
3849 rhs = TREE_OPERAND (cond, 1);
3850
3851 if (TREE_CODE (lhs) == SSA_NAME)
3852 {
3853 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
3854 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
3855 &dt))
3856 return false;
3857 }
3858 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
3859 && TREE_CODE (lhs) != FIXED_CST)
3860 return false;
3861
3862 if (TREE_CODE (rhs) == SSA_NAME)
3863 {
3864 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
3865 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
3866 &dt))
3867 return false;
3868 }
3869 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
3870 && TREE_CODE (rhs) != FIXED_CST)
3871 return false;
3872
3873 return true;
3874 }
3875
3876 /* vectorizable_condition.
3877
3878 Check if STMT is conditional modify expression that can be vectorized.
3879 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3880 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
3881 at GSI.
3882
3883    When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
3884    to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
3885    the else clause if it is 2).
3886
3887 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3888
3889 bool
3890 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
3891 gimple *vec_stmt, tree reduc_def, int reduc_index)
3892 {
3893 tree scalar_dest = NULL_TREE;
3894 tree vec_dest = NULL_TREE;
3895 tree op = NULL_TREE;
3896 tree cond_expr, then_clause, else_clause;
3897 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3898 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3899 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
3900 tree vec_compare, vec_cond_expr;
3901 tree new_temp;
3902 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3903 enum machine_mode vec_mode;
3904 tree def;
3905 enum vect_def_type dt;
3906 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3907 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3908 enum tree_code code;
3909
3910 /* FORNOW: unsupported in basic block SLP. */
3911 gcc_assert (loop_vinfo);
3912
3913 gcc_assert (ncopies >= 1);
3914 if (ncopies > 1)
3915 return false; /* FORNOW */
3916
3917 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3918 return false;
3919
3920 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3921 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
3922 && reduc_def))
3923 return false;
3924
3925 /* FORNOW: SLP not supported. */
3926 if (STMT_SLP_TYPE (stmt_info))
3927 return false;
3928
3929 /* FORNOW: not yet supported. */
3930 if (STMT_VINFO_LIVE_P (stmt_info))
3931 {
3932 if (vect_print_dump_info (REPORT_DETAILS))
3933 fprintf (vect_dump, "value used after loop.");
3934 return false;
3935 }
3936
3937 /* Is vectorizable conditional operation? */
3938 if (!is_gimple_assign (stmt))
3939 return false;
3940
3941 code = gimple_assign_rhs_code (stmt);
3942
3943 if (code != COND_EXPR)
3944 return false;
3945
3946 gcc_assert (gimple_assign_single_p (stmt));
3947 op = gimple_assign_rhs1 (stmt);
3948 cond_expr = TREE_OPERAND (op, 0);
3949 then_clause = TREE_OPERAND (op, 1);
3950 else_clause = TREE_OPERAND (op, 2);
3951
3952 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
3953 return false;
3954
3955 /* We do not handle two different vector types for the condition
3956 and the values. */
3957 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
3958 TREE_TYPE (vectype)))
3959 return false;
3960
3961 if (TREE_CODE (then_clause) == SSA_NAME)
3962 {
3963 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
3964 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
3965 &then_def_stmt, &def, &dt))
3966 return false;
3967 }
3968 else if (TREE_CODE (then_clause) != INTEGER_CST
3969 && TREE_CODE (then_clause) != REAL_CST
3970 && TREE_CODE (then_clause) != FIXED_CST)
3971 return false;
3972
3973 if (TREE_CODE (else_clause) == SSA_NAME)
3974 {
3975 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
3976 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
3977 &else_def_stmt, &def, &dt))
3978 return false;
3979 }
3980 else if (TREE_CODE (else_clause) != INTEGER_CST
3981 && TREE_CODE (else_clause) != REAL_CST
3982 && TREE_CODE (else_clause) != FIXED_CST)
3983 return false;
3984
3985
3986 vec_mode = TYPE_MODE (vectype);
3987
3988 if (!vec_stmt)
3989 {
3990 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
3991 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
3992 }
3993
3994 /* Transform */
3995
3996 /* Handle def. */
3997 scalar_dest = gimple_assign_lhs (stmt);
3998 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3999
4000 /* Handle cond expr. */
4001 vec_cond_lhs =
4002 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
4003 vec_cond_rhs =
4004 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
4005 if (reduc_index == 1)
4006 vec_then_clause = reduc_def;
4007 else
4008 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
4009 if (reduc_index == 2)
4010 vec_else_clause = reduc_def;
4011 else
4012 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
4013
4014 /* Arguments are ready. Create the new vector stmt. */
4015 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4016 vec_cond_lhs, vec_cond_rhs);
4017 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4018 vec_compare, vec_then_clause, vec_else_clause);
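      /* The generated stmt thus has the shape (illustration only; operand
         names are placeholders):
           vdest = VEC_COND_EXPR <vlhs CMP vrhs, vthen, velse>
         where CMP is the scalar comparison code and each result element is
         taken from vthen or velse according to the element-wise comparison.  */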
4019
4020 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4021 new_temp = make_ssa_name (vec_dest, *vec_stmt);
4022 gimple_assign_set_lhs (*vec_stmt, new_temp);
4023 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
4024
4025 return true;
4026 }
4027
4028
4029 /* Make sure the statement is vectorizable. */
4030
4031 bool
4032 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4033 {
4034 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4035 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4036 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4037 bool ok;
4038 tree scalar_type, vectype;
4039
4040 if (vect_print_dump_info (REPORT_DETAILS))
4041 {
4042 fprintf (vect_dump, "==> examining statement: ");
4043 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4044 }
4045
4046 if (gimple_has_volatile_ops (stmt))
4047 {
4048 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4049 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4050
4051 return false;
4052 }
4053
4054 /* Skip stmts that do not need to be vectorized. In loops this is expected
4055 to include:
4056 - the COND_EXPR which is the loop exit condition
4057 - any LABEL_EXPRs in the loop
4058 - computations that are used only for array indexing or loop control.
4059 In basic blocks we only analyze statements that are a part of some SLP
4060 instance, therefore, all the statements are relevant. */
4061
4062 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4063 && !STMT_VINFO_LIVE_P (stmt_info))
4064 {
4065 if (vect_print_dump_info (REPORT_DETAILS))
4066 fprintf (vect_dump, "irrelevant.");
4067
4068 return true;
4069 }
4070
4071 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4072 {
4073 case vect_internal_def:
4074 break;
4075
4076 case vect_reduction_def:
4077 case vect_nested_cycle:
4078 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4079 || relevance == vect_used_in_outer_by_reduction
4080 || relevance == vect_unused_in_scope));
4081 break;
4082
4083 case vect_induction_def:
4084 case vect_constant_def:
4085 case vect_external_def:
4086 case vect_unknown_def_type:
4087 default:
4088 gcc_unreachable ();
4089 }
4090
4091 if (bb_vinfo)
4092 {
4093 gcc_assert (PURE_SLP_STMT (stmt_info));
4094
4095 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4096 if (vect_print_dump_info (REPORT_DETAILS))
4097 {
4098 fprintf (vect_dump, "get vectype for scalar type: ");
4099 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4100 }
4101
4102 vectype = get_vectype_for_scalar_type (scalar_type);
4103 if (!vectype)
4104 {
4105 if (vect_print_dump_info (REPORT_DETAILS))
4106 {
4107 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4108 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4109 }
4110 return false;
4111 }
4112
4113 if (vect_print_dump_info (REPORT_DETAILS))
4114 {
4115 fprintf (vect_dump, "vectype: ");
4116 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4117 }
4118
4119 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4120 }
4121
4122 if (STMT_VINFO_RELEVANT_P (stmt_info))
4123 {
4124 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4125 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4126 *need_to_vectorize = true;
4127 }
4128
4129 ok = true;
4130 if (!bb_vinfo
4131 && (STMT_VINFO_RELEVANT_P (stmt_info)
4132 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4133 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4134 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4135 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4136 || vectorizable_operation (stmt, NULL, NULL, NULL)
4137 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4138 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4139 || vectorizable_call (stmt, NULL, NULL)
4140 || vectorizable_store (stmt, NULL, NULL, NULL)
4141 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4142 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4143 else
4144 {
4145 if (bb_vinfo)
4146 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4147 || vectorizable_assignment (stmt, NULL, NULL, node)
4148 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4149 || vectorizable_store (stmt, NULL, NULL, node));
4150 }
4151
4152 if (!ok)
4153 {
4154 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4155 {
4156 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4157 fprintf (vect_dump, "supported: ");
4158 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4159 }
4160
4161 return false;
4162 }
4163
4164 if (bb_vinfo)
4165 return true;
4166
4167   /* Stmts that are (also) "live" (i.e. - that are used outside of the loop)
4168 need extra handling, except for vectorizable reductions. */
4169 if (STMT_VINFO_LIVE_P (stmt_info)
4170 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4171 ok = vectorizable_live_operation (stmt, NULL, NULL);
4172
4173 if (!ok)
4174 {
4175 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4176 {
4177 fprintf (vect_dump, "not vectorized: live stmt not ");
4178 fprintf (vect_dump, "supported: ");
4179 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4180 }
4181
4182 return false;
4183 }
4184
4185 if (!PURE_SLP_STMT (stmt_info))
4186 {
4187 /* Groups of strided accesses whose size is not a power of 2 are not
4188 vectorizable yet using loop-vectorization. Therefore, if this stmt
4189 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4190 loop-based vectorized), the loop cannot be vectorized. */
4191 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4192 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4193 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4194 {
4195 if (vect_print_dump_info (REPORT_DETAILS))
4196 {
4197 fprintf (vect_dump, "not vectorized: the size of group "
4198 "of strided accesses is not a power of 2");
4199 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4200 }
4201
4202 return false;
4203 }
4204 }
4205
4206 return true;
4207 }
4208
4209
4210 /* Function vect_transform_stmt.
4211
4212 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4213
4214 bool
4215 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4216 bool *strided_store, slp_tree slp_node,
4217 slp_instance slp_node_instance)
4218 {
4219 bool is_store = false;
4220 gimple vec_stmt = NULL;
4221 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4222 gimple orig_stmt_in_pattern;
4223 bool done;
4224
4225 switch (STMT_VINFO_TYPE (stmt_info))
4226 {
4227 case type_demotion_vec_info_type:
4228 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4229 gcc_assert (done);
4230 break;
4231
4232 case type_promotion_vec_info_type:
4233 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4234 gcc_assert (done);
4235 break;
4236
4237 case type_conversion_vec_info_type:
4238 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4239 gcc_assert (done);
4240 break;
4241
4242 case induc_vec_info_type:
4243 gcc_assert (!slp_node);
4244 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4245 gcc_assert (done);
4246 break;
4247
4248 case op_vec_info_type:
4249 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4250 gcc_assert (done);
4251 break;
4252
4253 case assignment_vec_info_type:
4254 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4255 gcc_assert (done);
4256 break;
4257
4258 case load_vec_info_type:
4259 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4260 slp_node_instance);
4261 gcc_assert (done);
4262 break;
4263
4264 case store_vec_info_type:
4265 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4266 gcc_assert (done);
4267 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4268 {
4269 /* In case of interleaving, the whole chain is vectorized when the
4270 last store in the chain is reached. Store stmts before the last
4271            one are skipped, and their vec_stmt_info shouldn't be freed
4272 meanwhile. */
4273 *strided_store = true;
4274 if (STMT_VINFO_VEC_STMT (stmt_info))
4275 is_store = true;
4276 }
4277 else
4278 is_store = true;
4279 break;
4280
4281 case condition_vec_info_type:
4282 gcc_assert (!slp_node);
4283 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4284 gcc_assert (done);
4285 break;
4286
4287 case call_vec_info_type:
4288 gcc_assert (!slp_node);
4289 done = vectorizable_call (stmt, gsi, &vec_stmt);
4290 break;
4291
4292 case reduc_vec_info_type:
4293 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
4294 gcc_assert (done);
4295 break;
4296
4297 default:
4298 if (!STMT_VINFO_LIVE_P (stmt_info))
4299 {
4300 if (vect_print_dump_info (REPORT_DETAILS))
4301 fprintf (vect_dump, "stmt not supported.");
4302 gcc_unreachable ();
4303 }
4304 }
4305
4306 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4307 is being vectorized, but outside the immediately enclosing loop. */
4308 if (vec_stmt
4309 && STMT_VINFO_LOOP_VINFO (stmt_info)
4310 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4311 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4312 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4313 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4314 || STMT_VINFO_RELEVANT (stmt_info) ==
4315 vect_used_in_outer_by_reduction))
4316 {
4317 struct loop *innerloop = LOOP_VINFO_LOOP (
4318 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4319 imm_use_iterator imm_iter;
4320 use_operand_p use_p;
4321 tree scalar_dest;
4322 gimple exit_phi;
4323
4324 if (vect_print_dump_info (REPORT_DETAILS))
4325 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4326
4327       /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4328 (to be used when vectorizing outer-loop stmts that use the DEF of
4329 STMT). */
4330 if (gimple_code (stmt) == GIMPLE_PHI)
4331 scalar_dest = PHI_RESULT (stmt);
4332 else
4333 scalar_dest = gimple_assign_lhs (stmt);
4334
4335 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4336 {
4337 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4338 {
4339 exit_phi = USE_STMT (use_p);
4340 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4341 }
4342 }
4343 }
4344
4345 /* Handle stmts whose DEF is used outside the loop-nest that is
4346 being vectorized. */
4347 if (STMT_VINFO_LIVE_P (stmt_info)
4348 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4349 {
4350 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4351 gcc_assert (done);
4352 }
4353
4354 if (vec_stmt)
4355 {
4356 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4357 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4358 if (orig_stmt_in_pattern)
4359 {
4360 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4361 /* STMT was inserted by the vectorizer to replace a computation idiom.
4362 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4363 computed this idiom. We need to record a pointer to VEC_STMT in
4364 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4365 documentation of vect_pattern_recog. */
4366 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4367 {
4368 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4369 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4370 }
4371 }
4372 }
4373
4374 return is_store;
4375 }
4376
4377
4378 /* Remove a group of stores (for SLP or interleaving), free their
4379 stmt_vec_info. */
4380
4381 void
4382 vect_remove_stores (gimple first_stmt)
4383 {
4384 gimple next = first_stmt;
4385 gimple tmp;
4386 gimple_stmt_iterator next_si;
4387
4388 while (next)
4389 {
4390 /* Free the attached stmt_vec_info and remove the stmt. */
4391 next_si = gsi_for_stmt (next);
4392 gsi_remove (&next_si, true);
4393 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4394 free_stmt_vec_info (next);
4395 next = tmp;
4396 }
4397 }
4398
4399
4400 /* Function new_stmt_vec_info.
4401
4402 Create and initialize a new stmt_vec_info struct for STMT. */
4403
4404 stmt_vec_info
4405 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4406 bb_vec_info bb_vinfo)
4407 {
4408 stmt_vec_info res;
4409 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4410
4411 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4412 STMT_VINFO_STMT (res) = stmt;
4413 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4414 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4415 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4416 STMT_VINFO_LIVE_P (res) = false;
4417 STMT_VINFO_VECTYPE (res) = NULL;
4418 STMT_VINFO_VEC_STMT (res) = NULL;
4419 STMT_VINFO_VECTORIZABLE (res) = true;
4420 STMT_VINFO_IN_PATTERN_P (res) = false;
4421 STMT_VINFO_RELATED_STMT (res) = NULL;
4422 STMT_VINFO_DATA_REF (res) = NULL;
4423
4424 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4425 STMT_VINFO_DR_OFFSET (res) = NULL;
4426 STMT_VINFO_DR_INIT (res) = NULL;
4427 STMT_VINFO_DR_STEP (res) = NULL;
4428 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4429
4430 if (gimple_code (stmt) == GIMPLE_PHI
4431 && is_loop_header_bb_p (gimple_bb (stmt)))
4432 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4433 else
4434 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4435
4436 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4437 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4438 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4439 STMT_SLP_TYPE (res) = loop_vect;
4440 DR_GROUP_FIRST_DR (res) = NULL;
4441 DR_GROUP_NEXT_DR (res) = NULL;
4442 DR_GROUP_SIZE (res) = 0;
4443 DR_GROUP_STORE_COUNT (res) = 0;
4444 DR_GROUP_GAP (res) = 0;
4445 DR_GROUP_SAME_DR_STMT (res) = NULL;
4446 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4447
4448 return res;
4449 }
4450
4451
4452 /* Create the vector that maps stmts to their stmt_vec_info.  */
4453
4454 void
4455 init_stmt_vec_info_vec (void)
4456 {
4457 gcc_assert (!stmt_vec_info_vec);
4458 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4459 }
4460
4461
4462 /* Free the stmt_vec_info vector.  */
4463
4464 void
4465 free_stmt_vec_info_vec (void)
4466 {
4467 gcc_assert (stmt_vec_info_vec);
4468 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4469 }
4470
4471
4472 /* Free stmt vectorization related info. */
4473
4474 void
4475 free_stmt_vec_info (gimple stmt)
4476 {
4477 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4478
4479 if (!stmt_info)
4480 return;
4481
4482 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4483 set_vinfo_for_stmt (stmt, NULL);
4484 free (stmt_info);
4485 }
4486
4487
4488 /* Function get_vectype_for_scalar_type.
4489
4490 Returns the vector type corresponding to SCALAR_TYPE as supported
4491 by the target. */
4492
4493 tree
4494 get_vectype_for_scalar_type (tree scalar_type)
4495 {
4496 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
4497 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
4498 int nunits;
4499 tree vectype;
4500
4501 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
4502 return NULL_TREE;
4503
4504 /* We can't build a vector type of elements with alignment bigger than
4505 their size. */
4506 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
4507 return NULL_TREE;
4508
4509 /* If we'd build a vector type of elements whose mode precision doesn't
4510      match their type's precision we'll get mismatched types on vector
4511 extracts via BIT_FIELD_REFs. This effectively means we disable
4512 vectorization of bool and/or enum types in some languages. */
4513 if (INTEGRAL_TYPE_P (scalar_type)
4514 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
4515 return NULL_TREE;
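   /* (For instance - an example, not an exhaustive list - a boolean type whose
      mode is QImode, i.e. 8 bits, but whose TYPE_PRECISION is 1 is rejected by
      the check above.)  */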
4516
4517 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
4518 is expected. */
4519 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
4520
4521 vectype = build_vector_type (scalar_type, nunits);
4522 if (vect_print_dump_info (REPORT_DETAILS))
4523 {
4524 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4525 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4526 }
4527
4528 if (!vectype)
4529 return NULL_TREE;
4530
4531 if (vect_print_dump_info (REPORT_DETAILS))
4532 {
4533 fprintf (vect_dump, "vectype: ");
4534 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4535 }
4536
4537 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4538 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4539 {
4540 if (vect_print_dump_info (REPORT_DETAILS))
4541 fprintf (vect_dump, "mode not supported by target.");
4542 return NULL_TREE;
4543 }
4544
4545 return vectype;
4546 }
4547
4548 /* Function get_same_sized_vectype
4549
4550    Returns a vector type corresponding to SCALAR_TYPE, with the same size
4551    as VECTOR_TYPE, if supported by the target.  */
4552
4553 tree
4554 get_same_sized_vectype (tree scalar_type, tree vector_type ATTRIBUTE_UNUSED)
4555 {
4556 return get_vectype_for_scalar_type (scalar_type);
4557 }
4558
4559 /* Function vect_is_simple_use.
4560
4561 Input:
4562 LOOP_VINFO - the vect info of the loop that is being vectorized.
4563 BB_VINFO - the vect info of the basic block that is being vectorized.
4564 OPERAND - operand of a stmt in the loop or bb.
4565 DEF - the defining stmt in case OPERAND is an SSA_NAME.
4566
4567 Returns whether a stmt with OPERAND can be vectorized.
4568 For loops, supportable operands are constants, loop invariants, and operands
4569 that are defined by the current iteration of the loop. Unsupportable
4570 operands are those that are defined by a previous iteration of the loop (as
4571 is the case in reduction/induction computations).
4572 For basic blocks, supportable operands are constants and bb invariants.
4573 For now, operands defined outside the basic block are not supported. */
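/* An illustration (example only): in  for (i = 0; i < n; i++) a[i] = b[i] + x;
   an operand holding the loop-invariant `x' is classified vect_external_def,
   a constant operand is vect_constant_def, and an operand defined by another
   stmt of the same loop iteration is vect_internal_def.  */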
4574
4575 bool
4576 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
4577 bb_vec_info bb_vinfo, gimple *def_stmt,
4578 tree *def, enum vect_def_type *dt)
4579 {
4580 basic_block bb;
4581 stmt_vec_info stmt_vinfo;
4582 struct loop *loop = NULL;
4583
4584 if (loop_vinfo)
4585 loop = LOOP_VINFO_LOOP (loop_vinfo);
4586
4587 *def_stmt = NULL;
4588 *def = NULL_TREE;
4589
4590 if (vect_print_dump_info (REPORT_DETAILS))
4591 {
4592 fprintf (vect_dump, "vect_is_simple_use: operand ");
4593 print_generic_expr (vect_dump, operand, TDF_SLIM);
4594 }
4595
4596 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4597 {
4598 *dt = vect_constant_def;
4599 return true;
4600 }
4601
4602 if (is_gimple_min_invariant (operand))
4603 {
4604 *def = operand;
4605 *dt = vect_external_def;
4606 return true;
4607 }
4608
4609 if (TREE_CODE (operand) == PAREN_EXPR)
4610 {
4611 if (vect_print_dump_info (REPORT_DETAILS))
4612 fprintf (vect_dump, "non-associatable copy.");
4613 operand = TREE_OPERAND (operand, 0);
4614 }
4615
4616 if (TREE_CODE (operand) != SSA_NAME)
4617 {
4618 if (vect_print_dump_info (REPORT_DETAILS))
4619 fprintf (vect_dump, "not ssa-name.");
4620 return false;
4621 }
4622
4623 *def_stmt = SSA_NAME_DEF_STMT (operand);
4624 if (*def_stmt == NULL)
4625 {
4626 if (vect_print_dump_info (REPORT_DETAILS))
4627 fprintf (vect_dump, "no def_stmt.");
4628 return false;
4629 }
4630
4631 if (vect_print_dump_info (REPORT_DETAILS))
4632 {
4633 fprintf (vect_dump, "def_stmt: ");
4634 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4635 }
4636
4637 /* Empty stmt is expected only in case of a function argument.
4638 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
4639 if (gimple_nop_p (*def_stmt))
4640 {
4641 *def = operand;
4642 *dt = vect_external_def;
4643 return true;
4644 }
4645
4646 bb = gimple_bb (*def_stmt);
4647
4648 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4649 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
4650 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
4651 *dt = vect_external_def;
4652 else
4653 {
4654 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4655 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4656 }
4657
4658 if (*dt == vect_unknown_def_type)
4659 {
4660 if (vect_print_dump_info (REPORT_DETAILS))
4661 fprintf (vect_dump, "Unsupported pattern.");
4662 return false;
4663 }
4664
4665 if (vect_print_dump_info (REPORT_DETAILS))
4666 fprintf (vect_dump, "type of def: %d.",*dt);
4667
4668 switch (gimple_code (*def_stmt))
4669 {
4670 case GIMPLE_PHI:
4671 *def = gimple_phi_result (*def_stmt);
4672 break;
4673
4674 case GIMPLE_ASSIGN:
4675 *def = gimple_assign_lhs (*def_stmt);
4676 break;
4677
4678 case GIMPLE_CALL:
4679 *def = gimple_call_lhs (*def_stmt);
4680 if (*def != NULL)
4681 break;
4682 /* FALLTHRU */
4683 default:
4684 if (vect_print_dump_info (REPORT_DETAILS))
4685 fprintf (vect_dump, "unsupported defining stmt: ");
4686 return false;
4687 }
4688
4689 return true;
4690 }
4691
4692 /* Function vect_is_simple_use_1.
4693
4694    Same as vect_is_simple_use but also determines the vector operand
4695 type of OPERAND and stores it to *VECTYPE. If the definition of
4696 OPERAND is vect_uninitialized_def, vect_constant_def or
4697 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
4698    is responsible for computing the best suited vector type for the
4699 scalar operand. */
4700
4701 bool
4702 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
4703 bb_vec_info bb_vinfo, gimple *def_stmt,
4704 tree *def, enum vect_def_type *dt, tree *vectype)
4705 {
4706 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
4707 return false;
4708
4709 /* Now get a vector type if the def is internal, otherwise supply
4710 NULL_TREE and leave it up to the caller to figure out a proper
4711 type for the use stmt. */
4712 if (*dt == vect_internal_def
4713 || *dt == vect_induction_def
4714 || *dt == vect_reduction_def
4715 || *dt == vect_double_reduction_def
4716 || *dt == vect_nested_cycle)
4717 {
4718 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
4719 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
4720 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
4721 *vectype = STMT_VINFO_VECTYPE (stmt_info);
4722 gcc_assert (*vectype != NULL_TREE);
4723 }
4724 else if (*dt == vect_uninitialized_def
4725 || *dt == vect_constant_def
4726 || *dt == vect_external_def)
4727 *vectype = NULL_TREE;
4728 else
4729 gcc_unreachable ();
4730
4731 return true;
4732 }
4733
4734
4735 /* Function supportable_widening_operation
4736
4737 Check whether an operation represented by the code CODE is a
4738 widening operation that is supported by the target platform in
4739 vector form (i.e., when operating on arguments of type VECTYPE_IN
4740 producing a result of type VECTYPE_OUT).
4741
4742 Widening operations we currently support are NOP (CONVERT), FLOAT
4743 and WIDEN_MULT. This function checks if these operations are supported
4744 by the target platform either directly (via vector tree-codes), or via
4745 target builtins.
4746
4747 Output:
4748 - CODE1 and CODE2 are codes of vector operations to be used when
4749 vectorizing the operation, if available.
4750 - DECL1 and DECL2 are decls of target builtin functions to be used
4751 when vectorizing the operation, if available. In this case,
4752 CODE1 and CODE2 are CALL_EXPR.
4753 - MULTI_STEP_CVT determines the number of required intermediate steps in
4754 case of multi-step conversion (like char->short->int - in that case
4755 MULTI_STEP_CVT will be 1).
4756 - INTERM_TYPES contains the intermediate type required to perform the
4757 widening operation (short in the above example). */
4758
4759 bool
4760 supportable_widening_operation (enum tree_code code, gimple stmt,
4761 tree vectype_out, tree vectype_in,
4762 tree *decl1, tree *decl2,
4763 enum tree_code *code1, enum tree_code *code2,
4764 int *multi_step_cvt,
4765 VEC (tree, heap) **interm_types)
4766 {
4767 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4768 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4769 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
4770 bool ordered_p;
4771 enum machine_mode vec_mode;
4772 enum insn_code icode1, icode2;
4773 optab optab1, optab2;
4774 tree vectype = vectype_in;
4775 tree wide_vectype = vectype_out;
4776 enum tree_code c1, c2;
4777
4778 /* The result of a vectorized widening operation usually requires two vectors
4779    (because the widened results do not fit in one vector).  The generated
4780 vector results would normally be expected to be generated in the same
4781 order as in the original scalar computation, i.e. if 8 results are
4782 generated in each vector iteration, they are to be organized as follows:
4783 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
4784
4785 However, in the special case that the result of the widening operation is
4786 used in a reduction computation only, the order doesn't matter (because
4787 when vectorizing a reduction we change the order of the computation).
4788 Some targets can take advantage of this and generate more efficient code.
4789 For example, targets like Altivec, that support widen_mult using a sequence
4790 of {mult_even,mult_odd} generate the following vectors:
4791 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
4792
4793 When vectorizing outer-loops, we execute the inner-loop sequentially
4794 (each vectorized inner-loop iteration contributes to VF outer-loop
4795    iterations in parallel).  We therefore don't allow changing the order
4796 of the computation in the inner-loop during outer-loop vectorization. */
4797
4798 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
4799 && !nested_in_vect_loop_p (vect_loop, stmt))
4800 ordered_p = false;
4801 else
4802 ordered_p = true;
4803
4804 if (!ordered_p
4805 && code == WIDEN_MULT_EXPR
4806 && targetm.vectorize.builtin_mul_widen_even
4807 && targetm.vectorize.builtin_mul_widen_even (vectype)
4808 && targetm.vectorize.builtin_mul_widen_odd
4809 && targetm.vectorize.builtin_mul_widen_odd (vectype))
4810 {
4811 if (vect_print_dump_info (REPORT_DETAILS))
4812 fprintf (vect_dump, "Unordered widening operation detected.");
4813
4814 *code1 = *code2 = CALL_EXPR;
4815 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
4816 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
4817 return true;
4818 }
4819
4820 switch (code)
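   /* Select the tree codes for the two halves of the widened result.  C1 must
      be the variant that produces the lower-indexed half of the results in
      memory order; whether that is the _HI_ or the _LO_ code depends on the
      target's endianness, hence the BYTES_BIG_ENDIAN tests below.  */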
4821 {
4822 case WIDEN_MULT_EXPR:
4823 if (BYTES_BIG_ENDIAN)
4824 {
4825 c1 = VEC_WIDEN_MULT_HI_EXPR;
4826 c2 = VEC_WIDEN_MULT_LO_EXPR;
4827 }
4828 else
4829 {
4830 c2 = VEC_WIDEN_MULT_HI_EXPR;
4831 c1 = VEC_WIDEN_MULT_LO_EXPR;
4832 }
4833 break;
4834
4835 CASE_CONVERT:
4836 if (BYTES_BIG_ENDIAN)
4837 {
4838 c1 = VEC_UNPACK_HI_EXPR;
4839 c2 = VEC_UNPACK_LO_EXPR;
4840 }
4841 else
4842 {
4843 c2 = VEC_UNPACK_HI_EXPR;
4844 c1 = VEC_UNPACK_LO_EXPR;
4845 }
4846 break;
4847
4848 case FLOAT_EXPR:
4849 if (BYTES_BIG_ENDIAN)
4850 {
4851 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
4852 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
4853 }
4854 else
4855 {
4856 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
4857 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
4858 }
4859 break;
4860
4861 case FIX_TRUNC_EXPR:
4862 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
4863 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
4864 computing the operation. */
4865 return false;
4866
4867 default:
4868 gcc_unreachable ();
4869 }
4870
4871 if (code == FIX_TRUNC_EXPR)
4872 {
4873 /* The signedness is determined from output operand. */
4874 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
4875 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
4876 }
4877 else
4878 {
4879 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4880 optab2 = optab_for_tree_code (c2, vectype, optab_default);
4881 }
4882
4883 if (!optab1 || !optab2)
4884 return false;
4885
4886 vec_mode = TYPE_MODE (vectype);
4887 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
4888 || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
4889 == CODE_FOR_nothing)
4890 return false;
4891
4892 /* Check if it's a multi-step conversion that can be done using intermediate
4893 types. */
4894 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
4895 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
4896 {
4897 int i;
4898 tree prev_type = vectype, intermediate_type;
4899 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4900 optab optab3, optab4;
4901
4902 if (!CONVERT_EXPR_CODE_P (code))
4903 return false;
4904
4905 *code1 = c1;
4906 *code2 = c2;
4907
4908       /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4909          intermediate steps in the promotion sequence.  We try MAX_INTERM_CVT_STEPS
4910          to get to WIDE_VECTYPE, and fail if we do not.  */
4911 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4912 for (i = 0; i < 3; i++)
4913 {
4914 intermediate_mode = insn_data[icode1].operand[0].mode;
4915 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4916 TYPE_UNSIGNED (prev_type));
4917 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
4918 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
4919
4920 if (!optab3 || !optab4
4921 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4922 == CODE_FOR_nothing
4923 || insn_data[icode1].operand[0].mode != intermediate_mode
4924 || (icode2 = optab2->handlers[(int) prev_mode].insn_code)
4925 == CODE_FOR_nothing
4926 || insn_data[icode2].operand[0].mode != intermediate_mode
4927 || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
4928 == CODE_FOR_nothing
4929 || (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
4930 == CODE_FOR_nothing)
4931 return false;
4932
4933 VEC_quick_push (tree, *interm_types, intermediate_type);
4934 (*multi_step_cvt)++;
4935
4936 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
4937 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
4938 return true;
4939
4940 prev_type = intermediate_type;
4941 prev_mode = intermediate_mode;
4942 }
4943
4944 return false;
4945 }
4946
4947 *code1 = c1;
4948 *code2 = c2;
4949 return true;
4950 }
4951
4952
4953 /* Function supportable_narrowing_operation
4954
4955 Check whether an operation represented by the code CODE is a
4956 narrowing operation that is supported by the target platform in
4957 vector form (i.e., when operating on arguments of type VECTYPE_IN
4958 and producing a result of type VECTYPE_OUT).
4959
4960 Narrowing operations we currently support are NOP (CONVERT) and
4961 FIX_TRUNC. This function checks if these operations are supported by
4962 the target platform directly via vector tree-codes.
4963
4964 Output:
4965 - CODE1 is the code of a vector operation to be used when
4966 vectorizing the operation, if available.
4967 - MULTI_STEP_CVT determines the number of required intermediate steps in
4968 case of multi-step conversion (like int->short->char - in that case
4969 MULTI_STEP_CVT will be 1).
4970 - INTERM_TYPES contains the intermediate type required to perform the
4971 narrowing operation (short in the above example). */
4972
4973 bool
4974 supportable_narrowing_operation (enum tree_code code,
4975 tree vectype_out, tree vectype_in,
4976 enum tree_code *code1, int *multi_step_cvt,
4977 VEC (tree, heap) **interm_types)
4978 {
4979 enum machine_mode vec_mode;
4980 enum insn_code icode1;
4981 optab optab1, interm_optab;
4982 tree vectype = vectype_in;
4983 tree narrow_vectype = vectype_out;
4984 enum tree_code c1;
4985 tree intermediate_type, prev_type;
4986 int i;
4987
4988 switch (code)
4989 {
4990 CASE_CONVERT:
4991 c1 = VEC_PACK_TRUNC_EXPR;
4992 break;
4993
4994 case FIX_TRUNC_EXPR:
4995 c1 = VEC_PACK_FIX_TRUNC_EXPR;
4996 break;
4997
4998 case FLOAT_EXPR:
4999 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5000 tree code and optabs used for computing the operation. */
5001 return false;
5002
5003 default:
5004 gcc_unreachable ();
5005 }
5006
5007 if (code == FIX_TRUNC_EXPR)
5008 /* The signedness is determined from output operand. */
5009 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5010 else
5011 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5012
5013 if (!optab1)
5014 return false;
5015
5016 vec_mode = TYPE_MODE (vectype);
5017 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
5018 == CODE_FOR_nothing)
5019 return false;
5020
5021 /* Check if it's a multi-step conversion that can be done using intermediate
5022 types. */
5023 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5024 {
5025 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5026
5027 *code1 = c1;
5028 prev_type = vectype;
5029       /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5030          intermediate steps in the narrowing sequence.  We try MAX_INTERM_CVT_STEPS
5031          to get to NARROW_VECTYPE, and fail if we do not.  */
5032 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5033 for (i = 0; i < 3; i++)
5034 {
5035 intermediate_mode = insn_data[icode1].operand[0].mode;
5036 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5037 TYPE_UNSIGNED (prev_type));
5038 interm_optab = optab_for_tree_code (c1, intermediate_type,
5039 optab_default);
5040 if (!interm_optab
5041 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
5042 == CODE_FOR_nothing
5043 || insn_data[icode1].operand[0].mode != intermediate_mode
5044 || (icode1
5045 = interm_optab->handlers[(int) intermediate_mode].insn_code)
5046 == CODE_FOR_nothing)
5047 return false;
5048
5049 VEC_quick_push (tree, *interm_types, intermediate_type);
5050 (*multi_step_cvt)++;
5051
5052 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5053 return true;
5054
5055 prev_type = intermediate_type;
5056 prev_mode = intermediate_mode;
5057 }
5058
5059 return false;
5060 }
5061
5062 *code1 = c1;
5063 return true;
5064 }