gimple.c (gimple_rhs_class_table): POLYNOMIAL_CHREC is not a valid RHS.
[gcc.git] / gcc / tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "tree-pretty-print.h"
32 #include "gimple-pretty-print.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "cfglayout.h"
37 #include "expr.h"
38 #include "recog.h"
39 #include "optabs.h"
40 #include "diagnostic-core.h"
41 #include "tree-vectorizer.h"
42 #include "langhooks.h"
43
44
45 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
46
47 /* Function vect_mark_relevant.
48
49 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
50
51 static void
52 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
53 enum vect_relevant relevant, bool live_p)
54 {
55 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
56 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
57 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
58
59 if (vect_print_dump_info (REPORT_DETAILS))
60 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
61
62 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
63 {
64 gimple pattern_stmt;
65
66 /* This is the last stmt in a sequence that was detected as a
67 pattern that can potentially be vectorized. Don't mark the stmt
68 as relevant/live because it's not going to be vectorized.
69 Instead mark the pattern-stmt that replaces it. */
70
71 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
72
73 if (vect_print_dump_info (REPORT_DETAILS))
74 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
75 stmt_info = vinfo_for_stmt (pattern_stmt);
76 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
77 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
78 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
79 stmt = pattern_stmt;
80 }
81
82 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
83 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
84 STMT_VINFO_RELEVANT (stmt_info) = relevant;
85
86 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
87 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
88 {
89 if (vect_print_dump_info (REPORT_DETAILS))
90 fprintf (vect_dump, "already marked relevant/live.");
91 return;
92 }
93
94 VEC_safe_push (gimple, heap, *worklist, stmt);
95 }
96
97
98 /* Function vect_stmt_relevant_p.
99
100 Return true if STMT in loop that is represented by LOOP_VINFO is
101 "relevant for vectorization".
102
103 A stmt is considered "relevant for vectorization" if:
104 - it has uses outside the loop.
105 - it has vdefs (it alters memory).
106 - control stmts in the loop (except for the exit condition).
107
108 CHECKME: what other side effects would the vectorizer allow? */
109
110 static bool
111 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
112 enum vect_relevant *relevant, bool *live_p)
113 {
114 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
115 ssa_op_iter op_iter;
116 imm_use_iterator imm_iter;
117 use_operand_p use_p;
118 def_operand_p def_p;
119
120 *relevant = vect_unused_in_scope;
121 *live_p = false;
122
123 /* cond stmt other than loop exit cond. */
124 if (is_ctrl_stmt (stmt)
125 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
126 != loop_exit_ctrl_vec_info_type)
127 *relevant = vect_used_in_scope;
128
129 /* changing memory. */
130 if (gimple_code (stmt) != GIMPLE_PHI)
131 if (gimple_vdef (stmt))
132 {
133 if (vect_print_dump_info (REPORT_DETAILS))
134 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
135 *relevant = vect_used_in_scope;
136 }
137
138 /* uses outside the loop. */
139 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
140 {
141 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
142 {
143 basic_block bb = gimple_bb (USE_STMT (use_p));
144 if (!flow_bb_inside_loop_p (loop, bb))
145 {
146 if (vect_print_dump_info (REPORT_DETAILS))
147 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
148
149 if (is_gimple_debug (USE_STMT (use_p)))
150 continue;
151
152 /* We expect all such uses to be in the loop exit phis
153 (because of loop closed form) */
154 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
155 gcc_assert (bb == single_exit (loop)->dest);
156
157 *live_p = true;
158 }
159 }
160 }
161
162 return (*live_p || *relevant);
163 }
164
165
166 /* Function exist_non_indexing_operands_for_use_p
167
168 USE is one of the uses attached to STMT. Check if USE is
169 used in STMT for anything other than indexing an array. */
170
171 static bool
172 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
173 {
174 tree operand;
175 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
176
177 /* USE corresponds to some operand in STMT. If there is no data
178 reference in STMT, then any operand that corresponds to USE
179 is not indexing an array. */
180 if (!STMT_VINFO_DATA_REF (stmt_info))
181 return true;
182
183 /* STMT has a data_ref. FORNOW this means that its of one of
184 the following forms:
185 -1- ARRAY_REF = var
186 -2- var = ARRAY_REF
187 (This should have been verified in analyze_data_refs).
188
189 'var' in the second case corresponds to a def, not a use,
190 so USE cannot correspond to any operands that are not used
191 for array indexing.
192
193 Therefore, all we need to check is if STMT falls into the
194 first case, and whether var corresponds to USE. */
195
196 if (!gimple_assign_copy_p (stmt))
197 return false;
198 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
199 return false;
200 operand = gimple_assign_rhs1 (stmt);
201 if (TREE_CODE (operand) != SSA_NAME)
202 return false;
203
204 if (operand == use)
205 return true;
206
207 return false;
208 }
209
210
211 /*
212 Function process_use.
213
214 Inputs:
215 - a USE in STMT in a loop represented by LOOP_VINFO
216 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
217 that defined USE. This is done by calling mark_relevant and passing it
218 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
219
220 Outputs:
221 Generally, LIVE_P and RELEVANT are used to define the liveness and
222 relevance info of the DEF_STMT of this USE:
223 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
224 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
225 Exceptions:
226 - case 1: If USE is used only for address computations (e.g. array indexing),
227 which does not need to be directly vectorized, then the liveness/relevance
228 of the respective DEF_STMT is left unchanged.
229 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
230 skip DEF_STMT cause it had already been processed.
231 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
232 be modified accordingly.
233
234 Return true if everything is as expected. Return false otherwise. */
235
236 static bool
237 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
238 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
239 {
240 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
241 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
242 stmt_vec_info dstmt_vinfo;
243 basic_block bb, def_bb;
244 tree def;
245 gimple def_stmt;
246 enum vect_def_type dt;
247
248 /* case 1: we are only interested in uses that need to be vectorized. Uses
249 that are used for address computation are not considered relevant. */
250 if (!exist_non_indexing_operands_for_use_p (use, stmt))
251 return true;
252
253 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
254 {
255 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
256 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
257 return false;
258 }
259
260 if (!def_stmt || gimple_nop_p (def_stmt))
261 return true;
262
263 def_bb = gimple_bb (def_stmt);
264 if (!flow_bb_inside_loop_p (loop, def_bb))
265 {
266 if (vect_print_dump_info (REPORT_DETAILS))
267 fprintf (vect_dump, "def_stmt is out of loop.");
268 return true;
269 }
270
271 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
272 DEF_STMT must have already been processed, because this should be the
273 only way that STMT, which is a reduction-phi, was put in the worklist,
274 as there should be no other uses for DEF_STMT in the loop. So we just
275 check that everything is as expected, and we are done. */
276 dstmt_vinfo = vinfo_for_stmt (def_stmt);
277 bb = gimple_bb (stmt);
278 if (gimple_code (stmt) == GIMPLE_PHI
279 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
280 && gimple_code (def_stmt) != GIMPLE_PHI
281 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
282 && bb->loop_father == def_bb->loop_father)
283 {
284 if (vect_print_dump_info (REPORT_DETAILS))
285 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
286 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
287 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
288 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
289 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
290 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
291 return true;
292 }
293
294 /* case 3a: outer-loop stmt defining an inner-loop stmt:
295 outer-loop-header-bb:
296 d = def_stmt
297 inner-loop:
298 stmt # use (d)
299 outer-loop-tail-bb:
300 ... */
301 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
302 {
303 if (vect_print_dump_info (REPORT_DETAILS))
304 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
305
306 switch (relevant)
307 {
308 case vect_unused_in_scope:
309 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
310 vect_used_in_scope : vect_unused_in_scope;
311 break;
312
313 case vect_used_in_outer_by_reduction:
314 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
315 relevant = vect_used_by_reduction;
316 break;
317
318 case vect_used_in_outer:
319 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
320 relevant = vect_used_in_scope;
321 break;
322
323 case vect_used_in_scope:
324 break;
325
326 default:
327 gcc_unreachable ();
328 }
329 }
330
331 /* case 3b: inner-loop stmt defining an outer-loop stmt:
332 outer-loop-header-bb:
333 ...
334 inner-loop:
335 d = def_stmt
336 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
337 stmt # use (d) */
338 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
339 {
340 if (vect_print_dump_info (REPORT_DETAILS))
341 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
342
343 switch (relevant)
344 {
345 case vect_unused_in_scope:
346 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
347 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
348 vect_used_in_outer_by_reduction : vect_unused_in_scope;
349 break;
350
351 case vect_used_by_reduction:
352 relevant = vect_used_in_outer_by_reduction;
353 break;
354
355 case vect_used_in_scope:
356 relevant = vect_used_in_outer;
357 break;
358
359 default:
360 gcc_unreachable ();
361 }
362 }
363
364 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
365 return true;
366 }
367
368
369 /* Function vect_mark_stmts_to_be_vectorized.
370
371 Not all stmts in the loop need to be vectorized. For example:
372
373 for i...
374 for j...
375 1. T0 = i + j
376 2. T1 = a[T0]
377
378 3. j = j + 1
379
380 Stmt 1 and 3 do not need to be vectorized, because loop control and
381 addressing of vectorized data-refs are handled differently.
382
383 This pass detects such stmts. */
384
385 bool
386 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
387 {
388 VEC(gimple,heap) *worklist;
389 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
390 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
391 unsigned int nbbs = loop->num_nodes;
392 gimple_stmt_iterator si;
393 gimple stmt;
394 unsigned int i;
395 stmt_vec_info stmt_vinfo;
396 basic_block bb;
397 gimple phi;
398 bool live_p;
399 enum vect_relevant relevant, tmp_relevant;
400 enum vect_def_type def_type;
401
402 if (vect_print_dump_info (REPORT_DETAILS))
403 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
404
405 worklist = VEC_alloc (gimple, heap, 64);
406
407 /* 1. Init worklist. */
408 for (i = 0; i < nbbs; i++)
409 {
410 bb = bbs[i];
411 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
412 {
413 phi = gsi_stmt (si);
414 if (vect_print_dump_info (REPORT_DETAILS))
415 {
416 fprintf (vect_dump, "init: phi relevant? ");
417 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
418 }
419
420 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
421 vect_mark_relevant (&worklist, phi, relevant, live_p);
422 }
423 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
424 {
425 stmt = gsi_stmt (si);
426 if (vect_print_dump_info (REPORT_DETAILS))
427 {
428 fprintf (vect_dump, "init: stmt relevant? ");
429 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
430 }
431
432 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
433 vect_mark_relevant (&worklist, stmt, relevant, live_p);
434 }
435 }
436
437 /* 2. Process_worklist */
438 while (VEC_length (gimple, worklist) > 0)
439 {
440 use_operand_p use_p;
441 ssa_op_iter iter;
442
443 stmt = VEC_pop (gimple, worklist);
444 if (vect_print_dump_info (REPORT_DETAILS))
445 {
446 fprintf (vect_dump, "worklist: examine stmt: ");
447 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
448 }
449
450 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
451 (DEF_STMT) as relevant/irrelevant and live/dead according to the
452 liveness and relevance properties of STMT. */
453 stmt_vinfo = vinfo_for_stmt (stmt);
454 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
455 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
456
457 /* Generally, the liveness and relevance properties of STMT are
458 propagated as is to the DEF_STMTs of its USEs:
459 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
460 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
461
462 One exception is when STMT has been identified as defining a reduction
463 variable; in this case we set the liveness/relevance as follows:
464 live_p = false
465 relevant = vect_used_by_reduction
466 This is because we distinguish between two kinds of relevant stmts -
467 those that are used by a reduction computation, and those that are
468 (also) used by a regular computation. This allows us later on to
469 identify stmts that are used solely by a reduction, and therefore the
470 order of the results that they produce does not have to be kept. */
471
472 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
473 tmp_relevant = relevant;
474 switch (def_type)
475 {
476 case vect_reduction_def:
477 switch (tmp_relevant)
478 {
479 case vect_unused_in_scope:
480 relevant = vect_used_by_reduction;
481 break;
482
483 case vect_used_by_reduction:
484 if (gimple_code (stmt) == GIMPLE_PHI)
485 break;
486 /* fall through */
487
488 default:
489 if (vect_print_dump_info (REPORT_DETAILS))
490 fprintf (vect_dump, "unsupported use of reduction.");
491
492 VEC_free (gimple, heap, worklist);
493 return false;
494 }
495
496 live_p = false;
497 break;
498
499 case vect_nested_cycle:
500 if (tmp_relevant != vect_unused_in_scope
501 && tmp_relevant != vect_used_in_outer_by_reduction
502 && tmp_relevant != vect_used_in_outer)
503 {
504 if (vect_print_dump_info (REPORT_DETAILS))
505 fprintf (vect_dump, "unsupported use of nested cycle.");
506
507 VEC_free (gimple, heap, worklist);
508 return false;
509 }
510
511 live_p = false;
512 break;
513
514 case vect_double_reduction_def:
515 if (tmp_relevant != vect_unused_in_scope
516 && tmp_relevant != vect_used_by_reduction)
517 {
518 if (vect_print_dump_info (REPORT_DETAILS))
519 fprintf (vect_dump, "unsupported use of double reduction.");
520
521 VEC_free (gimple, heap, worklist);
522 return false;
523 }
524
525 live_p = false;
526 break;
527
528 default:
529 break;
530 }
531
532 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
533 {
534 tree op = USE_FROM_PTR (use_p);
535 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
536 {
537 VEC_free (gimple, heap, worklist);
538 return false;
539 }
540 }
541 } /* while worklist */
542
543 VEC_free (gimple, heap, worklist);
544 return true;
545 }
546
547
548 /* Get cost by calling cost target builtin. */
549
550 static inline
551 int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
552 {
553 tree dummy_type = NULL;
554 int dummy = 0;
555
556 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
557 dummy_type, dummy);
558 }
559
560
561 /* Get cost for STMT. */
562
563 int
564 cost_for_stmt (gimple stmt)
565 {
566 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
567
568 switch (STMT_VINFO_TYPE (stmt_info))
569 {
570 case load_vec_info_type:
571 return vect_get_stmt_cost (scalar_load);
572 case store_vec_info_type:
573 return vect_get_stmt_cost (scalar_store);
574 case op_vec_info_type:
575 case condition_vec_info_type:
576 case assignment_vec_info_type:
577 case reduc_vec_info_type:
578 case induc_vec_info_type:
579 case type_promotion_vec_info_type:
580 case type_demotion_vec_info_type:
581 case type_conversion_vec_info_type:
582 case call_vec_info_type:
583 return vect_get_stmt_cost (scalar_stmt);
584 case undef_vec_info_type:
585 default:
586 gcc_unreachable ();
587 }
588 }
589
590 /* Function vect_model_simple_cost.
591
592 Models cost for simple operations, i.e. those that only emit ncopies of a
593 single op. Right now, this does not account for multiple insns that could
594 be generated for the single vector op. We will handle that shortly. */
595
596 void
597 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
598 enum vect_def_type *dt, slp_tree slp_node)
599 {
600 int i;
601 int inside_cost = 0, outside_cost = 0;
602
603 /* The SLP costs were already calculated during SLP tree build. */
604 if (PURE_SLP_STMT (stmt_info))
605 return;
606
607 inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);
608
609 /* FORNOW: Assuming maximum 2 args per stmts. */
610 for (i = 0; i < 2; i++)
611 {
612 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
613 outside_cost += vect_get_stmt_cost (vector_stmt);
614 }
615
616 if (vect_print_dump_info (REPORT_COST))
617 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
618 "outside_cost = %d .", inside_cost, outside_cost);
619
620 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
621 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
622 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
623 }
624
625
626 /* Function vect_cost_strided_group_size
627
628 For strided load or store, return the group_size only if it is the first
629 load or store of a group, else return 1. This ensures that group size is
630 only returned once per group. */
631
632 static int
633 vect_cost_strided_group_size (stmt_vec_info stmt_info)
634 {
635 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
636
637 if (first_stmt == STMT_VINFO_STMT (stmt_info))
638 return DR_GROUP_SIZE (stmt_info);
639
640 return 1;
641 }
642
643
644 /* Function vect_model_store_cost
645
646 Models cost for stores. In the case of strided accesses, one access
647 has the overhead of the strided access attributed to it. */
648
649 void
650 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
651 enum vect_def_type dt, slp_tree slp_node)
652 {
653 int group_size;
654 unsigned int inside_cost = 0, outside_cost = 0;
655 struct data_reference *first_dr;
656 gimple first_stmt;
657
658 /* The SLP costs were already calculated during SLP tree build. */
659 if (PURE_SLP_STMT (stmt_info))
660 return;
661
662 if (dt == vect_constant_def || dt == vect_external_def)
663 outside_cost = vect_get_stmt_cost (scalar_to_vec);
664
665 /* Strided access? */
666 if (DR_GROUP_FIRST_DR (stmt_info))
667 {
668 if (slp_node)
669 {
670 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
671 group_size = 1;
672 }
673 else
674 {
675 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
676 group_size = vect_cost_strided_group_size (stmt_info);
677 }
678
679 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
680 }
681 /* Not a strided access. */
682 else
683 {
684 group_size = 1;
685 first_dr = STMT_VINFO_DATA_REF (stmt_info);
686 }
687
688 /* Is this an access in a group of stores, which provide strided access?
689 If so, add in the cost of the permutes. */
690 if (group_size > 1)
691 {
692 /* Uses a high and low interleave operation for each needed permute. */
693 inside_cost = ncopies * exact_log2(group_size) * group_size
694 * vect_get_stmt_cost (vector_stmt);
695
696 if (vect_print_dump_info (REPORT_COST))
697 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
698 group_size);
699
700 }
701
702 /* Costs of the stores. */
703 vect_get_store_cost (first_dr, ncopies, &inside_cost);
704
705 if (vect_print_dump_info (REPORT_COST))
706 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
707 "outside_cost = %d .", inside_cost, outside_cost);
708
709 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
710 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
711 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
712 }
713
714
715 /* Calculate cost of DR's memory access. */
716 void
717 vect_get_store_cost (struct data_reference *dr, int ncopies,
718 unsigned int *inside_cost)
719 {
720 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
721
722 switch (alignment_support_scheme)
723 {
724 case dr_aligned:
725 {
726 *inside_cost += ncopies * vect_get_stmt_cost (vector_store);
727
728 if (vect_print_dump_info (REPORT_COST))
729 fprintf (vect_dump, "vect_model_store_cost: aligned.");
730
731 break;
732 }
733
734 case dr_unaligned_supported:
735 {
736 gimple stmt = DR_STMT (dr);
737 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
738 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
739
740 /* Here, we assign an additional cost for the unaligned store. */
741 *inside_cost += ncopies
742 * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
743 vectype, DR_MISALIGNMENT (dr));
744
745 if (vect_print_dump_info (REPORT_COST))
746 fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
747 "hardware.");
748
749 break;
750 }
751
752 default:
753 gcc_unreachable ();
754 }
755 }
756
757
758 /* Function vect_model_load_cost
759
760 Models cost for loads. In the case of strided accesses, the last access
761 has the overhead of the strided access attributed to it. Since unaligned
762 accesses are supported for loads, we also account for the costs of the
763 access scheme chosen. */
764
765 void
766 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
767
768 {
769 int group_size;
770 gimple first_stmt;
771 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
772 unsigned int inside_cost = 0, outside_cost = 0;
773
774 /* The SLP costs were already calculated during SLP tree build. */
775 if (PURE_SLP_STMT (stmt_info))
776 return;
777
778 /* Strided accesses? */
779 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
780 if (first_stmt && !slp_node)
781 {
782 group_size = vect_cost_strided_group_size (stmt_info);
783 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
784 }
785 /* Not a strided access. */
786 else
787 {
788 group_size = 1;
789 first_dr = dr;
790 }
791
792 /* Is this an access in a group of loads providing strided access?
793 If so, add in the cost of the permutes. */
794 if (group_size > 1)
795 {
796 /* Uses an even and odd extract operations for each needed permute. */
797 inside_cost = ncopies * exact_log2(group_size) * group_size
798 * vect_get_stmt_cost (vector_stmt);
799
800 if (vect_print_dump_info (REPORT_COST))
801 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
802 group_size);
803 }
804
805 /* The loads themselves. */
806 vect_get_load_cost (first_dr, ncopies,
807 ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node),
808 &inside_cost, &outside_cost);
809
810 if (vect_print_dump_info (REPORT_COST))
811 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
812 "outside_cost = %d .", inside_cost, outside_cost);
813
814 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
815 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
816 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
817 }
818
819
820 /* Calculate cost of DR's memory access. */
821 void
822 vect_get_load_cost (struct data_reference *dr, int ncopies,
823 bool add_realign_cost, unsigned int *inside_cost,
824 unsigned int *outside_cost)
825 {
826 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
827
828 switch (alignment_support_scheme)
829 {
830 case dr_aligned:
831 {
832 *inside_cost += ncopies * vect_get_stmt_cost (vector_load);
833
834 if (vect_print_dump_info (REPORT_COST))
835 fprintf (vect_dump, "vect_model_load_cost: aligned.");
836
837 break;
838 }
839 case dr_unaligned_supported:
840 {
841 gimple stmt = DR_STMT (dr);
842 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
843 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
844
845 /* Here, we assign an additional cost for the unaligned load. */
846 *inside_cost += ncopies
847 * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
848 vectype, DR_MISALIGNMENT (dr));
849 if (vect_print_dump_info (REPORT_COST))
850 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
851 "hardware.");
852
853 break;
854 }
855 case dr_explicit_realign:
856 {
857 *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
858 + vect_get_stmt_cost (vector_stmt));
859
860 /* FIXME: If the misalignment remains fixed across the iterations of
861 the containing loop, the following cost should be added to the
862 outside costs. */
863 if (targetm.vectorize.builtin_mask_for_load)
864 *inside_cost += vect_get_stmt_cost (vector_stmt);
865
866 break;
867 }
868 case dr_explicit_realign_optimized:
869 {
870 if (vect_print_dump_info (REPORT_COST))
871 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
872 "pipelined.");
873
874 /* Unaligned software pipeline has a load of an address, an initial
875 load, and possibly a mask operation to "prime" the loop. However,
876 if this is an access in a group of loads, which provide strided
877 access, then the above cost should only be considered for one
878 access in the group. Inside the loop, there is a load op
879 and a realignment op. */
880
881 if (add_realign_cost)
882 {
883 *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
884 if (targetm.vectorize.builtin_mask_for_load)
885 *outside_cost += vect_get_stmt_cost (vector_stmt);
886 }
887
888 *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
889 + vect_get_stmt_cost (vector_stmt));
890 break;
891 }
892
893 default:
894 gcc_unreachable ();
895 }
896 }
897
898
899 /* Function vect_init_vector.
900
901 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
902 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
903 is not NULL. Otherwise, place the initialization at the loop preheader.
904 Return the DEF of INIT_STMT.
905 It will be used in the vectorization of STMT. */
906
907 tree
908 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
909 gimple_stmt_iterator *gsi)
910 {
911 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
912 tree new_var;
913 gimple init_stmt;
914 tree vec_oprnd;
915 edge pe;
916 tree new_temp;
917 basic_block new_bb;
918
919 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
920 add_referenced_var (new_var);
921 init_stmt = gimple_build_assign (new_var, vector_var);
922 new_temp = make_ssa_name (new_var, init_stmt);
923 gimple_assign_set_lhs (init_stmt, new_temp);
924
925 if (gsi)
926 vect_finish_stmt_generation (stmt, init_stmt, gsi);
927 else
928 {
929 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
930
931 if (loop_vinfo)
932 {
933 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
934
935 if (nested_in_vect_loop_p (loop, stmt))
936 loop = loop->inner;
937
938 pe = loop_preheader_edge (loop);
939 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
940 gcc_assert (!new_bb);
941 }
942 else
943 {
944 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
945 basic_block bb;
946 gimple_stmt_iterator gsi_bb_start;
947
948 gcc_assert (bb_vinfo);
949 bb = BB_VINFO_BB (bb_vinfo);
950 gsi_bb_start = gsi_after_labels (bb);
951 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
952 }
953 }
954
955 if (vect_print_dump_info (REPORT_DETAILS))
956 {
957 fprintf (vect_dump, "created new init_stmt: ");
958 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
959 }
960
961 vec_oprnd = gimple_assign_lhs (init_stmt);
962 return vec_oprnd;
963 }
964
965
966 /* Function vect_get_vec_def_for_operand.
967
968 OP is an operand in STMT. This function returns a (vector) def that will be
969 used in the vectorized stmt for STMT.
970
971 In the case that OP is an SSA_NAME which is defined in the loop, then
972 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
973
974 In case OP is an invariant or constant, a new stmt that creates a vector def
975 needs to be introduced. */
976
977 tree
978 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
979 {
980 tree vec_oprnd;
981 gimple vec_stmt;
982 gimple def_stmt;
983 stmt_vec_info def_stmt_info = NULL;
984 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
985 unsigned int nunits;
986 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
987 tree vec_inv;
988 tree vec_cst;
989 tree t = NULL_TREE;
990 tree def;
991 int i;
992 enum vect_def_type dt;
993 bool is_simple_use;
994 tree vector_type;
995
996 if (vect_print_dump_info (REPORT_DETAILS))
997 {
998 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
999 print_generic_expr (vect_dump, op, TDF_SLIM);
1000 }
1001
1002 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
1003 &dt);
1004 gcc_assert (is_simple_use);
1005 if (vect_print_dump_info (REPORT_DETAILS))
1006 {
1007 if (def)
1008 {
1009 fprintf (vect_dump, "def = ");
1010 print_generic_expr (vect_dump, def, TDF_SLIM);
1011 }
1012 if (def_stmt)
1013 {
1014 fprintf (vect_dump, " def_stmt = ");
1015 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1016 }
1017 }
1018
1019 switch (dt)
1020 {
1021 /* Case 1: operand is a constant. */
1022 case vect_constant_def:
1023 {
1024 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1025 gcc_assert (vector_type);
1026 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1027
1028 if (scalar_def)
1029 *scalar_def = op;
1030
1031 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1032 if (vect_print_dump_info (REPORT_DETAILS))
1033 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
1034
1035 vec_cst = build_vector_from_val (vector_type, op);
1036 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
1037 }
1038
1039 /* Case 2: operand is defined outside the loop - loop invariant. */
1040 case vect_external_def:
1041 {
1042 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1043 gcc_assert (vector_type);
1044 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1045
1046 if (scalar_def)
1047 *scalar_def = def;
1048
1049 /* Create 'vec_inv = {inv,inv,..,inv}' */
1050 if (vect_print_dump_info (REPORT_DETAILS))
1051 fprintf (vect_dump, "Create vector_inv.");
1052
1053 for (i = nunits - 1; i >= 0; --i)
1054 {
1055 t = tree_cons (NULL_TREE, def, t);
1056 }
1057
1058 /* FIXME: use build_constructor directly. */
1059 vec_inv = build_constructor_from_list (vector_type, t);
1060 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
1061 }
1062
1063 /* Case 3: operand is defined inside the loop. */
1064 case vect_internal_def:
1065 {
1066 if (scalar_def)
1067 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1068
1069 /* Get the def from the vectorized stmt. */
1070 def_stmt_info = vinfo_for_stmt (def_stmt);
1071 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1072 gcc_assert (vec_stmt);
1073 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1074 vec_oprnd = PHI_RESULT (vec_stmt);
1075 else if (is_gimple_call (vec_stmt))
1076 vec_oprnd = gimple_call_lhs (vec_stmt);
1077 else
1078 vec_oprnd = gimple_assign_lhs (vec_stmt);
1079 return vec_oprnd;
1080 }
1081
1082 /* Case 4: operand is defined by a loop header phi - reduction */
1083 case vect_reduction_def:
1084 case vect_double_reduction_def:
1085 case vect_nested_cycle:
1086 {
1087 struct loop *loop;
1088
1089 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1090 loop = (gimple_bb (def_stmt))->loop_father;
1091
1092 /* Get the def before the loop */
1093 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1094 return get_initial_def_for_reduction (stmt, op, scalar_def);
1095 }
1096
1097 /* Case 5: operand is defined by loop-header phi - induction. */
1098 case vect_induction_def:
1099 {
1100 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1101
1102 /* Get the def from the vectorized stmt. */
1103 def_stmt_info = vinfo_for_stmt (def_stmt);
1104 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1105 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1106 vec_oprnd = PHI_RESULT (vec_stmt);
1107 else
1108 vec_oprnd = gimple_get_lhs (vec_stmt);
1109 return vec_oprnd;
1110 }
1111
1112 default:
1113 gcc_unreachable ();
1114 }
1115 }
1116
1117
1118 /* Function vect_get_vec_def_for_stmt_copy
1119
1120 Return a vector-def for an operand. This function is used when the
1121 vectorized stmt to be created (by the caller to this function) is a "copy"
1122 created in case the vectorized result cannot fit in one vector, and several
1123 copies of the vector-stmt are required. In this case the vector-def is
1124 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1125 of the stmt that defines VEC_OPRND.
1126 DT is the type of the vector def VEC_OPRND.
1127
1128 Context:
1129 In case the vectorization factor (VF) is bigger than the number
1130 of elements that can fit in a vectype (nunits), we have to generate
1131 more than one vector stmt to vectorize the scalar stmt. This situation
1132 arises when there are multiple data-types operated upon in the loop; the
1133 smallest data-type determines the VF, and as a result, when vectorizing
1134 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1135 vector stmt (each computing a vector of 'nunits' results, and together
1136 computing 'VF' results in each iteration). This function is called when
1137 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1138 which VF=16 and nunits=4, so the number of copies required is 4):
1139
1140 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1141
1142 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1143 VS1.1: vx.1 = memref1 VS1.2
1144 VS1.2: vx.2 = memref2 VS1.3
1145 VS1.3: vx.3 = memref3
1146
1147 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1148 VSnew.1: vz1 = vx.1 + ... VSnew.2
1149 VSnew.2: vz2 = vx.2 + ... VSnew.3
1150 VSnew.3: vz3 = vx.3 + ...
1151
1152 The vectorization of S1 is explained in vectorizable_load.
1153 The vectorization of S2:
1154 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1155 the function 'vect_get_vec_def_for_operand' is called to
1156 get the relevant vector-def for each operand of S2. For operand x it
1157 returns the vector-def 'vx.0'.
1158
1159 To create the remaining copies of the vector-stmt (VSnew.j), this
1160 function is called to get the relevant vector-def for each operand. It is
1161 obtained from the respective VS1.j stmt, which is recorded in the
1162 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1163
1164 For example, to obtain the vector-def 'vx.1' in order to create the
1165 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1166 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1167 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1168 and return its def ('vx.1').
1169 Overall, to create the above sequence this function will be called 3 times:
1170 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1171 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1172 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1173
1174 tree
1175 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1176 {
1177 gimple vec_stmt_for_operand;
1178 stmt_vec_info def_stmt_info;
1179
1180 /* Do nothing; can reuse same def. */
1181 if (dt == vect_external_def || dt == vect_constant_def )
1182 return vec_oprnd;
1183
1184 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1185 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1186 gcc_assert (def_stmt_info);
1187 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1188 gcc_assert (vec_stmt_for_operand);
1189 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1190 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1191 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1192 else
1193 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1194 return vec_oprnd;
1195 }
1196
1197
1198 /* Get vectorized definitions for the operands to create a copy of an original
1199 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1200
1201 static void
1202 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1203 VEC(tree,heap) **vec_oprnds0,
1204 VEC(tree,heap) **vec_oprnds1)
1205 {
1206 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1207
1208 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1209 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1210
1211 if (vec_oprnds1 && *vec_oprnds1)
1212 {
1213 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1214 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1215 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1216 }
1217 }
1218
1219
1220 /* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
1221 NULL. */
1222
1223 static void
1224 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1225 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1226 slp_tree slp_node)
1227 {
1228 if (slp_node)
1229 vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
1230 else
1231 {
1232 tree vec_oprnd;
1233
1234 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1235 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1236 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1237
1238 if (op1)
1239 {
1240 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1241 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1242 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1243 }
1244 }
1245 }
1246
1247
1248 /* Function vect_finish_stmt_generation.
1249
1250 Insert a new stmt. */
1251
1252 void
1253 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1254 gimple_stmt_iterator *gsi)
1255 {
1256 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1257 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1258 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1259
1260 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1261
1262 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1263
1264 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1265 bb_vinfo));
1266
1267 if (vect_print_dump_info (REPORT_DETAILS))
1268 {
1269 fprintf (vect_dump, "add new stmt: ");
1270 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1271 }
1272
1273 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1274 }
1275
1276 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1277 a function declaration if the target has a vectorized version
1278 of the function, or NULL_TREE if the function cannot be vectorized. */
1279
1280 tree
1281 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1282 {
1283 tree fndecl = gimple_call_fndecl (call);
1284
1285 /* We only handle functions that do not read or clobber memory -- i.e.
1286 const or novops ones. */
1287 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1288 return NULL_TREE;
1289
1290 if (!fndecl
1291 || TREE_CODE (fndecl) != FUNCTION_DECL
1292 || !DECL_BUILT_IN (fndecl))
1293 return NULL_TREE;
1294
1295 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1296 vectype_in);
1297 }
1298
1299 /* Function vectorizable_call.
1300
1301 Check if STMT performs a function call that can be vectorized.
1302 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1303 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1304 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1305
1306 static bool
1307 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1308 {
1309 tree vec_dest;
1310 tree scalar_dest;
1311 tree op, type;
1312 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1313 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1314 tree vectype_out, vectype_in;
1315 int nunits_in;
1316 int nunits_out;
1317 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1318 tree fndecl, new_temp, def, rhs_type;
1319 gimple def_stmt;
1320 enum vect_def_type dt[3]
1321 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
1322 gimple new_stmt = NULL;
1323 int ncopies, j;
1324 VEC(tree, heap) *vargs = NULL;
1325 enum { NARROW, NONE, WIDEN } modifier;
1326 size_t i, nargs;
1327
1328 /* FORNOW: unsupported in basic block SLP. */
1329 gcc_assert (loop_vinfo);
1330
1331 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1332 return false;
1333
1334 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1335 return false;
1336
1337 /* FORNOW: SLP not supported. */
1338 if (STMT_SLP_TYPE (stmt_info))
1339 return false;
1340
1341 /* Is STMT a vectorizable call? */
1342 if (!is_gimple_call (stmt))
1343 return false;
1344
1345 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1346 return false;
1347
1348 if (stmt_can_throw_internal (stmt))
1349 return false;
1350
1351 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1352
1353 /* Process function arguments. */
1354 rhs_type = NULL_TREE;
1355 vectype_in = NULL_TREE;
1356 nargs = gimple_call_num_args (stmt);
1357
1358 /* Bail out if the function has more than three arguments, we do not have
1359 interesting builtin functions to vectorize with more than two arguments
1360 except for fma. No arguments is also not good. */
1361 if (nargs == 0 || nargs > 3)
1362 return false;
1363
1364 for (i = 0; i < nargs; i++)
1365 {
1366 tree opvectype;
1367
1368 op = gimple_call_arg (stmt, i);
1369
1370 /* We can only handle calls with arguments of the same type. */
1371 if (rhs_type
1372 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
1373 {
1374 if (vect_print_dump_info (REPORT_DETAILS))
1375 fprintf (vect_dump, "argument types differ.");
1376 return false;
1377 }
1378 if (!rhs_type)
1379 rhs_type = TREE_TYPE (op);
1380
1381 if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
1382 &def_stmt, &def, &dt[i], &opvectype))
1383 {
1384 if (vect_print_dump_info (REPORT_DETAILS))
1385 fprintf (vect_dump, "use not simple.");
1386 return false;
1387 }
1388
1389 if (!vectype_in)
1390 vectype_in = opvectype;
1391 else if (opvectype
1392 && opvectype != vectype_in)
1393 {
1394 if (vect_print_dump_info (REPORT_DETAILS))
1395 fprintf (vect_dump, "argument vector types differ.");
1396 return false;
1397 }
1398 }
1399 /* If all arguments are external or constant defs use a vector type with
1400 the same size as the output vector type. */
1401 if (!vectype_in)
1402 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1403 if (vec_stmt)
1404 gcc_assert (vectype_in);
1405 if (!vectype_in)
1406 {
1407 if (vect_print_dump_info (REPORT_DETAILS))
1408 {
1409 fprintf (vect_dump, "no vectype for scalar type ");
1410 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1411 }
1412
1413 return false;
1414 }
1415
1416 /* FORNOW */
1417 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1418 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1419 if (nunits_in == nunits_out / 2)
1420 modifier = NARROW;
1421 else if (nunits_out == nunits_in)
1422 modifier = NONE;
1423 else if (nunits_out == nunits_in / 2)
1424 modifier = WIDEN;
1425 else
1426 return false;
1427
1428 /* For now, we only vectorize functions if a target specific builtin
1429 is available. TODO -- in some cases, it might be profitable to
1430 insert the calls for pieces of the vector, in order to be able
1431 to vectorize other operations in the loop. */
1432 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1433 if (fndecl == NULL_TREE)
1434 {
1435 if (vect_print_dump_info (REPORT_DETAILS))
1436 fprintf (vect_dump, "function is not vectorizable.");
1437
1438 return false;
1439 }
1440
1441 gcc_assert (!gimple_vuse (stmt));
1442
1443 if (modifier == NARROW)
1444 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1445 else
1446 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1447
1448 /* Sanity check: make sure that at least one copy of the vectorized stmt
1449 needs to be generated. */
1450 gcc_assert (ncopies >= 1);
1451
1452 if (!vec_stmt) /* transformation not required. */
1453 {
1454 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1455 if (vect_print_dump_info (REPORT_DETAILS))
1456 fprintf (vect_dump, "=== vectorizable_call ===");
1457 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1458 return true;
1459 }
1460
1461 /** Transform. **/
1462
1463 if (vect_print_dump_info (REPORT_DETAILS))
1464 fprintf (vect_dump, "transform operation.");
1465
1466 /* Handle def. */
1467 scalar_dest = gimple_call_lhs (stmt);
1468 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1469
1470 prev_stmt_info = NULL;
1471 switch (modifier)
1472 {
1473 case NONE:
1474 for (j = 0; j < ncopies; ++j)
1475 {
1476 /* Build argument list for the vectorized call. */
1477 if (j == 0)
1478 vargs = VEC_alloc (tree, heap, nargs);
1479 else
1480 VEC_truncate (tree, vargs, 0);
1481
1482 for (i = 0; i < nargs; i++)
1483 {
1484 op = gimple_call_arg (stmt, i);
1485 if (j == 0)
1486 vec_oprnd0
1487 = vect_get_vec_def_for_operand (op, stmt, NULL);
1488 else
1489 {
1490 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1491 vec_oprnd0
1492 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1493 }
1494
1495 VEC_quick_push (tree, vargs, vec_oprnd0);
1496 }
1497
1498 new_stmt = gimple_build_call_vec (fndecl, vargs);
1499 new_temp = make_ssa_name (vec_dest, new_stmt);
1500 gimple_call_set_lhs (new_stmt, new_temp);
1501
1502 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1503 mark_symbols_for_renaming (new_stmt);
1504
1505 if (j == 0)
1506 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1507 else
1508 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1509
1510 prev_stmt_info = vinfo_for_stmt (new_stmt);
1511 }
1512
1513 break;
1514
1515 case NARROW:
1516 for (j = 0; j < ncopies; ++j)
1517 {
1518 /* Build argument list for the vectorized call. */
1519 if (j == 0)
1520 vargs = VEC_alloc (tree, heap, nargs * 2);
1521 else
1522 VEC_truncate (tree, vargs, 0);
1523
1524 for (i = 0; i < nargs; i++)
1525 {
1526 op = gimple_call_arg (stmt, i);
1527 if (j == 0)
1528 {
1529 vec_oprnd0
1530 = vect_get_vec_def_for_operand (op, stmt, NULL);
1531 vec_oprnd1
1532 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1533 }
1534 else
1535 {
1536 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1537 vec_oprnd0
1538 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1539 vec_oprnd1
1540 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1541 }
1542
1543 VEC_quick_push (tree, vargs, vec_oprnd0);
1544 VEC_quick_push (tree, vargs, vec_oprnd1);
1545 }
1546
1547 new_stmt = gimple_build_call_vec (fndecl, vargs);
1548 new_temp = make_ssa_name (vec_dest, new_stmt);
1549 gimple_call_set_lhs (new_stmt, new_temp);
1550
1551 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1552 mark_symbols_for_renaming (new_stmt);
1553
1554 if (j == 0)
1555 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1556 else
1557 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1558
1559 prev_stmt_info = vinfo_for_stmt (new_stmt);
1560 }
1561
1562 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1563
1564 break;
1565
1566 case WIDEN:
1567 /* No current target implements this case. */
1568 return false;
1569 }
1570
1571 VEC_free (tree, heap, vargs);
1572
1573 /* Update the exception handling table with the vector stmt if necessary. */
1574 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1575 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1576
1577 /* The call in STMT might prevent it from being removed in dce.
1578 We however cannot remove it here, due to the way the ssa name
1579 it defines is mapped to the new definition. So just replace
1580 rhs of the statement with something harmless. */
1581
1582 type = TREE_TYPE (scalar_dest);
1583 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1584 build_zero_cst (type));
1585 set_vinfo_for_stmt (new_stmt, stmt_info);
1586 set_vinfo_for_stmt (stmt, NULL);
1587 STMT_VINFO_STMT (stmt_info) = new_stmt;
1588 gsi_replace (gsi, new_stmt, false);
1589 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1590
1591 return true;
1592 }
1593
1594
1595 /* Function vect_gen_widened_results_half
1596
1597 Create a vector stmt whose code, type, number of arguments, and result
1598 variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
1599 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
1600 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1601 needs to be created (DECL is a function-decl of a target-builtin).
1602 STMT is the original scalar stmt that we are vectorizing. */
1603
1604 static gimple
1605 vect_gen_widened_results_half (enum tree_code code,
1606 tree decl,
1607 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1608 tree vec_dest, gimple_stmt_iterator *gsi,
1609 gimple stmt)
1610 {
1611 gimple new_stmt;
1612 tree new_temp;
1613
1614 /* Generate half of the widened result: */
1615 if (code == CALL_EXPR)
1616 {
1617 /* Target specific support */
1618 if (op_type == binary_op)
1619 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1620 else
1621 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1622 new_temp = make_ssa_name (vec_dest, new_stmt);
1623 gimple_call_set_lhs (new_stmt, new_temp);
1624 }
1625 else
1626 {
1627 /* Generic support */
1628 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1629 if (op_type != binary_op)
1630 vec_oprnd1 = NULL;
1631 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1632 vec_oprnd1);
1633 new_temp = make_ssa_name (vec_dest, new_stmt);
1634 gimple_assign_set_lhs (new_stmt, new_temp);
1635 }
1636 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1637
1638 return new_stmt;
1639 }
1640
1641
1642 /* Check if STMT performs a conversion operation, that can be vectorized.
1643 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1644 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1645 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1646
1647 static bool
1648 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1649 gimple *vec_stmt, slp_tree slp_node)
1650 {
1651 tree vec_dest;
1652 tree scalar_dest;
1653 tree op0;
1654 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1655 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1656 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1657 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1658 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1659 tree new_temp;
1660 tree def;
1661 gimple def_stmt;
1662 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1663 gimple new_stmt = NULL;
1664 stmt_vec_info prev_stmt_info;
1665 int nunits_in;
1666 int nunits_out;
1667 tree vectype_out, vectype_in;
1668 int ncopies, j;
1669 tree rhs_type;
1670 tree builtin_decl;
1671 enum { NARROW, NONE, WIDEN } modifier;
1672 int i;
1673 VEC(tree,heap) *vec_oprnds0 = NULL;
1674 tree vop0;
1675 VEC(tree,heap) *dummy = NULL;
1676 int dummy_int;
1677
1678 /* Is STMT a vectorizable conversion? */
1679
1680 /* FORNOW: unsupported in basic block SLP. */
1681 gcc_assert (loop_vinfo);
1682
1683 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1684 return false;
1685
1686 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1687 return false;
1688
1689 if (!is_gimple_assign (stmt))
1690 return false;
1691
1692 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1693 return false;
1694
1695 code = gimple_assign_rhs_code (stmt);
1696 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1697 return false;
1698
1699 /* Check types of lhs and rhs. */
1700 scalar_dest = gimple_assign_lhs (stmt);
1701 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1702
1703 op0 = gimple_assign_rhs1 (stmt);
1704 rhs_type = TREE_TYPE (op0);
1705 /* Check the operands of the operation. */
1706 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1707 &def_stmt, &def, &dt[0], &vectype_in))
1708 {
1709 if (vect_print_dump_info (REPORT_DETAILS))
1710 fprintf (vect_dump, "use not simple.");
1711 return false;
1712 }
1713 /* If op0 is an external or constant defs use a vector type of
1714 the same size as the output vector type. */
1715 if (!vectype_in)
1716 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1717 if (vec_stmt)
1718 gcc_assert (vectype_in);
1719 if (!vectype_in)
1720 {
1721 if (vect_print_dump_info (REPORT_DETAILS))
1722 {
1723 fprintf (vect_dump, "no vectype for scalar type ");
1724 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1725 }
1726
1727 return false;
1728 }
1729
1730 /* FORNOW */
1731 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1732 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1733 if (nunits_in == nunits_out / 2)
1734 modifier = NARROW;
1735 else if (nunits_out == nunits_in)
1736 modifier = NONE;
1737 else if (nunits_out == nunits_in / 2)
1738 modifier = WIDEN;
1739 else
1740 return false;
1741
1742 if (modifier == NARROW)
1743 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1744 else
1745 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1746
1747 /* Multiple types in SLP are handled by creating the appropriate number of
1748 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1749 case of SLP. */
1750 if (slp_node)
1751 ncopies = 1;
1752
1753 /* Sanity check: make sure that at least one copy of the vectorized stmt
1754 needs to be generated. */
1755 gcc_assert (ncopies >= 1);
1756
1757 /* Supportable by target? */
1758 if ((modifier == NONE
1759 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1760 || (modifier == WIDEN
1761 && !supportable_widening_operation (code, stmt,
1762 vectype_out, vectype_in,
1763 &decl1, &decl2,
1764 &code1, &code2,
1765 &dummy_int, &dummy))
1766 || (modifier == NARROW
1767 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1768 &code1, &dummy_int, &dummy)))
1769 {
1770 if (vect_print_dump_info (REPORT_DETAILS))
1771 fprintf (vect_dump, "conversion not supported by target.");
1772 return false;
1773 }
1774
1775 if (modifier != NONE)
1776 {
1777 /* FORNOW: SLP not supported. */
1778 if (STMT_SLP_TYPE (stmt_info))
1779 return false;
1780 }
1781
1782 if (!vec_stmt) /* transformation not required. */
1783 {
1784 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1785 return true;
1786 }
1787
1788 /** Transform. **/
1789 if (vect_print_dump_info (REPORT_DETAILS))
1790 fprintf (vect_dump, "transform conversion.");
1791
1792 /* Handle def. */
1793 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1794
1795 if (modifier == NONE && !slp_node)
1796 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1797
1798 prev_stmt_info = NULL;
1799 switch (modifier)
1800 {
1801 case NONE:
1802 for (j = 0; j < ncopies; j++)
1803 {
1804 if (j == 0)
1805 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1806 else
1807 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1808
1809 builtin_decl =
1810 targetm.vectorize.builtin_conversion (code,
1811 vectype_out, vectype_in);
1812 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
1813 {
1814 	      /* Arguments are ready.  Create the new vector stmt.  */
1815 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1816 new_temp = make_ssa_name (vec_dest, new_stmt);
1817 gimple_call_set_lhs (new_stmt, new_temp);
1818 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1819 if (slp_node)
1820 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1821 }
1822
1823 if (j == 0)
1824 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1825 else
1826 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1827 prev_stmt_info = vinfo_for_stmt (new_stmt);
1828 }
1829 break;
1830
1831 case WIDEN:
1832 /* In case the vectorization factor (VF) is bigger than the number
1833 of elements that we can fit in a vectype (nunits), we have to
1834 generate more than one vector stmt - i.e - we need to "unroll"
1835 the vector stmt by a factor VF/nunits. */
1836 for (j = 0; j < ncopies; j++)
1837 {
1838 if (j == 0)
1839 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1840 else
1841 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1842
1843 /* Generate first half of the widened result: */
1844 new_stmt
1845 = vect_gen_widened_results_half (code1, decl1,
1846 vec_oprnd0, vec_oprnd1,
1847 unary_op, vec_dest, gsi, stmt);
1848 if (j == 0)
1849 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1850 else
1851 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1852 prev_stmt_info = vinfo_for_stmt (new_stmt);
1853
1854 /* Generate second half of the widened result: */
1855 new_stmt
1856 = vect_gen_widened_results_half (code2, decl2,
1857 vec_oprnd0, vec_oprnd1,
1858 unary_op, vec_dest, gsi, stmt);
1859 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1860 prev_stmt_info = vinfo_for_stmt (new_stmt);
1861 }
1862 break;
1863
1864 case NARROW:
1865 /* In case the vectorization factor (VF) is bigger than the number
1866 of elements that we can fit in a vectype (nunits), we have to
1867 generate more than one vector stmt - i.e - we need to "unroll"
1868 the vector stmt by a factor VF/nunits. */
1869 for (j = 0; j < ncopies; j++)
1870 {
1871 /* Handle uses. */
1872 if (j == 0)
1873 {
1874 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1875 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1876 }
1877 else
1878 {
1879 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1880 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1881 }
1882
1883 /* Arguments are ready. Create the new vector stmt. */
1884 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1885 vec_oprnd1);
1886 new_temp = make_ssa_name (vec_dest, new_stmt);
1887 gimple_assign_set_lhs (new_stmt, new_temp);
1888 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1889
1890 if (j == 0)
1891 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1892 else
1893 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1894
1895 prev_stmt_info = vinfo_for_stmt (new_stmt);
1896 }
1897
1898 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1899 }
1900
1901 if (vec_oprnds0)
1902 VEC_free (tree, heap, vec_oprnds0);
1903
1904 return true;
1905 }
1906
1907
1908 /* Function vectorizable_assignment.
1909
1910 Check if STMT performs an assignment (copy) that can be vectorized.
1911 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1912 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1913 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1914
1915 static bool
1916 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1917 gimple *vec_stmt, slp_tree slp_node)
1918 {
1919 tree vec_dest;
1920 tree scalar_dest;
1921 tree op;
1922 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1923 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1924 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1925 tree new_temp;
1926 tree def;
1927 gimple def_stmt;
1928 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1929 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1930 int ncopies;
1931 int i, j;
1932 VEC(tree,heap) *vec_oprnds = NULL;
1933 tree vop;
1934 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1935 gimple new_stmt = NULL;
1936 stmt_vec_info prev_stmt_info = NULL;
1937 enum tree_code code;
1938 tree vectype_in;
1939
1940 /* Multiple types in SLP are handled by creating the appropriate number of
1941 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1942 case of SLP. */
1943 if (slp_node)
1944 ncopies = 1;
1945 else
1946 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1947
1948 gcc_assert (ncopies >= 1);
1949
1950 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1951 return false;
1952
1953 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1954 return false;
1955
1956 /* Is vectorizable assignment? */
1957 if (!is_gimple_assign (stmt))
1958 return false;
1959
1960 scalar_dest = gimple_assign_lhs (stmt);
1961 if (TREE_CODE (scalar_dest) != SSA_NAME)
1962 return false;
1963
1964 code = gimple_assign_rhs_code (stmt);
1965 if (gimple_assign_single_p (stmt)
1966 || code == PAREN_EXPR
1967 || CONVERT_EXPR_CODE_P (code))
1968 op = gimple_assign_rhs1 (stmt);
1969 else
1970 return false;
1971
1972 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
1973 &def_stmt, &def, &dt[0], &vectype_in))
1974 {
1975 if (vect_print_dump_info (REPORT_DETAILS))
1976 fprintf (vect_dump, "use not simple.");
1977 return false;
1978 }
1979
1980 /* We can handle NOP_EXPR conversions that do not change the number
1981 of elements or the vector size. */
1982 if (CONVERT_EXPR_CODE_P (code)
1983 && (!vectype_in
1984 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
1985 || (GET_MODE_SIZE (TYPE_MODE (vectype))
1986 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
1987 return false;
1988
1989 if (!vec_stmt) /* transformation not required. */
1990 {
1991 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1992 if (vect_print_dump_info (REPORT_DETAILS))
1993 fprintf (vect_dump, "=== vectorizable_assignment ===");
1994 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1995 return true;
1996 }
1997
1998 /** Transform. **/
1999 if (vect_print_dump_info (REPORT_DETAILS))
2000 fprintf (vect_dump, "transform assignment.");
2001
2002 /* Handle def. */
2003 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2004
2005 /* Handle use. */
2006 for (j = 0; j < ncopies; j++)
2007 {
2008 /* Handle uses. */
2009 if (j == 0)
2010 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2011 else
2012 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2013
2014       /* Arguments are ready.  Create the new vector stmt.  */
2015 FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
2016 {
2017 if (CONVERT_EXPR_CODE_P (code))
2018 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2019 new_stmt = gimple_build_assign (vec_dest, vop);
2020 new_temp = make_ssa_name (vec_dest, new_stmt);
2021 gimple_assign_set_lhs (new_stmt, new_temp);
2022 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2023 if (slp_node)
2024 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2025 }
2026
2027 if (slp_node)
2028 continue;
2029
2030 if (j == 0)
2031 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2032 else
2033 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2034
2035 prev_stmt_info = vinfo_for_stmt (new_stmt);
2036 }
2037
2038 VEC_free (tree, heap, vec_oprnds);
2039 return true;
2040 }
2041
2042
2043 /* Function vectorizable_shift.
2044
2045 Check if STMT performs a shift operation that can be vectorized.
2046 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2047 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2048 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2049
2050 static bool
2051 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
2052 gimple *vec_stmt, slp_tree slp_node)
2053 {
2054 tree vec_dest;
2055 tree scalar_dest;
2056 tree op0, op1 = NULL;
2057 tree vec_oprnd1 = NULL_TREE;
2058 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2059 tree vectype;
2060 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2061 enum tree_code code;
2062 enum machine_mode vec_mode;
2063 tree new_temp;
2064 optab optab;
2065 int icode;
2066 enum machine_mode optab_op2_mode;
2067 tree def;
2068 gimple def_stmt;
2069 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2070 gimple new_stmt = NULL;
2071 stmt_vec_info prev_stmt_info;
2072 int nunits_in;
2073 int nunits_out;
2074 tree vectype_out;
2075 int ncopies;
2076 int j, i;
2077 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2078 tree vop0, vop1;
2079 unsigned int k;
2080 bool scalar_shift_arg = false;
2081 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2082 int vf;
2083
2084 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2085 return false;
2086
2087 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2088 return false;
2089
2090 /* Is STMT a vectorizable binary/unary operation? */
2091 if (!is_gimple_assign (stmt))
2092 return false;
2093
2094 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2095 return false;
2096
2097 code = gimple_assign_rhs_code (stmt);
2098
2099 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2100 || code == RROTATE_EXPR))
2101 return false;
2102
2103 scalar_dest = gimple_assign_lhs (stmt);
2104 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2105
2106 op0 = gimple_assign_rhs1 (stmt);
2107 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2108 &def_stmt, &def, &dt[0], &vectype))
2109 {
2110 if (vect_print_dump_info (REPORT_DETAILS))
2111 fprintf (vect_dump, "use not simple.");
2112 return false;
2113 }
2114 /* If op0 is an external or constant def use a vector type with
2115 the same size as the output vector type. */
2116 if (!vectype)
2117 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2118 if (vec_stmt)
2119 gcc_assert (vectype);
2120 if (!vectype)
2121 {
2122 if (vect_print_dump_info (REPORT_DETAILS))
2123 {
2124 fprintf (vect_dump, "no vectype for scalar type ");
2125 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2126 }
2127
2128 return false;
2129 }
2130
2131 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2132 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2133 if (nunits_out != nunits_in)
2134 return false;
2135
2136 op1 = gimple_assign_rhs2 (stmt);
2137 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
2138 {
2139 if (vect_print_dump_info (REPORT_DETAILS))
2140 fprintf (vect_dump, "use not simple.");
2141 return false;
2142 }
2143
2144 if (loop_vinfo)
2145 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2146 else
2147 vf = 1;
2148
2149 /* Multiple types in SLP are handled by creating the appropriate number of
2150 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2151 case of SLP. */
2152 if (slp_node)
2153 ncopies = 1;
2154 else
2155 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2156
2157 gcc_assert (ncopies >= 1);
2158
2159   /* Determine whether the shift amount is a vector or a scalar.  If the
2160      shift/rotate amount is a vector, use the vector/vector shift optabs.  */
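  /* Illustrative note (not part of the original source): in
     a[i] = b[i] << 2 the shift amount is a constant (vect_constant_def),
     so the vector/scalar optab is tried first below; in
     a[i] = b[i] << c[i] the amount is defined inside the loop
     (vect_internal_def) and the vector/vector optab must be used.  */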
2161
2162 /* Vector shifted by vector. */
2163 if (dt[1] == vect_internal_def)
2164 {
2165 optab = optab_for_tree_code (code, vectype, optab_vector);
2166 if (vect_print_dump_info (REPORT_DETAILS))
2167 fprintf (vect_dump, "vector/vector shift/rotate found.");
2168 }
2169   /* See if the machine has a vector-shifted-by-scalar insn, and if not,
2170      then see if it has a vector-shifted-by-vector insn.  */
2171 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2172 {
2173 optab = optab_for_tree_code (code, vectype, optab_scalar);
2174 if (optab
2175 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2176 {
2177 scalar_shift_arg = true;
2178 if (vect_print_dump_info (REPORT_DETAILS))
2179 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2180 }
2181 else
2182 {
2183 optab = optab_for_tree_code (code, vectype, optab_vector);
2184 if (optab
2185 && (optab_handler (optab, TYPE_MODE (vectype))
2186 != CODE_FOR_nothing))
2187 {
2188 if (vect_print_dump_info (REPORT_DETAILS))
2189 fprintf (vect_dump, "vector/vector shift/rotate found.");
2190
2191 	  /* Unlike the other binary operators, shifts/rotates have
2192 	     an rhs of type int rather than the same type as the lhs,
2193 	     so make sure the scalar is the right type if we are
2194 	     dealing with vectors of short/char.  */
2195 if (dt[1] == vect_constant_def)
2196 op1 = fold_convert (TREE_TYPE (vectype), op1);
2197 }
2198 }
2199 }
2200 else
2201 {
2202 if (vect_print_dump_info (REPORT_DETAILS))
2203 fprintf (vect_dump, "operand mode requires invariant argument.");
2204 return false;
2205 }
2206
2207 /* Supportable by target? */
2208 if (!optab)
2209 {
2210 if (vect_print_dump_info (REPORT_DETAILS))
2211 fprintf (vect_dump, "no optab.");
2212 return false;
2213 }
2214 vec_mode = TYPE_MODE (vectype);
2215 icode = (int) optab_handler (optab, vec_mode);
2216 if (icode == CODE_FOR_nothing)
2217 {
2218 if (vect_print_dump_info (REPORT_DETAILS))
2219 fprintf (vect_dump, "op not supported by target.");
2220 /* Check only during analysis. */
2221 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2222 || (vf < vect_min_worthwhile_factor (code)
2223 && !vec_stmt))
2224 return false;
2225 if (vect_print_dump_info (REPORT_DETAILS))
2226 fprintf (vect_dump, "proceeding using word mode.");
2227 }
2228
2229 /* Worthwhile without SIMD support? Check only during analysis. */
2230 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2231 && vf < vect_min_worthwhile_factor (code)
2232 && !vec_stmt)
2233 {
2234 if (vect_print_dump_info (REPORT_DETAILS))
2235 fprintf (vect_dump, "not worthwhile without SIMD support.");
2236 return false;
2237 }
2238
2239 if (!vec_stmt) /* transformation not required. */
2240 {
2241 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
2242 if (vect_print_dump_info (REPORT_DETAILS))
2243 fprintf (vect_dump, "=== vectorizable_shift ===");
2244 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2245 return true;
2246 }
2247
2248 /** Transform. **/
2249
2250 if (vect_print_dump_info (REPORT_DETAILS))
2251 fprintf (vect_dump, "transform binary/unary operation.");
2252
2253 /* Handle def. */
2254 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2255
2256 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2257 created in the previous stages of the recursion, so no allocation is
2258 needed, except for the case of shift with scalar shift argument. In that
2259 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2260 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2261 In case of loop-based vectorization we allocate VECs of size 1. We
2262 allocate VEC_OPRNDS1 only in case of binary operation. */
2263 if (!slp_node)
2264 {
2265 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2266 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2267 }
2268 else if (scalar_shift_arg)
2269 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2270
2271 prev_stmt_info = NULL;
2272 for (j = 0; j < ncopies; j++)
2273 {
2274 /* Handle uses. */
2275 if (j == 0)
2276 {
2277 if (scalar_shift_arg)
2278 {
2279 /* Vector shl and shr insn patterns can be defined with scalar
2280 operand 2 (shift operand). In this case, use constant or loop
2281 invariant op1 directly, without extending it to vector mode
2282 first. */
2283 optab_op2_mode = insn_data[icode].operand[2].mode;
2284 if (!VECTOR_MODE_P (optab_op2_mode))
2285 {
2286 if (vect_print_dump_info (REPORT_DETAILS))
2287 fprintf (vect_dump, "operand 1 using scalar mode.");
2288 vec_oprnd1 = op1;
2289 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2290 if (slp_node)
2291 {
2292 /* Store vec_oprnd1 for every vector stmt to be created
2293 for SLP_NODE. We check during the analysis that all
2294 the shift arguments are the same.
2295 TODO: Allow different constants for different vector
2296 stmts generated for an SLP instance. */
2297 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2298 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2299 }
2300 }
2301 }
2302
2303           /* vec_oprnd1 is available if operand 1 should be of a scalar type
2304              (a special case for certain kinds of vector shifts); otherwise,
2305              operand 1 should be of a vector type (the usual case).  */
2306 if (vec_oprnd1)
2307 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2308 slp_node);
2309 else
2310 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2311 slp_node);
2312 }
2313 else
2314 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2315
2316 /* Arguments are ready. Create the new vector stmt. */
2317 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2318 {
2319 vop1 = VEC_index (tree, vec_oprnds1, i);
2320 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2321 new_temp = make_ssa_name (vec_dest, new_stmt);
2322 gimple_assign_set_lhs (new_stmt, new_temp);
2323 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2324 if (slp_node)
2325 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2326 }
2327
2328 if (slp_node)
2329 continue;
2330
2331 if (j == 0)
2332 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2333 else
2334 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2335 prev_stmt_info = vinfo_for_stmt (new_stmt);
2336 }
2337
2338 VEC_free (tree, heap, vec_oprnds0);
2339 VEC_free (tree, heap, vec_oprnds1);
2340
2341 return true;
2342 }
2343
2344
2345 /* Function vectorizable_operation.
2346
2347 Check if STMT performs a binary, unary or ternary operation that can
2348 be vectorized.
2349 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2350 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2351 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2352
2353 static bool
2354 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2355 gimple *vec_stmt, slp_tree slp_node)
2356 {
2357 tree vec_dest;
2358 tree scalar_dest;
2359 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
2360 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2361 tree vectype;
2362 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2363 enum tree_code code;
2364 enum machine_mode vec_mode;
2365 tree new_temp;
2366 int op_type;
2367 optab optab;
2368 int icode;
2369 tree def;
2370 gimple def_stmt;
2371 enum vect_def_type dt[3]
2372 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2373 gimple new_stmt = NULL;
2374 stmt_vec_info prev_stmt_info;
2375 int nunits_in;
2376 int nunits_out;
2377 tree vectype_out;
2378 int ncopies;
2379 int j, i;
2380 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vec_oprnds2 = NULL;
2381 tree vop0, vop1, vop2;
2382 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2383 int vf;
2384
2385 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2386 return false;
2387
2388 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2389 return false;
2390
2391 /* Is STMT a vectorizable binary/unary operation? */
2392 if (!is_gimple_assign (stmt))
2393 return false;
2394
2395 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2396 return false;
2397
2398 code = gimple_assign_rhs_code (stmt);
2399
2400 /* For pointer addition, we should use the normal plus for
2401 the vector addition. */
2402 if (code == POINTER_PLUS_EXPR)
2403 code = PLUS_EXPR;
2404
2405 /* Support only unary or binary operations. */
2406 op_type = TREE_CODE_LENGTH (code);
2407 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
2408 {
2409 if (vect_print_dump_info (REPORT_DETAILS))
2410 fprintf (vect_dump, "num. args = %d (not unary/binary/ternary op).",
2411 op_type);
2412 return false;
2413 }
2414
2415 scalar_dest = gimple_assign_lhs (stmt);
2416 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2417
2418 op0 = gimple_assign_rhs1 (stmt);
2419 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2420 &def_stmt, &def, &dt[0], &vectype))
2421 {
2422 if (vect_print_dump_info (REPORT_DETAILS))
2423 fprintf (vect_dump, "use not simple.");
2424 return false;
2425 }
2426 /* If op0 is an external or constant def use a vector type with
2427 the same size as the output vector type. */
2428 if (!vectype)
2429 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2430 if (vec_stmt)
2431 gcc_assert (vectype);
2432 if (!vectype)
2433 {
2434 if (vect_print_dump_info (REPORT_DETAILS))
2435 {
2436 fprintf (vect_dump, "no vectype for scalar type ");
2437 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2438 }
2439
2440 return false;
2441 }
2442
2443 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2444 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2445 if (nunits_out != nunits_in)
2446 return false;
2447
2448 if (op_type == binary_op || op_type == ternary_op)
2449 {
2450 op1 = gimple_assign_rhs2 (stmt);
2451 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2452 &dt[1]))
2453 {
2454 if (vect_print_dump_info (REPORT_DETAILS))
2455 fprintf (vect_dump, "use not simple.");
2456 return false;
2457 }
2458 }
2459 if (op_type == ternary_op)
2460 {
2461 op2 = gimple_assign_rhs3 (stmt);
2462 if (!vect_is_simple_use (op2, loop_vinfo, bb_vinfo, &def_stmt, &def,
2463 &dt[2]))
2464 {
2465 if (vect_print_dump_info (REPORT_DETAILS))
2466 fprintf (vect_dump, "use not simple.");
2467 return false;
2468 }
2469 }
2470
2471 if (loop_vinfo)
2472 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2473 else
2474 vf = 1;
2475
2476 /* Multiple types in SLP are handled by creating the appropriate number of
2477 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2478 case of SLP. */
2479 if (slp_node)
2480 ncopies = 1;
2481 else
2482 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2483
2484 gcc_assert (ncopies >= 1);
2485
2486 /* Shifts are handled in vectorizable_shift (). */
2487 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2488 || code == RROTATE_EXPR)
2489 return false;
2490
2491 optab = optab_for_tree_code (code, vectype, optab_default);
2492
2493 /* Supportable by target? */
2494 if (!optab)
2495 {
2496 if (vect_print_dump_info (REPORT_DETAILS))
2497 fprintf (vect_dump, "no optab.");
2498 return false;
2499 }
2500 vec_mode = TYPE_MODE (vectype);
2501 icode = (int) optab_handler (optab, vec_mode);
2502 if (icode == CODE_FOR_nothing)
2503 {
2504 if (vect_print_dump_info (REPORT_DETAILS))
2505 fprintf (vect_dump, "op not supported by target.");
2506 /* Check only during analysis. */
2507 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2508 || (vf < vect_min_worthwhile_factor (code)
2509 && !vec_stmt))
2510 return false;
2511 if (vect_print_dump_info (REPORT_DETAILS))
2512 fprintf (vect_dump, "proceeding using word mode.");
2513 }
2514
2515 /* Worthwhile without SIMD support? Check only during analysis. */
2516 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2517 && vf < vect_min_worthwhile_factor (code)
2518 && !vec_stmt)
2519 {
2520 if (vect_print_dump_info (REPORT_DETAILS))
2521 fprintf (vect_dump, "not worthwhile without SIMD support.");
2522 return false;
2523 }
2524
2525 if (!vec_stmt) /* transformation not required. */
2526 {
2527 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2528 if (vect_print_dump_info (REPORT_DETAILS))
2529 fprintf (vect_dump, "=== vectorizable_operation ===");
2530 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2531 return true;
2532 }
2533
2534 /** Transform. **/
2535
2536 if (vect_print_dump_info (REPORT_DETAILS))
2537 fprintf (vect_dump, "transform binary/unary operation.");
2538
2539 /* Handle def. */
2540 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2541
2542   /* Allocate VECs for vector operands.  In case of SLP, vector operands are
2543      created in the previous stages of the recursion, so no allocation is
2544      needed (shifts, which need the special scalar-shift-argument handling,
2545      are rejected above and handled in vectorizable_shift instead).
2546      In case of loop-based vectorization we allocate VECs of size 1.
2547      VEC_OPRNDS1 is allocated only for binary and ternary operations, and
2548      VEC_OPRNDS2 only for ternary operations.  */
2549 if (!slp_node)
2550 {
2551 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2552 if (op_type == binary_op || op_type == ternary_op)
2553 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2554 if (op_type == ternary_op)
2555 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2556 }
2557
2558 /* In case the vectorization factor (VF) is bigger than the number
2559 of elements that we can fit in a vectype (nunits), we have to generate
2560 more than one vector stmt - i.e - we need to "unroll" the
2561 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2562 from one copy of the vector stmt to the next, in the field
2563 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2564 stages to find the correct vector defs to be used when vectorizing
2565 stmts that use the defs of the current stmt. The example below
2566 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
2567 we need to create 4 vectorized stmts):
2568
2569 before vectorization:
2570 RELATED_STMT VEC_STMT
2571 S1: x = memref - -
2572 S2: z = x + 1 - -
2573
2574 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2575 there):
2576 RELATED_STMT VEC_STMT
2577 VS1_0: vx0 = memref0 VS1_1 -
2578 VS1_1: vx1 = memref1 VS1_2 -
2579 VS1_2: vx2 = memref2 VS1_3 -
2580 VS1_3: vx3 = memref3 - -
2581 S1: x = load - VS1_0
2582 S2: z = x + 1 - -
2583
2584 step2: vectorize stmt S2 (done here):
2585 To vectorize stmt S2 we first need to find the relevant vector
2586 def for the first operand 'x'. This is, as usual, obtained from
2587 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2588 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2589 relevant vector def 'vx0'. Having found 'vx0' we can generate
2590 the vector stmt VS2_0, and as usual, record it in the
2591 STMT_VINFO_VEC_STMT of stmt S2.
2592 When creating the second copy (VS2_1), we obtain the relevant vector
2593 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2594 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2595 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2596 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2597 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2598 chain of stmts and pointers:
2599 RELATED_STMT VEC_STMT
2600 VS1_0: vx0 = memref0 VS1_1 -
2601 VS1_1: vx1 = memref1 VS1_2 -
2602 VS1_2: vx2 = memref2 VS1_3 -
2603 VS1_3: vx3 = memref3 - -
2604 S1: x = load - VS1_0
2605 VS2_0: vz0 = vx0 + v1 VS2_1 -
2606 VS2_1: vz1 = vx1 + v1 VS2_2 -
2607 VS2_2: vz2 = vx2 + v1 VS2_3 -
2608 VS2_3: vz3 = vx3 + v1 - -
2609 S2: z = x + 1 - VS2_0 */
2610
2611 prev_stmt_info = NULL;
2612 for (j = 0; j < ncopies; j++)
2613 {
2614 /* Handle uses. */
2615 if (j == 0)
2616 {
2617 if (op_type == binary_op || op_type == ternary_op)
2618 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2619 slp_node);
2620 else
2621 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2622 slp_node);
2623 if (op_type == ternary_op)
2624 {
2625 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2626 VEC_quick_push (tree, vec_oprnds2,
2627 vect_get_vec_def_for_operand (op2, stmt, NULL));
2628 }
2629 }
2630 else
2631 {
2632 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2633 if (op_type == ternary_op)
2634 {
2635 tree vec_oprnd = VEC_pop (tree, vec_oprnds2);
2636 VEC_quick_push (tree, vec_oprnds2,
2637 vect_get_vec_def_for_stmt_copy (dt[2],
2638 vec_oprnd));
2639 }
2640 }
2641
2642 /* Arguments are ready. Create the new vector stmt. */
2643 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2644 {
2645 vop1 = ((op_type == binary_op || op_type == ternary_op)
2646 ? VEC_index (tree, vec_oprnds1, i) : NULL_TREE);
2647 vop2 = ((op_type == ternary_op)
2648 ? VEC_index (tree, vec_oprnds2, i) : NULL_TREE);
2649 new_stmt = gimple_build_assign_with_ops3 (code, vec_dest,
2650 vop0, vop1, vop2);
2651 new_temp = make_ssa_name (vec_dest, new_stmt);
2652 gimple_assign_set_lhs (new_stmt, new_temp);
2653 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2654 if (slp_node)
2655 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2656 }
2657
2658 if (slp_node)
2659 continue;
2660
2661 if (j == 0)
2662 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2663 else
2664 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2665 prev_stmt_info = vinfo_for_stmt (new_stmt);
2666 }
2667
2668 VEC_free (tree, heap, vec_oprnds0);
2669 if (vec_oprnds1)
2670 VEC_free (tree, heap, vec_oprnds1);
2671 if (vec_oprnds2)
2672 VEC_free (tree, heap, vec_oprnds2);
2673
2674 return true;
2675 }
2676
2677
2678 /* Get vectorized definitions for loop-based vectorization. For the first
2679 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2680 scalar operand), and for the rest we get a copy with
2681 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2682 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2683 The vectors are collected into VEC_OPRNDS. */
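/* Illustrative note (not part of the original source): each invocation
   pushes two vector defs and then recurses MULTI_STEP_CVT more times, so
   a call with MULTI_STEP_CVT == 1 collects four vector operands in
   VEC_OPRNDS, enough for a two-step narrowing.  */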
2684
2685 static void
2686 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2687 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2688 {
2689 tree vec_oprnd;
2690
2691 /* Get first vector operand. */
2692   /* All the vector operands except the very first one (which is the scalar
2693      oprnd) are stmt copies.  */
2694 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2695 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2696 else
2697 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2698
2699 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2700
2701 /* Get second vector operand. */
2702 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2703 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2704
2705 *oprnd = vec_oprnd;
2706
2707 /* For conversion in multiple steps, continue to get operands
2708 recursively. */
2709 if (multi_step_cvt)
2710 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2711 }
2712
2713
2714 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2715 For multi-step conversions store the resulting vectors and call the function
2716 recursively. */
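/* Illustrative note (not part of the original source): when demoting,
   e.g., int to char through short, the first level packs pairs of int
   vectors into short vectors, and the recursive call then packs pairs of
   those short vectors into char vectors - hence the operands below are
   consumed two at a time.  */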
2717
2718 static void
2719 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2720 int multi_step_cvt, gimple stmt,
2721 VEC (tree, heap) *vec_dsts,
2722 gimple_stmt_iterator *gsi,
2723 slp_tree slp_node, enum tree_code code,
2724 stmt_vec_info *prev_stmt_info)
2725 {
2726 unsigned int i;
2727 tree vop0, vop1, new_tmp, vec_dest;
2728 gimple new_stmt;
2729 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2730
2731 vec_dest = VEC_pop (tree, vec_dsts);
2732
2733 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2734 {
2735 /* Create demotion operation. */
2736 vop0 = VEC_index (tree, *vec_oprnds, i);
2737 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2738 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2739 new_tmp = make_ssa_name (vec_dest, new_stmt);
2740 gimple_assign_set_lhs (new_stmt, new_tmp);
2741 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2742
2743 if (multi_step_cvt)
2744 /* Store the resulting vector for next recursive call. */
2745 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2746 else
2747 {
2748 /* This is the last step of the conversion sequence. Store the
2749              vectors in SLP_NODE or in the vector info of the scalar statement
2750              (or in the STMT_VINFO_RELATED_STMT chain).  */
2751 if (slp_node)
2752 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2753 else
2754 {
2755 if (!*prev_stmt_info)
2756 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2757 else
2758 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2759
2760 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2761 }
2762 }
2763 }
2764
2765 /* For multi-step demotion operations we first generate demotion operations
2766 from the source type to the intermediate types, and then combine the
2767      results (stored in VEC_OPRNDS) in a demotion operation to the destination
2768      type.  */
2769 if (multi_step_cvt)
2770 {
2771       /* At each level of recursion we have half of the operands we had at the
2772          previous level.  */
2773 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2774 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2775 stmt, vec_dsts, gsi, slp_node,
2776 code, prev_stmt_info);
2777 }
2778 }
2779
2780
2781 /* Function vectorizable_type_demotion
2782
2783 Check if STMT performs a binary or unary operation that involves
2784 type demotion, and if it can be vectorized.
2785 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2786 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2787 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2788
2789 static bool
2790 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2791 gimple *vec_stmt, slp_tree slp_node)
2792 {
2793 tree vec_dest;
2794 tree scalar_dest;
2795 tree op0;
2796 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2797 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2798 enum tree_code code, code1 = ERROR_MARK;
2799 tree def;
2800 gimple def_stmt;
2801 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2802 stmt_vec_info prev_stmt_info;
2803 int nunits_in;
2804 int nunits_out;
2805 tree vectype_out;
2806 int ncopies;
2807 int j, i;
2808 tree vectype_in;
2809 int multi_step_cvt = 0;
2810 VEC (tree, heap) *vec_oprnds0 = NULL;
2811 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2812 tree last_oprnd, intermediate_type;
2813
2814 /* FORNOW: not supported by basic block SLP vectorization. */
2815 gcc_assert (loop_vinfo);
2816
2817 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2818 return false;
2819
2820 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2821 return false;
2822
2823 /* Is STMT a vectorizable type-demotion operation? */
2824 if (!is_gimple_assign (stmt))
2825 return false;
2826
2827 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2828 return false;
2829
2830 code = gimple_assign_rhs_code (stmt);
2831 if (!CONVERT_EXPR_CODE_P (code))
2832 return false;
2833
2834 scalar_dest = gimple_assign_lhs (stmt);
2835 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2836
2837 /* Check the operands of the operation. */
2838 op0 = gimple_assign_rhs1 (stmt);
2839 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2840 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2841 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2842 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2843 && CONVERT_EXPR_CODE_P (code))))
2844 return false;
2845 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2846 &def_stmt, &def, &dt[0], &vectype_in))
2847 {
2848 if (vect_print_dump_info (REPORT_DETAILS))
2849 fprintf (vect_dump, "use not simple.");
2850 return false;
2851 }
2852 /* If op0 is an external def use a vector type with the
2853 same size as the output vector type if possible. */
2854 if (!vectype_in)
2855 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2856 if (vec_stmt)
2857 gcc_assert (vectype_in);
2858 if (!vectype_in)
2859 {
2860 if (vect_print_dump_info (REPORT_DETAILS))
2861 {
2862 fprintf (vect_dump, "no vectype for scalar type ");
2863 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2864 }
2865
2866 return false;
2867 }
2868
2869 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2870 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2871 if (nunits_in >= nunits_out)
2872 return false;
2873
2874 /* Multiple types in SLP are handled by creating the appropriate number of
2875 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2876 case of SLP. */
2877 if (slp_node)
2878 ncopies = 1;
2879 else
2880 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2881 gcc_assert (ncopies >= 1);
2882
2883 /* Supportable by target? */
2884 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2885 &code1, &multi_step_cvt, &interm_types))
2886 return false;
2887
2888 if (!vec_stmt) /* transformation not required. */
2889 {
2890 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2891 if (vect_print_dump_info (REPORT_DETAILS))
2892 fprintf (vect_dump, "=== vectorizable_demotion ===");
2893 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2894 return true;
2895 }
2896
2897 /** Transform. **/
2898 if (vect_print_dump_info (REPORT_DETAILS))
2899 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2900 ncopies);
2901
2902 /* In case of multi-step demotion, we first generate demotion operations to
2903      the intermediate types, and then from those types to the final one.
2904      We create vector destinations for the intermediate types (TYPES) received
2905 from supportable_narrowing_operation, and store them in the correct order
2906 for future use in vect_create_vectorized_demotion_stmts(). */
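  /* Illustrative note (not part of the original source): for an int -> char
     demotion through short, VEC_DSTS holds the char-vector destination
     first and the short-vector destination last, so the first VEC_pop in
     vect_create_vectorized_demotion_stmts() yields the destination of the
     first (int -> short) packing step.  */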
2907 if (multi_step_cvt)
2908 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2909 else
2910 vec_dsts = VEC_alloc (tree, heap, 1);
2911
2912 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2913 VEC_quick_push (tree, vec_dsts, vec_dest);
2914
2915 if (multi_step_cvt)
2916 {
2917 for (i = VEC_length (tree, interm_types) - 1;
2918 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2919 {
2920 vec_dest = vect_create_destination_var (scalar_dest,
2921 intermediate_type);
2922 VEC_quick_push (tree, vec_dsts, vec_dest);
2923 }
2924 }
2925
2926 /* In case the vectorization factor (VF) is bigger than the number
2927 of elements that we can fit in a vectype (nunits), we have to generate
2928 more than one vector stmt - i.e - we need to "unroll" the
2929 vector stmt by a factor VF/nunits. */
2930 last_oprnd = op0;
2931 prev_stmt_info = NULL;
2932 for (j = 0; j < ncopies; j++)
2933 {
2934 /* Handle uses. */
2935 if (slp_node)
2936 vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
2937 else
2938 {
2939 VEC_free (tree, heap, vec_oprnds0);
2940 vec_oprnds0 = VEC_alloc (tree, heap,
2941 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2942 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2943 vect_pow2 (multi_step_cvt) - 1);
2944 }
2945
2946 /* Arguments are ready. Create the new vector stmts. */
2947 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2948 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2949 multi_step_cvt, stmt, tmp_vec_dsts,
2950 gsi, slp_node, code1,
2951 &prev_stmt_info);
2952 }
2953
2954 VEC_free (tree, heap, vec_oprnds0);
2955 VEC_free (tree, heap, vec_dsts);
2956 VEC_free (tree, heap, tmp_vec_dsts);
2957 VEC_free (tree, heap, interm_types);
2958
2959 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2960 return true;
2961 }
2962
2963
2964 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2965 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2966 the resulting vectors and call the function recursively. */
2967
2968 static void
2969 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2970 VEC (tree, heap) **vec_oprnds1,
2971 int multi_step_cvt, gimple stmt,
2972 VEC (tree, heap) *vec_dsts,
2973 gimple_stmt_iterator *gsi,
2974 slp_tree slp_node, enum tree_code code1,
2975 enum tree_code code2, tree decl1,
2976 tree decl2, int op_type,
2977 stmt_vec_info *prev_stmt_info)
2978 {
2979 int i;
2980 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2981 gimple new_stmt1, new_stmt2;
2982 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2983 VEC (tree, heap) *vec_tmp;
2984
2985 vec_dest = VEC_pop (tree, vec_dsts);
2986 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2987
2988 FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
2989 {
2990 if (op_type == binary_op)
2991 vop1 = VEC_index (tree, *vec_oprnds1, i);
2992 else
2993 vop1 = NULL_TREE;
2994
2995 /* Generate the two halves of promotion operation. */
2996 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2997 op_type, vec_dest, gsi, stmt);
2998 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2999 op_type, vec_dest, gsi, stmt);
3000 if (is_gimple_call (new_stmt1))
3001 {
3002 new_tmp1 = gimple_call_lhs (new_stmt1);
3003 new_tmp2 = gimple_call_lhs (new_stmt2);
3004 }
3005 else
3006 {
3007 new_tmp1 = gimple_assign_lhs (new_stmt1);
3008 new_tmp2 = gimple_assign_lhs (new_stmt2);
3009 }
3010
3011 if (multi_step_cvt)
3012 {
3013 /* Store the results for the recursive call. */
3014 VEC_quick_push (tree, vec_tmp, new_tmp1);
3015 VEC_quick_push (tree, vec_tmp, new_tmp2);
3016 }
3017 else
3018 {
3019           /* Last step of the promotion sequence - store the results.  */
3020 if (slp_node)
3021 {
3022 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
3023 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
3024 }
3025 else
3026 {
3027 if (!*prev_stmt_info)
3028 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
3029 else
3030 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
3031
3032 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
3033 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
3034 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
3035 }
3036 }
3037 }
3038
3039 if (multi_step_cvt)
3040 {
3041       /* For a multi-step promotion operation we call the function
3042          recursively for every stage.  We start from the input type,
3043          create promotion operations to the intermediate types, and then
3044          create promotions to the output type.  */
3045 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
3046 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
3047 multi_step_cvt - 1, stmt,
3048 vec_dsts, gsi, slp_node, code1,
3049                                               code2, decl1, decl2, op_type,
3050 prev_stmt_info);
3051 }
3052
3053 VEC_free (tree, heap, vec_tmp);
3054 }
3055
3056
3057 /* Function vectorizable_type_promotion
3058
3059 Check if STMT performs a binary or unary operation that involves
3060 type promotion, and if it can be vectorized.
3061 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3062 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3063 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3064
3065 static bool
3066 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
3067 gimple *vec_stmt, slp_tree slp_node)
3068 {
3069 tree vec_dest;
3070 tree scalar_dest;
3071 tree op0, op1 = NULL;
3072   tree vec_oprnd0 = NULL, vec_oprnd1 = NULL;
3073 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3074 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3075 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3076 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3077 int op_type;
3078 tree def;
3079 gimple def_stmt;
3080 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3081 stmt_vec_info prev_stmt_info;
3082 int nunits_in;
3083 int nunits_out;
3084 tree vectype_out;
3085 int ncopies;
3086 int j, i;
3087 tree vectype_in;
3088 tree intermediate_type = NULL_TREE;
3089 int multi_step_cvt = 0;
3090 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
3091 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
3092
3093 /* FORNOW: not supported by basic block SLP vectorization. */
3094 gcc_assert (loop_vinfo);
3095
3096 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3097 return false;
3098
3099 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3100 return false;
3101
3102 /* Is STMT a vectorizable type-promotion operation? */
3103 if (!is_gimple_assign (stmt))
3104 return false;
3105
3106 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3107 return false;
3108
3109 code = gimple_assign_rhs_code (stmt);
3110 if (!CONVERT_EXPR_CODE_P (code)
3111 && code != WIDEN_MULT_EXPR)
3112 return false;
3113
3114 scalar_dest = gimple_assign_lhs (stmt);
3115 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3116
3117 /* Check the operands of the operation. */
3118 op0 = gimple_assign_rhs1 (stmt);
3119 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
3120 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3121 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
3122 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
3123 && CONVERT_EXPR_CODE_P (code))))
3124 return false;
3125 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
3126 &def_stmt, &def, &dt[0], &vectype_in))
3127 {
3128 if (vect_print_dump_info (REPORT_DETAILS))
3129 fprintf (vect_dump, "use not simple.");
3130 return false;
3131 }
3132 /* If op0 is an external or constant def use a vector type with
3133 the same size as the output vector type. */
3134 if (!vectype_in)
3135 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
3136 if (vec_stmt)
3137 gcc_assert (vectype_in);
3138 if (!vectype_in)
3139 {
3140 if (vect_print_dump_info (REPORT_DETAILS))
3141 {
3142 fprintf (vect_dump, "no vectype for scalar type ");
3143 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
3144 }
3145
3146 return false;
3147 }
3148
3149 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3150 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3151 if (nunits_in <= nunits_out)
3152 return false;
3153
3154 /* Multiple types in SLP are handled by creating the appropriate number of
3155 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3156 case of SLP. */
3157 if (slp_node)
3158 ncopies = 1;
3159 else
3160 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3161
3162 gcc_assert (ncopies >= 1);
3163
3164 op_type = TREE_CODE_LENGTH (code);
3165 if (op_type == binary_op)
3166 {
3167 op1 = gimple_assign_rhs2 (stmt);
3168 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
3169 {
3170 if (vect_print_dump_info (REPORT_DETAILS))
3171 fprintf (vect_dump, "use not simple.");
3172 return false;
3173 }
3174 }
3175
3176 /* Supportable by target? */
3177 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3178 &decl1, &decl2, &code1, &code2,
3179 &multi_step_cvt, &interm_types))
3180 return false;
3181
3182 /* Binary widening operation can only be supported directly by the
3183 architecture. */
3184 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3185
3186 if (!vec_stmt) /* transformation not required. */
3187 {
3188 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3189 if (vect_print_dump_info (REPORT_DETAILS))
3190 fprintf (vect_dump, "=== vectorizable_promotion ===");
3191 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
3192 return true;
3193 }
3194
3195 /** Transform. **/
3196
3197 if (vect_print_dump_info (REPORT_DETAILS))
3198 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
3199 ncopies);
3200
3201 /* Handle def. */
3202 /* In case of multi-step promotion, we first generate promotion operations
3203      to the intermediate types, and then from those types to the final one.
3204      We store the vector destinations in VEC_DSTS in the correct order for
3205      recursive creation of promotion operations in
3206      vect_create_vectorized_promotion_stmts().  Vector destinations are created
3207      according to TYPES received from supportable_widening_operation().  */
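  /* Illustrative note (not part of the original source): for a char -> int
     promotion through short, the first stage unpacks each char vector into
     a high and a low short vector, and the recursive call in
     vect_create_vectorized_promotion_stmts() then unpacks those into int
     vectors, doubling the number of result vectors at every stage.  */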
3208 if (multi_step_cvt)
3209 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3210 else
3211 vec_dsts = VEC_alloc (tree, heap, 1);
3212
3213 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3214 VEC_quick_push (tree, vec_dsts, vec_dest);
3215
3216 if (multi_step_cvt)
3217 {
3218 for (i = VEC_length (tree, interm_types) - 1;
3219 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3220 {
3221 vec_dest = vect_create_destination_var (scalar_dest,
3222 intermediate_type);
3223 VEC_quick_push (tree, vec_dsts, vec_dest);
3224 }
3225 }
3226
3227 if (!slp_node)
3228 {
3229 vec_oprnds0 = VEC_alloc (tree, heap,
3230 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3231 if (op_type == binary_op)
3232 vec_oprnds1 = VEC_alloc (tree, heap, 1);
3233 }
3234
3235 /* In case the vectorization factor (VF) is bigger than the number
3236 of elements that we can fit in a vectype (nunits), we have to generate
3237 more than one vector stmt - i.e - we need to "unroll" the
3238 vector stmt by a factor VF/nunits. */
3239
3240 prev_stmt_info = NULL;
3241 for (j = 0; j < ncopies; j++)
3242 {
3243 /* Handle uses. */
3244 if (j == 0)
3245 {
3246 if (slp_node)
3247 vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
3248 &vec_oprnds1, -1);
3249 else
3250 {
3251 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3252 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
3253 if (op_type == binary_op)
3254 {
3255 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3256 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
3257 }
3258 }
3259 }
3260 else
3261 {
3262 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3263 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
3264 if (op_type == binary_op)
3265 {
3266 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3267 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3268 }
3269 }
3270
3271 /* Arguments are ready. Create the new vector stmts. */
3272 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3273 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
3274 multi_step_cvt, stmt,
3275 tmp_vec_dsts,
3276 gsi, slp_node, code1, code2,
3277 decl1, decl2, op_type,
3278 &prev_stmt_info);
3279 }
3280
3281 VEC_free (tree, heap, vec_dsts);
3282 VEC_free (tree, heap, tmp_vec_dsts);
3283 VEC_free (tree, heap, interm_types);
3284 VEC_free (tree, heap, vec_oprnds0);
3285 VEC_free (tree, heap, vec_oprnds1);
3286
3287 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3288 return true;
3289 }
3290
3291
3292 /* Function vectorizable_store.
3293
3294    Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
3295 can be vectorized.
3296 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3297 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3298 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3299
3300 static bool
3301 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3302 slp_tree slp_node)
3303 {
3304 tree scalar_dest;
3305 tree data_ref;
3306 tree op;
3307 tree vec_oprnd = NULL_TREE;
3308 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3309 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3310 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3311 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3312 struct loop *loop = NULL;
3313 enum machine_mode vec_mode;
3314 tree dummy;
3315 enum dr_alignment_support alignment_support_scheme;
3316 tree def;
3317 gimple def_stmt;
3318 enum vect_def_type dt;
3319 stmt_vec_info prev_stmt_info = NULL;
3320 tree dataref_ptr = NULL_TREE;
3321 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3322 int ncopies;
3323 int j;
3324 gimple next_stmt, first_stmt = NULL;
3325 bool strided_store = false;
3326 unsigned int group_size, i;
3327 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3328 bool inv_p;
3329 VEC(tree,heap) *vec_oprnds = NULL;
3330 bool slp = (slp_node != NULL);
3331 unsigned int vec_num;
3332 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3333
3334 if (loop_vinfo)
3335 loop = LOOP_VINFO_LOOP (loop_vinfo);
3336
3337 /* Multiple types in SLP are handled by creating the appropriate number of
3338 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3339 case of SLP. */
3340 if (slp)
3341 ncopies = 1;
3342 else
3343 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3344
3345 gcc_assert (ncopies >= 1);
3346
3347 /* FORNOW. This restriction should be relaxed. */
3348 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3349 {
3350 if (vect_print_dump_info (REPORT_DETAILS))
3351 fprintf (vect_dump, "multiple types in nested loop.");
3352 return false;
3353 }
3354
3355 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3356 return false;
3357
3358 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3359 return false;
3360
3361 /* Is vectorizable store? */
3362
3363 if (!is_gimple_assign (stmt))
3364 return false;
3365
3366 scalar_dest = gimple_assign_lhs (stmt);
3367 if (TREE_CODE (scalar_dest) != ARRAY_REF
3368 && TREE_CODE (scalar_dest) != INDIRECT_REF
3369 && TREE_CODE (scalar_dest) != COMPONENT_REF
3370 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3371 && TREE_CODE (scalar_dest) != REALPART_EXPR
3372 && TREE_CODE (scalar_dest) != MEM_REF)
3373 return false;
3374
3375 gcc_assert (gimple_assign_single_p (stmt));
3376 op = gimple_assign_rhs1 (stmt);
3377 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3378 {
3379 if (vect_print_dump_info (REPORT_DETAILS))
3380 fprintf (vect_dump, "use not simple.");
3381 return false;
3382 }
3383
3384 /* The scalar rhs type needs to be trivially convertible to the vector
3385 component type. This should always be the case. */
3386 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
3387 {
3388 if (vect_print_dump_info (REPORT_DETAILS))
3389 fprintf (vect_dump, "??? operands of different types");
3390 return false;
3391 }
3392
3393 vec_mode = TYPE_MODE (vectype);
3394   /* FORNOW. In some cases we can vectorize even if the data-type is not
3395      supported (e.g. array initialization with 0).  */
3396 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3397 return false;
3398
3399 if (!STMT_VINFO_DATA_REF (stmt_info))
3400 return false;
3401
3402 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
3403 {
3404 if (vect_print_dump_info (REPORT_DETAILS))
3405 fprintf (vect_dump, "negative step for store.");
3406 return false;
3407 }
3408
3409 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3410 {
3411 strided_store = true;
3412 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3413 if (!vect_strided_store_supported (vectype)
3414 && !PURE_SLP_STMT (stmt_info) && !slp)
3415 return false;
3416
3417 if (first_stmt == stmt)
3418 {
3419 /* STMT is the leader of the group. Check the operands of all the
3420 stmts of the group. */
3421 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3422 while (next_stmt)
3423 {
3424 gcc_assert (gimple_assign_single_p (next_stmt));
3425 op = gimple_assign_rhs1 (next_stmt);
3426 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3427 &def, &dt))
3428 {
3429 if (vect_print_dump_info (REPORT_DETAILS))
3430 fprintf (vect_dump, "use not simple.");
3431 return false;
3432 }
3433 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3434 }
3435 }
3436 }
3437
3438 if (!vec_stmt) /* transformation not required. */
3439 {
3440 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3441 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3442 return true;
3443 }
3444
3445 /** Transform. **/
3446
3447 if (strided_store)
3448 {
3449 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3450 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3451
3452 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3453
3454 /* FORNOW */
3455 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3456
3457 /* We vectorize all the stmts of the interleaving group when we
3458 reach the last stmt in the group. */
3459 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3460 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3461 && !slp)
3462 {
3463 *vec_stmt = NULL;
3464 return true;
3465 }
3466
3467 if (slp)
3468 {
3469 strided_store = false;
3470 /* VEC_NUM is the number of vect stmts to be created for this
3471 group. */
3472 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3473 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3474 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3475 }
3476 else
3477 /* VEC_NUM is the number of vect stmts to be created for this
3478 group. */
3479 vec_num = group_size;
3480 }
3481 else
3482 {
3483 first_stmt = stmt;
3484 first_dr = dr;
3485 group_size = vec_num = 1;
3486 }
3487
3488 if (vect_print_dump_info (REPORT_DETAILS))
3489 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3490
3491 dr_chain = VEC_alloc (tree, heap, group_size);
3492 oprnds = VEC_alloc (tree, heap, group_size);
3493
3494 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3495 gcc_assert (alignment_support_scheme);
3496
3497 /* In case the vectorization factor (VF) is bigger than the number
3498 of elements that we can fit in a vectype (nunits), we have to generate
3499 more than one vector stmt - i.e - we need to "unroll" the
3500 vector stmt by a factor VF/nunits. For more details see documentation in
3501 vect_get_vec_def_for_copy_stmt. */
3502
3503 /* In case of interleaving (non-unit strided access):
3504
3505 S1: &base + 2 = x2
3506 S2: &base = x0
3507 S3: &base + 1 = x1
3508 S4: &base + 3 = x3
3509
3510 We create vectorized stores starting from base address (the access of the
3511 first stmt in the chain (S2 in the above example)), when the last store stmt
3512 of the chain (S4) is reached:
3513
3514 VS1: &base = vx2
3515 VS2: &base + vec_size*1 = vx0
3516 VS3: &base + vec_size*2 = vx1
3517 VS4: &base + vec_size*3 = vx3
3518
3519 Then permutation statements are generated:
3520
3521 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3522 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3523 ...
3524
3525 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3526 (the order of the data-refs in the output of vect_permute_store_chain
3527 corresponds to the order of scalar stmts in the interleaving chain - see
3528 the documentation of vect_permute_store_chain()).
3529
3530 In case of both multiple types and interleaving, the above vector stores and
3531 permutation stmts are created for every copy. The result vector stmts are
3532 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3533 STMT_VINFO_RELATED_STMT for the next copies.
3534 */
3535
3536 prev_stmt_info = NULL;
3537 for (j = 0; j < ncopies; j++)
3538 {
3539 gimple new_stmt;
3540 gimple ptr_incr;
3541
3542 if (j == 0)
3543 {
3544 if (slp)
3545 {
3546 /* Get vectorized arguments for SLP_NODE. */
3547 vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
3548 NULL, -1);
3549
3550 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3551 }
3552 else
3553 {
3554 /* For interleaved stores we collect vectorized defs for all the
3555 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3556 used as an input to vect_permute_store_chain(), and OPRNDS as
3557 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3558
3559 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3560 OPRNDS are of size 1. */
3561 next_stmt = first_stmt;
3562 for (i = 0; i < group_size; i++)
3563 {
3564 /* Since gaps are not supported for interleaved stores,
3565 GROUP_SIZE is the exact number of stmts in the chain.
3566 Therefore, NEXT_STMT can't be NULL. In case
3567 there is no interleaving, GROUP_SIZE is 1, and only one
3568 iteration of the loop will be executed. */
3569 gcc_assert (next_stmt
3570 && gimple_assign_single_p (next_stmt));
3571 op = gimple_assign_rhs1 (next_stmt);
3572
3573 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3574 NULL);
3575 VEC_quick_push(tree, dr_chain, vec_oprnd);
3576 VEC_quick_push(tree, oprnds, vec_oprnd);
3577 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3578 }
3579 }
3580
3581 /* We should have caught mismatched types earlier. */
3582 gcc_assert (useless_type_conversion_p (vectype,
3583 TREE_TYPE (vec_oprnd)));
3584 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3585 &dummy, &ptr_incr, false,
3586 &inv_p);
3587 gcc_assert (bb_vinfo || !inv_p);
3588 }
3589 else
3590 {
3591 /* For interleaved stores we created vectorized defs for all the
3592 defs stored in OPRNDS in the previous iteration (previous copy).
3593 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3594 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3595 next copy.
3596 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3597 OPRNDS are of size 1. */
3598 for (i = 0; i < group_size; i++)
3599 {
3600 op = VEC_index (tree, oprnds, i);
3601 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3602 &dt);
3603 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3604 VEC_replace(tree, dr_chain, i, vec_oprnd);
3605 VEC_replace(tree, oprnds, i, vec_oprnd);
3606 }
3607 dataref_ptr =
3608 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3609 }
3610
3611 if (strided_store)
3612 {
3613 result_chain = VEC_alloc (tree, heap, group_size);
3614 /* Permute. */
3615 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3616 &result_chain))
3617 return false;
3618 }
3619
3620 next_stmt = first_stmt;
3621 for (i = 0; i < vec_num; i++)
3622 {
3623 struct ptr_info_def *pi;
3624
3625 if (i > 0)
3626 /* Bump the vector pointer. */
3627 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3628 NULL_TREE);
3629
3630 if (slp)
3631 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3632 else if (strided_store)
3633 /* For strided stores vectorized defs are interleaved in
3634 vect_permute_store_chain(). */
3635 vec_oprnd = VEC_index (tree, result_chain, i);
3636
3637 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
3638 build_int_cst (reference_alias_ptr_type
3639 (DR_REF (first_dr)), 0));
3640 pi = get_ptr_info (dataref_ptr);
3641 pi->align = TYPE_ALIGN_UNIT (vectype);
3642 if (aligned_access_p (first_dr))
3643 pi->misalign = 0;
3644 else if (DR_MISALIGNMENT (first_dr) == -1)
3645 {
3646 TREE_TYPE (data_ref)
3647 = build_aligned_type (TREE_TYPE (data_ref),
3648 TYPE_ALIGN (TREE_TYPE (vectype)));
3649 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
3650 pi->misalign = 0;
3651 }
3652 else
3653 {
3654 TREE_TYPE (data_ref)
3655 = build_aligned_type (TREE_TYPE (data_ref),
3656 TYPE_ALIGN (TREE_TYPE (vectype)));
3657 pi->misalign = DR_MISALIGNMENT (first_dr);
3658 }
3659
3660 /* Arguments are ready. Create the new vector stmt. */
3661 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3662 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3663 mark_symbols_for_renaming (new_stmt);
3664
3665 if (slp)
3666 continue;
3667
3668 if (j == 0)
3669 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3670 else
3671 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3672
3673 prev_stmt_info = vinfo_for_stmt (new_stmt);
3674 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3675 if (!next_stmt)
3676 break;
3677 }
3678 }
3679
3680 VEC_free (tree, heap, dr_chain);
3681 VEC_free (tree, heap, oprnds);
3682 if (result_chain)
3683 VEC_free (tree, heap, result_chain);
3684 if (vec_oprnds)
3685 VEC_free (tree, heap, vec_oprnds);
3686
3687 return true;
3688 }
3689
3690 /* Given a vector type VECTYPE, returns a builtin DECL to be used
3691 for vector permutation, and stores into *MASK a mask that implements
3692 reversal of the vector elements. If that is impossible, returns
3693 NULL (and *MASK is unchanged). */
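
/* Illustration (assuming a four-element vector such as V4SI): the mask
   built below selects the source elements in reverse order, i.e.
   { 3, 2, 1, 0 }, so the builtin returned by
   targetm.vectorize.builtin_vec_perm, applied with this mask, reverses
   the vector elements.  */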
3694
3695 static tree
3696 perm_mask_for_reverse (tree vectype, tree *mask)
3697 {
3698 tree builtin_decl;
3699 tree mask_element_type, mask_type;
3700 tree mask_vec = NULL;
3701 int i;
3702 int nunits;
3703 if (!targetm.vectorize.builtin_vec_perm)
3704 return NULL;
3705
3706 builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
3707 &mask_element_type);
3708 if (!builtin_decl || !mask_element_type)
3709 return NULL;
3710
3711 mask_type = get_vectype_for_scalar_type (mask_element_type);
3712 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3713 if (!mask_type
3714 || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
3715 return NULL;
3716
3717 for (i = 0; i < nunits; i++)
3718 mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
3719 mask_vec = build_vector (mask_type, mask_vec);
3720
3721 if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
3722 return NULL;
3723 if (mask)
3724 *mask = mask_vec;
3725 return builtin_decl;
3726 }
3727
3728 /* Given a vector variable X that was generated for the scalar LHS of
3729 STMT, generate instructions to reverse the vector elements of X,
3730 insert them at *GSI, and return the permuted vector variable. */
3731
3732 static tree
3733 reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
3734 {
3735 tree vectype = TREE_TYPE (x);
3736 tree mask_vec, builtin_decl;
3737 tree perm_dest, data_ref;
3738 gimple perm_stmt;
3739
3740 builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);
3741
3742 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3743
3744 /* Generate the permute statement. */
3745 perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
3746 if (!useless_type_conversion_p (vectype,
3747 TREE_TYPE (TREE_TYPE (builtin_decl))))
3748 {
3749 tree tem = create_tmp_reg (TREE_TYPE (TREE_TYPE (builtin_decl)), NULL);
3750 tem = make_ssa_name (tem, perm_stmt);
3751 gimple_call_set_lhs (perm_stmt, tem);
3752 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3753 perm_stmt = gimple_build_assign (NULL_TREE,
3754 build1 (VIEW_CONVERT_EXPR,
3755 vectype, tem));
3756 }
3757 data_ref = make_ssa_name (perm_dest, perm_stmt);
3758 gimple_set_lhs (perm_stmt, data_ref);
3759 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3760
3761 return data_ref;
3762 }
3763
3764 /* vectorizable_load.
3765
3766 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3767 can be vectorized.
3768 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3769 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3770 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3771
3772 static bool
3773 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3774 slp_tree slp_node, slp_instance slp_node_instance)
3775 {
3776 tree scalar_dest;
3777 tree vec_dest = NULL;
3778 tree data_ref = NULL;
3779 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3780 stmt_vec_info prev_stmt_info;
3781 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3782 struct loop *loop = NULL;
3783 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3784 bool nested_in_vect_loop = false;
3785 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3786 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3787 tree new_temp;
3788 enum machine_mode mode;
3789 gimple new_stmt = NULL;
3790 tree dummy;
3791 enum dr_alignment_support alignment_support_scheme;
3792 tree dataref_ptr = NULL_TREE;
3793 gimple ptr_incr;
3794 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3795 int ncopies;
3796 int i, j, group_size;
3797 tree msq = NULL_TREE, lsq;
3798 tree offset = NULL_TREE;
3799 tree realignment_token = NULL_TREE;
3800 gimple phi = NULL;
3801 VEC(tree,heap) *dr_chain = NULL;
3802 bool strided_load = false;
3803 gimple first_stmt;
3804 tree scalar_type;
3805 bool inv_p;
3806 bool negative;
3807 bool compute_in_loop = false;
3808 struct loop *at_loop;
3809 int vec_num;
3810 bool slp = (slp_node != NULL);
3811 bool slp_perm = false;
3812 enum tree_code code;
3813 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3814 int vf;
3815
3816 if (loop_vinfo)
3817 {
3818 loop = LOOP_VINFO_LOOP (loop_vinfo);
3819 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3820 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3821 }
3822 else
3823 vf = 1;
3824
3825 /* Multiple types in SLP are handled by creating the appropriate number of
3826 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3827 case of SLP. */
3828 if (slp)
3829 ncopies = 1;
3830 else
3831 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3832
3833 gcc_assert (ncopies >= 1);
3834
3835 /* FORNOW. This restriction should be relaxed. */
3836 if (nested_in_vect_loop && ncopies > 1)
3837 {
3838 if (vect_print_dump_info (REPORT_DETAILS))
3839 fprintf (vect_dump, "multiple types in nested loop.");
3840 return false;
3841 }
3842
3843 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3844 return false;
3845
3846 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3847 return false;
3848
3849 /* Is vectorizable load? */
3850 if (!is_gimple_assign (stmt))
3851 return false;
3852
3853 scalar_dest = gimple_assign_lhs (stmt);
3854 if (TREE_CODE (scalar_dest) != SSA_NAME)
3855 return false;
3856
3857 code = gimple_assign_rhs_code (stmt);
3858 if (code != ARRAY_REF
3859 && code != INDIRECT_REF
3860 && code != COMPONENT_REF
3861 && code != IMAGPART_EXPR
3862 && code != REALPART_EXPR
3863 && code != MEM_REF)
3864 return false;
3865
3866 if (!STMT_VINFO_DATA_REF (stmt_info))
3867 return false;
3868
3869 negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
3870 if (negative && ncopies > 1)
3871 {
3872 if (vect_print_dump_info (REPORT_DETAILS))
3873 fprintf (vect_dump, "multiple types with negative step.");
3874 return false;
3875 }
3876
3877 scalar_type = TREE_TYPE (DR_REF (dr));
3878 mode = TYPE_MODE (vectype);
3879
3880 /* FORNOW. In some cases can vectorize even if data-type not supported
3881 (e.g. - data copies). */
3882 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
3883 {
3884 if (vect_print_dump_info (REPORT_DETAILS))
3885 fprintf (vect_dump, "Aligned load, but unsupported type.");
3886 return false;
3887 }
3888
3889 /* The vector component type needs to be trivially convertible to the
3890 scalar lhs. This should always be the case. */
3891 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3892 {
3893 if (vect_print_dump_info (REPORT_DETAILS))
3894 fprintf (vect_dump, "??? operands of different types");
3895 return false;
3896 }
3897
3898 /* Check if the load is a part of an interleaving chain. */
3899 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3900 {
3901 strided_load = true;
3902 /* FORNOW */
3903 gcc_assert (! nested_in_vect_loop);
3904
3905 /* Check if interleaving is supported. */
3906 if (!vect_strided_load_supported (vectype)
3907 && !PURE_SLP_STMT (stmt_info) && !slp)
3908 return false;
3909 }
3910
3911 if (negative)
3912 {
3913 gcc_assert (!strided_load);
3914 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
3915 if (alignment_support_scheme != dr_aligned
3916 && alignment_support_scheme != dr_unaligned_supported)
3917 {
3918 if (vect_print_dump_info (REPORT_DETAILS))
3919 fprintf (vect_dump, "negative step but alignment required.");
3920 return false;
3921 }
3922 if (!perm_mask_for_reverse (vectype, NULL))
3923 {
3924 if (vect_print_dump_info (REPORT_DETAILS))
3925 fprintf (vect_dump, "negative step and reversing not supported.");
3926 return false;
3927 }
3928 }
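  /* An illustrative example: a read such as

       for (i = n - 1; i >= 0; i--)
         ... = a[i];

     has a negative DR_STEP; the vector is loaded at an offset of
     -(nunits - 1) elements and then reversed via reverse_vec_elements ()
     during the transformation below.  */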
3929
3930 if (!vec_stmt) /* transformation not required. */
3931 {
3932 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3933 vect_model_load_cost (stmt_info, ncopies, NULL);
3934 return true;
3935 }
3936
3937 if (vect_print_dump_info (REPORT_DETAILS))
3938 fprintf (vect_dump, "transform load.");
3939
3940 /** Transform. **/
3941
3942 if (strided_load)
3943 {
3944 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3945 /* Check if the chain of loads is already vectorized. */
3946 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3947 {
3948 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3949 return true;
3950 }
3951 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3952 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3953
3954 /* VEC_NUM is the number of vect stmts to be created for this group. */
3955 if (slp)
3956 {
3957 strided_load = false;
3958 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3959 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3960 slp_perm = true;
3961 }
3962 else
3963 vec_num = group_size;
3964
3965 dr_chain = VEC_alloc (tree, heap, vec_num);
3966 }
3967 else
3968 {
3969 first_stmt = stmt;
3970 first_dr = dr;
3971 group_size = vec_num = 1;
3972 }
3973
3974 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3975 gcc_assert (alignment_support_scheme);
3976
3977 /* In case the vectorization factor (VF) is bigger than the number
3978 of elements that we can fit in a vectype (nunits), we have to generate
3979 more than one vector stmt - i.e - we need to "unroll" the
3980 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3981 from one copy of the vector stmt to the next, in the field
3982 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3983 stages to find the correct vector defs to be used when vectorizing
3984 stmts that use the defs of the current stmt. The example below
3985 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
3986 need to create 4 vectorized stmts):
3987
3988 before vectorization:
3989 RELATED_STMT VEC_STMT
3990 S1: x = memref - -
3991 S2: z = x + 1 - -
3992
3993 step 1: vectorize stmt S1:
3994 We first create the vector stmt VS1_0, and, as usual, record a
3995 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3996 Next, we create the vector stmt VS1_1, and record a pointer to
3997 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3998 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3999 stmts and pointers:
4000 RELATED_STMT VEC_STMT
4001 VS1_0: vx0 = memref0 VS1_1 -
4002 VS1_1: vx1 = memref1 VS1_2 -
4003 VS1_2: vx2 = memref2 VS1_3 -
4004 VS1_3: vx3 = memref3 - -
4005 S1: x = load - VS1_0
4006 S2: z = x + 1 - -
4007
4008 See in documentation in vect_get_vec_def_for_stmt_copy for how the
4009 information we recorded in RELATED_STMT field is used to vectorize
4010 stmt S2. */
4011
4012 /* In case of interleaving (non-unit strided access):
4013
4014 S1: x2 = &base + 2
4015 S2: x0 = &base
4016 S3: x1 = &base + 1
4017 S4: x3 = &base + 3
4018
4019 Vectorized loads are created in the order of memory accesses
4020 starting from the access of the first stmt of the chain:
4021
4022 VS1: vx0 = &base
4023 VS2: vx1 = &base + vec_size*1
4024 VS3: vx2 = &base + vec_size*2
4025 VS4: vx3 = &base + vec_size*3
4026
4027 Then permutation statements are generated:
4028
4029 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
4030 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
4031 ...
4032
4033 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
4034 (the order of the data-refs in the output of vect_permute_load_chain
4035 corresponds to the order of scalar stmts in the interleaving chain - see
4036 the documentation of vect_permute_load_chain()).
4037 The generation of permutation stmts and recording them in
4038 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
4039
4040 In case of both multiple types and interleaving, the vector loads and
4041 permutation stmts above are created for every copy. The result vector
4042 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
4043 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
4044
4045 /* If the data reference is aligned (dr_aligned) or potentially unaligned
4046 on a target that supports unaligned accesses (dr_unaligned_supported)
4047 we generate the following code:
4048 p = initial_addr;
4049 indx = 0;
4050 loop {
4051 p = p + indx * vectype_size;
4052 vec_dest = *(p);
4053 indx = indx + 1;
4054 }
4055
4056 Otherwise, the data reference is potentially unaligned on a target that
4057 does not support unaligned accesses (dr_explicit_realign_optimized) -
4058 then generate the following code, in which the data in each iteration is
4059 obtained by two vector loads, one from the previous iteration, and one
4060 from the current iteration:
4061 p1 = initial_addr;
4062 msq_init = *(floor(p1))
4063 p2 = initial_addr + VS - 1;
4064 realignment_token = call target_builtin;
4065 indx = 0;
4066 loop {
4067 p2 = p2 + indx * vectype_size
4068 lsq = *(floor(p2))
4069 vec_dest = realign_load (msq, lsq, realignment_token)
4070 indx = indx + 1;
4071 msq = lsq;
4072 } */
4073
4074 /* If the misalignment remains the same throughout the execution of the
4075 loop, we can create the init_addr and permutation mask at the loop
4076 preheader. Otherwise, it needs to be created inside the loop.
4077 This can only occur when vectorizing memory accesses in the inner-loop
4078 nested within an outer-loop that is being vectorized. */
4079
4080 if (loop && nested_in_vect_loop_p (loop, stmt)
4081 && (TREE_INT_CST_LOW (DR_STEP (dr))
4082 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
4083 {
4084 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
4085 compute_in_loop = true;
4086 }
4087
4088 if ((alignment_support_scheme == dr_explicit_realign_optimized
4089 || alignment_support_scheme == dr_explicit_realign)
4090 && !compute_in_loop)
4091 {
4092 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
4093 alignment_support_scheme, NULL_TREE,
4094 &at_loop);
4095 if (alignment_support_scheme == dr_explicit_realign_optimized)
4096 {
4097 phi = SSA_NAME_DEF_STMT (msq);
4098 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4099 }
4100 }
4101 else
4102 at_loop = loop;
4103
4104 if (negative)
4105 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
4106
4107 prev_stmt_info = NULL;
4108 for (j = 0; j < ncopies; j++)
4109 {
4110 /* 1. Create the vector pointer update chain. */
4111 if (j == 0)
4112 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
4113 at_loop, offset,
4114 &dummy, &ptr_incr, false,
4115 &inv_p);
4116 else
4117 dataref_ptr =
4118 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
4119
4120 for (i = 0; i < vec_num; i++)
4121 {
4122 if (i > 0)
4123 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
4124 NULL_TREE);
4125
4126 /* 2. Create the vector-load in the loop. */
4127 switch (alignment_support_scheme)
4128 {
4129 case dr_aligned:
4130 case dr_unaligned_supported:
4131 {
4132 struct ptr_info_def *pi;
4133 data_ref
4134 = build2 (MEM_REF, vectype, dataref_ptr,
4135 build_int_cst (reference_alias_ptr_type
4136 (DR_REF (first_dr)), 0));
4137 pi = get_ptr_info (dataref_ptr);
4138 pi->align = TYPE_ALIGN_UNIT (vectype);
4139 if (alignment_support_scheme == dr_aligned)
4140 {
4141 gcc_assert (aligned_access_p (first_dr));
4142 pi->misalign = 0;
4143 }
4144 else if (DR_MISALIGNMENT (first_dr) == -1)
4145 {
4146 TREE_TYPE (data_ref)
4147 = build_aligned_type (TREE_TYPE (data_ref),
4148 TYPE_ALIGN (TREE_TYPE (vectype)));
4149 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
4150 pi->misalign = 0;
4151 }
4152 else
4153 {
4154 TREE_TYPE (data_ref)
4155 = build_aligned_type (TREE_TYPE (data_ref),
4156 TYPE_ALIGN (TREE_TYPE (vectype)));
4157 pi->misalign = DR_MISALIGNMENT (first_dr);
4158 }
4159 break;
4160 }
4161 case dr_explicit_realign:
4162 {
4163 tree ptr, bump;
4164 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4165
4166 if (compute_in_loop)
4167 msq = vect_setup_realignment (first_stmt, gsi,
4168 &realignment_token,
4169 dr_explicit_realign,
4170 dataref_ptr, NULL);
4171
4172 new_stmt = gimple_build_assign_with_ops
4173 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4174 build_int_cst
4175 (TREE_TYPE (dataref_ptr),
4176 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4177 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4178 gimple_assign_set_lhs (new_stmt, ptr);
4179 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4180 data_ref
4181 = build2 (MEM_REF, vectype, ptr,
4182 build_int_cst (reference_alias_ptr_type
4183 (DR_REF (first_dr)), 0));
4184 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4185 new_stmt = gimple_build_assign (vec_dest, data_ref);
4186 new_temp = make_ssa_name (vec_dest, new_stmt);
4187 gimple_assign_set_lhs (new_stmt, new_temp);
4188 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
4189 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
4190 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4191 msq = new_temp;
4192
4193 bump = size_binop (MULT_EXPR, vs_minus_1,
4194 TYPE_SIZE_UNIT (scalar_type));
4195 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
4196 new_stmt = gimple_build_assign_with_ops
4197 (BIT_AND_EXPR, NULL_TREE, ptr,
4198 build_int_cst
4199 (TREE_TYPE (ptr),
4200 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4201 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4202 gimple_assign_set_lhs (new_stmt, ptr);
4203 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4204 data_ref
4205 = build2 (MEM_REF, vectype, ptr,
4206 build_int_cst (reference_alias_ptr_type
4207 (DR_REF (first_dr)), 0));
4208 break;
4209 }
4210 case dr_explicit_realign_optimized:
4211 new_stmt = gimple_build_assign_with_ops
4212 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4213 build_int_cst
4214 (TREE_TYPE (dataref_ptr),
4215 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4216 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4217 gimple_assign_set_lhs (new_stmt, new_temp);
4218 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4219 data_ref
4220 = build2 (MEM_REF, vectype, new_temp,
4221 build_int_cst (reference_alias_ptr_type
4222 (DR_REF (first_dr)), 0));
4223 break;
4224 default:
4225 gcc_unreachable ();
4226 }
4227 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4228 new_stmt = gimple_build_assign (vec_dest, data_ref);
4229 new_temp = make_ssa_name (vec_dest, new_stmt);
4230 gimple_assign_set_lhs (new_stmt, new_temp);
4231 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4232 mark_symbols_for_renaming (new_stmt);
4233
4234 /* 3. Handle explicit realignment if necessary/supported. Create in
4235 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
4236 if (alignment_support_scheme == dr_explicit_realign_optimized
4237 || alignment_support_scheme == dr_explicit_realign)
4238 {
4239 lsq = gimple_assign_lhs (new_stmt);
4240 if (!realignment_token)
4241 realignment_token = dataref_ptr;
4242 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4243 new_stmt
4244 = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR, vec_dest,
4245 msq, lsq, realignment_token);
4246 new_temp = make_ssa_name (vec_dest, new_stmt);
4247 gimple_assign_set_lhs (new_stmt, new_temp);
4248 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4249
4250 if (alignment_support_scheme == dr_explicit_realign_optimized)
4251 {
4252 gcc_assert (phi);
4253 if (i == vec_num - 1 && j == ncopies - 1)
4254 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
4255 UNKNOWN_LOCATION);
4256 msq = lsq;
4257 }
4258 }
4259
4260 /* 4. Handle invariant-load. */
4261 if (inv_p && !bb_vinfo)
4262 {
4263 gcc_assert (!strided_load);
4264 gcc_assert (nested_in_vect_loop_p (loop, stmt));
4265 if (j == 0)
4266 {
4267 int k;
4268 tree t = NULL_TREE;
4269 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
4270
4271 /* CHECKME: bitpos depends on endianness? */
4272 bitpos = bitsize_zero_node;
4273 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
4274 bitsize, bitpos);
4275 vec_dest =
4276 vect_create_destination_var (scalar_dest, NULL_TREE);
4277 new_stmt = gimple_build_assign (vec_dest, vec_inv);
4278 new_temp = make_ssa_name (vec_dest, new_stmt);
4279 gimple_assign_set_lhs (new_stmt, new_temp);
4280 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4281
4282 for (k = nunits - 1; k >= 0; --k)
4283 t = tree_cons (NULL_TREE, new_temp, t);
4284 /* FIXME: use build_constructor directly. */
4285 vec_inv = build_constructor_from_list (vectype, t);
4286 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
4287 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4288 }
4289 else
4290 gcc_unreachable (); /* FORNOW. */
4291 }
4292
4293 if (negative)
4294 {
4295 new_temp = reverse_vec_elements (new_temp, stmt, gsi);
4296 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4297 }
4298
4299 /* Collect vector loads and later create their permutation in
4300 vect_transform_strided_load (). */
4301 if (strided_load || slp_perm)
4302 VEC_quick_push (tree, dr_chain, new_temp);
4303
4304 /* Store vector loads in the corresponding SLP_NODE. */
4305 if (slp && !slp_perm)
4306 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4307 }
4308
4309 if (slp && !slp_perm)
4310 continue;
4311
4312 if (slp_perm)
4313 {
4314 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
4315 slp_node_instance, false))
4316 {
4317 VEC_free (tree, heap, dr_chain);
4318 return false;
4319 }
4320 }
4321 else
4322 {
4323 if (strided_load)
4324 {
4325 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
4326 return false;
4327
4328 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4329 VEC_free (tree, heap, dr_chain);
4330 dr_chain = VEC_alloc (tree, heap, group_size);
4331 }
4332 else
4333 {
4334 if (j == 0)
4335 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4336 else
4337 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4338 prev_stmt_info = vinfo_for_stmt (new_stmt);
4339 }
4340 }
4341 }
4342
4343 if (dr_chain)
4344 VEC_free (tree, heap, dr_chain);
4345
4346 return true;
4347 }
4348
4349 /* Function vect_is_simple_cond.
4350
4351 Input:
4352 LOOP - the loop that is being vectorized.
4353 COND - Condition that is checked for simple use.
4354
4355 Returns whether a COND can be vectorized. Checks whether
4356 condition operands are supportable using vect_is_simple_use. */
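
/* Illustration (hypothetical SSA names): a condition such as a_1 < b_2 is
   simple when each operand is either an SSA name whose definition
   vect_is_simple_use accepts, or an integer/real/fixed-point constant;
   any other kind of operand is not.  */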
4357
4358 static bool
4359 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
4360 {
4361 tree lhs, rhs;
4362 tree def;
4363 enum vect_def_type dt;
4364
4365 if (!COMPARISON_CLASS_P (cond))
4366 return false;
4367
4368 lhs = TREE_OPERAND (cond, 0);
4369 rhs = TREE_OPERAND (cond, 1);
4370
4371 if (TREE_CODE (lhs) == SSA_NAME)
4372 {
4373 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
4374 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
4375 &dt))
4376 return false;
4377 }
4378 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
4379 && TREE_CODE (lhs) != FIXED_CST)
4380 return false;
4381
4382 if (TREE_CODE (rhs) == SSA_NAME)
4383 {
4384 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
4385 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
4386 &dt))
4387 return false;
4388 }
4389 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
4390 && TREE_CODE (rhs) != FIXED_CST)
4391 return false;
4392
4393 return true;
4394 }
4395
4396 /* vectorizable_condition.
4397
4398 Check if STMT is a conditional modify expression that can be vectorized.
4399 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4400 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4401 at GSI.
4402
4403 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
4404 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
4405 the else clause if it is 2).
4406
4407 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
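
/* Sketch of the transformation (illustrative names): a scalar statement

     x_5 = a_1 < b_2 ? c_3 : d_4;

   becomes, for each copy,

     vx = VEC_COND_EXPR <va < vb, vc, vd>;

   where va, vb, vc and vd are the vectorized condition operands and
   clauses obtained below.  */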
4408
4409 bool
4410 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4411 gimple *vec_stmt, tree reduc_def, int reduc_index)
4412 {
4413 tree scalar_dest = NULL_TREE;
4414 tree vec_dest = NULL_TREE;
4415 tree op = NULL_TREE;
4416 tree cond_expr, then_clause, else_clause;
4417 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4418 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4419 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
4420 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
4421 tree vec_compare, vec_cond_expr;
4422 tree new_temp;
4423 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4424 enum machine_mode vec_mode;
4425 tree def;
4426 enum vect_def_type dt, dts[4];
4427 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4428 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4429 enum tree_code code;
4430 stmt_vec_info prev_stmt_info = NULL;
4431 int j;
4432
4433 /* FORNOW: unsupported in basic block SLP. */
4434 gcc_assert (loop_vinfo);
4435
4436 gcc_assert (ncopies >= 1);
4437 if (reduc_index && ncopies > 1)
4438 return false; /* FORNOW */
4439
4440 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4441 return false;
4442
4443 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4444 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4445 && reduc_def))
4446 return false;
4447
4448 /* FORNOW: SLP not supported. */
4449 if (STMT_SLP_TYPE (stmt_info))
4450 return false;
4451
4452 /* FORNOW: not yet supported. */
4453 if (STMT_VINFO_LIVE_P (stmt_info))
4454 {
4455 if (vect_print_dump_info (REPORT_DETAILS))
4456 fprintf (vect_dump, "value used after loop.");
4457 return false;
4458 }
4459
4460 /* Is vectorizable conditional operation? */
4461 if (!is_gimple_assign (stmt))
4462 return false;
4463
4464 code = gimple_assign_rhs_code (stmt);
4465
4466 if (code != COND_EXPR)
4467 return false;
4468
4469 gcc_assert (gimple_assign_single_p (stmt));
4470 op = gimple_assign_rhs1 (stmt);
4471 cond_expr = TREE_OPERAND (op, 0);
4472 then_clause = TREE_OPERAND (op, 1);
4473 else_clause = TREE_OPERAND (op, 2);
4474
4475 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4476 return false;
4477
4478 /* We do not handle two different vector types for the condition
4479 and the values. */
4480 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4481 TREE_TYPE (vectype)))
4482 return false;
4483
4484 if (TREE_CODE (then_clause) == SSA_NAME)
4485 {
4486 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
4487 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
4488 &then_def_stmt, &def, &dt))
4489 return false;
4490 }
4491 else if (TREE_CODE (then_clause) != INTEGER_CST
4492 && TREE_CODE (then_clause) != REAL_CST
4493 && TREE_CODE (then_clause) != FIXED_CST)
4494 return false;
4495
4496 if (TREE_CODE (else_clause) == SSA_NAME)
4497 {
4498 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
4499 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
4500 &else_def_stmt, &def, &dt))
4501 return false;
4502 }
4503 else if (TREE_CODE (else_clause) != INTEGER_CST
4504 && TREE_CODE (else_clause) != REAL_CST
4505 && TREE_CODE (else_clause) != FIXED_CST)
4506 return false;
4507
4508
4509 vec_mode = TYPE_MODE (vectype);
4510
4511 if (!vec_stmt)
4512 {
4513 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
4514 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
4515 }
4516
4517 /* Transform */
4518
4519 /* Handle def. */
4520 scalar_dest = gimple_assign_lhs (stmt);
4521 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4522
4523 /* Handle cond expr. */
4524 for (j = 0; j < ncopies; j++)
4525 {
4526 gimple new_stmt;
4527 if (j == 0)
4528 {
4529 gimple gtemp;
4530 vec_cond_lhs =
4531 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
4532 stmt, NULL);
4533 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
4534 NULL, &gtemp, &def, &dts[0]);
4535 vec_cond_rhs =
4536 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
4537 stmt, NULL);
4538 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
4539 NULL, &gtemp, &def, &dts[1]);
4540 if (reduc_index == 1)
4541 vec_then_clause = reduc_def;
4542 else
4543 {
4544 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
4545 stmt, NULL);
4546 vect_is_simple_use (then_clause, loop_vinfo,
4547 NULL, &gtemp, &def, &dts[2]);
4548 }
4549 if (reduc_index == 2)
4550 vec_else_clause = reduc_def;
4551 else
4552 {
4553 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
4554 stmt, NULL);
4555 vect_is_simple_use (else_clause, loop_vinfo,
4556 NULL, &gtemp, &def, &dts[3]);
4557 }
4558 }
4559 else
4560 {
4561 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
4562 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
4563 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
4564 vec_then_clause);
4565 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
4566 vec_else_clause);
4567 }
4568
4569 /* Arguments are ready. Create the new vector stmt. */
4570 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4571 vec_cond_lhs, vec_cond_rhs);
4572 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4573 vec_compare, vec_then_clause, vec_else_clause);
4574
4575 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4576 new_temp = make_ssa_name (vec_dest, new_stmt);
4577 gimple_assign_set_lhs (new_stmt, new_temp);
4578 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4579 if (j == 0)
4580 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4581 else
4582 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4583
4584 prev_stmt_info = vinfo_for_stmt (new_stmt);
4585 }
4586
4587 return true;
4588 }
4589
4590
4591 /* Make sure the statement is vectorizable. */
4592
4593 bool
4594 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4595 {
4596 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4597 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4598 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4599 bool ok;
4600 tree scalar_type, vectype;
4601
4602 if (vect_print_dump_info (REPORT_DETAILS))
4603 {
4604 fprintf (vect_dump, "==> examining statement: ");
4605 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4606 }
4607
4608 if (gimple_has_volatile_ops (stmt))
4609 {
4610 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4611 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4612
4613 return false;
4614 }
4615
4616 /* Skip stmts that do not need to be vectorized. In loops this is expected
4617 to include:
4618 - the COND_EXPR which is the loop exit condition
4619 - any LABEL_EXPRs in the loop
4620 - computations that are used only for array indexing or loop control.
4621 In basic blocks we only analyze statements that are a part of some SLP
4622 instance, therefore, all the statements are relevant. */
4623
4624 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4625 && !STMT_VINFO_LIVE_P (stmt_info))
4626 {
4627 if (vect_print_dump_info (REPORT_DETAILS))
4628 fprintf (vect_dump, "irrelevant.");
4629
4630 return true;
4631 }
4632
4633 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4634 {
4635 case vect_internal_def:
4636 break;
4637
4638 case vect_reduction_def:
4639 case vect_nested_cycle:
4640 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4641 || relevance == vect_used_in_outer_by_reduction
4642 || relevance == vect_unused_in_scope));
4643 break;
4644
4645 case vect_induction_def:
4646 case vect_constant_def:
4647 case vect_external_def:
4648 case vect_unknown_def_type:
4649 default:
4650 gcc_unreachable ();
4651 }
4652
4653 if (bb_vinfo)
4654 {
4655 gcc_assert (PURE_SLP_STMT (stmt_info));
4656
4657 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4658 if (vect_print_dump_info (REPORT_DETAILS))
4659 {
4660 fprintf (vect_dump, "get vectype for scalar type: ");
4661 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4662 }
4663
4664 vectype = get_vectype_for_scalar_type (scalar_type);
4665 if (!vectype)
4666 {
4667 if (vect_print_dump_info (REPORT_DETAILS))
4668 {
4669 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4670 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4671 }
4672 return false;
4673 }
4674
4675 if (vect_print_dump_info (REPORT_DETAILS))
4676 {
4677 fprintf (vect_dump, "vectype: ");
4678 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4679 }
4680
4681 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4682 }
4683
4684 if (STMT_VINFO_RELEVANT_P (stmt_info))
4685 {
4686 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4687 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4688 *need_to_vectorize = true;
4689 }
4690
4691 ok = true;
4692 if (!bb_vinfo
4693 && (STMT_VINFO_RELEVANT_P (stmt_info)
4694 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4695 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4696 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4697 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4698 || vectorizable_shift (stmt, NULL, NULL, NULL)
4699 || vectorizable_operation (stmt, NULL, NULL, NULL)
4700 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4701 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4702 || vectorizable_call (stmt, NULL, NULL)
4703 || vectorizable_store (stmt, NULL, NULL, NULL)
4704 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4705 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4706 else
4707 {
4708 if (bb_vinfo)
4709 ok = (vectorizable_shift (stmt, NULL, NULL, node)
4710 || vectorizable_operation (stmt, NULL, NULL, node)
4711 || vectorizable_assignment (stmt, NULL, NULL, node)
4712 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4713 || vectorizable_store (stmt, NULL, NULL, node));
4714 }
4715
4716 if (!ok)
4717 {
4718 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4719 {
4720 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4721 fprintf (vect_dump, "supported: ");
4722 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4723 }
4724
4725 return false;
4726 }
4727
4728 if (bb_vinfo)
4729 return true;
4730
4731 /* Stmts that are (also) "live" (i.e. - that are used outside the loop)
4732 need extra handling, except for vectorizable reductions. */
4733 if (STMT_VINFO_LIVE_P (stmt_info)
4734 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4735 ok = vectorizable_live_operation (stmt, NULL, NULL);
4736
4737 if (!ok)
4738 {
4739 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4740 {
4741 fprintf (vect_dump, "not vectorized: live stmt not ");
4742 fprintf (vect_dump, "supported: ");
4743 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4744 }
4745
4746 return false;
4747 }
4748
4749 if (!PURE_SLP_STMT (stmt_info))
4750 {
4751 /* Groups of strided accesses whose size is not a power of 2 are not
4752 vectorizable yet using loop-vectorization. Therefore, if this stmt
4753 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4754 loop-based vectorized), the loop cannot be vectorized. */
4755 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4756 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4757 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4758 {
4759 if (vect_print_dump_info (REPORT_DETAILS))
4760 {
4761 fprintf (vect_dump, "not vectorized: the size of group "
4762 "of strided accesses is not a power of 2");
4763 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4764 }
4765
4766 return false;
4767 }
4768 }
4769
4770 return true;
4771 }
4772
4773
4774 /* Function vect_transform_stmt.
4775
4776 Create a vectorized stmt to replace STMT, and insert it at GSI. */
4777
4778 bool
4779 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4780 bool *strided_store, slp_tree slp_node,
4781 slp_instance slp_node_instance)
4782 {
4783 bool is_store = false;
4784 gimple vec_stmt = NULL;
4785 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4786 gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
4787 bool done;
4788
4789 switch (STMT_VINFO_TYPE (stmt_info))
4790 {
4791 case type_demotion_vec_info_type:
4792 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4793 gcc_assert (done);
4794 break;
4795
4796 case type_promotion_vec_info_type:
4797 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4798 gcc_assert (done);
4799 break;
4800
4801 case type_conversion_vec_info_type:
4802 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4803 gcc_assert (done);
4804 break;
4805
4806 case induc_vec_info_type:
4807 gcc_assert (!slp_node);
4808 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4809 gcc_assert (done);
4810 break;
4811
4812 case shift_vec_info_type:
4813 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
4814 gcc_assert (done);
4815 break;
4816
4817 case op_vec_info_type:
4818 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4819 gcc_assert (done);
4820 break;
4821
4822 case assignment_vec_info_type:
4823 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4824 gcc_assert (done);
4825 break;
4826
4827 case load_vec_info_type:
4828 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4829 slp_node_instance);
4830 gcc_assert (done);
4831 break;
4832
4833 case store_vec_info_type:
4834 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4835 gcc_assert (done);
4836 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4837 {
4838 /* In case of interleaving, the whole chain is vectorized when the
4839 last store in the chain is reached. Store stmts before the last
4840 one are skipped, and their stmt_vec_info shouldn't be freed
4841 meanwhile. */
4842 *strided_store = true;
4843 if (STMT_VINFO_VEC_STMT (stmt_info))
4844 is_store = true;
4845 }
4846 else
4847 is_store = true;
4848 break;
4849
4850 case condition_vec_info_type:
4851 gcc_assert (!slp_node);
4852 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4853 gcc_assert (done);
4854 break;
4855
4856 case call_vec_info_type:
4857 gcc_assert (!slp_node);
4858 done = vectorizable_call (stmt, gsi, &vec_stmt);
4859 stmt = gsi_stmt (*gsi);
4860 break;
4861
4862 case reduc_vec_info_type:
4863 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
4864 gcc_assert (done);
4865 break;
4866
4867 default:
4868 if (!STMT_VINFO_LIVE_P (stmt_info))
4869 {
4870 if (vect_print_dump_info (REPORT_DETAILS))
4871 fprintf (vect_dump, "stmt not supported.");
4872 gcc_unreachable ();
4873 }
4874 }
4875
4876 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4877 is being vectorized, but outside the immediately enclosing loop. */
4878 if (vec_stmt
4879 && STMT_VINFO_LOOP_VINFO (stmt_info)
4880 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4881 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4882 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4883 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4884 || STMT_VINFO_RELEVANT (stmt_info) ==
4885 vect_used_in_outer_by_reduction))
4886 {
4887 struct loop *innerloop = LOOP_VINFO_LOOP (
4888 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4889 imm_use_iterator imm_iter;
4890 use_operand_p use_p;
4891 tree scalar_dest;
4892 gimple exit_phi;
4893
4894 if (vect_print_dump_info (REPORT_DETAILS))
4895 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4896
4897 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4898 (to be used when vectorizing outer-loop stmts that use the DEF of
4899 STMT). */
4900 if (gimple_code (stmt) == GIMPLE_PHI)
4901 scalar_dest = PHI_RESULT (stmt);
4902 else
4903 scalar_dest = gimple_assign_lhs (stmt);
4904
4905 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4906 {
4907 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4908 {
4909 exit_phi = USE_STMT (use_p);
4910 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4911 }
4912 }
4913 }
4914
4915 /* Handle stmts whose DEF is used outside the loop-nest that is
4916 being vectorized. */
4917 if (STMT_VINFO_LIVE_P (stmt_info)
4918 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4919 {
4920 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4921 gcc_assert (done);
4922 }
4923
4924 if (vec_stmt)
4925 {
4926 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4927 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4928 if (orig_stmt_in_pattern)
4929 {
4930 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4931 /* STMT was inserted by the vectorizer to replace a computation idiom.
4932 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4933 computed this idiom. We need to record a pointer to VEC_STMT in
4934 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4935 documentation of vect_pattern_recog. */
4936 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4937 {
4938 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
4939 == orig_scalar_stmt);
4940 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4941 }
4942 }
4943 }
4944
4945 return is_store;
4946 }
4947
4948
4949 /* Remove a group of stores (for SLP or interleaving), free their
4950 stmt_vec_info. */
4951
4952 void
4953 vect_remove_stores (gimple first_stmt)
4954 {
4955 gimple next = first_stmt;
4956 gimple tmp;
4957 gimple_stmt_iterator next_si;
4958
4959 while (next)
4960 {
4961 /* Free the attached stmt_vec_info and remove the stmt. */
4962 next_si = gsi_for_stmt (next);
4963 gsi_remove (&next_si, true);
4964 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4965 free_stmt_vec_info (next);
4966 next = tmp;
4967 }
4968 }
4969
4970
4971 /* Function new_stmt_vec_info.
4972
4973 Create and initialize a new stmt_vec_info struct for STMT. */
4974
4975 stmt_vec_info
4976 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4977 bb_vec_info bb_vinfo)
4978 {
4979 stmt_vec_info res;
4980 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4981
4982 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4983 STMT_VINFO_STMT (res) = stmt;
4984 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4985 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4986 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4987 STMT_VINFO_LIVE_P (res) = false;
4988 STMT_VINFO_VECTYPE (res) = NULL;
4989 STMT_VINFO_VEC_STMT (res) = NULL;
4990 STMT_VINFO_VECTORIZABLE (res) = true;
4991 STMT_VINFO_IN_PATTERN_P (res) = false;
4992 STMT_VINFO_RELATED_STMT (res) = NULL;
4993 STMT_VINFO_DATA_REF (res) = NULL;
4994
4995 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4996 STMT_VINFO_DR_OFFSET (res) = NULL;
4997 STMT_VINFO_DR_INIT (res) = NULL;
4998 STMT_VINFO_DR_STEP (res) = NULL;
4999 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
5000
5001 if (gimple_code (stmt) == GIMPLE_PHI
5002 && is_loop_header_bb_p (gimple_bb (stmt)))
5003 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
5004 else
5005 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
5006
5007 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
5008 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
5009 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
5010 STMT_SLP_TYPE (res) = loop_vect;
5011 DR_GROUP_FIRST_DR (res) = NULL;
5012 DR_GROUP_NEXT_DR (res) = NULL;
5013 DR_GROUP_SIZE (res) = 0;
5014 DR_GROUP_STORE_COUNT (res) = 0;
5015 DR_GROUP_GAP (res) = 0;
5016 DR_GROUP_SAME_DR_STMT (res) = NULL;
5017 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
5018
5019 return res;
5020 }
5021
5022
5023 /* Create a vector for holding the stmt_vec_info structs. */
5024
5025 void
5026 init_stmt_vec_info_vec (void)
5027 {
5028 gcc_assert (!stmt_vec_info_vec);
5029 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
5030 }
5031
5032
5033 /* Free the vector holding the stmt_vec_info structs. */
5034
5035 void
5036 free_stmt_vec_info_vec (void)
5037 {
5038 gcc_assert (stmt_vec_info_vec);
5039 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
5040 }
5041
5042
5043 /* Free stmt vectorization related info. */
5044
5045 void
5046 free_stmt_vec_info (gimple stmt)
5047 {
5048 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5049
5050 if (!stmt_info)
5051 return;
5052
5053 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
5054 set_vinfo_for_stmt (stmt, NULL);
5055 free (stmt_info);
5056 }
5057
5058
5059 /* Function get_vectype_for_scalar_type_and_size.
5060
5061 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
5062 by the target. */
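
/* For example (illustrative): with a 4-byte integer element and SIZE == 16
   this looks up a 4-element vector mode, so the returned type has
   TYPE_VECTOR_SUBPARTS == 4; with SIZE == 0 the target's preferred SIMD
   mode for the element mode is used instead.  */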
5063
5064 static tree
5065 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
5066 {
5067 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
5068 enum machine_mode simd_mode;
5069 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
5070 int nunits;
5071 tree vectype;
5072
5073 if (nbytes == 0)
5074 return NULL_TREE;
5075
5076 /* We can't build a vector type of elements with alignment bigger than
5077 their size. */
5078 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
5079 return NULL_TREE;
5080
5081 /* If we'd build a vector type of elements whose mode precision doesn't
5082 match their type's precision, we'll get mismatched types on vector
5083 extracts via BIT_FIELD_REFs. This effectively means we disable
5084 vectorization of bool and/or enum types in some languages. */
5085 if (INTEGRAL_TYPE_P (scalar_type)
5086 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
5087 return NULL_TREE;
5088
5089 if (GET_MODE_CLASS (inner_mode) != MODE_INT
5090 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
5091 return NULL_TREE;
5092
5093 /* If no size was supplied use the mode the target prefers. Otherwise
5094 lookup a vector mode of the specified size. */
5095 if (size == 0)
5096 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
5097 else
5098 simd_mode = mode_for_vector (inner_mode, size / nbytes);
5099 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
5100 if (nunits <= 1)
5101 return NULL_TREE;
5102
5103 vectype = build_vector_type (scalar_type, nunits);
5104 if (vect_print_dump_info (REPORT_DETAILS))
5105 {
5106 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
5107 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
5108 }
5109
5110 if (!vectype)
5111 return NULL_TREE;
5112
5113 if (vect_print_dump_info (REPORT_DETAILS))
5114 {
5115 fprintf (vect_dump, "vectype: ");
5116 print_generic_expr (vect_dump, vectype, TDF_SLIM);
5117 }
5118
5119 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
5120 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
5121 {
5122 if (vect_print_dump_info (REPORT_DETAILS))
5123 fprintf (vect_dump, "mode not supported by target.");
5124 return NULL_TREE;
5125 }
5126
5127 return vectype;
5128 }
5129
5130 unsigned int current_vector_size;
5131
5132 /* Function get_vectype_for_scalar_type.
5133
5134 Returns the vector type corresponding to SCALAR_TYPE as supported
5135 by the target. */
5136
5137 tree
5138 get_vectype_for_scalar_type (tree scalar_type)
5139 {
5140 tree vectype;
5141 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
5142 current_vector_size);
5143 if (vectype
5144 && current_vector_size == 0)
5145 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
5146 return vectype;
5147 }
5148
5149 /* Function get_same_sized_vectype
5150
5151 Returns a vector type corresponding to SCALAR_TYPE with the same size
5152 as VECTOR_TYPE, if supported by the target. */
5153
5154 tree
5155 get_same_sized_vectype (tree scalar_type, tree vector_type)
5156 {
5157 return get_vectype_for_scalar_type_and_size
5158 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
5159 }
5160
5161 /* Function vect_is_simple_use.
5162
5163 Input:
5164 LOOP_VINFO - the vect info of the loop that is being vectorized.
5165 BB_VINFO - the vect info of the basic block that is being vectorized.
5166 OPERAND - operand of a stmt in the loop or bb.
5167 DEF - the defining stmt in case OPERAND is an SSA_NAME.
5168
5169 Returns whether a stmt with OPERAND can be vectorized.
5170 For loops, supportable operands are constants, loop invariants, and operands
5171 that are defined by the current iteration of the loop. Unsupportable
5172 operands are those that are defined by a previous iteration of the loop (as
5173 is the case in reduction/induction computations).
5174 For basic blocks, supportable operands are constants and bb invariants.
5175 For now, operands defined outside the basic block are not supported. */
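
/* Illustration (hypothetical loop): in

     for (i = 0; i < n; i++)
       { x_1 = a[i];  y_2 = x_1 + c_3; }

   the use of x_1 in the second statement is defined by the current
   iteration (vect_internal_def), c_3 defined before the loop is
   vect_external_def, and a literal constant operand is
   vect_constant_def.  */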
5176
5177 bool
5178 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
5179 bb_vec_info bb_vinfo, gimple *def_stmt,
5180 tree *def, enum vect_def_type *dt)
5181 {
5182 basic_block bb;
5183 stmt_vec_info stmt_vinfo;
5184 struct loop *loop = NULL;
5185
5186 if (loop_vinfo)
5187 loop = LOOP_VINFO_LOOP (loop_vinfo);
5188
5189 *def_stmt = NULL;
5190 *def = NULL_TREE;
5191
5192 if (vect_print_dump_info (REPORT_DETAILS))
5193 {
5194 fprintf (vect_dump, "vect_is_simple_use: operand ");
5195 print_generic_expr (vect_dump, operand, TDF_SLIM);
5196 }
5197
5198 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
5199 {
5200 *dt = vect_constant_def;
5201 return true;
5202 }
5203
5204 if (is_gimple_min_invariant (operand))
5205 {
5206 *def = operand;
5207 *dt = vect_external_def;
5208 return true;
5209 }
5210
5211 if (TREE_CODE (operand) == PAREN_EXPR)
5212 {
5213 if (vect_print_dump_info (REPORT_DETAILS))
5214 fprintf (vect_dump, "non-associatable copy.");
5215 operand = TREE_OPERAND (operand, 0);
5216 }
5217
5218 if (TREE_CODE (operand) != SSA_NAME)
5219 {
5220 if (vect_print_dump_info (REPORT_DETAILS))
5221 fprintf (vect_dump, "not ssa-name.");
5222 return false;
5223 }
5224
5225 *def_stmt = SSA_NAME_DEF_STMT (operand);
5226 if (*def_stmt == NULL)
5227 {
5228 if (vect_print_dump_info (REPORT_DETAILS))
5229 fprintf (vect_dump, "no def_stmt.");
5230 return false;
5231 }
5232
5233 if (vect_print_dump_info (REPORT_DETAILS))
5234 {
5235 fprintf (vect_dump, "def_stmt: ");
5236 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
5237 }
5238
5239 /* Empty stmt is expected only in case of a function argument.
5240 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
5241 if (gimple_nop_p (*def_stmt))
5242 {
5243 *def = operand;
5244 *dt = vect_external_def;
5245 return true;
5246 }
5247
5248 bb = gimple_bb (*def_stmt);
5249
5250 if ((loop && !flow_bb_inside_loop_p (loop, bb))
5251 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
5252 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
5253 *dt = vect_external_def;
5254 else
5255 {
5256 stmt_vinfo = vinfo_for_stmt (*def_stmt);
5257 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
5258 }
5259
5260 if (*dt == vect_unknown_def_type)
5261 {
5262 if (vect_print_dump_info (REPORT_DETAILS))
5263 fprintf (vect_dump, "Unsupported pattern.");
5264 return false;
5265 }
5266
5267 if (vect_print_dump_info (REPORT_DETAILS))
5268 fprintf (vect_dump, "type of def: %d.", *dt);
5269
5270 switch (gimple_code (*def_stmt))
5271 {
5272 case GIMPLE_PHI:
5273 *def = gimple_phi_result (*def_stmt);
5274 break;
5275
5276 case GIMPLE_ASSIGN:
5277 *def = gimple_assign_lhs (*def_stmt);
5278 break;
5279
5280 case GIMPLE_CALL:
5281 *def = gimple_call_lhs (*def_stmt);
5282 if (*def != NULL)
5283 break;
5284 /* FALLTHRU */
5285 default:
5286 if (vect_print_dump_info (REPORT_DETAILS))
5287 fprintf (vect_dump, "unsupported defining stmt: ");
5288 return false;
5289 }
5290
5291 return true;
5292 }
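/* A typical caller pattern, as a minimal sketch (real callers also inspect
   DT and DEF_STMT to decide how to create the vectorized version of the
   operand):

     gimple def_stmt;
     tree def;
     enum vect_def_type dt;

     if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
       return false;

   A false return value means OP cannot be handled by the vectorizer.  */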
5293
5294 /* Function vect_is_simple_use_1.
5295
5296 Same as vect_is_simple_use but also determines the vector operand
5297 type of OPERAND and stores it in *VECTYPE. If the definition of
5298 OPERAND is vect_uninitialized_def, vect_constant_def or
5299 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
5300 is responsible for computing the best suited vector type for the
5301 scalar operand. */
5302
5303 bool
5304 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
5305 bb_vec_info bb_vinfo, gimple *def_stmt,
5306 tree *def, enum vect_def_type *dt, tree *vectype)
5307 {
5308 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
5309 return false;
5310
5311 /* Now get a vector type if the def is internal, otherwise supply
5312 NULL_TREE and leave it up to the caller to figure out a proper
5313 type for the use stmt. */
5314 if (*dt == vect_internal_def
5315 || *dt == vect_induction_def
5316 || *dt == vect_reduction_def
5317 || *dt == vect_double_reduction_def
5318 || *dt == vect_nested_cycle)
5319 {
5320 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
5321 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
5322 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
5323 *vectype = STMT_VINFO_VECTYPE (stmt_info);
5324 gcc_assert (*vectype != NULL_TREE);
5325 }
5326 else if (*dt == vect_uninitialized_def
5327 || *dt == vect_constant_def
5328 || *dt == vect_external_def)
5329 *vectype = NULL_TREE;
5330 else
5331 gcc_unreachable ();
5332
5333 return true;
5334 }
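/* For example (a sketch): if OPERAND is defined by a vectorizable statement
   inside the loop, *VECTYPE is the vector type already recorded for that
   statement; if OPERAND is a constant, *DT is vect_constant_def, *VECTYPE is
   NULL_TREE, and the caller derives the vector type from the use.  */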
5335
5336
5337 /* Function supportable_widening_operation
5338
5339 Check whether an operation represented by the code CODE is a
5340 widening operation that is supported by the target platform in
5341 vector form (i.e., when operating on arguments of type VECTYPE_IN
5342 producing a result of type VECTYPE_OUT).
5343
5344 Widening operations we currently support are NOP (CONVERT), FLOAT
5345 and WIDEN_MULT. This function checks if these operations are supported
5346 by the target platform either directly (via vector tree-codes), or via
5347 target builtins.
5348
5349 Output:
5350 - CODE1 and CODE2 are codes of vector operations to be used when
5351 vectorizing the operation, if available.
5352 - DECL1 and DECL2 are decls of target builtin functions to be used
5353 when vectorizing the operation, if available. In this case,
5354 CODE1 and CODE2 are CALL_EXPR.
5355 - MULTI_STEP_CVT determines the number of required intermediate steps in
5356 case of multi-step conversion (like char->short->int - in that case
5357 MULTI_STEP_CVT will be 1).
5358 - INTERM_TYPES contains the intermediate type required to perform the
5359 widening operation (short in the above example). */
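/* For instance (illustrative only, assuming a little-endian target with
   V8HI and V4SI support): a WIDEN_MULT_EXPR from V8HI to V4SI would yield
   CODE1 = VEC_WIDEN_MULT_LO_EXPR and CODE2 = VEC_WIDEN_MULT_HI_EXPR with no
   intermediate steps, while a char -> int conversion that has to go through
   V16QI -> V8HI -> V4SI would in addition set MULTI_STEP_CVT to 1 and push
   the intermediate short vector type onto INTERM_TYPES.  */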
5360
5361 bool
5362 supportable_widening_operation (enum tree_code code, gimple stmt,
5363 tree vectype_out, tree vectype_in,
5364 tree *decl1, tree *decl2,
5365 enum tree_code *code1, enum tree_code *code2,
5366 int *multi_step_cvt,
5367 VEC (tree, heap) **interm_types)
5368 {
5369 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5370 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
5371 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
5372 bool ordered_p;
5373 enum machine_mode vec_mode;
5374 enum insn_code icode1, icode2;
5375 optab optab1, optab2;
5376 tree vectype = vectype_in;
5377 tree wide_vectype = vectype_out;
5378 enum tree_code c1, c2;
5379
5380 /* The result of a vectorized widening operation usually requires two vectors
5381 (because the widened results do not fit in one vector). The generated
5382 vector results would normally be expected to appear in the same
5383 order as in the original scalar computation, i.e. if 8 results are
5384 generated in each vector iteration, they are to be organized as follows:
5385 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
5386
5387 However, in the special case that the result of the widening operation is
5388 used in a reduction computation only, the order doesn't matter (because
5389 when vectorizing a reduction we change the order of the computation).
5390 Some targets can take advantage of this and generate more efficient code.
5391 For example, targets like Altivec, which support widen_mult using a sequence
5392 of {mult_even,mult_odd}, generate the following vectors:
5393 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
5394
5395 When vectorizing outer-loops, we execute the inner-loop sequentially
5396 (each vectorized inner-loop iteration contributes to VF outer-loop
5397 iterations in parallel). We therefore do not allow changing the order
5398 of the computation in the inner-loop during outer-loop vectorization. */
5399
5400 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
5401 && !nested_in_vect_loop_p (vect_loop, stmt))
5402 ordered_p = false;
5403 else
5404 ordered_p = true;
5405
5406 if (!ordered_p
5407 && code == WIDEN_MULT_EXPR
5408 && targetm.vectorize.builtin_mul_widen_even
5409 && targetm.vectorize.builtin_mul_widen_even (vectype)
5410 && targetm.vectorize.builtin_mul_widen_odd
5411 && targetm.vectorize.builtin_mul_widen_odd (vectype))
5412 {
5413 if (vect_print_dump_info (REPORT_DETAILS))
5414 fprintf (vect_dump, "Unordered widening operation detected.");
5415
5416 *code1 = *code2 = CALL_EXPR;
5417 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
5418 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
5419 return true;
5420 }
5421
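  /* C1 must produce the vector that holds the first half of the scalar
     results in element order and C2 the second half; which of the HI/LO
     variants that is depends on the target's endianness.  */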
5422 switch (code)
5423 {
5424 case WIDEN_MULT_EXPR:
5425 if (BYTES_BIG_ENDIAN)
5426 {
5427 c1 = VEC_WIDEN_MULT_HI_EXPR;
5428 c2 = VEC_WIDEN_MULT_LO_EXPR;
5429 }
5430 else
5431 {
5432 c2 = VEC_WIDEN_MULT_HI_EXPR;
5433 c1 = VEC_WIDEN_MULT_LO_EXPR;
5434 }
5435 break;
5436
5437 CASE_CONVERT:
5438 if (BYTES_BIG_ENDIAN)
5439 {
5440 c1 = VEC_UNPACK_HI_EXPR;
5441 c2 = VEC_UNPACK_LO_EXPR;
5442 }
5443 else
5444 {
5445 c2 = VEC_UNPACK_HI_EXPR;
5446 c1 = VEC_UNPACK_LO_EXPR;
5447 }
5448 break;
5449
5450 case FLOAT_EXPR:
5451 if (BYTES_BIG_ENDIAN)
5452 {
5453 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
5454 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
5455 }
5456 else
5457 {
5458 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
5459 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
5460 }
5461 break;
5462
5463 case FIX_TRUNC_EXPR:
5464 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
5465 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
5466 computing the operation. */
5467 return false;
5468
5469 default:
5470 gcc_unreachable ();
5471 }
5472
5473 if (code == FIX_TRUNC_EXPR)
5474 {
5475 /* The signedness is determined from the output operand. */
5476 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5477 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
5478 }
5479 else
5480 {
5481 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5482 optab2 = optab_for_tree_code (c2, vectype, optab_default);
5483 }
5484
5485 if (!optab1 || !optab2)
5486 return false;
5487
5488 vec_mode = TYPE_MODE (vectype);
5489 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
5490 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
5491 return false;
5492
5493 /* Check if it's a multi-step conversion that can be done using intermediate
5494 types. */
5495 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
5496 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
5497 {
5498 int i;
5499 tree prev_type = vectype, intermediate_type;
5500 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5501 optab optab3, optab4;
5502
5503 if (!CONVERT_EXPR_CODE_P (code))
5504 return false;
5505
5506 *code1 = c1;
5507 *code2 = c2;
5508
5509 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5510 intermediate steps in the promotion sequence. We try at most
5511 MAX_INTERM_CVT_STEPS steps to reach WIDE_VECTYPE, and fail if we do
5512 not. */
5513 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
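      /* Each iteration adds one intermediate step: check that converting
         from PREV_MODE still yields the intermediate mode via OPTAB1/OPTAB2,
         and that the intermediate type itself supports the hi/lo operations
         via OPTAB3/OPTAB4; stop once the produced mode matches
         WIDE_VECTYPE.  */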
5514 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5515 {
5516 intermediate_mode = insn_data[icode1].operand[0].mode;
5517 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5518 TYPE_UNSIGNED (prev_type));
5519 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
5520 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
5521
5522 if (!optab3 || !optab4
5523 || ((icode1 = optab_handler (optab1, prev_mode))
5524 == CODE_FOR_nothing)
5525 || insn_data[icode1].operand[0].mode != intermediate_mode
5526 || ((icode2 = optab_handler (optab2, prev_mode))
5527 == CODE_FOR_nothing)
5528 || insn_data[icode2].operand[0].mode != intermediate_mode
5529 || ((icode1 = optab_handler (optab3, intermediate_mode))
5530 == CODE_FOR_nothing)
5531 || ((icode2 = optab_handler (optab4, intermediate_mode))
5532 == CODE_FOR_nothing))
5533 return false;
5534
5535 VEC_quick_push (tree, *interm_types, intermediate_type);
5536 (*multi_step_cvt)++;
5537
5538 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
5539 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
5540 return true;
5541
5542 prev_type = intermediate_type;
5543 prev_mode = intermediate_mode;
5544 }
5545
5546 return false;
5547 }
5548
5549 *code1 = c1;
5550 *code2 = c2;
5551 return true;
5552 }
5553
5554
5555 /* Function supportable_narrowing_operation
5556
5557 Check whether an operation represented by the code CODE is a
5558 narrowing operation that is supported by the target platform in
5559 vector form (i.e., when operating on arguments of type VECTYPE_IN
5560 and producing a result of type VECTYPE_OUT).
5561
5562 Narrowing operations we currently support are NOP (CONVERT) and
5563 FIX_TRUNC. This function checks if these operations are supported by
5564 the target platform directly via vector tree-codes.
5565
5566 Output:
5567 - CODE1 is the code of a vector operation to be used when
5568 vectorizing the operation, if available.
5569 - MULTI_STEP_CVT determines the number of required intermediate steps in
5570 case of multi-step conversion (like int->short->char - in that case
5571 MULTI_STEP_CVT will be 1).
5572 - INTERM_TYPES contains the intermediate type required to perform the
5573 narrowing operation (short in the above example). */
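/* For instance (illustrative only): an int -> char conversion for which the
   target provides only V4SI -> V8HI and V8HI -> V16QI pack operations would
   yield CODE1 = VEC_PACK_TRUNC_EXPR, MULTI_STEP_CVT = 1 and the intermediate
   short vector type pushed onto INTERM_TYPES.  */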
5574
5575 bool
5576 supportable_narrowing_operation (enum tree_code code,
5577 tree vectype_out, tree vectype_in,
5578 enum tree_code *code1, int *multi_step_cvt,
5579 VEC (tree, heap) **interm_types)
5580 {
5581 enum machine_mode vec_mode;
5582 enum insn_code icode1;
5583 optab optab1, interm_optab;
5584 tree vectype = vectype_in;
5585 tree narrow_vectype = vectype_out;
5586 enum tree_code c1;
5587 tree intermediate_type, prev_type;
5588 int i;
5589
5590 switch (code)
5591 {
5592 CASE_CONVERT:
5593 c1 = VEC_PACK_TRUNC_EXPR;
5594 break;
5595
5596 case FIX_TRUNC_EXPR:
5597 c1 = VEC_PACK_FIX_TRUNC_EXPR;
5598 break;
5599
5600 case FLOAT_EXPR:
5601 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5602 tree code and optabs used for computing the operation. */
5603 return false;
5604
5605 default:
5606 gcc_unreachable ();
5607 }
5608
5609 if (code == FIX_TRUNC_EXPR)
5610 /* The signedness is determined from the output operand. */
5611 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5612 else
5613 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5614
5615 if (!optab1)
5616 return false;
5617
5618 vec_mode = TYPE_MODE (vectype);
5619 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
5620 return false;
5621
5622 /* Check if it's a multi-step conversion that can be done using intermediate
5623 types. */
5624 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5625 {
5626 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5627
5628 *code1 = c1;
5629 prev_type = vectype;
5630 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5631 intermediate steps in the narrowing sequence. We try at most
5632 MAX_INTERM_CVT_STEPS steps to reach NARROW_VECTYPE, and fail if we do
5633 not. */
5634 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
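      /* As in the widening case: at each step check that packing from
         PREV_MODE still yields the intermediate mode, and that the
         intermediate type has a pack optab of its own; stop once the
         produced mode matches NARROW_VECTYPE.  */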
5635 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5636 {
5637 intermediate_mode = insn_data[icode1].operand[0].mode;
5638 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5639 TYPE_UNSIGNED (prev_type));
5640 interm_optab = optab_for_tree_code (c1, intermediate_type,
5641 optab_default);
5642 if (!interm_optab
5643 || ((icode1 = optab_handler (optab1, prev_mode))
5644 == CODE_FOR_nothing)
5645 || insn_data[icode1].operand[0].mode != intermediate_mode
5646 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
5647 == CODE_FOR_nothing))
5648 return false;
5649
5650 VEC_quick_push (tree, *interm_types, intermediate_type);
5651 (*multi_step_cvt)++;
5652
5653 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5654 return true;
5655
5656 prev_type = intermediate_type;
5657 prev_mode = intermediate_mode;
5658 }
5659
5660 return false;
5661 }
5662
5663 *code1 = c1;
5664 return true;
5665 }