gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
3 Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "diagnostic.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "cfgloop.h"
35 #include "cfglayout.h"
36 #include "expr.h"
37 #include "recog.h"
38 #include "optabs.h"
39 #include "toplev.h"
40 #include "tree-vectorizer.h"
41 #include "langhooks.h"
42
43
44 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
45
46 /* Function vect_mark_relevant.
47
48 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
49
50 static void
51 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
52 enum vect_relevant relevant, bool live_p)
53 {
54 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
55 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
56 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
57
58 if (vect_print_dump_info (REPORT_DETAILS))
59 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
60
61 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
62 {
63 gimple pattern_stmt;
64
65 /* This is the last stmt in a sequence that was detected as a
66 pattern that can potentially be vectorized. Don't mark the stmt
67 as relevant/live because it's not going to be vectorized.
68 Instead mark the pattern-stmt that replaces it. */
69
70 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
71
72 if (vect_print_dump_info (REPORT_DETAILS))
73 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
74 stmt_info = vinfo_for_stmt (pattern_stmt);
75 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
76 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
77 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
78 stmt = pattern_stmt;
79 }
80
81 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
82 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
83 STMT_VINFO_RELEVANT (stmt_info) = relevant;
84
85 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
86 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
87 {
88 if (vect_print_dump_info (REPORT_DETAILS))
89 fprintf (vect_dump, "already marked relevant/live.");
90 return;
91 }
92
93 VEC_safe_push (gimple, heap, *worklist, stmt);
94 }
95
96
97 /* Function vect_stmt_relevant_p.
98
 99    Return true if STMT, in the loop that is represented by LOOP_VINFO, is
100 "relevant for vectorization".
101
102 A stmt is considered "relevant for vectorization" if:
103 - it has uses outside the loop.
104 - it has vdefs (it alters memory).
 105    - it is a control stmt in the loop (other than the exit condition).
106
107 CHECKME: what other side effects would the vectorizer allow? */
108
109 static bool
110 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
111 enum vect_relevant *relevant, bool *live_p)
112 {
113 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
114 ssa_op_iter op_iter;
115 imm_use_iterator imm_iter;
116 use_operand_p use_p;
117 def_operand_p def_p;
118
119 *relevant = vect_unused_in_scope;
120 *live_p = false;
121
122 /* cond stmt other than loop exit cond. */
123 if (is_ctrl_stmt (stmt)
124 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
125 != loop_exit_ctrl_vec_info_type)
126 *relevant = vect_used_in_scope;
127
128 /* changing memory. */
129 if (gimple_code (stmt) != GIMPLE_PHI)
130 if (gimple_vdef (stmt))
131 {
132 if (vect_print_dump_info (REPORT_DETAILS))
133 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
134 *relevant = vect_used_in_scope;
135 }
136
137 /* uses outside the loop. */
138 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
139 {
140 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
141 {
142 basic_block bb = gimple_bb (USE_STMT (use_p));
143 if (!flow_bb_inside_loop_p (loop, bb))
144 {
145 if (vect_print_dump_info (REPORT_DETAILS))
146 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
147
148 /* We expect all such uses to be in the loop exit phis
 149 	     (because of loop-closed SSA form).  */
150 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
151 gcc_assert (bb == single_exit (loop)->dest);
152
153 *live_p = true;
154 }
155 }
156 }
157
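   /* vect_unused_in_scope is the zero value of enum vect_relevant, so
      *RELEVANT doubles as a boolean in the return below.  */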
158 return (*live_p || *relevant);
159 }
160
161
162 /* Function exist_non_indexing_operands_for_use_p
163
164 USE is one of the uses attached to STMT. Check if USE is
165 used in STMT for anything other than indexing an array. */
166
167 static bool
168 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
169 {
170 tree operand;
171 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
172
173 /* USE corresponds to some operand in STMT. If there is no data
174 reference in STMT, then any operand that corresponds to USE
175 is not indexing an array. */
176 if (!STMT_VINFO_DATA_REF (stmt_info))
177 return true;
178
 179   /* STMT has a data_ref. FORNOW this means that it is one of
180 the following forms:
181 -1- ARRAY_REF = var
182 -2- var = ARRAY_REF
183 (This should have been verified in analyze_data_refs).
184
185 'var' in the second case corresponds to a def, not a use,
186 so USE cannot correspond to any operands that are not used
187 for array indexing.
188
189 Therefore, all we need to check is if STMT falls into the
190 first case, and whether var corresponds to USE. */
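   /* For example, given a[i] = x this returns true when USE is x and
      false when USE is i, while for x = a[i] it always returns false.  */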
191
192 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
193 return false;
194
195 if (!gimple_assign_copy_p (stmt))
196 return false;
197 operand = gimple_assign_rhs1 (stmt);
198
199 if (TREE_CODE (operand) != SSA_NAME)
200 return false;
201
202 if (operand == use)
203 return true;
204
205 return false;
206 }
207
208
209 /*
210 Function process_use.
211
212 Inputs:
213 - a USE in STMT in a loop represented by LOOP_VINFO
214 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
215 that defined USE. This is done by calling mark_relevant and passing it
216 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
217
218 Outputs:
219 Generally, LIVE_P and RELEVANT are used to define the liveness and
220 relevance info of the DEF_STMT of this USE:
221 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
222 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
223 Exceptions:
224 - case 1: If USE is used only for address computations (e.g. array indexing),
225 which does not need to be directly vectorized, then the liveness/relevance
226 of the respective DEF_STMT is left unchanged.
227 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
 228      skip DEF_STMT because it has already been processed.
229 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
230 be modified accordingly.
231
232 Return true if everything is as expected. Return false otherwise. */
233
234 static bool
235 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
236 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
237 {
238 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
239 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
240 stmt_vec_info dstmt_vinfo;
241 basic_block bb, def_bb;
242 tree def;
243 gimple def_stmt;
244 enum vect_def_type dt;
245
246 /* case 1: we are only interested in uses that need to be vectorized. Uses
247 that are used for address computation are not considered relevant. */
248 if (!exist_non_indexing_operands_for_use_p (use, stmt))
249 return true;
250
251 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
252 {
253 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
254 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
255 return false;
256 }
257
258 if (!def_stmt || gimple_nop_p (def_stmt))
259 return true;
260
261 def_bb = gimple_bb (def_stmt);
262 if (!flow_bb_inside_loop_p (loop, def_bb))
263 {
264 if (vect_print_dump_info (REPORT_DETAILS))
265 fprintf (vect_dump, "def_stmt is out of loop.");
266 return true;
267 }
268
269 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
270 DEF_STMT must have already been processed, because this should be the
271 only way that STMT, which is a reduction-phi, was put in the worklist,
272 as there should be no other uses for DEF_STMT in the loop. So we just
273 check that everything is as expected, and we are done. */
274 dstmt_vinfo = vinfo_for_stmt (def_stmt);
275 bb = gimple_bb (stmt);
276 if (gimple_code (stmt) == GIMPLE_PHI
277 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
278 && gimple_code (def_stmt) != GIMPLE_PHI
279 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
280 && bb->loop_father == def_bb->loop_father)
281 {
282 if (vect_print_dump_info (REPORT_DETAILS))
283 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
284 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
285 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
286 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
287 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
288 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
289 return true;
290 }
291
292 /* case 3a: outer-loop stmt defining an inner-loop stmt:
293 outer-loop-header-bb:
294 d = def_stmt
295 inner-loop:
296 stmt # use (d)
297 outer-loop-tail-bb:
298 ... */
299 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
300 {
301 if (vect_print_dump_info (REPORT_DETAILS))
302 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
303
304 switch (relevant)
305 {
306 case vect_unused_in_scope:
307 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
308 vect_used_in_scope : vect_unused_in_scope;
309 break;
310
311 case vect_used_in_outer_by_reduction:
312 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
313 relevant = vect_used_by_reduction;
314 break;
315
316 case vect_used_in_outer:
317 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
318 relevant = vect_used_in_scope;
319 break;
320
321 case vect_used_in_scope:
322 break;
323
324 default:
325 gcc_unreachable ();
326 }
327 }
328
329 /* case 3b: inner-loop stmt defining an outer-loop stmt:
330 outer-loop-header-bb:
331 ...
332 inner-loop:
333 d = def_stmt
334 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
335 stmt # use (d) */
336 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
337 {
338 if (vect_print_dump_info (REPORT_DETAILS))
339 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
340
341 switch (relevant)
342 {
343 case vect_unused_in_scope:
344 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
345 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
346 vect_used_in_outer_by_reduction : vect_unused_in_scope;
347 break;
348
349 case vect_used_by_reduction:
350 relevant = vect_used_in_outer_by_reduction;
351 break;
352
353 case vect_used_in_scope:
354 relevant = vect_used_in_outer;
355 break;
356
357 default:
358 gcc_unreachable ();
359 }
360 }
361
362 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
363 return true;
364 }
365
366
367 /* Function vect_mark_stmts_to_be_vectorized.
368
369 Not all stmts in the loop need to be vectorized. For example:
370
371 for i...
372 for j...
373 1. T0 = i + j
374 2. T1 = a[T0]
375
376 3. j = j + 1
377
 378    Stmts 1 and 3 do not need to be vectorized, because loop control and
379 addressing of vectorized data-refs are handled differently.
380
381 This pass detects such stmts. */
382
383 bool
384 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
385 {
386 VEC(gimple,heap) *worklist;
387 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
388 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
389 unsigned int nbbs = loop->num_nodes;
390 gimple_stmt_iterator si;
391 gimple stmt;
392 unsigned int i;
393 stmt_vec_info stmt_vinfo;
394 basic_block bb;
395 gimple phi;
396 bool live_p;
397 enum vect_relevant relevant, tmp_relevant;
398 enum vect_def_type def_type;
399
400 if (vect_print_dump_info (REPORT_DETAILS))
401 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
402
403 worklist = VEC_alloc (gimple, heap, 64);
404
405 /* 1. Init worklist. */
406 for (i = 0; i < nbbs; i++)
407 {
408 bb = bbs[i];
409 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
410 {
411 phi = gsi_stmt (si);
412 if (vect_print_dump_info (REPORT_DETAILS))
413 {
414 fprintf (vect_dump, "init: phi relevant? ");
415 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
416 }
417
418 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
419 vect_mark_relevant (&worklist, phi, relevant, live_p);
420 }
421 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
422 {
423 stmt = gsi_stmt (si);
424 if (vect_print_dump_info (REPORT_DETAILS))
425 {
426 fprintf (vect_dump, "init: stmt relevant? ");
427 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
428 }
429
430 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
431 vect_mark_relevant (&worklist, stmt, relevant, live_p);
432 }
433 }
434
435 /* 2. Process_worklist */
436 while (VEC_length (gimple, worklist) > 0)
437 {
438 use_operand_p use_p;
439 ssa_op_iter iter;
440
441 stmt = VEC_pop (gimple, worklist);
442 if (vect_print_dump_info (REPORT_DETAILS))
443 {
444 fprintf (vect_dump, "worklist: examine stmt: ");
445 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
446 }
447
448 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
449 (DEF_STMT) as relevant/irrelevant and live/dead according to the
450 liveness and relevance properties of STMT. */
451 stmt_vinfo = vinfo_for_stmt (stmt);
452 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
453 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
454
455 /* Generally, the liveness and relevance properties of STMT are
456 propagated as is to the DEF_STMTs of its USEs:
457 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
458 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
459
460 One exception is when STMT has been identified as defining a reduction
461 variable; in this case we set the liveness/relevance as follows:
462 live_p = false
463 relevant = vect_used_by_reduction
464 This is because we distinguish between two kinds of relevant stmts -
465 those that are used by a reduction computation, and those that are
466 (also) used by a regular computation. This allows us later on to
467 identify stmts that are used solely by a reduction, and therefore the
468 order of the results that they produce does not have to be kept. */
469
470 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
471 tmp_relevant = relevant;
472 switch (def_type)
473 {
474 case vect_reduction_def:
475 switch (tmp_relevant)
476 {
477 case vect_unused_in_scope:
478 relevant = vect_used_by_reduction;
479 break;
480
481 case vect_used_by_reduction:
482 if (gimple_code (stmt) == GIMPLE_PHI)
483 break;
484 /* fall through */
485
486 default:
487 if (vect_print_dump_info (REPORT_DETAILS))
488 fprintf (vect_dump, "unsupported use of reduction.");
489
490 VEC_free (gimple, heap, worklist);
491 return false;
492 }
493
494 live_p = false;
495 break;
496
497 case vect_nested_cycle:
498 if (tmp_relevant != vect_unused_in_scope
499 && tmp_relevant != vect_used_in_outer_by_reduction
500 && tmp_relevant != vect_used_in_outer)
501 {
502 if (vect_print_dump_info (REPORT_DETAILS))
503 fprintf (vect_dump, "unsupported use of nested cycle.");
504
505 VEC_free (gimple, heap, worklist);
506 return false;
507 }
508
509 live_p = false;
510 break;
511
512 case vect_double_reduction_def:
513 if (tmp_relevant != vect_unused_in_scope
514 && tmp_relevant != vect_used_by_reduction)
515 {
516 if (vect_print_dump_info (REPORT_DETAILS))
517 fprintf (vect_dump, "unsupported use of double reduction.");
518
519 VEC_free (gimple, heap, worklist);
520 return false;
521 }
522
523 live_p = false;
524 break;
525
526 default:
527 break;
528 }
529
530 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
531 {
532 tree op = USE_FROM_PTR (use_p);
533 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
534 {
535 VEC_free (gimple, heap, worklist);
536 return false;
537 }
538 }
539 } /* while worklist */
540
541 VEC_free (gimple, heap, worklist);
542 return true;
543 }
544
545
546 int
547 cost_for_stmt (gimple stmt)
548 {
549 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
550
551 switch (STMT_VINFO_TYPE (stmt_info))
552 {
553 case load_vec_info_type:
554 return TARG_SCALAR_LOAD_COST;
555 case store_vec_info_type:
556 return TARG_SCALAR_STORE_COST;
557 case op_vec_info_type:
558 case condition_vec_info_type:
559 case assignment_vec_info_type:
560 case reduc_vec_info_type:
561 case induc_vec_info_type:
562 case type_promotion_vec_info_type:
563 case type_demotion_vec_info_type:
564 case type_conversion_vec_info_type:
565 case call_vec_info_type:
566 return TARG_SCALAR_STMT_COST;
567 case undef_vec_info_type:
568 default:
569 gcc_unreachable ();
570 }
571 }
572
573 /* Function vect_model_simple_cost.
574
575 Models cost for simple operations, i.e. those that only emit ncopies of a
576 single op. Right now, this does not account for multiple insns that could
577 be generated for the single vector op. We will handle that shortly. */
578
579 void
580 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
581 enum vect_def_type *dt, slp_tree slp_node)
582 {
583 int i;
584 int inside_cost = 0, outside_cost = 0;
585
586 /* The SLP costs were already calculated during SLP tree build. */
587 if (PURE_SLP_STMT (stmt_info))
588 return;
589
590 inside_cost = ncopies * TARG_VEC_STMT_COST;
591
 592   /* FORNOW: Assuming a maximum of 2 args per stmt.  */
593 for (i = 0; i < 2; i++)
594 {
595 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
596 outside_cost += TARG_SCALAR_TO_VEC_COST;
597 }
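   /* For example, vectorizing z = x + y with ncopies = 2, where y is
      loop-invariant, gives inside_cost = 2 * TARG_VEC_STMT_COST and
      outside_cost = TARG_SCALAR_TO_VEC_COST for broadcasting y.  */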
598
599 if (vect_print_dump_info (REPORT_COST))
600 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
601 "outside_cost = %d .", inside_cost, outside_cost);
602
 603   /* Set the costs either in STMT_INFO or SLP_NODE (if it exists).  */
604 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
605 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
606 }
607
608
609 /* Function vect_cost_strided_group_size
610
611 For strided load or store, return the group_size only if it is the first
612 load or store of a group, else return 1. This ensures that group size is
613 only returned once per group. */
614
615 static int
616 vect_cost_strided_group_size (stmt_vec_info stmt_info)
617 {
618 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
619
620 if (first_stmt == STMT_VINFO_STMT (stmt_info))
621 return DR_GROUP_SIZE (stmt_info);
622
623 return 1;
624 }
625
626
627 /* Function vect_model_store_cost
628
629 Models cost for stores. In the case of strided accesses, one access
630 has the overhead of the strided access attributed to it. */
631
632 void
633 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
634 enum vect_def_type dt, slp_tree slp_node)
635 {
636 int group_size;
637 int inside_cost = 0, outside_cost = 0;
638
639 /* The SLP costs were already calculated during SLP tree build. */
640 if (PURE_SLP_STMT (stmt_info))
641 return;
642
643 if (dt == vect_constant_def || dt == vect_external_def)
644 outside_cost = TARG_SCALAR_TO_VEC_COST;
645
646 /* Strided access? */
647 if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
648 group_size = vect_cost_strided_group_size (stmt_info);
649 /* Not a strided access. */
650 else
651 group_size = 1;
652
653 /* Is this an access in a group of stores, which provide strided access?
654 If so, add in the cost of the permutes. */
655 if (group_size > 1)
656 {
657 /* Uses a high and low interleave operation for each needed permute. */
658 inside_cost = ncopies * exact_log2(group_size) * group_size
659 * TARG_VEC_STMT_COST;
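       /* For example, with ncopies = 1 and group_size = 4, the two
          interleave stages (exact_log2 (4) = 2) each cost group_size
          vector stmts, giving 2 * 4 = 8 * TARG_VEC_STMT_COST.  */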
660
661 if (vect_print_dump_info (REPORT_COST))
662 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
663 group_size);
664
665 }
666
667 /* Costs of the stores. */
668 inside_cost += ncopies * TARG_VEC_STORE_COST;
669
670 if (vect_print_dump_info (REPORT_COST))
671 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
672 "outside_cost = %d .", inside_cost, outside_cost);
673
 674   /* Set the costs either in STMT_INFO or SLP_NODE (if it exists).  */
675 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
676 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
677 }
678
679
680 /* Function vect_model_load_cost
681
 682    Models cost for loads. In the case of strided accesses, one access
683 has the overhead of the strided access attributed to it. Since unaligned
684 accesses are supported for loads, we also account for the costs of the
685 access scheme chosen. */
686
687 void
688 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
689
690 {
691 int group_size;
 692   int alignment_support_scheme;
693 gimple first_stmt;
694 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
695 int inside_cost = 0, outside_cost = 0;
696
697 /* The SLP costs were already calculated during SLP tree build. */
698 if (PURE_SLP_STMT (stmt_info))
699 return;
700
701 /* Strided accesses? */
702 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
703 if (first_stmt && !slp_node)
704 {
705 group_size = vect_cost_strided_group_size (stmt_info);
706 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
707 }
708 /* Not a strided access. */
709 else
710 {
711 group_size = 1;
712 first_dr = dr;
713 }
714
 715   alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
716
717 /* Is this an access in a group of loads providing strided access?
718 If so, add in the cost of the permutes. */
719 if (group_size > 1)
720 {
 721       /* Uses even and odd extract operations for each needed permute.  */
722 inside_cost = ncopies * exact_log2(group_size) * group_size
723 * TARG_VEC_STMT_COST;
724
725 if (vect_print_dump_info (REPORT_COST))
726 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
727 group_size);
728
729 }
730
731 /* The loads themselves. */
 732   switch (alignment_support_scheme)
733 {
734 case dr_aligned:
735 {
736 inside_cost += ncopies * TARG_VEC_LOAD_COST;
737
738 if (vect_print_dump_info (REPORT_COST))
739 fprintf (vect_dump, "vect_model_load_cost: aligned.");
740
741 break;
742 }
743 case dr_unaligned_supported:
744 {
745 /* Here, we assign an additional cost for the unaligned load. */
746 inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;
747
748 if (vect_print_dump_info (REPORT_COST))
749 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
750 "hardware.");
751
752 break;
753 }
754 case dr_explicit_realign:
755 {
756 inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
757
758 /* FIXME: If the misalignment remains fixed across the iterations of
759 the containing loop, the following cost should be added to the
760 outside costs. */
761 if (targetm.vectorize.builtin_mask_for_load)
762 inside_cost += TARG_VEC_STMT_COST;
763
764 break;
765 }
766 case dr_explicit_realign_optimized:
767 {
768 if (vect_print_dump_info (REPORT_COST))
769 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
770 "pipelined.");
771
772 /* Unaligned software pipeline has a load of an address, an initial
773 load, and possibly a mask operation to "prime" the loop. However,
774 if this is an access in a group of loads, which provide strided
775 access, then the above cost should only be considered for one
776 access in the group. Inside the loop, there is a load op
777 and a realignment op. */
778
779 if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
780 {
781 outside_cost = 2*TARG_VEC_STMT_COST;
782 if (targetm.vectorize.builtin_mask_for_load)
783 outside_cost += TARG_VEC_STMT_COST;
784 }
785
786 inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
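         /* For example, with ncopies = 2 and a mask_for_load builtin, the
            guard above adds 3 * TARG_VEC_STMT_COST outside the loop
            (address, initial load, mask), while the loop body pays
            2 * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST).  */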
787
788 break;
789 }
790
791 default:
792 gcc_unreachable ();
793 }
794
795 if (vect_print_dump_info (REPORT_COST))
796 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
797 "outside_cost = %d .", inside_cost, outside_cost);
798
 799   /* Set the costs either in STMT_INFO or SLP_NODE (if it exists).  */
800 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
801 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
802 }
803
804
805 /* Function vect_init_vector.
806
807 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
808 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
809 is not NULL. Otherwise, place the initialization at the loop preheader.
810 Return the DEF of INIT_STMT.
811 It will be used in the vectorization of STMT. */
812
813 tree
814 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
815 gimple_stmt_iterator *gsi)
816 {
817 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
818 tree new_var;
819 gimple init_stmt;
820 tree vec_oprnd;
821 edge pe;
822 tree new_temp;
823 basic_block new_bb;
824
825 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
826 add_referenced_var (new_var);
827 init_stmt = gimple_build_assign (new_var, vector_var);
828 new_temp = make_ssa_name (new_var, init_stmt);
829 gimple_assign_set_lhs (init_stmt, new_temp);
830
831 if (gsi)
832 vect_finish_stmt_generation (stmt, init_stmt, gsi);
833 else
834 {
835 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
836
837 if (loop_vinfo)
838 {
839 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
840
841 if (nested_in_vect_loop_p (loop, stmt))
842 loop = loop->inner;
843
844 pe = loop_preheader_edge (loop);
845 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
846 gcc_assert (!new_bb);
847 }
848 else
849 {
850 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
851 basic_block bb;
852 gimple_stmt_iterator gsi_bb_start;
853
854 gcc_assert (bb_vinfo);
855 bb = BB_VINFO_BB (bb_vinfo);
856 gsi_bb_start = gsi_after_labels (bb);
857 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
858 }
859 }
860
861 if (vect_print_dump_info (REPORT_DETAILS))
862 {
863 fprintf (vect_dump, "created new init_stmt: ");
864 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
865 }
866
867 vec_oprnd = gimple_assign_lhs (init_stmt);
868 return vec_oprnd;
869 }
870
871
872 /* Function vect_get_vec_def_for_operand.
873
874 OP is an operand in STMT. This function returns a (vector) def that will be
875 used in the vectorized stmt for STMT.
876
877 In the case that OP is an SSA_NAME which is defined in the loop, then
878 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
879
880 In case OP is an invariant or constant, a new stmt that creates a vector def
881 needs to be introduced. */
882
883 tree
884 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
885 {
886 tree vec_oprnd;
887 gimple vec_stmt;
888 gimple def_stmt;
889 stmt_vec_info def_stmt_info = NULL;
890 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
891 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
892 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
893 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
894 tree vec_inv;
895 tree vec_cst;
896 tree t = NULL_TREE;
897 tree def;
898 int i;
899 enum vect_def_type dt;
900 bool is_simple_use;
901 tree vector_type;
902
903 if (vect_print_dump_info (REPORT_DETAILS))
904 {
905 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
906 print_generic_expr (vect_dump, op, TDF_SLIM);
907 }
908
909 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
910 &dt);
911 gcc_assert (is_simple_use);
912 if (vect_print_dump_info (REPORT_DETAILS))
913 {
914 if (def)
915 {
916 fprintf (vect_dump, "def = ");
917 print_generic_expr (vect_dump, def, TDF_SLIM);
918 }
919 if (def_stmt)
920 {
921 fprintf (vect_dump, " def_stmt = ");
922 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
923 }
924 }
925
926 switch (dt)
927 {
928 /* Case 1: operand is a constant. */
929 case vect_constant_def:
930 {
931 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
932 gcc_assert (vector_type);
933
934 if (scalar_def)
935 *scalar_def = op;
936
937 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
938 if (vect_print_dump_info (REPORT_DETAILS))
939 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
940
941 for (i = nunits - 1; i >= 0; --i)
942 {
943 t = tree_cons (NULL_TREE, op, t);
944 }
945 vec_cst = build_vector (vector_type, t);
946 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
947 }
948
949 /* Case 2: operand is defined outside the loop - loop invariant. */
950 case vect_external_def:
951 {
952 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
953 gcc_assert (vector_type);
954 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
955
956 if (scalar_def)
957 *scalar_def = def;
958
959 /* Create 'vec_inv = {inv,inv,..,inv}' */
960 if (vect_print_dump_info (REPORT_DETAILS))
961 fprintf (vect_dump, "Create vector_inv.");
962
963 for (i = nunits - 1; i >= 0; --i)
964 {
965 t = tree_cons (NULL_TREE, def, t);
966 }
967
968 /* FIXME: use build_constructor directly. */
969 vec_inv = build_constructor_from_list (vector_type, t);
970 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
971 }
972
973 /* Case 3: operand is defined inside the loop. */
974 case vect_internal_def:
975 {
976 if (scalar_def)
977 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
978
979 /* Get the def from the vectorized stmt. */
980 def_stmt_info = vinfo_for_stmt (def_stmt);
981 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
982 gcc_assert (vec_stmt);
983 if (gimple_code (vec_stmt) == GIMPLE_PHI)
984 vec_oprnd = PHI_RESULT (vec_stmt);
985 else if (is_gimple_call (vec_stmt))
986 vec_oprnd = gimple_call_lhs (vec_stmt);
987 else
988 vec_oprnd = gimple_assign_lhs (vec_stmt);
989 return vec_oprnd;
990 }
991
992 /* Case 4: operand is defined by a loop header phi - reduction */
993 case vect_reduction_def:
994 case vect_double_reduction_def:
995 case vect_nested_cycle:
996 {
997 struct loop *loop;
998
999 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1000 loop = (gimple_bb (def_stmt))->loop_father;
1001
1002 /* Get the def before the loop */
1003 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1004 return get_initial_def_for_reduction (stmt, op, scalar_def);
1005 }
1006
1007 /* Case 5: operand is defined by loop-header phi - induction. */
1008 case vect_induction_def:
1009 {
1010 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1011
1012 /* Get the def from the vectorized stmt. */
1013 def_stmt_info = vinfo_for_stmt (def_stmt);
1014 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1015 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1016 vec_oprnd = PHI_RESULT (vec_stmt);
1017 return vec_oprnd;
1018 }
1019
1020 default:
1021 gcc_unreachable ();
1022 }
1023 }
1024
1025
1026 /* Function vect_get_vec_def_for_stmt_copy
1027
1028 Return a vector-def for an operand. This function is used when the
1029 vectorized stmt to be created (by the caller to this function) is a "copy"
1030 created in case the vectorized result cannot fit in one vector, and several
1031 copies of the vector-stmt are required. In this case the vector-def is
1032 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1033 of the stmt that defines VEC_OPRND.
1034 DT is the type of the vector def VEC_OPRND.
1035
1036 Context:
1037 In case the vectorization factor (VF) is bigger than the number
1038 of elements that can fit in a vectype (nunits), we have to generate
1039 more than one vector stmt to vectorize the scalar stmt. This situation
1040 arises when there are multiple data-types operated upon in the loop; the
1041 smallest data-type determines the VF, and as a result, when vectorizing
1042 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1043 vector stmt (each computing a vector of 'nunits' results, and together
1044 computing 'VF' results in each iteration). This function is called when
1045 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1046 which VF=16 and nunits=4, so the number of copies required is 4):
1047
1048 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1049
1050 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1051 VS1.1: vx.1 = memref1 VS1.2
1052 VS1.2: vx.2 = memref2 VS1.3
1053 VS1.3: vx.3 = memref3
1054
1055 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1056 VSnew.1: vz1 = vx.1 + ... VSnew.2
1057 VSnew.2: vz2 = vx.2 + ... VSnew.3
1058 VSnew.3: vz3 = vx.3 + ...
1059
1060 The vectorization of S1 is explained in vectorizable_load.
1061 The vectorization of S2:
1062 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1063 the function 'vect_get_vec_def_for_operand' is called to
1064 get the relevant vector-def for each operand of S2. For operand x it
1065 returns the vector-def 'vx.0'.
1066
1067 To create the remaining copies of the vector-stmt (VSnew.j), this
1068 function is called to get the relevant vector-def for each operand. It is
1069 obtained from the respective VS1.j stmt, which is recorded in the
1070 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1071
1072 For example, to obtain the vector-def 'vx.1' in order to create the
1073 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1074 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1075 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1076 and return its def ('vx.1').
1077 Overall, to create the above sequence this function will be called 3 times:
1078 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1079 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1080 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1081
1082 tree
1083 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1084 {
1085 gimple vec_stmt_for_operand;
1086 stmt_vec_info def_stmt_info;
1087
1088 /* Do nothing; can reuse same def. */
1089 if (dt == vect_external_def || dt == vect_constant_def )
1090 return vec_oprnd;
1091
1092 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1093 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1094 gcc_assert (def_stmt_info);
1095 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1096 gcc_assert (vec_stmt_for_operand);
1097 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1098 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1099 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1100 else
1101 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1102 return vec_oprnd;
1103 }
1104
1105
1106 /* Get vectorized definitions for the operands to create a copy of an original
1107 stmt. See vect_get_vec_def_for_stmt_copy() for details. */
1108
1109 static void
1110 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1111 VEC(tree,heap) **vec_oprnds0,
1112 VEC(tree,heap) **vec_oprnds1)
1113 {
1114 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1115
1116 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1117 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1118
1119 if (vec_oprnds1 && *vec_oprnds1)
1120 {
1121 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1122 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1123 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1124 }
1125 }
1126
1127
1128 /* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not NULL. */
1129
1130 static void
1131 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1132 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1133 slp_tree slp_node)
1134 {
1135 if (slp_node)
1136 vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1);
1137 else
1138 {
1139 tree vec_oprnd;
1140
1141 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1142 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1143 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1144
1145 if (op1)
1146 {
1147 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1148 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1149 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1150 }
1151 }
1152 }
1153
1154
1155 /* Function vect_finish_stmt_generation.
1156
1157 Insert a new stmt. */
1158
1159 void
1160 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1161 gimple_stmt_iterator *gsi)
1162 {
1163 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1164 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1165 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1166
1167 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1168
1169 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1170
1171 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1172 bb_vinfo));
1173
1174 if (vect_print_dump_info (REPORT_DETAILS))
1175 {
1176 fprintf (vect_dump, "add new stmt: ");
1177 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1178 }
1179
1180 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1181 }
1182
1183 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1184 a function declaration if the target has a vectorized version
1185 of the function, or NULL_TREE if the function cannot be vectorized. */
1186
1187 tree
1188 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1189 {
1190 tree fndecl = gimple_call_fndecl (call);
1191 enum built_in_function code;
1192
1193 /* We only handle functions that do not read or clobber memory -- i.e.
1194 const or novops ones. */
1195 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1196 return NULL_TREE;
1197
1198 if (!fndecl
1199 || TREE_CODE (fndecl) != FUNCTION_DECL
1200 || !DECL_BUILT_IN (fndecl))
1201 return NULL_TREE;
1202
1203 code = DECL_FUNCTION_CODE (fndecl);
1204 return targetm.vectorize.builtin_vectorized_function (code, vectype_out,
1205 vectype_in);
1206 }
1207
1208 /* Function vectorizable_call.
1209
1210 Check if STMT performs a function call that can be vectorized.
1211 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1212 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1213 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1214
1215 static bool
1216 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1217 {
1218 tree vec_dest;
1219 tree scalar_dest;
1220 tree op, type;
1221 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1222 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1223 tree vectype_out, vectype_in;
1224 int nunits_in;
1225 int nunits_out;
1226 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1227 tree fndecl, new_temp, def, rhs_type, lhs_type;
1228 gimple def_stmt;
1229 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1230 gimple new_stmt;
1231 int ncopies, j;
1232 VEC(tree, heap) *vargs = NULL;
1233 enum { NARROW, NONE, WIDEN } modifier;
1234 size_t i, nargs;
1235
1236 /* FORNOW: unsupported in basic block SLP. */
1237 gcc_assert (loop_vinfo);
1238
1239 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1240 return false;
1241
1242 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1243 return false;
1244
1245 /* FORNOW: SLP not supported. */
1246 if (STMT_SLP_TYPE (stmt_info))
1247 return false;
1248
1249 /* Is STMT a vectorizable call? */
1250 if (!is_gimple_call (stmt))
1251 return false;
1252
1253 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1254 return false;
1255
1256 /* Process function arguments. */
1257 rhs_type = NULL_TREE;
1258 nargs = gimple_call_num_args (stmt);
1259
 1260 	  /* Bail out if the function has more than two arguments; we do
 1261 	     not have interesting builtin functions to vectorize with
 1262 	     more than two arguments.  Having no arguments is not supported either.  */
1263 if (nargs == 0 || nargs > 2)
1264 return false;
1265
1266 for (i = 0; i < nargs; i++)
1267 {
1268 op = gimple_call_arg (stmt, i);
1269
1270 /* We can only handle calls with arguments of the same type. */
1271 if (rhs_type
1272 && rhs_type != TREE_TYPE (op))
1273 {
1274 if (vect_print_dump_info (REPORT_DETAILS))
1275 fprintf (vect_dump, "argument types differ.");
1276 return false;
1277 }
1278 rhs_type = TREE_TYPE (op);
1279
1280 if (!vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt[i]))
1281 {
1282 if (vect_print_dump_info (REPORT_DETAILS))
1283 fprintf (vect_dump, "use not simple.");
1284 return false;
1285 }
1286 }
1287
1288 vectype_in = get_vectype_for_scalar_type (rhs_type);
1289 if (!vectype_in)
1290 return false;
1291 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1292
1293 lhs_type = TREE_TYPE (gimple_call_lhs (stmt));
1294 vectype_out = get_vectype_for_scalar_type (lhs_type);
1295 if (!vectype_out)
1296 return false;
1297 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1298
1299 /* FORNOW */
1300 if (nunits_in == nunits_out / 2)
1301 modifier = NARROW;
1302 else if (nunits_out == nunits_in)
1303 modifier = NONE;
1304 else if (nunits_out == nunits_in / 2)
1305 modifier = WIDEN;
1306 else
1307 return false;
1308
1309 /* For now, we only vectorize functions if a target specific builtin
1310 is available. TODO -- in some cases, it might be profitable to
1311 insert the calls for pieces of the vector, in order to be able
1312 to vectorize other operations in the loop. */
1313 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1314 if (fndecl == NULL_TREE)
1315 {
1316 if (vect_print_dump_info (REPORT_DETAILS))
1317 fprintf (vect_dump, "function is not vectorizable.");
1318
1319 return false;
1320 }
1321
1322 gcc_assert (!gimple_vuse (stmt));
1323
1324 if (modifier == NARROW)
1325 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1326 else
1327 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1328
1329 /* Sanity check: make sure that at least one copy of the vectorized stmt
1330 needs to be generated. */
1331 gcc_assert (ncopies >= 1);
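   /* For example, with VF = 8 and V4SF arguments and result
      (nunits_in = nunits_out = 4, so MODIFIER is NONE), ncopies = 8 / 4 = 2
      vectorized calls are generated for each scalar call.  */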
1332
1333 if (!vec_stmt) /* transformation not required. */
1334 {
1335 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1336 if (vect_print_dump_info (REPORT_DETAILS))
1337 fprintf (vect_dump, "=== vectorizable_call ===");
1338 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1339 return true;
1340 }
1341
1342 /** Transform. **/
1343
1344 if (vect_print_dump_info (REPORT_DETAILS))
1345 fprintf (vect_dump, "transform operation.");
1346
1347 /* Handle def. */
1348 scalar_dest = gimple_call_lhs (stmt);
1349 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1350
1351 prev_stmt_info = NULL;
1352 switch (modifier)
1353 {
1354 case NONE:
1355 for (j = 0; j < ncopies; ++j)
1356 {
1357 /* Build argument list for the vectorized call. */
1358 if (j == 0)
1359 vargs = VEC_alloc (tree, heap, nargs);
1360 else
1361 VEC_truncate (tree, vargs, 0);
1362
1363 for (i = 0; i < nargs; i++)
1364 {
1365 op = gimple_call_arg (stmt, i);
1366 if (j == 0)
1367 vec_oprnd0
1368 = vect_get_vec_def_for_operand (op, stmt, NULL);
1369 else
1370 vec_oprnd0
 1371 		  = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1372
1373 VEC_quick_push (tree, vargs, vec_oprnd0);
1374 }
1375
1376 new_stmt = gimple_build_call_vec (fndecl, vargs);
1377 new_temp = make_ssa_name (vec_dest, new_stmt);
1378 gimple_call_set_lhs (new_stmt, new_temp);
1379
1380 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1381
1382 if (j == 0)
1383 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1384 else
1385 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1386
1387 prev_stmt_info = vinfo_for_stmt (new_stmt);
1388 }
1389
1390 break;
1391
1392 case NARROW:
1393 for (j = 0; j < ncopies; ++j)
1394 {
1395 /* Build argument list for the vectorized call. */
1396 if (j == 0)
1397 vargs = VEC_alloc (tree, heap, nargs * 2);
1398 else
1399 VEC_truncate (tree, vargs, 0);
1400
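 	  /* For the narrowing case each vectorized call consumes two
 	     consecutive vector defs per scalar argument, so a pair
 	     (vec_oprnd0, vec_oprnd1) is pushed for every argument,
 	     advancing along the def chain with
 	     vect_get_vec_def_for_stmt_copy.  */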
1401 for (i = 0; i < nargs; i++)
1402 {
1403 op = gimple_call_arg (stmt, i);
1404 if (j == 0)
1405 {
1406 vec_oprnd0
1407 = vect_get_vec_def_for_operand (op, stmt, NULL);
1408 vec_oprnd1
 1409 		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1410 }
1411 else
1412 {
1413 vec_oprnd0
 1414 		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1415 vec_oprnd1
 1416 		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1417 }
1418
1419 VEC_quick_push (tree, vargs, vec_oprnd0);
1420 VEC_quick_push (tree, vargs, vec_oprnd1);
1421 }
1422
1423 new_stmt = gimple_build_call_vec (fndecl, vargs);
1424 new_temp = make_ssa_name (vec_dest, new_stmt);
1425 gimple_call_set_lhs (new_stmt, new_temp);
1426
1427 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1428
1429 if (j == 0)
1430 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1431 else
1432 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1433
1434 prev_stmt_info = vinfo_for_stmt (new_stmt);
1435 }
1436
1437 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1438
1439 break;
1440
1441 case WIDEN:
1442 /* No current target implements this case. */
1443 return false;
1444 }
1445
1446 VEC_free (tree, heap, vargs);
1447
1448 /* Update the exception handling table with the vector stmt if necessary. */
1449 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1450 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1451
 1452 	/* The call in STMT might prevent it from being removed in DCE.
 1453 	   We cannot remove it here, however, because of the way the SSA name
 1454 	   it defines is mapped to the new definition.  So just replace the
 1455 	   rhs of the statement with something harmless.  */
1456
1457 type = TREE_TYPE (scalar_dest);
1458 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1459 fold_convert (type, integer_zero_node));
1460 set_vinfo_for_stmt (new_stmt, stmt_info);
1461 set_vinfo_for_stmt (stmt, NULL);
1462 STMT_VINFO_STMT (stmt_info) = new_stmt;
1463 gsi_replace (gsi, new_stmt, false);
1464 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1465
1466 return true;
1467 }
1468
1469
1470 /* Function vect_gen_widened_results_half
1471
 1472    Create a vector stmt whose code, number of arguments, and result
 1473    variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
 1474    VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
1475 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1476 needs to be created (DECL is a function-decl of a target-builtin).
1477 STMT is the original scalar stmt that we are vectorizing. */
1478
1479 static gimple
1480 vect_gen_widened_results_half (enum tree_code code,
1481 tree decl,
1482 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1483 tree vec_dest, gimple_stmt_iterator *gsi,
1484 gimple stmt)
1485 {
1486 gimple new_stmt;
1487 tree new_temp;
1488
1489 /* Generate half of the widened result: */
1490 if (code == CALL_EXPR)
1491 {
1492 /* Target specific support */
1493 if (op_type == binary_op)
1494 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1495 else
1496 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1497 new_temp = make_ssa_name (vec_dest, new_stmt);
1498 gimple_call_set_lhs (new_stmt, new_temp);
1499 }
1500 else
1501 {
1502 /* Generic support */
1503 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1504 if (op_type != binary_op)
1505 vec_oprnd1 = NULL;
1506 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1507 vec_oprnd1);
1508 new_temp = make_ssa_name (vec_dest, new_stmt);
1509 gimple_assign_set_lhs (new_stmt, new_temp);
1510 }
1511 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1512
1513 return new_stmt;
1514 }
1515
1516
 1517 /* Check if STMT performs a conversion operation that can be vectorized.
1518 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1519 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1520 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1521
1522 static bool
1523 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1524 gimple *vec_stmt, slp_tree slp_node)
1525 {
1526 tree vec_dest;
1527 tree scalar_dest;
1528 tree op0;
1529 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1530 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1531 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1532 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1533 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1534 tree new_temp;
1535 tree def;
1536 gimple def_stmt;
1537 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1538 gimple new_stmt = NULL;
1539 stmt_vec_info prev_stmt_info;
1540 int nunits_in;
1541 int nunits_out;
1542 tree vectype_out, vectype_in;
1543 int ncopies, j;
1544 tree expr;
1545 tree rhs_type, lhs_type;
1546 tree builtin_decl;
1547 enum { NARROW, NONE, WIDEN } modifier;
1548 int i;
1549 VEC(tree,heap) *vec_oprnds0 = NULL;
1550 tree vop0;
1551 tree integral_type;
1552 VEC(tree,heap) *dummy = NULL;
1553 int dummy_int;
1554
1555 /* Is STMT a vectorizable conversion? */
1556
1557 /* FORNOW: unsupported in basic block SLP. */
1558 gcc_assert (loop_vinfo);
1559
1560 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1561 return false;
1562
1563 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1564 return false;
1565
1566 if (!is_gimple_assign (stmt))
1567 return false;
1568
1569 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1570 return false;
1571
1572 code = gimple_assign_rhs_code (stmt);
1573 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1574 return false;
1575
1576 /* Check types of lhs and rhs. */
1577 op0 = gimple_assign_rhs1 (stmt);
1578 rhs_type = TREE_TYPE (op0);
1579 vectype_in = get_vectype_for_scalar_type (rhs_type);
1580 if (!vectype_in)
1581 return false;
1582 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1583
1584 scalar_dest = gimple_assign_lhs (stmt);
1585 lhs_type = TREE_TYPE (scalar_dest);
1586 vectype_out = get_vectype_for_scalar_type (lhs_type);
1587 if (!vectype_out)
1588 return false;
1589 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1590
1591 /* FORNOW */
1592 if (nunits_in == nunits_out / 2)
1593 modifier = NARROW;
1594 else if (nunits_out == nunits_in)
1595 modifier = NONE;
1596 else if (nunits_out == nunits_in / 2)
1597 modifier = WIDEN;
1598 else
1599 return false;
1600
1601 if (modifier == NONE)
1602 gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out);
1603
1604 /* Bail out if the types are both integral or non-integral. */
1605 if ((INTEGRAL_TYPE_P (rhs_type) && INTEGRAL_TYPE_P (lhs_type))
1606 || (!INTEGRAL_TYPE_P (rhs_type) && !INTEGRAL_TYPE_P (lhs_type)))
1607 return false;
1608
1609 integral_type = INTEGRAL_TYPE_P (rhs_type) ? vectype_in : vectype_out;
1610
1611 if (modifier == NARROW)
1612 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1613 else
1614 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1615
1616 /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
1617 this, so we can safely override NCOPIES with 1 here. */
1618 if (slp_node)
1619 ncopies = 1;
1620
1621 /* Sanity check: make sure that at least one copy of the vectorized stmt
1622 needs to be generated. */
1623 gcc_assert (ncopies >= 1);
1624
1625 /* Check the operands of the operation. */
1626 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
1627 {
1628 if (vect_print_dump_info (REPORT_DETAILS))
1629 fprintf (vect_dump, "use not simple.");
1630 return false;
1631 }
1632
1633 /* Supportable by target? */
1634 if ((modifier == NONE
1635 && !targetm.vectorize.builtin_conversion (code, integral_type))
1636 || (modifier == WIDEN
1637 && !supportable_widening_operation (code, stmt, vectype_in,
1638 &decl1, &decl2,
1639 &code1, &code2,
1640 &dummy_int, &dummy))
1641 || (modifier == NARROW
1642 && !supportable_narrowing_operation (code, stmt, vectype_in,
1643 &code1, &dummy_int, &dummy)))
1644 {
1645 if (vect_print_dump_info (REPORT_DETAILS))
1646 fprintf (vect_dump, "conversion not supported by target.");
1647 return false;
1648 }
1649
1650 if (modifier != NONE)
1651 {
1652 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
1653 /* FORNOW: SLP not supported. */
1654 if (STMT_SLP_TYPE (stmt_info))
1655 return false;
1656 }
1657
1658 if (!vec_stmt) /* transformation not required. */
1659 {
1660 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1661 return true;
1662 }
1663
1664 /** Transform. **/
1665 if (vect_print_dump_info (REPORT_DETAILS))
1666 fprintf (vect_dump, "transform conversion.");
1667
1668 /* Handle def. */
1669 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1670
1671 if (modifier == NONE && !slp_node)
1672 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1673
1674 prev_stmt_info = NULL;
1675 switch (modifier)
1676 {
1677 case NONE:
1678 for (j = 0; j < ncopies; j++)
1679 {
1680 if (j == 0)
1681 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1682 else
1683 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1684
1685 builtin_decl =
1686 targetm.vectorize.builtin_conversion (code, integral_type);
1687 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
1688 {
 1689 	      /* Arguments are ready.  Create the new vector stmt.  */
1690 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1691 new_temp = make_ssa_name (vec_dest, new_stmt);
1692 gimple_call_set_lhs (new_stmt, new_temp);
1693 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1694 if (slp_node)
1695 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1696 }
1697
1698 if (j == 0)
1699 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1700 else
1701 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1702 prev_stmt_info = vinfo_for_stmt (new_stmt);
1703 }
1704 break;
1705
1706 case WIDEN:
1707 /* In case the vectorization factor (VF) is bigger than the number
1708 of elements that we can fit in a vectype (nunits), we have to
 1709 	 generate more than one vector stmt, i.e., we need to "unroll"
1710 the vector stmt by a factor VF/nunits. */
1711 for (j = 0; j < ncopies; j++)
1712 {
1713 if (j == 0)
1714 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1715 else
1716 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1717
1718 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
1719
1720 /* Generate first half of the widened result: */
1721 new_stmt
1722 = vect_gen_widened_results_half (code1, decl1,
1723 vec_oprnd0, vec_oprnd1,
1724 unary_op, vec_dest, gsi, stmt);
1725 if (j == 0)
1726 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1727 else
1728 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1729 prev_stmt_info = vinfo_for_stmt (new_stmt);
1730
1731 /* Generate second half of the widened result: */
1732 new_stmt
1733 = vect_gen_widened_results_half (code2, decl2,
1734 vec_oprnd0, vec_oprnd1,
1735 unary_op, vec_dest, gsi, stmt);
1736 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1737 prev_stmt_info = vinfo_for_stmt (new_stmt);
1738 }
1739 break;
1740
1741 case NARROW:
1742 /* In case the vectorization factor (VF) is bigger than the number
1743 of elements that we can fit in a vectype (nunits), we have to
 1744 	 generate more than one vector stmt, i.e., we need to "unroll"
1745 the vector stmt by a factor VF/nunits. */
1746 for (j = 0; j < ncopies; j++)
1747 {
1748 /* Handle uses. */
1749 if (j == 0)
1750 {
1751 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1752 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1753 }
1754 else
1755 {
1756 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1757 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1758 }
1759
1760 /* Arguments are ready. Create the new vector stmt. */
1761 expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1);
1762 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1763 vec_oprnd1);
1764 new_temp = make_ssa_name (vec_dest, new_stmt);
1765 gimple_assign_set_lhs (new_stmt, new_temp);
1766 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1767
1768 if (j == 0)
1769 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1770 else
1771 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1772
1773 prev_stmt_info = vinfo_for_stmt (new_stmt);
1774 }
1775
1776 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1777 }
1778
1779 if (vec_oprnds0)
1780 VEC_free (tree, heap, vec_oprnds0);
1781
1782 return true;
1783 }
1784 /* Function vectorizable_assignment.
1785
1786 Check if STMT performs an assignment (copy) that can be vectorized.
1787 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1788 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1789 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1790
1791 static bool
1792 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1793 gimple *vec_stmt, slp_tree slp_node)
1794 {
1795 tree vec_dest;
1796 tree scalar_dest;
1797 tree op;
1798 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1799 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1800 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1801 tree new_temp;
1802 tree def;
1803 gimple def_stmt;
1804 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1805 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1806 int ncopies;
1807 int i;
1808 VEC(tree,heap) *vec_oprnds = NULL;
1809 tree vop;
1810 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1811
1812 /* Multiple types in SLP are handled by creating the appropriate number of
1813 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1814 case of SLP. */
1815 if (slp_node)
1816 ncopies = 1;
1817 else
1818 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1819
1820 gcc_assert (ncopies >= 1);
1821 if (ncopies > 1)
1822 return false; /* FORNOW */
1823
1824 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1825 return false;
1826
1827 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1828 return false;
1829
1830 /* Is vectorizable assignment? */
1831 if (!is_gimple_assign (stmt))
1832 return false;
1833
1834 scalar_dest = gimple_assign_lhs (stmt);
1835 if (TREE_CODE (scalar_dest) != SSA_NAME)
1836 return false;
1837
1838 if (gimple_assign_single_p (stmt)
1839 || gimple_assign_rhs_code (stmt) == PAREN_EXPR)
1840 op = gimple_assign_rhs1 (stmt);
1841 else
1842 return false;
1843
1844 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1845 {
1846 if (vect_print_dump_info (REPORT_DETAILS))
1847 fprintf (vect_dump, "use not simple.");
1848 return false;
1849 }
1850
1851 if (!vec_stmt) /* transformation not required. */
1852 {
1853 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1854 if (vect_print_dump_info (REPORT_DETAILS))
1855 fprintf (vect_dump, "=== vectorizable_assignment ===");
1856 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1857 return true;
1858 }
1859
1860 /** Transform. **/
1861 if (vect_print_dump_info (REPORT_DETAILS))
1862 fprintf (vect_dump, "transform assignment.");
1863
1864 /* Handle def. */
1865 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1866
1867 /* Handle use. */
1868 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
1869
 1870 /* Arguments are ready. Create the new vector stmt. */
1871 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
1872 {
1873 *vec_stmt = gimple_build_assign (vec_dest, vop);
1874 new_temp = make_ssa_name (vec_dest, *vec_stmt);
1875 gimple_assign_set_lhs (*vec_stmt, new_temp);
1876 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
1877 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt;
1878
1879 if (slp_node)
1880 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), *vec_stmt);
1881 }
1882
1883 VEC_free (tree, heap, vec_oprnds);
1884 return true;
1885 }
1886
1887 /* Function vectorizable_operation.
1888
1889 Check if STMT performs a binary or unary operation that can be vectorized.
1890 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1891 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1892 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1893
1894 static bool
1895 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
1896 gimple *vec_stmt, slp_tree slp_node)
1897 {
1898 tree vec_dest;
1899 tree scalar_dest;
1900 tree op0, op1 = NULL;
1901 tree vec_oprnd1 = NULL_TREE;
1902 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1903 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1904 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1905 enum tree_code code;
1906 enum machine_mode vec_mode;
1907 tree new_temp;
1908 int op_type;
1909 optab optab;
1910 int icode;
1911 enum machine_mode optab_op2_mode;
1912 tree def;
1913 gimple def_stmt;
1914 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1915 gimple new_stmt = NULL;
1916 stmt_vec_info prev_stmt_info;
1917 int nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
1918 int nunits_out;
1919 tree vectype_out;
1920 int ncopies;
1921 int j, i;
1922 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
1923 tree vop0, vop1;
1924 unsigned int k;
1925 bool shift_p = false;
1926 bool scalar_shift_arg = false;
1927 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1928 int vf;
1929
1930 if (loop_vinfo)
1931 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1932 else
1933 /* FORNOW: multiple types are not supported in basic block SLP. */
1934 vf = nunits_in;
1935
1936 /* Multiple types in SLP are handled by creating the appropriate number of
1937 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1938 case of SLP. */
1939 if (slp_node)
1940 ncopies = 1;
1941 else
1942 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1943
1944 gcc_assert (ncopies >= 1);
1945
1946 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1947 return false;
1948
1949 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1950 return false;
1951
1952 /* Is STMT a vectorizable binary/unary operation? */
1953 if (!is_gimple_assign (stmt))
1954 return false;
1955
1956 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1957 return false;
1958
1959 scalar_dest = gimple_assign_lhs (stmt);
1960 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
1961 if (!vectype_out)
1962 return false;
1963 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1964 if (nunits_out != nunits_in)
1965 return false;
1966
1967 code = gimple_assign_rhs_code (stmt);
1968
1969 /* For pointer addition, we should use the normal plus for
1970 the vector addition. */
1971 if (code == POINTER_PLUS_EXPR)
1972 code = PLUS_EXPR;
1973
1974 /* Support only unary or binary operations. */
1975 op_type = TREE_CODE_LENGTH (code);
1976 if (op_type != unary_op && op_type != binary_op)
1977 {
1978 if (vect_print_dump_info (REPORT_DETAILS))
1979 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
1980 return false;
1981 }
1982
1983 op0 = gimple_assign_rhs1 (stmt);
1984 if (!vect_is_simple_use (op0, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1985 {
1986 if (vect_print_dump_info (REPORT_DETAILS))
1987 fprintf (vect_dump, "use not simple.");
1988 return false;
1989 }
1990
1991 if (op_type == binary_op)
1992 {
1993 op1 = gimple_assign_rhs2 (stmt);
1994 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
1995 &dt[1]))
1996 {
1997 if (vect_print_dump_info (REPORT_DETAILS))
1998 fprintf (vect_dump, "use not simple.");
1999 return false;
2000 }
2001 }
2002
2003 /* If this is a shift/rotate, determine whether the shift amount is a vector,
2004 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2005 shift optabs. */
2006 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2007 || code == RROTATE_EXPR)
2008 {
2009 shift_p = true;
2010
2011 /* vector shifted by vector */
2012 if (dt[1] == vect_internal_def)
2013 {
2014 optab = optab_for_tree_code (code, vectype, optab_vector);
2015 if (vect_print_dump_info (REPORT_DETAILS))
2016 fprintf (vect_dump, "vector/vector shift/rotate found.");
2017 }
2018
2019 /* See if the machine has a vector shifted by scalar insn and if not
2020 then see if it has a vector shifted by vector insn */
2021 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2022 {
2023 optab = optab_for_tree_code (code, vectype, optab_scalar);
2024 if (optab
2025 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2026 != CODE_FOR_nothing))
2027 {
2028 scalar_shift_arg = true;
2029 if (vect_print_dump_info (REPORT_DETAILS))
2030 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2031 }
2032 else
2033 {
2034 optab = optab_for_tree_code (code, vectype, optab_vector);
2035 if (optab
2036 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2037 != CODE_FOR_nothing))
2038 {
2039 if (vect_print_dump_info (REPORT_DETAILS))
2040 fprintf (vect_dump, "vector/vector shift/rotate found.");
2041
2042 /* Unlike the other binary operators, shifts/rotates have
2043 the rhs being int, instead of the same type as the lhs,
2044 so make sure the scalar is the right type if we are
2045 dealing with vectors of short/char. */
2046 if (dt[1] == vect_constant_def)
2047 op1 = fold_convert (TREE_TYPE (vectype), op1);
2048 }
2049 }
2050 }
2051
2052 else
2053 {
2054 if (vect_print_dump_info (REPORT_DETAILS))
2055 fprintf (vect_dump, "operand mode requires invariant argument.");
2056 return false;
2057 }
2058 }
2059 else
2060 optab = optab_for_tree_code (code, vectype, optab_default);
2061
2062 /* Supportable by target? */
2063 if (!optab)
2064 {
2065 if (vect_print_dump_info (REPORT_DETAILS))
2066 fprintf (vect_dump, "no optab.");
2067 return false;
2068 }
2069 vec_mode = TYPE_MODE (vectype);
2070 icode = (int) optab_handler (optab, vec_mode)->insn_code;
2071 if (icode == CODE_FOR_nothing)
2072 {
2073 if (vect_print_dump_info (REPORT_DETAILS))
2074 fprintf (vect_dump, "op not supported by target.");
 2075 /* We can still proceed using word_mode if the whole vector fits in a single word; the worthwhile-factor check below is done only during analysis. */
2076 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2077 || (vf < vect_min_worthwhile_factor (code)
2078 && !vec_stmt))
2079 return false;
2080 if (vect_print_dump_info (REPORT_DETAILS))
2081 fprintf (vect_dump, "proceeding using word mode.");
2082 }
2083
2084 /* Worthwhile without SIMD support? Check only during analysis. */
2085 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2086 && vf < vect_min_worthwhile_factor (code)
2087 && !vec_stmt)
2088 {
2089 if (vect_print_dump_info (REPORT_DETAILS))
2090 fprintf (vect_dump, "not worthwhile without SIMD support.");
2091 return false;
2092 }
2093
2094 if (!vec_stmt) /* transformation not required. */
2095 {
2096 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2097 if (vect_print_dump_info (REPORT_DETAILS))
2098 fprintf (vect_dump, "=== vectorizable_operation ===");
2099 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2100 return true;
2101 }
2102
2103 /** Transform. **/
2104
2105 if (vect_print_dump_info (REPORT_DETAILS))
2106 fprintf (vect_dump, "transform binary/unary operation.");
2107
2108 /* Handle def. */
2109 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2110
2111 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2112 created in the previous stages of the recursion, so no allocation is
2113 needed, except for the case of shift with scalar shift argument. In that
2114 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2115 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2116 In case of loop-based vectorization we allocate VECs of size 1. We
2117 allocate VEC_OPRNDS1 only in case of binary operation. */
2118 if (!slp_node)
2119 {
2120 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2121 if (op_type == binary_op)
2122 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2123 }
2124 else if (scalar_shift_arg)
2125 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2126
2127 /* In case the vectorization factor (VF) is bigger than the number
2128 of elements that we can fit in a vectype (nunits), we have to generate
2129 more than one vector stmt - i.e - we need to "unroll" the
2130 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2131 from one copy of the vector stmt to the next, in the field
2132 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2133 stages to find the correct vector defs to be used when vectorizing
2134 stmts that use the defs of the current stmt. The example below illustrates
2135 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2136 4 vectorized stmts):
2137
2138 before vectorization:
2139 RELATED_STMT VEC_STMT
2140 S1: x = memref - -
2141 S2: z = x + 1 - -
2142
2143 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2144 there):
2145 RELATED_STMT VEC_STMT
2146 VS1_0: vx0 = memref0 VS1_1 -
2147 VS1_1: vx1 = memref1 VS1_2 -
2148 VS1_2: vx2 = memref2 VS1_3 -
2149 VS1_3: vx3 = memref3 - -
2150 S1: x = load - VS1_0
2151 S2: z = x + 1 - -
2152
2153 step2: vectorize stmt S2 (done here):
2154 To vectorize stmt S2 we first need to find the relevant vector
2155 def for the first operand 'x'. This is, as usual, obtained from
2156 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2157 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2158 relevant vector def 'vx0'. Having found 'vx0' we can generate
2159 the vector stmt VS2_0, and as usual, record it in the
2160 STMT_VINFO_VEC_STMT of stmt S2.
2161 When creating the second copy (VS2_1), we obtain the relevant vector
2162 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2163 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2164 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2165 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2166 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2167 chain of stmts and pointers:
2168 RELATED_STMT VEC_STMT
2169 VS1_0: vx0 = memref0 VS1_1 -
2170 VS1_1: vx1 = memref1 VS1_2 -
2171 VS1_2: vx2 = memref2 VS1_3 -
2172 VS1_3: vx3 = memref3 - -
2173 S1: x = load - VS1_0
2174 VS2_0: vz0 = vx0 + v1 VS2_1 -
2175 VS2_1: vz1 = vx1 + v1 VS2_2 -
2176 VS2_2: vz2 = vx2 + v1 VS2_3 -
2177 VS2_3: vz3 = vx3 + v1 - -
2178 S2: z = x + 1 - VS2_0 */
2179
2180 prev_stmt_info = NULL;
2181 for (j = 0; j < ncopies; j++)
2182 {
2183 /* Handle uses. */
2184 if (j == 0)
2185 {
2186 if (op_type == binary_op && scalar_shift_arg)
2187 {
2188 /* Vector shl and shr insn patterns can be defined with scalar
2189 operand 2 (shift operand). In this case, use constant or loop
2190 invariant op1 directly, without extending it to vector mode
2191 first. */
2192 optab_op2_mode = insn_data[icode].operand[2].mode;
2193 if (!VECTOR_MODE_P (optab_op2_mode))
2194 {
2195 if (vect_print_dump_info (REPORT_DETAILS))
2196 fprintf (vect_dump, "operand 1 using scalar mode.");
2197 vec_oprnd1 = op1;
2198 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2199 if (slp_node)
2200 {
2201 /* Store vec_oprnd1 for every vector stmt to be created
2202 for SLP_NODE. We check during the analysis that all the
2203 shift arguments are the same.
2204 TODO: Allow different constants for different vector
2205 stmts generated for an SLP instance. */
2206 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2207 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2208 }
2209 }
2210 }
2211
2212 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2213 (a special case for certain kind of vector shifts); otherwise,
2214 operand 1 should be of a vector type (the usual case). */
2215 if (op_type == binary_op && !vec_oprnd1)
2216 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2217 slp_node);
2218 else
2219 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2220 slp_node);
2221 }
2222 else
2223 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2224
2225 /* Arguments are ready. Create the new vector stmt. */
2226 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
2227 {
2228 vop1 = ((op_type == binary_op)
2229 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2230 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2231 new_temp = make_ssa_name (vec_dest, new_stmt);
2232 gimple_assign_set_lhs (new_stmt, new_temp);
2233 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2234 if (slp_node)
2235 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2236 }
2237
2238 if (slp_node)
2239 continue;
2240
2241 if (j == 0)
2242 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2243 else
2244 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2245 prev_stmt_info = vinfo_for_stmt (new_stmt);
2246 }
2247
2248 VEC_free (tree, heap, vec_oprnds0);
2249 if (vec_oprnds1)
2250 VEC_free (tree, heap, vec_oprnds1);
2251
2252 return true;
2253 }
2254
2255
2256 /* Get vectorized definitions for loop-based vectorization. For the first
2257 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2258 scalar operand), and for the rest we get a copy with
2259 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2260 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2261 The vectors are collected into VEC_OPRNDS. */
2262
2263 static void
2264 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2265 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2266 {
2267 tree vec_oprnd;
2268
2269 /* Get first vector operand. */
 2270 /* All the vector operands except the very first one (which is the scalar
 2271 operand) are stmt copies. */
2272 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2273 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2274 else
2275 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2276
2277 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2278
2279 /* Get second vector operand. */
2280 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2281 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2282
2283 *oprnd = vec_oprnd;
2284
2285 /* For conversion in multiple steps, continue to get operands
2286 recursively. */
2287 if (multi_step_cvt)
2288 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2289 }
2290
2291
2292 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2293 For multi-step conversions store the resulting vectors and call the function
2294 recursively. */
2295
2296 static void
2297 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2298 int multi_step_cvt, gimple stmt,
2299 VEC (tree, heap) *vec_dsts,
2300 gimple_stmt_iterator *gsi,
2301 slp_tree slp_node, enum tree_code code,
2302 stmt_vec_info *prev_stmt_info)
2303 {
2304 unsigned int i;
2305 tree vop0, vop1, new_tmp, vec_dest;
2306 gimple new_stmt;
2307 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2308
2309 vec_dest = VEC_pop (tree, vec_dsts);
2310
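 /* Combine the operand vectors pairwise: each demotion stmt narrows two
    input vectors into a single vector of the next (narrower) type. */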
2311 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2312 {
2313 /* Create demotion operation. */
2314 vop0 = VEC_index (tree, *vec_oprnds, i);
2315 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2316 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2317 new_tmp = make_ssa_name (vec_dest, new_stmt);
2318 gimple_assign_set_lhs (new_stmt, new_tmp);
2319 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2320
2321 if (multi_step_cvt)
2322 /* Store the resulting vector for next recursive call. */
2323 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2324 else
2325 {
2326 /* This is the last step of the conversion sequence. Store the
2327 vectors in SLP_NODE or in vector info of the scalar statement
2328 (or in STMT_VINFO_RELATED_STMT chain). */
2329 if (slp_node)
2330 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2331 else
2332 {
2333 if (!*prev_stmt_info)
2334 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2335 else
2336 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2337
2338 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2339 }
2340 }
2341 }
2342
2343 /* For multi-step demotion operations we first generate demotion operations
2344 from the source type to the intermediate types, and then combine the
2345 results (stored in VEC_OPRNDS) in demotion operation to the destination
2346 type. */
2347 if (multi_step_cvt)
2348 {
 2349 /* At each level of recursion we have half of the operands we had at the
 2350 previous level. */
2351 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2352 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2353 stmt, vec_dsts, gsi, slp_node,
2354 code, prev_stmt_info);
2355 }
2356 }
2357
2358
2359 /* Function vectorizable_type_demotion
2360
2361 Check if STMT performs a binary or unary operation that involves
2362 type demotion, and if it can be vectorized.
2363 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2364 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2365 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2366
2367 static bool
2368 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2369 gimple *vec_stmt, slp_tree slp_node)
2370 {
2371 tree vec_dest;
2372 tree scalar_dest;
2373 tree op0;
2374 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2375 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2376 enum tree_code code, code1 = ERROR_MARK;
2377 tree def;
2378 gimple def_stmt;
2379 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2380 stmt_vec_info prev_stmt_info;
2381 int nunits_in;
2382 int nunits_out;
2383 tree vectype_out;
2384 int ncopies;
2385 int j, i;
2386 tree vectype_in;
2387 int multi_step_cvt = 0;
2388 VEC (tree, heap) *vec_oprnds0 = NULL;
2389 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2390 tree last_oprnd, intermediate_type;
2391
2392 /* FORNOW: not supported by basic block SLP vectorization. */
2393 gcc_assert (loop_vinfo);
2394
2395 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2396 return false;
2397
2398 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2399 return false;
2400
2401 /* Is STMT a vectorizable type-demotion operation? */
2402 if (!is_gimple_assign (stmt))
2403 return false;
2404
2405 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2406 return false;
2407
2408 code = gimple_assign_rhs_code (stmt);
2409 if (!CONVERT_EXPR_CODE_P (code))
2410 return false;
2411
2412 op0 = gimple_assign_rhs1 (stmt);
2413 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
2414 if (!vectype_in)
2415 return false;
2416 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2417
2418 scalar_dest = gimple_assign_lhs (stmt);
2419 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
2420 if (!vectype_out)
2421 return false;
2422 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2423 if (nunits_in >= nunits_out)
2424 return false;
2425
2426 /* Multiple types in SLP are handled by creating the appropriate number of
2427 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2428 case of SLP. */
2429 if (slp_node)
2430 ncopies = 1;
2431 else
2432 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2433 gcc_assert (ncopies >= 1);
2434
2435 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2436 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2437 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2438 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2439 && CONVERT_EXPR_CODE_P (code))))
2440 return false;
2441
2442 /* Check the operands of the operation. */
2443 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
2444 {
2445 if (vect_print_dump_info (REPORT_DETAILS))
2446 fprintf (vect_dump, "use not simple.");
2447 return false;
2448 }
2449
2450 /* Supportable by target? */
2451 if (!supportable_narrowing_operation (code, stmt, vectype_in, &code1,
2452 &multi_step_cvt, &interm_types))
2453 return false;
2454
2455 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2456
2457 if (!vec_stmt) /* transformation not required. */
2458 {
2459 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2460 if (vect_print_dump_info (REPORT_DETAILS))
2461 fprintf (vect_dump, "=== vectorizable_demotion ===");
2462 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2463 return true;
2464 }
2465
2466 /** Transform. **/
2467 if (vect_print_dump_info (REPORT_DETAILS))
2468 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2469 ncopies);
2470
2471 /* In case of multi-step demotion, we first generate demotion operations to
 2472 the intermediate types, and then from those types to the final one.
2473 We create vector destinations for the intermediate type (TYPES) received
2474 from supportable_narrowing_operation, and store them in the correct order
2475 for future use in vect_create_vectorized_demotion_stmts(). */
2476 if (multi_step_cvt)
2477 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2478 else
2479 vec_dsts = VEC_alloc (tree, heap, 1);
2480
2481 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2482 VEC_quick_push (tree, vec_dsts, vec_dest);
2483
2484 if (multi_step_cvt)
2485 {
2486 for (i = VEC_length (tree, interm_types) - 1;
2487 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2488 {
2489 vec_dest = vect_create_destination_var (scalar_dest,
2490 intermediate_type);
2491 VEC_quick_push (tree, vec_dsts, vec_dest);
2492 }
2493 }
2494
2495 /* In case the vectorization factor (VF) is bigger than the number
2496 of elements that we can fit in a vectype (nunits), we have to generate
2497 more than one vector stmt - i.e - we need to "unroll" the
2498 vector stmt by a factor VF/nunits. */
2499 last_oprnd = op0;
2500 prev_stmt_info = NULL;
2501 for (j = 0; j < ncopies; j++)
2502 {
2503 /* Handle uses. */
2504 if (slp_node)
2505 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL);
2506 else
2507 {
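 /* In the loop-based case each copy of the narrowed result consumes
    2**(MULTI_STEP_CVT+1) operand vectors; collect them here, chaining
    the defs from one copy to the next through LAST_OPRND. */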
2508 VEC_free (tree, heap, vec_oprnds0);
2509 vec_oprnds0 = VEC_alloc (tree, heap,
2510 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2511 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2512 vect_pow2 (multi_step_cvt) - 1);
2513 }
2514
2515 /* Arguments are ready. Create the new vector stmts. */
2516 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2517 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2518 multi_step_cvt, stmt, tmp_vec_dsts,
2519 gsi, slp_node, code1,
2520 &prev_stmt_info);
2521 }
2522
2523 VEC_free (tree, heap, vec_oprnds0);
2524 VEC_free (tree, heap, vec_dsts);
2525 VEC_free (tree, heap, tmp_vec_dsts);
2526 VEC_free (tree, heap, interm_types);
2527
2528 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2529 return true;
2530 }
2531
2532
2533 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2534 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2535 the resulting vectors and call the function recursively. */
2536
2537 static void
2538 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2539 VEC (tree, heap) **vec_oprnds1,
2540 int multi_step_cvt, gimple stmt,
2541 VEC (tree, heap) *vec_dsts,
2542 gimple_stmt_iterator *gsi,
2543 slp_tree slp_node, enum tree_code code1,
2544 enum tree_code code2, tree decl1,
2545 tree decl2, int op_type,
2546 stmt_vec_info *prev_stmt_info)
2547 {
2548 int i;
2549 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2550 gimple new_stmt1, new_stmt2;
2551 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2552 VEC (tree, heap) *vec_tmp;
2553
2554 vec_dest = VEC_pop (tree, vec_dsts);
2555 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2556
2557 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
2558 {
2559 if (op_type == binary_op)
2560 vop1 = VEC_index (tree, *vec_oprnds1, i);
2561 else
2562 vop1 = NULL_TREE;
2563
2564 /* Generate the two halves of promotion operation. */
2565 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2566 op_type, vec_dest, gsi, stmt);
2567 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2568 op_type, vec_dest, gsi, stmt);
2569 if (is_gimple_call (new_stmt1))
2570 {
2571 new_tmp1 = gimple_call_lhs (new_stmt1);
2572 new_tmp2 = gimple_call_lhs (new_stmt2);
2573 }
2574 else
2575 {
2576 new_tmp1 = gimple_assign_lhs (new_stmt1);
2577 new_tmp2 = gimple_assign_lhs (new_stmt2);
2578 }
2579
2580 if (multi_step_cvt)
2581 {
2582 /* Store the results for the recursive call. */
2583 VEC_quick_push (tree, vec_tmp, new_tmp1);
2584 VEC_quick_push (tree, vec_tmp, new_tmp2);
2585 }
2586 else
2587 {
 2588 /* Last step of promotion sequence - store the results. */
2589 if (slp_node)
2590 {
2591 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2592 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2593 }
2594 else
2595 {
2596 if (!*prev_stmt_info)
2597 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2598 else
2599 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2600
2601 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2602 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2603 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2604 }
2605 }
2606 }
2607
2608 if (multi_step_cvt)
2609 {
 2610 /* For a multi-step promotion operation we call the function
 2611 recursively for every stage. We start from the input type,
 2612 create promotion operations to the intermediate types, and then
 2613 create promotions to the output type. */
2614 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2615 VEC_free (tree, heap, vec_tmp);
2616 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2617 multi_step_cvt - 1, stmt,
2618 vec_dsts, gsi, slp_node, code1,
 2619 code2, decl1, decl2, op_type,
2620 prev_stmt_info);
2621 }
2622 }
2623
2624
2625 /* Function vectorizable_type_promotion
2626
2627 Check if STMT performs a binary or unary operation that involves
2628 type promotion, and if it can be vectorized.
2629 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2630 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2631 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2632
2633 static bool
2634 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2635 gimple *vec_stmt, slp_tree slp_node)
2636 {
2637 tree vec_dest;
2638 tree scalar_dest;
2639 tree op0, op1 = NULL;
2640 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
2641 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2642 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2643 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2644 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2645 int op_type;
2646 tree def;
2647 gimple def_stmt;
2648 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2649 stmt_vec_info prev_stmt_info;
2650 int nunits_in;
2651 int nunits_out;
2652 tree vectype_out;
2653 int ncopies;
2654 int j, i;
2655 tree vectype_in;
2656 tree intermediate_type = NULL_TREE;
2657 int multi_step_cvt = 0;
2658 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2659 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2660
2661 /* FORNOW: not supported by basic block SLP vectorization. */
2662 gcc_assert (loop_vinfo);
2663
2664 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2665 return false;
2666
2667 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2668 return false;
2669
2670 /* Is STMT a vectorizable type-promotion operation? */
2671 if (!is_gimple_assign (stmt))
2672 return false;
2673
2674 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2675 return false;
2676
2677 code = gimple_assign_rhs_code (stmt);
2678 if (!CONVERT_EXPR_CODE_P (code)
2679 && code != WIDEN_MULT_EXPR)
2680 return false;
2681
2682 op0 = gimple_assign_rhs1 (stmt);
2683 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
2684 if (!vectype_in)
2685 return false;
2686 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2687
2688 scalar_dest = gimple_assign_lhs (stmt);
2689 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
2690 if (!vectype_out)
2691 return false;
2692 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2693 if (nunits_in <= nunits_out)
2694 return false;
2695
2696 /* Multiple types in SLP are handled by creating the appropriate number of
2697 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2698 case of SLP. */
2699 if (slp_node)
2700 ncopies = 1;
2701 else
2702 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2703
2704 gcc_assert (ncopies >= 1);
2705
2706 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2707 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2708 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2709 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2710 && CONVERT_EXPR_CODE_P (code))))
2711 return false;
2712
2713 /* Check the operands of the operation. */
2714 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
2715 {
2716 if (vect_print_dump_info (REPORT_DETAILS))
2717 fprintf (vect_dump, "use not simple.");
2718 return false;
2719 }
2720
2721 op_type = TREE_CODE_LENGTH (code);
2722 if (op_type == binary_op)
2723 {
2724 op1 = gimple_assign_rhs2 (stmt);
2725 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2726 {
2727 if (vect_print_dump_info (REPORT_DETAILS))
2728 fprintf (vect_dump, "use not simple.");
2729 return false;
2730 }
2731 }
2732
2733 /* Supportable by target? */
2734 if (!supportable_widening_operation (code, stmt, vectype_in,
2735 &decl1, &decl2, &code1, &code2,
2736 &multi_step_cvt, &interm_types))
2737 return false;
2738
 2739 /* Binary widening operations can only be supported directly by the
 2740 architecture; a multi-step conversion is not possible for them. */
2741 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2742
2743 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2744
2745 if (!vec_stmt) /* transformation not required. */
2746 {
2747 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2748 if (vect_print_dump_info (REPORT_DETAILS))
2749 fprintf (vect_dump, "=== vectorizable_promotion ===");
2750 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2751 return true;
2752 }
2753
2754 /** Transform. **/
2755
2756 if (vect_print_dump_info (REPORT_DETAILS))
2757 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2758 ncopies);
2759
2760 /* Handle def. */
2761 /* In case of multi-step promotion, we first generate promotion operations
 2762 to the intermediate types, and then from those types to the final one.
2763 We store vector destination in VEC_DSTS in the correct order for
2764 recursive creation of promotion operations in
2765 vect_create_vectorized_promotion_stmts(). Vector destinations are created
 2766 according to TYPES received from supportable_widening_operation(). */
2767 if (multi_step_cvt)
2768 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2769 else
2770 vec_dsts = VEC_alloc (tree, heap, 1);
2771
2772 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2773 VEC_quick_push (tree, vec_dsts, vec_dest);
2774
2775 if (multi_step_cvt)
2776 {
2777 for (i = VEC_length (tree, interm_types) - 1;
2778 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2779 {
2780 vec_dest = vect_create_destination_var (scalar_dest,
2781 intermediate_type);
2782 VEC_quick_push (tree, vec_dsts, vec_dest);
2783 }
2784 }
2785
2786 if (!slp_node)
2787 {
2788 vec_oprnds0 = VEC_alloc (tree, heap,
2789 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2790 if (op_type == binary_op)
2791 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2792 }
2793
2794 /* In case the vectorization factor (VF) is bigger than the number
2795 of elements that we can fit in a vectype (nunits), we have to generate
2796 more than one vector stmt - i.e - we need to "unroll" the
2797 vector stmt by a factor VF/nunits. */
2798
2799 prev_stmt_info = NULL;
2800 for (j = 0; j < ncopies; j++)
2801 {
2802 /* Handle uses. */
2803 if (j == 0)
2804 {
2805 if (slp_node)
2806 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1);
2807 else
2808 {
2809 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2810 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2811 if (op_type == binary_op)
2812 {
2813 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2814 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2815 }
2816 }
2817 }
2818 else
2819 {
2820 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2821 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2822 if (op_type == binary_op)
2823 {
2824 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
2825 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
2826 }
2827 }
2828
2829 /* Arguments are ready. Create the new vector stmts. */
2830 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2831 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
2832 multi_step_cvt, stmt,
2833 tmp_vec_dsts,
2834 gsi, slp_node, code1, code2,
2835 decl1, decl2, op_type,
2836 &prev_stmt_info);
2837 }
2838
2839 VEC_free (tree, heap, vec_dsts);
2840 VEC_free (tree, heap, tmp_vec_dsts);
2841 VEC_free (tree, heap, interm_types);
2842 VEC_free (tree, heap, vec_oprnds0);
2843 VEC_free (tree, heap, vec_oprnds1);
2844
2845 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2846 return true;
2847 }
2848
2849
2850 /* Function vectorizable_store.
2851
2852 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
2853 can be vectorized.
2854 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2855 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2856 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2857
2858 static bool
2859 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2860 slp_tree slp_node)
2861 {
2862 tree scalar_dest;
2863 tree data_ref;
2864 tree op;
2865 tree vec_oprnd = NULL_TREE;
2866 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2867 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
2868 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2869 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2870 struct loop *loop = NULL;
2871 enum machine_mode vec_mode;
2872 tree dummy;
2873 enum dr_alignment_support alignment_support_scheme;
2874 tree def;
2875 gimple def_stmt;
2876 enum vect_def_type dt;
2877 stmt_vec_info prev_stmt_info = NULL;
2878 tree dataref_ptr = NULL_TREE;
2879 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2880 int ncopies;
2881 int j;
2882 gimple next_stmt, first_stmt = NULL;
2883 bool strided_store = false;
2884 unsigned int group_size, i;
2885 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
2886 bool inv_p;
2887 VEC(tree,heap) *vec_oprnds = NULL;
2888 bool slp = (slp_node != NULL);
2889 stmt_vec_info first_stmt_vinfo;
2890 unsigned int vec_num;
2891 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2892
2893 if (loop_vinfo)
2894 loop = LOOP_VINFO_LOOP (loop_vinfo);
2895
2896 /* Multiple types in SLP are handled by creating the appropriate number of
2897 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2898 case of SLP. */
2899 if (slp)
2900 ncopies = 1;
2901 else
2902 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2903
2904 gcc_assert (ncopies >= 1);
2905
2906 /* FORNOW. This restriction should be relaxed. */
2907 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
2908 {
2909 if (vect_print_dump_info (REPORT_DETAILS))
2910 fprintf (vect_dump, "multiple types in nested loop.");
2911 return false;
2912 }
2913
2914 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2915 return false;
2916
2917 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2918 return false;
2919
2920 /* Is vectorizable store? */
2921
2922 if (!is_gimple_assign (stmt))
2923 return false;
2924
2925 scalar_dest = gimple_assign_lhs (stmt);
2926 if (TREE_CODE (scalar_dest) != ARRAY_REF
2927 && TREE_CODE (scalar_dest) != INDIRECT_REF
2928 && TREE_CODE (scalar_dest) != COMPONENT_REF
2929 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
2930 && TREE_CODE (scalar_dest) != REALPART_EXPR)
2931 return false;
2932
2933 gcc_assert (gimple_assign_single_p (stmt));
2934 op = gimple_assign_rhs1 (stmt);
2935 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
2936 {
2937 if (vect_print_dump_info (REPORT_DETAILS))
2938 fprintf (vect_dump, "use not simple.");
2939 return false;
2940 }
2941
2942 /* The scalar rhs type needs to be trivially convertible to the vector
2943 component type. This should always be the case. */
2944 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
2945 {
2946 if (vect_print_dump_info (REPORT_DETAILS))
2947 fprintf (vect_dump, "??? operands of different types");
2948 return false;
2949 }
2950
2951 vec_mode = TYPE_MODE (vectype);
2952 /* FORNOW. In some cases can vectorize even if data-type not supported
2953 (e.g. - array initialization with 0). */
2954 if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
2955 return false;
2956
2957 if (!STMT_VINFO_DATA_REF (stmt_info))
2958 return false;
2959
2960 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
2961 {
2962 strided_store = true;
2963 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
2964 if (!vect_strided_store_supported (vectype)
2965 && !PURE_SLP_STMT (stmt_info) && !slp)
2966 return false;
2967
2968 if (first_stmt == stmt)
2969 {
2970 /* STMT is the leader of the group. Check the operands of all the
2971 stmts of the group. */
2972 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
2973 while (next_stmt)
2974 {
2975 gcc_assert (gimple_assign_single_p (next_stmt));
2976 op = gimple_assign_rhs1 (next_stmt);
2977 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
2978 &def, &dt))
2979 {
2980 if (vect_print_dump_info (REPORT_DETAILS))
2981 fprintf (vect_dump, "use not simple.");
2982 return false;
2983 }
2984 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
2985 }
2986 }
2987 }
2988
2989 if (!vec_stmt) /* transformation not required. */
2990 {
2991 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
2992 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
2993 return true;
2994 }
2995
2996 /** Transform. **/
2997
2998 if (strided_store)
2999 {
3000 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3001 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3002
3003 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3004
3005 /* FORNOW */
3006 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3007
3008 /* We vectorize all the stmts of the interleaving group when we
3009 reach the last stmt in the group. */
3010 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3011 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3012 && !slp)
3013 {
3014 *vec_stmt = NULL;
3015 return true;
3016 }
3017
3018 if (slp)
3019 strided_store = false;
3020
3021 /* VEC_NUM is the number of vect stmts to be created for this group. */
3022 if (slp)
3023 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3024 else
3025 vec_num = group_size;
3026 }
3027 else
3028 {
3029 first_stmt = stmt;
3030 first_dr = dr;
3031 group_size = vec_num = 1;
3032 first_stmt_vinfo = stmt_info;
3033 }
3034
3035 if (vect_print_dump_info (REPORT_DETAILS))
3036 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3037
3038 dr_chain = VEC_alloc (tree, heap, group_size);
3039 oprnds = VEC_alloc (tree, heap, group_size);
3040
3041 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3042 gcc_assert (alignment_support_scheme);
3043
3044 /* In case the vectorization factor (VF) is bigger than the number
3045 of elements that we can fit in a vectype (nunits), we have to generate
3046 more than one vector stmt - i.e - we need to "unroll" the
3047 vector stmt by a factor VF/nunits. For more details see documentation in
3048 vect_get_vec_def_for_copy_stmt. */
3049
3050 /* In case of interleaving (non-unit strided access):
3051
3052 S1: &base + 2 = x2
3053 S2: &base = x0
3054 S3: &base + 1 = x1
3055 S4: &base + 3 = x3
3056
3057 We create vectorized stores starting from base address (the access of the
3058 first stmt in the chain (S2 in the above example), when the last store stmt
3059 of the chain (S4) is reached:
3060
3061 VS1: &base = vx2
3062 VS2: &base + vec_size*1 = vx0
3063 VS3: &base + vec_size*2 = vx1
3064 VS4: &base + vec_size*3 = vx3
3065
3066 Then permutation statements are generated:
3067
3068 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3069 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3070 ...
3071
3072 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3073 (the order of the data-refs in the output of vect_permute_store_chain
3074 corresponds to the order of scalar stmts in the interleaving chain - see
3075 the documentation of vect_permute_store_chain()).
3076
3077 In case of both multiple types and interleaving, above vector stores and
3078 permutation stmts are created for every copy. The result vector stmts are
3079 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3080 STMT_VINFO_RELATED_STMT for the next copies.
3081 */
3082
3083 prev_stmt_info = NULL;
3084 for (j = 0; j < ncopies; j++)
3085 {
3086 gimple new_stmt;
3087 gimple ptr_incr;
3088
3089 if (j == 0)
3090 {
3091 if (slp)
3092 {
3093 /* Get vectorized arguments for SLP_NODE. */
3094 vect_get_slp_defs (slp_node, &vec_oprnds, NULL);
3095
3096 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3097 }
3098 else
3099 {
3100 /* For interleaved stores we collect vectorized defs for all the
3101 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3102 used as an input to vect_permute_store_chain(), and OPRNDS as
3103 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3104
3105 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3106 OPRNDS are of size 1. */
3107 next_stmt = first_stmt;
3108 for (i = 0; i < group_size; i++)
3109 {
3110 /* Since gaps are not supported for interleaved stores,
3111 GROUP_SIZE is the exact number of stmts in the chain.
 3112 Therefore, NEXT_STMT can't be NULL. In case that
3113 there is no interleaving, GROUP_SIZE is 1, and only one
3114 iteration of the loop will be executed. */
3115 gcc_assert (next_stmt
3116 && gimple_assign_single_p (next_stmt));
3117 op = gimple_assign_rhs1 (next_stmt);
3118
3119 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3120 NULL);
3121 VEC_quick_push(tree, dr_chain, vec_oprnd);
3122 VEC_quick_push(tree, oprnds, vec_oprnd);
3123 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3124 }
3125 }
3126
 3127 /* We should have caught mismatched types earlier. */
3128 gcc_assert (useless_type_conversion_p (vectype,
3129 TREE_TYPE (vec_oprnd)));
3130 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3131 &dummy, &ptr_incr, false,
3132 &inv_p);
3133 gcc_assert (bb_vinfo || !inv_p);
3134 }
3135 else
3136 {
3137 /* For interleaved stores we created vectorized defs for all the
3138 defs stored in OPRNDS in the previous iteration (previous copy).
3139 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3140 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3141 next copy.
3142 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3143 OPRNDS are of size 1. */
3144 for (i = 0; i < group_size; i++)
3145 {
3146 op = VEC_index (tree, oprnds, i);
3147 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3148 &dt);
3149 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3150 VEC_replace(tree, dr_chain, i, vec_oprnd);
3151 VEC_replace(tree, oprnds, i, vec_oprnd);
3152 }
3153 dataref_ptr =
3154 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3155 }
3156
3157 if (strided_store)
3158 {
3159 result_chain = VEC_alloc (tree, heap, group_size);
3160 /* Permute. */
3161 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3162 &result_chain))
3163 return false;
3164 }
3165
3166 next_stmt = first_stmt;
3167 for (i = 0; i < vec_num; i++)
3168 {
3169 if (i > 0)
3170 /* Bump the vector pointer. */
3171 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3172 NULL_TREE);
3173
3174 if (slp)
3175 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3176 else if (strided_store)
3177 /* For strided stores vectorized defs are interleaved in
3178 vect_permute_store_chain(). */
3179 vec_oprnd = VEC_index (tree, result_chain, i);
3180
3181 if (aligned_access_p (first_dr))
3182 data_ref = build_fold_indirect_ref (dataref_ptr);
3183 else
3184 {
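 /* DR_MISALIGNMENT is recorded in bytes (-1 when unknown, treated as
    zero here); convert it to bits for the MISALIGNED_INDIRECT_REF. */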
3185 int mis = DR_MISALIGNMENT (first_dr);
3186 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3187 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3188 data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3189 }
3190
3191 /* If accesses through a pointer to vectype do not alias the original
3192 memory reference we have a problem. This should never happen. */
3193 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3194 get_alias_set (gimple_assign_lhs (stmt))));
3195
3196 /* Arguments are ready. Create the new vector stmt. */
3197 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3198 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3199 mark_symbols_for_renaming (new_stmt);
3200
3201 if (slp)
3202 continue;
3203
3204 if (j == 0)
3205 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3206 else
3207 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3208
3209 prev_stmt_info = vinfo_for_stmt (new_stmt);
3210 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3211 if (!next_stmt)
3212 break;
3213 }
3214 }
3215
3216 VEC_free (tree, heap, dr_chain);
3217 VEC_free (tree, heap, oprnds);
3218 if (result_chain)
3219 VEC_free (tree, heap, result_chain);
3220
3221 return true;
3222 }
3223
3224 /* vectorizable_load.
3225
3226 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
3227 can be vectorized.
3228 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3229 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3230 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3231
3232 static bool
3233 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3234 slp_tree slp_node, slp_instance slp_node_instance)
3235 {
3236 tree scalar_dest;
3237 tree vec_dest = NULL;
3238 tree data_ref = NULL;
3239 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3240 stmt_vec_info prev_stmt_info;
3241 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3242 struct loop *loop = NULL;
3243 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3244 bool nested_in_vect_loop = false;
3245 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3246 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3247 tree new_temp;
3248 int mode;
3249 gimple new_stmt = NULL;
3250 tree dummy;
3251 enum dr_alignment_support alignment_support_scheme;
3252 tree dataref_ptr = NULL_TREE;
3253 gimple ptr_incr;
3254 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3255 int ncopies;
3256 int i, j, group_size;
3257 tree msq = NULL_TREE, lsq;
3258 tree offset = NULL_TREE;
3259 tree realignment_token = NULL_TREE;
3260 gimple phi = NULL;
3261 VEC(tree,heap) *dr_chain = NULL;
3262 bool strided_load = false;
3263 gimple first_stmt;
3264 tree scalar_type;
3265 bool inv_p;
3266 bool compute_in_loop = false;
3267 struct loop *at_loop;
3268 int vec_num;
3269 bool slp = (slp_node != NULL);
3270 bool slp_perm = false;
3271 enum tree_code code;
3272 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3273 int vf;
3274
3275 if (loop_vinfo)
3276 {
3277 loop = LOOP_VINFO_LOOP (loop_vinfo);
3278 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3279 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3280 }
3281 else
3282 /* FORNOW: multiple types are not supported in basic block SLP. */
3283 vf = nunits;
3284
3285 /* Multiple types in SLP are handled by creating the appropriate number of
3286 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3287 case of SLP. */
3288 if (slp)
3289 ncopies = 1;
3290 else
3291 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3292
3293 gcc_assert (ncopies >= 1);
3294
3295 /* FORNOW. This restriction should be relaxed. */
3296 if (nested_in_vect_loop && ncopies > 1)
3297 {
3298 if (vect_print_dump_info (REPORT_DETAILS))
3299 fprintf (vect_dump, "multiple types in nested loop.");
3300 return false;
3301 }
3302
3303 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3304 return false;
3305
3306 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3307 return false;
3308
3309 /* Is vectorizable load? */
3310 if (!is_gimple_assign (stmt))
3311 return false;
3312
3313 scalar_dest = gimple_assign_lhs (stmt);
3314 if (TREE_CODE (scalar_dest) != SSA_NAME)
3315 return false;
3316
3317 code = gimple_assign_rhs_code (stmt);
3318 if (code != ARRAY_REF
3319 && code != INDIRECT_REF
3320 && code != COMPONENT_REF
3321 && code != IMAGPART_EXPR
3322 && code != REALPART_EXPR)
3323 return false;
3324
3325 if (!STMT_VINFO_DATA_REF (stmt_info))
3326 return false;
3327
3328 scalar_type = TREE_TYPE (DR_REF (dr));
3329 mode = (int) TYPE_MODE (vectype);
3330
3331 /* FORNOW. In some cases can vectorize even if data-type not supported
3332 (e.g. - data copies). */
3333 if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
3334 {
3335 if (vect_print_dump_info (REPORT_DETAILS))
3336 fprintf (vect_dump, "Aligned load, but unsupported type.");
3337 return false;
3338 }
3339
3340 /* The vector component type needs to be trivially convertible to the
3341 scalar lhs. This should always be the case. */
3342 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3343 {
3344 if (vect_print_dump_info (REPORT_DETAILS))
3345 fprintf (vect_dump, "??? operands of different types");
3346 return false;
3347 }
3348
3349 /* Check if the load is a part of an interleaving chain. */
3350 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3351 {
3352 strided_load = true;
3353 /* FORNOW */
3354 gcc_assert (! nested_in_vect_loop);
3355
3356 /* Check if interleaving is supported. */
3357 if (!vect_strided_load_supported (vectype)
3358 && !PURE_SLP_STMT (stmt_info) && !slp)
3359 return false;
3360 }
3361
3362 if (!vec_stmt) /* transformation not required. */
3363 {
3364 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3365 vect_model_load_cost (stmt_info, ncopies, NULL);
3366 return true;
3367 }
3368
3369 if (vect_print_dump_info (REPORT_DETAILS))
3370 fprintf (vect_dump, "transform load.");
3371
3372 /** Transform. **/
3373
3374 if (strided_load)
3375 {
3376 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3377 /* Check if the chain of loads is already vectorized. */
3378 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3379 {
3380 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3381 return true;
3382 }
3383 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3384 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3385
3386 /* VEC_NUM is the number of vect stmts to be created for this group. */
3387 if (slp)
3388 {
3389 strided_load = false;
3390 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3391 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3392 slp_perm = true;
3393 }
3394 else
3395 vec_num = group_size;
3396
3397 dr_chain = VEC_alloc (tree, heap, vec_num);
3398 }
3399 else
3400 {
3401 first_stmt = stmt;
3402 first_dr = dr;
3403 group_size = vec_num = 1;
3404 }
3405
3406 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3407 gcc_assert (alignment_support_scheme);
3408
3409 /* In case the vectorization factor (VF) is bigger than the number
3410 of elements that we can fit in a vectype (nunits), we have to generate
3411 more than one vector stmt - i.e - we need to "unroll" the
3412 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3413 from one copy of the vector stmt to the next, in the field
3414 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3415 stages to find the correct vector defs to be used when vectorizing
3416 stmts that use the defs of the current stmt. The example below illustrates
3417 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3418 4 vectorized stmts):
3419
3420 before vectorization:
3421 RELATED_STMT VEC_STMT
3422 S1: x = memref - -
3423 S2: z = x + 1 - -
3424
3425 step 1: vectorize stmt S1:
3426 We first create the vector stmt VS1_0, and, as usual, record a
3427 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3428 Next, we create the vector stmt VS1_1, and record a pointer to
3429 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3430 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3431 stmts and pointers:
3432 RELATED_STMT VEC_STMT
3433 VS1_0: vx0 = memref0 VS1_1 -
3434 VS1_1: vx1 = memref1 VS1_2 -
3435 VS1_2: vx2 = memref2 VS1_3 -
3436 VS1_3: vx3 = memref3 - -
3437 S1: x = load - VS1_0
3438 S2: z = x + 1 - -
3439
3440 See in documentation in vect_get_vec_def_for_stmt_copy for how the
3441 information we recorded in RELATED_STMT field is used to vectorize
3442 stmt S2. */
3443
3444 /* In case of interleaving (non-unit strided access):
3445
3446 S1: x2 = &base + 2
3447 S2: x0 = &base
3448 S3: x1 = &base + 1
3449 S4: x3 = &base + 3
3450
3451 Vectorized loads are created in the order of memory accesses
3452 starting from the access of the first stmt of the chain:
3453
3454 VS1: vx0 = &base
3455 VS2: vx1 = &base + vec_size*1
3456 VS3: vx2 = &base + vec_size*2
3457 VS4: vx3 = &base + vec_size*3
3458
3459 Then permutation statements are generated:
3460
3461 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3462 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3463 ...
3464
3465 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3466 (the order of the data-refs in the output of vect_permute_load_chain
3467 corresponds to the order of scalar stmts in the interleaving chain - see
3468 the documentation of vect_permute_load_chain()).
3469 The generation of permutation stmts and recording them in
3470 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3471
3472 In case of both multiple types and interleaving, the vector loads and
3473 permutation stmts above are created for every copy. The result vector stmts
3474 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3475 STMT_VINFO_RELATED_STMT for the next copies. */
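/* For illustration (a hypothetical case with a group of two interleaved
   loads and 4 elements per vector): if memory holds the interleaved
   values {x0,y0,x1,y1,x2,y2,x3,y3}, the two vector loads yield
       vx0 = {x0,y0,x1,y1}    vx1 = {x2,y2,x3,y3}
   and the permutation stmts deinterleave them:
       VEC_EXTRACT_EVEN_EXPR <vx0, vx1> = {x0,x1,x2,x3}
       VEC_EXTRACT_ODD_EXPR  <vx0, vx1> = {y0,y1,y2,y3}.  */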
3476
3477 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3478 on a target that supports unaligned accesses (dr_unaligned_supported)
3479 we generate the following code:
3480 p = initial_addr;
3481 indx = 0;
3482 loop {
3483 p = p + indx * vectype_size;
3484 vec_dest = *(p);
3485 indx = indx + 1;
3486 }
3487
3488 Otherwise, the data reference is potentially unaligned on a target that
3489 does not support unaligned accesses (dr_explicit_realign_optimized) -
3490 then generate the following code, in which the data in each iteration is
3491 obtained by two vector loads, one from the previous iteration, and one
3492 from the current iteration:
3493 p1 = initial_addr;
3494 msq_init = *(floor(p1))
3495 p2 = initial_addr + VS - 1;
3496 realignment_token = call target_builtin;
3497 indx = 0;
3498 loop {
3499 p2 = p2 + indx * vectype_size
3500 lsq = *(floor(p2))
3501 vec_dest = realign_load (msq, lsq, realignment_token)
3502 indx = indx + 1;
3503 msq = lsq;
3504 } */
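/* For illustration (hypothetical numbers): with 16-byte vectors and an
   access whose misalignment is 4 bytes, floor(p) is the enclosing
   16-byte aligned address, so msq covers bytes [p-4, p+12) and lsq
   covers bytes [p+12, p+28); realign_load then extracts the 16 bytes
   starting at p from the concatenation of msq and lsq, as directed by
   the realignment token.  */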
3505
3506 /* If the misalignment remains the same throughout the execution of the
3507 loop, we can create the init_addr and permutation mask at the loop
3508 preheader. Otherwise, they need to be created inside the loop.
3509 This can only occur when vectorizing memory accesses in the inner-loop
3510 nested within an outer-loop that is being vectorized. */
3511
3512 if (loop && nested_in_vect_loop_p (loop, stmt)
3513 && (TREE_INT_CST_LOW (DR_STEP (dr))
3514 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3515 {
3516 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3517 compute_in_loop = true;
3518 }
3519
3520 if ((alignment_support_scheme == dr_explicit_realign_optimized
3521 || alignment_support_scheme == dr_explicit_realign)
3522 && !compute_in_loop)
3523 {
3524 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3525 alignment_support_scheme, NULL_TREE,
3526 &at_loop);
3527 if (alignment_support_scheme == dr_explicit_realign_optimized)
3528 {
3529 phi = SSA_NAME_DEF_STMT (msq);
3530 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3531 }
3532 }
3533 else
3534 at_loop = loop;
3535
3536 prev_stmt_info = NULL;
3537 for (j = 0; j < ncopies; j++)
3538 {
3539 /* 1. Create the vector pointer update chain. */
3540 if (j == 0)
3541 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
3542 at_loop, offset,
3543 &dummy, &ptr_incr, false,
3544 &inv_p);
3545 else
3546 dataref_ptr =
3547 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3548
3549 for (i = 0; i < vec_num; i++)
3550 {
3551 if (i > 0)
3552 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3553 NULL_TREE);
3554
3555 /* 2. Create the vector-load in the loop. */
3556 switch (alignment_support_scheme)
3557 {
3558 case dr_aligned:
3559 gcc_assert (aligned_access_p (first_dr));
3560 data_ref = build_fold_indirect_ref (dataref_ptr);
3561 break;
3562 case dr_unaligned_supported:
3563 {
3564 int mis = DR_MISALIGNMENT (first_dr);
3565 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3566
3567 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3568 data_ref =
3569 build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3570 break;
3571 }
3572 case dr_explicit_realign:
3573 {
3574 tree ptr, bump;
3575 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3576
3577 if (compute_in_loop)
3578 msq = vect_setup_realignment (first_stmt, gsi,
3579 &realignment_token,
3580 dr_explicit_realign,
3581 dataref_ptr, NULL);
3582
3583 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3584 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3585 new_stmt = gimple_build_assign (vec_dest, data_ref);
3586 new_temp = make_ssa_name (vec_dest, new_stmt);
3587 gimple_assign_set_lhs (new_stmt, new_temp);
3588 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3589 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
3590 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3591 msq = new_temp;
3592
3593 bump = size_binop (MULT_EXPR, vs_minus_1,
3594 TYPE_SIZE_UNIT (scalar_type));
3595 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3596 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
3597 break;
3598 }
3599 case dr_explicit_realign_optimized:
3600 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3601 break;
3602 default:
3603 gcc_unreachable ();
3604 }
3605 /* If accesses through a pointer to vectype do not alias the original
3606 memory reference we have a problem. This should never happen. */
3607 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3608 get_alias_set (gimple_assign_rhs1 (stmt))));
3609 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3610 new_stmt = gimple_build_assign (vec_dest, data_ref);
3611 new_temp = make_ssa_name (vec_dest, new_stmt);
3612 gimple_assign_set_lhs (new_stmt, new_temp);
3613 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3614 mark_symbols_for_renaming (new_stmt);
3615
3616 /* 3. Handle explicit realignment if necessary/supported. Create in
3617 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3618 if (alignment_support_scheme == dr_explicit_realign_optimized
3619 || alignment_support_scheme == dr_explicit_realign)
3620 {
3621 tree tmp;
3622
3623 lsq = gimple_assign_lhs (new_stmt);
3624 if (!realignment_token)
3625 realignment_token = dataref_ptr;
3626 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3627 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3628 realignment_token);
3629 new_stmt = gimple_build_assign (vec_dest, tmp);
3630 new_temp = make_ssa_name (vec_dest, new_stmt);
3631 gimple_assign_set_lhs (new_stmt, new_temp);
3632 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3633
3634 if (alignment_support_scheme == dr_explicit_realign_optimized)
3635 {
3636 gcc_assert (phi);
3637 if (i == vec_num - 1 && j == ncopies - 1)
3638 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop));
3639 msq = lsq;
3640 }
3641 }
3642
3643 /* 4. Handle invariant-load. */
3644 if (inv_p && !bb_vinfo)
3645 {
3646 gcc_assert (!strided_load);
3647 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3648 if (j == 0)
3649 {
3650 int k;
3651 tree t = NULL_TREE;
3652 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3653
3654 /* CHECKME: bitpos depends on endianness? */
3655 bitpos = bitsize_zero_node;
3656 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
3657 bitsize, bitpos);
3658 vec_dest =
3659 vect_create_destination_var (scalar_dest, NULL_TREE);
3660 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3661 new_temp = make_ssa_name (vec_dest, new_stmt);
3662 gimple_assign_set_lhs (new_stmt, new_temp);
3663 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3664
3665 for (k = nunits - 1; k >= 0; --k)
3666 t = tree_cons (NULL_TREE, new_temp, t);
3667 /* FIXME: use build_constructor directly. */
3668 vec_inv = build_constructor_from_list (vectype, t);
3669 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3670 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3671 }
3672 else
3673 gcc_unreachable (); /* FORNOW. */
3674 }
3675
3676 /* Collect vector loads and later create their permutation in
3677 vect_transform_strided_load (). */
3678 if (strided_load || slp_perm)
3679 VEC_quick_push (tree, dr_chain, new_temp);
3680
3681 /* Store vector loads in the corresponding SLP_NODE. */
3682 if (slp && !slp_perm)
3683 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3684 }
3685
3686 if (slp && !slp_perm)
3687 continue;
3688
3689 if (slp_perm)
3690 {
3691 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
3692 slp_node_instance, false))
3693 {
3694 VEC_free (tree, heap, dr_chain);
3695 return false;
3696 }
3697 }
3698 else
3699 {
3700 if (strided_load)
3701 {
3702 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
3703 return false;
3704
3705 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3706 VEC_free (tree, heap, dr_chain);
3707 dr_chain = VEC_alloc (tree, heap, group_size);
3708 }
3709 else
3710 {
3711 if (j == 0)
3712 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3713 else
3714 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3715 prev_stmt_info = vinfo_for_stmt (new_stmt);
3716 }
3717 }
3718 }
3719
3720 if (dr_chain)
3721 VEC_free (tree, heap, dr_chain);
3722
3723 return true;
3724 }
3725
3726 /* Function vect_is_simple_cond.
3727
3728 Input:
3729 LOOP - the loop that is being vectorized.
3730 COND - Condition that is checked for simple use.
3731
3732 Returns whether a COND can be vectorized. Checks whether
3733 condition operands are supportable using vect_is_simple_use. */
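/* For example, a condition like "x_1 < y_2", where both operands are
   SSA names with supportable defs (or constants, e.g. "x_1 != 0"), is
   considered simple; an operand that is neither an SSA name nor an
   INTEGER_CST/REAL_CST/FIXED_CST causes the condition to be
   rejected.  */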
3734
3735 static bool
3736 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3737 {
3738 tree lhs, rhs;
3739 tree def;
3740 enum vect_def_type dt;
3741
3742 if (!COMPARISON_CLASS_P (cond))
3743 return false;
3744
3745 lhs = TREE_OPERAND (cond, 0);
3746 rhs = TREE_OPERAND (cond, 1);
3747
3748 if (TREE_CODE (lhs) == SSA_NAME)
3749 {
3750 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
3751 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
3752 &dt))
3753 return false;
3754 }
3755 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
3756 && TREE_CODE (lhs) != FIXED_CST)
3757 return false;
3758
3759 if (TREE_CODE (rhs) == SSA_NAME)
3760 {
3761 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
3762 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
3763 &dt))
3764 return false;
3765 }
3766 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
3767 && TREE_CODE (rhs) != FIXED_CST)
3768 return false;
3769
3770 return true;
3771 }
3772
3773 /* vectorizable_condition.
3774
3775 Check if STMT is conditional modify expression that can be vectorized.
3776 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3777 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
3778 at BSI.
3779
3780 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
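/* As a sketch of the transformation (illustrative names, not taken from
   an actual dump): a scalar stmt
       x = a < b ? c : d;
   becomes a single vector stmt
       vx = VEC_COND_EXPR <va < vb, vc, vd>;
   where va, vb, vc and vd are the vector defs of the corresponding
   scalar operands.  */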
3781
3782 static bool
3783 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
3784 gimple *vec_stmt)
3785 {
3786 tree scalar_dest = NULL_TREE;
3787 tree vec_dest = NULL_TREE;
3788 tree op = NULL_TREE;
3789 tree cond_expr, then_clause, else_clause;
3790 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3791 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3792 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
3793 tree vec_compare, vec_cond_expr;
3794 tree new_temp;
3795 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3796 enum machine_mode vec_mode;
3797 tree def;
3798 enum vect_def_type dt;
3799 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3800 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3801 enum tree_code code;
3802
3803 /* FORNOW: unsupported in basic block SLP. */
3804 gcc_assert (loop_vinfo);
3805
3806 gcc_assert (ncopies >= 1);
3807 if (ncopies > 1)
3808 return false; /* FORNOW */
3809
3810 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3811 return false;
3812
3813 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3814 return false;
3815
3816 /* FORNOW: SLP not supported. */
3817 if (STMT_SLP_TYPE (stmt_info))
3818 return false;
3819
3820 /* FORNOW: not yet supported. */
3821 if (STMT_VINFO_LIVE_P (stmt_info))
3822 {
3823 if (vect_print_dump_info (REPORT_DETAILS))
3824 fprintf (vect_dump, "value used after loop.");
3825 return false;
3826 }
3827
3828 /* Is this a vectorizable conditional operation? */
3829 if (!is_gimple_assign (stmt))
3830 return false;
3831
3832 code = gimple_assign_rhs_code (stmt);
3833
3834 if (code != COND_EXPR)
3835 return false;
3836
3837 gcc_assert (gimple_assign_single_p (stmt));
3838 op = gimple_assign_rhs1 (stmt);
3839 cond_expr = TREE_OPERAND (op, 0);
3840 then_clause = TREE_OPERAND (op, 1);
3841 else_clause = TREE_OPERAND (op, 2);
3842
3843 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
3844 return false;
3845
3846 /* We do not handle two different vector types for the condition
3847 and the values. */
3848 if (TREE_TYPE (TREE_OPERAND (cond_expr, 0)) != TREE_TYPE (vectype))
3849 return false;
3850
3851 if (TREE_CODE (then_clause) == SSA_NAME)
3852 {
3853 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
3854 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
3855 &then_def_stmt, &def, &dt))
3856 return false;
3857 }
3858 else if (TREE_CODE (then_clause) != INTEGER_CST
3859 && TREE_CODE (then_clause) != REAL_CST
3860 && TREE_CODE (then_clause) != FIXED_CST)
3861 return false;
3862
3863 if (TREE_CODE (else_clause) == SSA_NAME)
3864 {
3865 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
3866 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
3867 &else_def_stmt, &def, &dt))
3868 return false;
3869 }
3870 else if (TREE_CODE (else_clause) != INTEGER_CST
3871 && TREE_CODE (else_clause) != REAL_CST
3872 && TREE_CODE (else_clause) != FIXED_CST)
3873 return false;
3874
3875
3876 vec_mode = TYPE_MODE (vectype);
3877
3878 if (!vec_stmt)
3879 {
3880 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
3881 return expand_vec_cond_expr_p (op, vec_mode);
3882 }
3883
3884 /* Transform */
3885
3886 /* Handle def. */
3887 scalar_dest = gimple_assign_lhs (stmt);
3888 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3889
3890 /* Handle cond expr. */
3891 vec_cond_lhs =
3892 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
3893 vec_cond_rhs =
3894 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
3895 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
3896 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
3897
3898 /* Arguments are ready. Create the new vector stmt. */
3899 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
3900 vec_cond_lhs, vec_cond_rhs);
3901 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
3902 vec_compare, vec_then_clause, vec_else_clause);
3903
3904 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
3905 new_temp = make_ssa_name (vec_dest, *vec_stmt);
3906 gimple_assign_set_lhs (*vec_stmt, new_temp);
3907 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
3908
3909 return true;
3910 }
3911
3912
3913 /* Make sure the statement is vectorizable. */
3914
3915 bool
3916 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
3917 {
3918 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3919 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3920 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
3921 bool ok;
3922 HOST_WIDE_INT dummy;
3923 tree scalar_type, vectype;
3924
3925 if (vect_print_dump_info (REPORT_DETAILS))
3926 {
3927 fprintf (vect_dump, "==> examining statement: ");
3928 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
3929 }
3930
3931 if (gimple_has_volatile_ops (stmt))
3932 {
3933 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
3934 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
3935
3936 return false;
3937 }
3938
3939 /* Skip stmts that do not need to be vectorized. In loops this is expected
3940 to include:
3941 - the COND_EXPR which is the loop exit condition
3942 - any LABEL_EXPRs in the loop
3943 - computations that are used only for array indexing or loop control.
3944 In basic blocks we only analyze statements that are part of some SLP
3945 instance; therefore, all the statements are relevant. */
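/* E.g., in a loop like "for (i = 0; i < n; i++) a[i] = b[i] + 1;" the
   increment of i and the loop-exit test are used only for indexing and
   loop control, so they are marked irrelevant and skipped here.  */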
3946
3947 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3948 && !STMT_VINFO_LIVE_P (stmt_info))
3949 {
3950 if (vect_print_dump_info (REPORT_DETAILS))
3951 fprintf (vect_dump, "irrelevant.");
3952
3953 return true;
3954 }
3955
3956 switch (STMT_VINFO_DEF_TYPE (stmt_info))
3957 {
3958 case vect_internal_def:
3959 break;
3960
3961 case vect_reduction_def:
3962 case vect_nested_cycle:
3963 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
3964 || relevance == vect_used_in_outer_by_reduction
3965 || relevance == vect_unused_in_scope));
3966 break;
3967
3968 case vect_induction_def:
3969 case vect_constant_def:
3970 case vect_external_def:
3971 case vect_unknown_def_type:
3972 default:
3973 gcc_unreachable ();
3974 }
3975
3976 if (bb_vinfo)
3977 {
3978 gcc_assert (PURE_SLP_STMT (stmt_info));
3979
3980 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
3981 if (vect_print_dump_info (REPORT_DETAILS))
3982 {
3983 fprintf (vect_dump, "get vectype for scalar type: ");
3984 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
3985 }
3986
3987 vectype = get_vectype_for_scalar_type (scalar_type);
3988 if (!vectype)
3989 {
3990 if (vect_print_dump_info (REPORT_DETAILS))
3991 {
3992 fprintf (vect_dump, "not SLPed: unsupported data-type ");
3993 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
3994 }
3995 return false;
3996 }
3997
3998 if (vect_print_dump_info (REPORT_DETAILS))
3999 {
4000 fprintf (vect_dump, "vectype: ");
4001 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4002 }
4003
4004 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4005 }
4006
4007 if (STMT_VINFO_RELEVANT_P (stmt_info))
4008 {
4009 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4010 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4011 *need_to_vectorize = true;
4012 }
4013
4014 ok = true;
4015 if (!bb_vinfo
4016 && (STMT_VINFO_RELEVANT_P (stmt_info)
4017 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4018 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4019 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4020 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4021 || vectorizable_operation (stmt, NULL, NULL, NULL)
4022 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4023 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4024 || vectorizable_call (stmt, NULL, NULL)
4025 || vectorizable_store (stmt, NULL, NULL, NULL)
4026 || vectorizable_condition (stmt, NULL, NULL)
4027 || vectorizable_reduction (stmt, NULL, NULL));
4028 else
4029 {
4030 if (bb_vinfo)
4031 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4032 || vectorizable_assignment (stmt, NULL, NULL, node)
4033 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4034 || vectorizable_store (stmt, NULL, NULL, node));
4035 }
4036
4037 if (!ok)
4038 {
4039 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4040 {
4041 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4042 fprintf (vect_dump, "supported: ");
4043 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4044 }
4045
4046 return false;
4047 }
4048
4049 if (bb_vinfo)
4050 return true;
4051
4052 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
4053 need extra handling, except for vectorizable reductions. */
4054 if (STMT_VINFO_LIVE_P (stmt_info)
4055 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4056 ok = vectorizable_live_operation (stmt, NULL, NULL);
4057
4058 if (!ok)
4059 {
4060 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4061 {
4062 fprintf (vect_dump, "not vectorized: live stmt not ");
4063 fprintf (vect_dump, "supported: ");
4064 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4065 }
4066
4067 return false;
4068 }
4069
4070 if (!PURE_SLP_STMT (stmt_info))
4071 {
4072 /* Groups of strided accesses whose size is not a power of 2 are not
4073 vectorizable yet using loop-vectorization. Therefore, if this stmt
4074 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4075 loop-based vectorized), the loop cannot be vectorized. */
4076 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4077 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4078 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4079 {
4080 if (vect_print_dump_info (REPORT_DETAILS))
4081 {
4082 fprintf (vect_dump, "not vectorized: the size of group "
4083 "of strided accesses is not a power of 2");
4084 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4085 }
4086
4087 return false;
4088 }
4089 }
4090
4091 return true;
4092 }
4093
4094
4095 /* Function vect_transform_stmt.
4096
4097 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4098
4099 bool
4100 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4101 bool *strided_store, slp_tree slp_node,
4102 slp_instance slp_node_instance)
4103 {
4104 bool is_store = false;
4105 gimple vec_stmt = NULL;
4106 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4107 gimple orig_stmt_in_pattern;
4108 bool done;
4109
4110 switch (STMT_VINFO_TYPE (stmt_info))
4111 {
4112 case type_demotion_vec_info_type:
4113 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4114 gcc_assert (done);
4115 break;
4116
4117 case type_promotion_vec_info_type:
4118 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4119 gcc_assert (done);
4120 break;
4121
4122 case type_conversion_vec_info_type:
4123 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4124 gcc_assert (done);
4125 break;
4126
4127 case induc_vec_info_type:
4128 gcc_assert (!slp_node);
4129 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4130 gcc_assert (done);
4131 break;
4132
4133 case op_vec_info_type:
4134 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4135 gcc_assert (done);
4136 break;
4137
4138 case assignment_vec_info_type:
4139 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4140 gcc_assert (done);
4141 break;
4142
4143 case load_vec_info_type:
4144 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4145 slp_node_instance);
4146 gcc_assert (done);
4147 break;
4148
4149 case store_vec_info_type:
4150 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4151 gcc_assert (done);
4152 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4153 {
4154 /* In case of interleaving, the whole chain is vectorized when the
4155 last store in the chain is reached. Store stmts before the last
4156 one are skipped, and their stmt_vec_info shouldn't be freed
4157 in the meantime. */
4158 *strided_store = true;
4159 if (STMT_VINFO_VEC_STMT (stmt_info))
4160 is_store = true;
4161 }
4162 else
4163 is_store = true;
4164 break;
4165
4166 case condition_vec_info_type:
4167 gcc_assert (!slp_node);
4168 done = vectorizable_condition (stmt, gsi, &vec_stmt);
4169 gcc_assert (done);
4170 break;
4171
4172 case call_vec_info_type:
4173 gcc_assert (!slp_node);
4174 done = vectorizable_call (stmt, gsi, &vec_stmt);
4175 break;
4176
4177 case reduc_vec_info_type:
4178 gcc_assert (!slp_node);
4179 done = vectorizable_reduction (stmt, gsi, &vec_stmt);
4180 gcc_assert (done);
4181 break;
4182
4183 default:
4184 if (!STMT_VINFO_LIVE_P (stmt_info))
4185 {
4186 if (vect_print_dump_info (REPORT_DETAILS))
4187 fprintf (vect_dump, "stmt not supported.");
4188 gcc_unreachable ();
4189 }
4190 }
4191
4192 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4193 is being vectorized, but outside the immediately enclosing loop. */
4194 if (vec_stmt
4195 && STMT_VINFO_LOOP_VINFO (stmt_info)
4196 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4197 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4198 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4199 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4200 || STMT_VINFO_RELEVANT (stmt_info) ==
4201 vect_used_in_outer_by_reduction))
4202 {
4203 struct loop *innerloop = LOOP_VINFO_LOOP (
4204 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4205 imm_use_iterator imm_iter;
4206 use_operand_p use_p;
4207 tree scalar_dest;
4208 gimple exit_phi;
4209
4210 if (vect_print_dump_info (REPORT_DETAILS))
4211 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4212
4213 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4214 (to be used when vectorizing outer-loop stmts that use the DEF of
4215 STMT). */
4216 if (gimple_code (stmt) == GIMPLE_PHI)
4217 scalar_dest = PHI_RESULT (stmt);
4218 else
4219 scalar_dest = gimple_assign_lhs (stmt);
4220
4221 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4222 {
4223 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4224 {
4225 exit_phi = USE_STMT (use_p);
4226 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4227 }
4228 }
4229 }
4230
4231 /* Handle stmts whose DEF is used outside the loop-nest that is
4232 being vectorized. */
4233 if (STMT_VINFO_LIVE_P (stmt_info)
4234 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4235 {
4236 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4237 gcc_assert (done);
4238 }
4239
4240 if (vec_stmt)
4241 {
4242 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4243 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4244 if (orig_stmt_in_pattern)
4245 {
4246 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4247 /* STMT was inserted by the vectorizer to replace a computation idiom.
4248 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4249 computed this idiom. We need to record a pointer to VEC_STMT in
4250 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4251 documentation of vect_pattern_recog. */
4252 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4253 {
4254 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4255 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4256 }
4257 }
4258 }
4259
4260 return is_store;
4261 }
4262
4263
4264 /* Remove a group of stores (for SLP or interleaving) and free their
4265 stmt_vec_info. */
4266
4267 void
4268 vect_remove_stores (gimple first_stmt)
4269 {
4270 gimple next = first_stmt;
4271 gimple tmp;
4272 gimple_stmt_iterator next_si;
4273
4274 while (next)
4275 {
4276 /* Free the attached stmt_vec_info and remove the stmt. */
4277 next_si = gsi_for_stmt (next);
4278 gsi_remove (&next_si, true);
4279 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4280 free_stmt_vec_info (next);
4281 next = tmp;
4282 }
4283 }
4284
4285
4286 /* Function new_stmt_vec_info.
4287
4288 Create and initialize a new stmt_vec_info struct for STMT. */
4289
4290 stmt_vec_info
4291 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4292 bb_vec_info bb_vinfo)
4293 {
4294 stmt_vec_info res;
4295 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4296
4297 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4298 STMT_VINFO_STMT (res) = stmt;
4299 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4300 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4301 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4302 STMT_VINFO_LIVE_P (res) = false;
4303 STMT_VINFO_VECTYPE (res) = NULL;
4304 STMT_VINFO_VEC_STMT (res) = NULL;
4305 STMT_VINFO_IN_PATTERN_P (res) = false;
4306 STMT_VINFO_RELATED_STMT (res) = NULL;
4307 STMT_VINFO_DATA_REF (res) = NULL;
4308
4309 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4310 STMT_VINFO_DR_OFFSET (res) = NULL;
4311 STMT_VINFO_DR_INIT (res) = NULL;
4312 STMT_VINFO_DR_STEP (res) = NULL;
4313 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4314
4315 if (gimple_code (stmt) == GIMPLE_PHI
4316 && is_loop_header_bb_p (gimple_bb (stmt)))
4317 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4318 else
4319 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4320
4321 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4322 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4323 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4324 STMT_SLP_TYPE (res) = loop_vect;
4325 DR_GROUP_FIRST_DR (res) = NULL;
4326 DR_GROUP_NEXT_DR (res) = NULL;
4327 DR_GROUP_SIZE (res) = 0;
4328 DR_GROUP_STORE_COUNT (res) = 0;
4329 DR_GROUP_GAP (res) = 0;
4330 DR_GROUP_SAME_DR_STMT (res) = NULL;
4331 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4332
4333 return res;
4334 }
4335
4336
4337 /* Create a vector for stmt_vec_info. */
4338
4339 void
4340 init_stmt_vec_info_vec (void)
4341 {
4342 gcc_assert (!stmt_vec_info_vec);
4343 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4344 }
4345
4346
4347 /* Free the vector for stmt_vec_info. */
4348
4349 void
4350 free_stmt_vec_info_vec (void)
4351 {
4352 gcc_assert (stmt_vec_info_vec);
4353 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4354 }
4355
4356
4357 /* Free stmt vectorization related info. */
4358
4359 void
4360 free_stmt_vec_info (gimple stmt)
4361 {
4362 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4363
4364 if (!stmt_info)
4365 return;
4366
4367 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4368 set_vinfo_for_stmt (stmt, NULL);
4369 free (stmt_info);
4370 }
4371
4372
4373 /* Function get_vectype_for_scalar_type.
4374
4375 Returns the vector type corresponding to SCALAR_TYPE as supported
4376 by the target. */
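/* For example (on a hypothetical target where UNITS_PER_SIMD_WORD is
   16 bytes): a 4-byte int yields a vector type with 16/4 = 4 units, a
   2-byte short yields one with 8 units, and a scalar type at least as
   wide as the SIMD word yields NULL_TREE.  */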
4377
4378 tree
4379 get_vectype_for_scalar_type (tree scalar_type)
4380 {
4381 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
4382 int nbytes = GET_MODE_SIZE (inner_mode);
4383 int nunits;
4384 tree vectype;
4385
4386 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
4387 return NULL_TREE;
4388
4389 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
4390 is expected. */
4391 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
4392
4393 vectype = build_vector_type (scalar_type, nunits);
4394 if (vect_print_dump_info (REPORT_DETAILS))
4395 {
4396 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4397 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4398 }
4399
4400 if (!vectype)
4401 return NULL_TREE;
4402
4403 if (vect_print_dump_info (REPORT_DETAILS))
4404 {
4405 fprintf (vect_dump, "vectype: ");
4406 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4407 }
4408
4409 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4410 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4411 {
4412 if (vect_print_dump_info (REPORT_DETAILS))
4413 fprintf (vect_dump, "mode not supported by target.");
4414 return NULL_TREE;
4415 }
4416
4417 return vectype;
4418 }
4419
4420 /* Function vect_is_simple_use.
4421
4422 Input:
4423 LOOP_VINFO - the vect info of the loop that is being vectorized.
4424 BB_VINFO - the vect info of the basic block that is being vectorized.
4425 OPERAND - operand of a stmt in the loop or bb.
4426 DEF - the defining stmt in case OPERAND is an SSA_NAME.
4427
4428 Returns whether a stmt with OPERAND can be vectorized.
4429 For loops, supportable operands are constants, loop invariants, and operands
4430 that are defined by the current iteration of the loop. Unsupportable
4431 operands are those that are defined by a previous iteration of the loop (as
4432 is the case in reduction/induction computations).
4433 For basic blocks, supportable operands are constants and bb invariants.
4434 For now, operands defined outside the basic block are not supported. */
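/* For example, in "for (i = 0; i < n; i++) a[i] = b[i] * c + 3;" with c
   defined before the loop, the constant 3 is vect_constant_def, the
   invariant c is vect_external_def, and the SSA name holding the value
   loaded from b[i] (defined inside the loop) is vect_internal_def.  */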
4435
4436 bool
4437 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
4438 bb_vec_info bb_vinfo, gimple *def_stmt,
4439 tree *def, enum vect_def_type *dt)
4440 {
4441 basic_block bb;
4442 stmt_vec_info stmt_vinfo;
4443 struct loop *loop = NULL;
4444
4445 if (loop_vinfo)
4446 loop = LOOP_VINFO_LOOP (loop_vinfo);
4447
4448 *def_stmt = NULL;
4449 *def = NULL_TREE;
4450
4451 if (vect_print_dump_info (REPORT_DETAILS))
4452 {
4453 fprintf (vect_dump, "vect_is_simple_use: operand ");
4454 print_generic_expr (vect_dump, operand, TDF_SLIM);
4455 }
4456
4457 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4458 {
4459 *dt = vect_constant_def;
4460 return true;
4461 }
4462
4463 if (is_gimple_min_invariant (operand))
4464 {
4465 *def = operand;
4466 *dt = vect_external_def;
4467 return true;
4468 }
4469
4470 if (TREE_CODE (operand) == PAREN_EXPR)
4471 {
4472 if (vect_print_dump_info (REPORT_DETAILS))
4473 fprintf (vect_dump, "non-associatable copy.");
4474 operand = TREE_OPERAND (operand, 0);
4475 }
4476
4477 if (TREE_CODE (operand) != SSA_NAME)
4478 {
4479 if (vect_print_dump_info (REPORT_DETAILS))
4480 fprintf (vect_dump, "not ssa-name.");
4481 return false;
4482 }
4483
4484 *def_stmt = SSA_NAME_DEF_STMT (operand);
4485 if (*def_stmt == NULL)
4486 {
4487 if (vect_print_dump_info (REPORT_DETAILS))
4488 fprintf (vect_dump, "no def_stmt.");
4489 return false;
4490 }
4491
4492 if (vect_print_dump_info (REPORT_DETAILS))
4493 {
4494 fprintf (vect_dump, "def_stmt: ");
4495 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4496 }
4497
4498 /* Empty stmt is expected only in case of a function argument.
4499 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
4500 if (gimple_nop_p (*def_stmt))
4501 {
4502 *def = operand;
4503 *dt = vect_external_def;
4504 return true;
4505 }
4506
4507 bb = gimple_bb (*def_stmt);
4508
4509 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4510 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
4511 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
4512 *dt = vect_external_def;
4513 else
4514 {
4515 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4516 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4517 }
4518
4519 if (*dt == vect_unknown_def_type)
4520 {
4521 if (vect_print_dump_info (REPORT_DETAILS))
4522 fprintf (vect_dump, "Unsupported pattern.");
4523 return false;
4524 }
4525
4526 if (vect_print_dump_info (REPORT_DETAILS))
4527 fprintf (vect_dump, "type of def: %d.",*dt);
4528
4529 switch (gimple_code (*def_stmt))
4530 {
4531 case GIMPLE_PHI:
4532 *def = gimple_phi_result (*def_stmt);
4533 break;
4534
4535 case GIMPLE_ASSIGN:
4536 *def = gimple_assign_lhs (*def_stmt);
4537 break;
4538
4539 case GIMPLE_CALL:
4540 *def = gimple_call_lhs (*def_stmt);
4541 if (*def != NULL)
4542 break;
4543 /* FALLTHRU */
4544 default:
4545 if (vect_print_dump_info (REPORT_DETAILS))
4546 fprintf (vect_dump, "unsupported defining stmt: ");
4547 return false;
4548 }
4549
4550 return true;
4551 }
4552
4553
4554 /* Function supportable_widening_operation
4555
4556 Check whether an operation represented by the code CODE is a
4557 widening operation that is supported by the target platform in
4558 vector form (i.e., when operating on arguments of type VECTYPE).
4559
4560 Widening operations we currently support are NOP (CONVERT), FLOAT
4561 and WIDEN_MULT. This function checks if these operations are supported
4562 by the target platform either directly (via vector tree-codes), or via
4563 target builtins.
4564
4565 Output:
4566 - CODE1 and CODE2 are codes of vector operations to be used when
4567 vectorizing the operation, if available.
4568 - DECL1 and DECL2 are decls of target builtin functions to be used
4569 when vectorizing the operation, if available. In this case,
4570 CODE1 and CODE2 are CALL_EXPR.
4571 - MULTI_STEP_CVT determines the number of required intermediate steps in
4572 case of multi-step conversion (like char->short->int - in that case
4573 MULTI_STEP_CVT will be 1).
4574 - INTERM_TYPES contains the intermediate type required to perform the
4575 widening operation (short in the above example). */
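/* As an illustration (assuming the target supports the required unpack
   optabs at every step): widening a vector of chars to ints is done in
   two steps - the chars are first unpacked to shorts and the shorts are
   then unpacked to ints, each step using a VEC_UNPACK_HI_EXPR /
   VEC_UNPACK_LO_EXPR pair; MULTI_STEP_CVT is then 1 and INTERM_TYPES
   holds the intermediate short vector type.  */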
4576
4577 bool
4578 supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
4579 tree *decl1, tree *decl2,
4580 enum tree_code *code1, enum tree_code *code2,
4581 int *multi_step_cvt,
4582 VEC (tree, heap) **interm_types)
4583 {
4584 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4585 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4586 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
4587 bool ordered_p;
4588 enum machine_mode vec_mode;
4589 enum insn_code icode1, icode2;
4590 optab optab1, optab2;
4591 tree type = gimple_expr_type (stmt);
4592 tree wide_vectype = get_vectype_for_scalar_type (type);
4593 enum tree_code c1, c2;
4594
4595 /* The result of a vectorized widening operation usually requires two vectors
4596 (because the widened results do not fit in one vector). The
4597 vector results would normally be expected to appear in the same
4598 order as in the original scalar computation, i.e. if 8 results are
4599 generated in each vector iteration, they are to be organized as follows:
4600 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
4601
4602 However, in the special case that the result of the widening operation is
4603 used in a reduction computation only, the order doesn't matter (because
4604 when vectorizing a reduction we change the order of the computation).
4605 Some targets can take advantage of this and generate more efficient code.
4606 For example, targets like Altivec, that support widen_mult using a sequence
4607 of {mult_even,mult_odd} generate the following vectors:
4608 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
4609
4610 When vectorizing outer-loops, we execute the inner-loop sequentially
4611 (each vectorized inner-loop iteration contributes to VF outer-loop
4612 iterations in parallel). We therefore don't allow changing the order
4613 of the computation in the inner-loop during outer-loop vectorization. */
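/* E.g., for a sum reduction all the widened products are eventually
   added together, so it does not matter whether a given product ends up
   in vect1 or vect2, or in which lane - which is why the
   {mult_even,mult_odd} form is acceptable in that case.  */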
4614
4615 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
4616 && !nested_in_vect_loop_p (vect_loop, stmt))
4617 ordered_p = false;
4618 else
4619 ordered_p = true;
4620
4621 if (!ordered_p
4622 && code == WIDEN_MULT_EXPR
4623 && targetm.vectorize.builtin_mul_widen_even
4624 && targetm.vectorize.builtin_mul_widen_even (vectype)
4625 && targetm.vectorize.builtin_mul_widen_odd
4626 && targetm.vectorize.builtin_mul_widen_odd (vectype))
4627 {
4628 if (vect_print_dump_info (REPORT_DETAILS))
4629 fprintf (vect_dump, "Unordered widening operation detected.");
4630
4631 *code1 = *code2 = CALL_EXPR;
4632 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
4633 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
4634 return true;
4635 }
4636
4637 switch (code)
4638 {
4639 case WIDEN_MULT_EXPR:
4640 if (BYTES_BIG_ENDIAN)
4641 {
4642 c1 = VEC_WIDEN_MULT_HI_EXPR;
4643 c2 = VEC_WIDEN_MULT_LO_EXPR;
4644 }
4645 else
4646 {
4647 c2 = VEC_WIDEN_MULT_HI_EXPR;
4648 c1 = VEC_WIDEN_MULT_LO_EXPR;
4649 }
4650 break;
4651
4652 CASE_CONVERT:
4653 if (BYTES_BIG_ENDIAN)
4654 {
4655 c1 = VEC_UNPACK_HI_EXPR;
4656 c2 = VEC_UNPACK_LO_EXPR;
4657 }
4658 else
4659 {
4660 c2 = VEC_UNPACK_HI_EXPR;
4661 c1 = VEC_UNPACK_LO_EXPR;
4662 }
4663 break;
4664
4665 case FLOAT_EXPR:
4666 if (BYTES_BIG_ENDIAN)
4667 {
4668 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
4669 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
4670 }
4671 else
4672 {
4673 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
4674 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
4675 }
4676 break;
4677
4678 case FIX_TRUNC_EXPR:
4679 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
4680 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
4681 computing the operation. */
4682 return false;
4683
4684 default:
4685 gcc_unreachable ();
4686 }
4687
4688 if (code == FIX_TRUNC_EXPR)
4689 {
4690 /* The signedness is determined from output operand. */
4691 optab1 = optab_for_tree_code (c1, type, optab_default);
4692 optab2 = optab_for_tree_code (c2, type, optab_default);
4693 }
4694 else
4695 {
4696 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4697 optab2 = optab_for_tree_code (c2, vectype, optab_default);
4698 }
4699
4700 if (!optab1 || !optab2)
4701 return false;
4702
4703 vec_mode = TYPE_MODE (vectype);
4704 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
4705 || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
4706 == CODE_FOR_nothing)
4707 return false;
4708
4709 /* Check if it's a multi-step conversion that can be done using intermediate
4710 types. */
4711 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
4712 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
4713 {
4714 int i;
4715 tree prev_type = vectype, intermediate_type;
4716 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4717 optab optab3, optab4;
4718
4719 if (!CONVERT_EXPR_CODE_P (code))
4720 return false;
4721
4722 *code1 = c1;
4723 *code2 = c2;
4724
4725 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4726 intermediate steps in the promotion sequence. We try MAX_INTERM_CVT_STEPS
4727 to get to WIDE_VECTYPE, and fail if we do not. */
4728 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4729 for (i = 0; i < 3; i++)
4730 {
4731 intermediate_mode = insn_data[icode1].operand[0].mode;
4732 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4733 TYPE_UNSIGNED (prev_type));
4734 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
4735 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
4736
4737 if (!optab3 || !optab4
4738 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4739 == CODE_FOR_nothing
4740 || insn_data[icode1].operand[0].mode != intermediate_mode
4741 || (icode2 = optab2->handlers[(int) prev_mode].insn_code)
4742 == CODE_FOR_nothing
4743 || insn_data[icode2].operand[0].mode != intermediate_mode
4744 || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
4745 == CODE_FOR_nothing
4746 || (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
4747 == CODE_FOR_nothing)
4748 return false;
4749
4750 VEC_quick_push (tree, *interm_types, intermediate_type);
4751 (*multi_step_cvt)++;
4752
4753 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
4754 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
4755 return true;
4756
4757 prev_type = intermediate_type;
4758 prev_mode = intermediate_mode;
4759 }
4760
4761 return false;
4762 }
4763
4764 *code1 = c1;
4765 *code2 = c2;
4766 return true;
4767 }
4768
4769
4770 /* Function supportable_narrowing_operation
4771
4772 Check whether an operation represented by the code CODE is a
4773 narrowing operation that is supported by the target platform in
4774 vector form (i.e., when operating on arguments of type VECTYPE).
4775
4776 Narrowing operations we currently support are NOP (CONVERT) and
4777 FIX_TRUNC. This function checks if these operations are supported by
4778 the target platform directly via vector tree-codes.
4779
4780 Output:
4781 - CODE1 is the code of a vector operation to be used when
4782 vectorizing the operation, if available.
4783 - MULTI_STEP_CVT determines the number of required intermediate steps in
4784 case of multi-step conversion (like int->short->char - in that case
4785 MULTI_STEP_CVT will be 1).
4786 - INTERM_TYPES contains the intermediate type required to perform the
4787 narrowing operation (short in the above example). */
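/* As an illustration (assuming the target supports VEC_PACK_TRUNC_EXPR
   at every step): narrowing a vector of ints to chars first packs the
   ints into shorts and then packs the shorts into chars; MULTI_STEP_CVT
   is then 1 and INTERM_TYPES holds the intermediate short vector
   type.  */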
4788
4789 bool
4790 supportable_narrowing_operation (enum tree_code code,
4791 const_gimple stmt, tree vectype,
4792 enum tree_code *code1, int *multi_step_cvt,
4793 VEC (tree, heap) **interm_types)
4794 {
4795 enum machine_mode vec_mode;
4796 enum insn_code icode1;
4797 optab optab1, interm_optab;
4798 tree type = gimple_expr_type (stmt);
4799 tree narrow_vectype = get_vectype_for_scalar_type (type);
4800 enum tree_code c1;
4801 tree intermediate_type, prev_type;
4802 int i;
4803
4804 switch (code)
4805 {
4806 CASE_CONVERT:
4807 c1 = VEC_PACK_TRUNC_EXPR;
4808 break;
4809
4810 case FIX_TRUNC_EXPR:
4811 c1 = VEC_PACK_FIX_TRUNC_EXPR;
4812 break;
4813
4814 case FLOAT_EXPR:
4815 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
4816 tree code and optabs used for computing the operation. */
4817 return false;
4818
4819 default:
4820 gcc_unreachable ();
4821 }
4822
4823 if (code == FIX_TRUNC_EXPR)
4824 /* The signedness is determined from output operand. */
4825 optab1 = optab_for_tree_code (c1, type, optab_default);
4826 else
4827 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4828
4829 if (!optab1)
4830 return false;
4831
4832 vec_mode = TYPE_MODE (vectype);
4833 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
4834 == CODE_FOR_nothing)
4835 return false;
4836
4837 /* Check if it's a multi-step conversion that can be done using intermediate
4838 types. */
4839 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
4840 {
4841 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4842
4843 *code1 = c1;
4844 prev_type = vectype;
4845 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4846 intermediate steps in the narrowing sequence. We try MAX_INTERM_CVT_STEPS
4847 to get to NARROW_VECTYPE, and fail if we do not. */
4848 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4849 for (i = 0; i < 3; i++)
4850 {
4851 intermediate_mode = insn_data[icode1].operand[0].mode;
4852 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4853 TYPE_UNSIGNED (prev_type));
4854 interm_optab = optab_for_tree_code (c1, intermediate_type,
4855 optab_default);
4856 if (!interm_optab
4857 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4858 == CODE_FOR_nothing
4859 || insn_data[icode1].operand[0].mode != intermediate_mode
4860 || (icode1
4861 = interm_optab->handlers[(int) intermediate_mode].insn_code)
4862 == CODE_FOR_nothing)
4863 return false;
4864
4865 VEC_quick_push (tree, *interm_types, intermediate_type);
4866 (*multi_step_cvt)++;
4867
4868 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
4869 return true;
4870
4871 prev_type = intermediate_type;
4872 prev_mode = intermediate_mode;
4873 }
4874
4875 return false;
4876 }
4877
4878 *code1 = c1;
4879 return true;
4880 }