gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "tree-pretty-print.h"
32 #include "gimple-pretty-print.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "cfglayout.h"
37 #include "expr.h"
38 #include "recog.h"
39 #include "optabs.h"
40 #include "diagnostic-core.h"
41 #include "toplev.h"
42 #include "tree-vectorizer.h"
43 #include "langhooks.h"
44
45
46 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
47
48 /* Function vect_mark_relevant.
49
50 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
51
52 static void
53 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
54 enum vect_relevant relevant, bool live_p)
55 {
56 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
57 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
58 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
59
60 if (vect_print_dump_info (REPORT_DETAILS))
61 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
62
63 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
64 {
65 gimple pattern_stmt;
66
67 /* This is the last stmt in a sequence that was detected as a
68 pattern that can potentially be vectorized. Don't mark the stmt
69 as relevant/live because it's not going to be vectorized.
70 Instead mark the pattern-stmt that replaces it. */
71
72 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
73
74 if (vect_print_dump_info (REPORT_DETAILS))
75 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
76 stmt_info = vinfo_for_stmt (pattern_stmt);
77 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
78 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
79 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
80 stmt = pattern_stmt;
81 }
82
83 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
84 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
85 STMT_VINFO_RELEVANT (stmt_info) = relevant;
86
87 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
88 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
89 {
90 if (vect_print_dump_info (REPORT_DETAILS))
91 fprintf (vect_dump, "already marked relevant/live.");
92 return;
93 }
94
95 VEC_safe_push (gimple, heap, *worklist, stmt);
96 }
97
98
99 /* Function vect_stmt_relevant_p.
100
101 Return true if STMT in loop that is represented by LOOP_VINFO is
102 "relevant for vectorization".
103
104 A stmt is considered "relevant for vectorization" if:
105 - it has uses outside the loop.
106 - it has vdefs (it alters memory).
 107     - it is a control stmt in the loop (other than the loop exit condition).
108
109 CHECKME: what other side effects would the vectorizer allow? */
110
111 static bool
112 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
113 enum vect_relevant *relevant, bool *live_p)
114 {
115 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
116 ssa_op_iter op_iter;
117 imm_use_iterator imm_iter;
118 use_operand_p use_p;
119 def_operand_p def_p;
120
121 *relevant = vect_unused_in_scope;
122 *live_p = false;
123
124 /* cond stmt other than loop exit cond. */
125 if (is_ctrl_stmt (stmt)
126 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
127 != loop_exit_ctrl_vec_info_type)
128 *relevant = vect_used_in_scope;
129
130 /* changing memory. */
131 if (gimple_code (stmt) != GIMPLE_PHI)
132 if (gimple_vdef (stmt))
133 {
134 if (vect_print_dump_info (REPORT_DETAILS))
135 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
136 *relevant = vect_used_in_scope;
137 }
138
139 /* uses outside the loop. */
140 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
141 {
142 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
143 {
144 basic_block bb = gimple_bb (USE_STMT (use_p));
145 if (!flow_bb_inside_loop_p (loop, bb))
146 {
147 if (vect_print_dump_info (REPORT_DETAILS))
148 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
149
150 if (is_gimple_debug (USE_STMT (use_p)))
151 continue;
152
 153 /* We expect all such uses to be in the loop exit phis
 154 (because of loop-closed SSA form).  */
155 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
156 gcc_assert (bb == single_exit (loop)->dest);
157
158 *live_p = true;
159 }
160 }
161 }
162
163 return (*live_p || *relevant);
164 }
165
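/* Illustrative example (hypothetical code, not taken from the sources): in a
   loop like

     last = 0;
     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + 1;   # has a vdef -> relevant
         last = b[i];       # value used after the loop -> live
       }
     ... = last;            # use reaches the loop-exit phi

   the store is marked vect_used_in_scope because it alters memory, and the
   def of `last' is marked live because its only use is outside the loop, in
   the single-exit block.  */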
166
167 /* Function exist_non_indexing_operands_for_use_p
168
169 USE is one of the uses attached to STMT. Check if USE is
170 used in STMT for anything other than indexing an array. */
171
172 static bool
173 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
174 {
175 tree operand;
176 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
177
178 /* USE corresponds to some operand in STMT. If there is no data
179 reference in STMT, then any operand that corresponds to USE
180 is not indexing an array. */
181 if (!STMT_VINFO_DATA_REF (stmt_info))
182 return true;
183
 184 /* STMT has a data_ref.  FORNOW this means that it is of one of
185 the following forms:
186 -1- ARRAY_REF = var
187 -2- var = ARRAY_REF
188 (This should have been verified in analyze_data_refs).
189
190 'var' in the second case corresponds to a def, not a use,
191 so USE cannot correspond to any operands that are not used
192 for array indexing.
193
194 Therefore, all we need to check is if STMT falls into the
195 first case, and whether var corresponds to USE. */
196
197 if (!gimple_assign_copy_p (stmt))
198 return false;
199 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
200 return false;
201 operand = gimple_assign_rhs1 (stmt);
202 if (TREE_CODE (operand) != SSA_NAME)
203 return false;
204
205 if (operand == use)
206 return true;
207
208 return false;
209 }
210
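/* For example (hypothetical stmt): in "a[i_1] = x_2" the use of i_1 only
   feeds the address computation of the data reference, so this function
   returns false for it, whereas for the stored value x_2 (the rhs of the
   copy) it returns true.  In "x_3 = a[i_1]" every SSA use serves only the
   indexing, so the function returns false.  */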
211
212 /*
213 Function process_use.
214
215 Inputs:
216 - a USE in STMT in a loop represented by LOOP_VINFO
217 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
218 that defined USE. This is done by calling mark_relevant and passing it
219 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
220
221 Outputs:
222 Generally, LIVE_P and RELEVANT are used to define the liveness and
223 relevance info of the DEF_STMT of this USE:
224 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
225 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
226 Exceptions:
227 - case 1: If USE is used only for address computations (e.g. array indexing),
228 which does not need to be directly vectorized, then the liveness/relevance
229 of the respective DEF_STMT is left unchanged.
230 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
 231 skip DEF_STMT because it has already been processed.
232 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
233 be modified accordingly.
234
235 Return true if everything is as expected. Return false otherwise. */
236
237 static bool
238 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
239 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
240 {
241 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
242 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
243 stmt_vec_info dstmt_vinfo;
244 basic_block bb, def_bb;
245 tree def;
246 gimple def_stmt;
247 enum vect_def_type dt;
248
249 /* case 1: we are only interested in uses that need to be vectorized. Uses
250 that are used for address computation are not considered relevant. */
251 if (!exist_non_indexing_operands_for_use_p (use, stmt))
252 return true;
253
254 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
255 {
256 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
257 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
258 return false;
259 }
260
261 if (!def_stmt || gimple_nop_p (def_stmt))
262 return true;
263
264 def_bb = gimple_bb (def_stmt);
265 if (!flow_bb_inside_loop_p (loop, def_bb))
266 {
267 if (vect_print_dump_info (REPORT_DETAILS))
268 fprintf (vect_dump, "def_stmt is out of loop.");
269 return true;
270 }
271
272 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
273 DEF_STMT must have already been processed, because this should be the
274 only way that STMT, which is a reduction-phi, was put in the worklist,
275 as there should be no other uses for DEF_STMT in the loop. So we just
276 check that everything is as expected, and we are done. */
277 dstmt_vinfo = vinfo_for_stmt (def_stmt);
278 bb = gimple_bb (stmt);
279 if (gimple_code (stmt) == GIMPLE_PHI
280 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
281 && gimple_code (def_stmt) != GIMPLE_PHI
282 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
283 && bb->loop_father == def_bb->loop_father)
284 {
285 if (vect_print_dump_info (REPORT_DETAILS))
286 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
287 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
288 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
289 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
290 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
291 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
292 return true;
293 }
294
295 /* case 3a: outer-loop stmt defining an inner-loop stmt:
296 outer-loop-header-bb:
297 d = def_stmt
298 inner-loop:
299 stmt # use (d)
300 outer-loop-tail-bb:
301 ... */
302 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
303 {
304 if (vect_print_dump_info (REPORT_DETAILS))
305 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
306
307 switch (relevant)
308 {
309 case vect_unused_in_scope:
310 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
311 vect_used_in_scope : vect_unused_in_scope;
312 break;
313
314 case vect_used_in_outer_by_reduction:
315 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
316 relevant = vect_used_by_reduction;
317 break;
318
319 case vect_used_in_outer:
320 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
321 relevant = vect_used_in_scope;
322 break;
323
324 case vect_used_in_scope:
325 break;
326
327 default:
328 gcc_unreachable ();
329 }
330 }
331
332 /* case 3b: inner-loop stmt defining an outer-loop stmt:
333 outer-loop-header-bb:
334 ...
335 inner-loop:
336 d = def_stmt
337 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
338 stmt # use (d) */
339 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
340 {
341 if (vect_print_dump_info (REPORT_DETAILS))
342 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
343
344 switch (relevant)
345 {
346 case vect_unused_in_scope:
347 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
348 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
349 vect_used_in_outer_by_reduction : vect_unused_in_scope;
350 break;
351
352 case vect_used_by_reduction:
353 relevant = vect_used_in_outer_by_reduction;
354 break;
355
356 case vect_used_in_scope:
357 relevant = vect_used_in_outer;
358 break;
359
360 default:
361 gcc_unreachable ();
362 }
363 }
364
365 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
366 return true;
367 }
368
369
370 /* Function vect_mark_stmts_to_be_vectorized.
371
372 Not all stmts in the loop need to be vectorized. For example:
373
374 for i...
375 for j...
376 1. T0 = i + j
377 2. T1 = a[T0]
378
379 3. j = j + 1
380
 381 Stmts 1 and 3 do not need to be vectorized, because loop control and
382 addressing of vectorized data-refs are handled differently.
383
384 This pass detects such stmts. */
385
386 bool
387 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
388 {
389 VEC(gimple,heap) *worklist;
390 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
391 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
392 unsigned int nbbs = loop->num_nodes;
393 gimple_stmt_iterator si;
394 gimple stmt;
395 unsigned int i;
396 stmt_vec_info stmt_vinfo;
397 basic_block bb;
398 gimple phi;
399 bool live_p;
400 enum vect_relevant relevant, tmp_relevant;
401 enum vect_def_type def_type;
402
403 if (vect_print_dump_info (REPORT_DETAILS))
404 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
405
406 worklist = VEC_alloc (gimple, heap, 64);
407
408 /* 1. Init worklist. */
409 for (i = 0; i < nbbs; i++)
410 {
411 bb = bbs[i];
412 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
413 {
414 phi = gsi_stmt (si);
415 if (vect_print_dump_info (REPORT_DETAILS))
416 {
417 fprintf (vect_dump, "init: phi relevant? ");
418 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
419 }
420
421 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
422 vect_mark_relevant (&worklist, phi, relevant, live_p);
423 }
424 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
425 {
426 stmt = gsi_stmt (si);
427 if (vect_print_dump_info (REPORT_DETAILS))
428 {
429 fprintf (vect_dump, "init: stmt relevant? ");
430 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
431 }
432
433 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
434 vect_mark_relevant (&worklist, stmt, relevant, live_p);
435 }
436 }
437
 438 /* 2. Process worklist.  */
439 while (VEC_length (gimple, worklist) > 0)
440 {
441 use_operand_p use_p;
442 ssa_op_iter iter;
443
444 stmt = VEC_pop (gimple, worklist);
445 if (vect_print_dump_info (REPORT_DETAILS))
446 {
447 fprintf (vect_dump, "worklist: examine stmt: ");
448 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
449 }
450
451 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
452 (DEF_STMT) as relevant/irrelevant and live/dead according to the
453 liveness and relevance properties of STMT. */
454 stmt_vinfo = vinfo_for_stmt (stmt);
455 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
456 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
457
458 /* Generally, the liveness and relevance properties of STMT are
459 propagated as is to the DEF_STMTs of its USEs:
460 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
461 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
462
463 One exception is when STMT has been identified as defining a reduction
464 variable; in this case we set the liveness/relevance as follows:
465 live_p = false
466 relevant = vect_used_by_reduction
467 This is because we distinguish between two kinds of relevant stmts -
468 those that are used by a reduction computation, and those that are
469 (also) used by a regular computation. This allows us later on to
470 identify stmts that are used solely by a reduction, and therefore the
471 order of the results that they produce does not have to be kept. */
472
473 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
474 tmp_relevant = relevant;
475 switch (def_type)
476 {
477 case vect_reduction_def:
478 switch (tmp_relevant)
479 {
480 case vect_unused_in_scope:
481 relevant = vect_used_by_reduction;
482 break;
483
484 case vect_used_by_reduction:
485 if (gimple_code (stmt) == GIMPLE_PHI)
486 break;
487 /* fall through */
488
489 default:
490 if (vect_print_dump_info (REPORT_DETAILS))
491 fprintf (vect_dump, "unsupported use of reduction.");
492
493 VEC_free (gimple, heap, worklist);
494 return false;
495 }
496
497 live_p = false;
498 break;
499
500 case vect_nested_cycle:
501 if (tmp_relevant != vect_unused_in_scope
502 && tmp_relevant != vect_used_in_outer_by_reduction
503 && tmp_relevant != vect_used_in_outer)
504 {
505 if (vect_print_dump_info (REPORT_DETAILS))
506 fprintf (vect_dump, "unsupported use of nested cycle.");
507
508 VEC_free (gimple, heap, worklist);
509 return false;
510 }
511
512 live_p = false;
513 break;
514
515 case vect_double_reduction_def:
516 if (tmp_relevant != vect_unused_in_scope
517 && tmp_relevant != vect_used_by_reduction)
518 {
519 if (vect_print_dump_info (REPORT_DETAILS))
520 fprintf (vect_dump, "unsupported use of double reduction.");
521
522 VEC_free (gimple, heap, worklist);
523 return false;
524 }
525
526 live_p = false;
527 break;
528
529 default:
530 break;
531 }
532
533 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
534 {
535 tree op = USE_FROM_PTR (use_p);
536 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
537 {
538 VEC_free (gimple, heap, worklist);
539 return false;
540 }
541 }
542 } /* while worklist */
543
544 VEC_free (gimple, heap, worklist);
545 return true;
546 }
547
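/* Worked illustration (hypothetical loop): for a reduction such as

     for (i = 0; i < n; i++)
       sum += a[i];

   the reduction statement is marked live because `sum' is used after the
   loop.  When it is popped from the worklist its def_type is
   vect_reduction_def, so the relevance propagated to its uses (the load of
   a[i] and the reduction phi) becomes vect_used_by_reduction with live_p
   cleared; this is what later lets the vectorizer recognize stmts whose
   results feed only a reduction and whose element order therefore need not
   be preserved.  */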
548
 549 /* Get the cost of a statement by querying the target's vectorization cost hook.  */
550
551 static inline
552 int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
553 {
554 tree dummy_type = NULL;
555 int dummy = 0;
556
557 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
558 dummy_type, dummy);
559 }
560
561
562 /* Get cost for STMT. */
563
564 int
565 cost_for_stmt (gimple stmt)
566 {
567 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
568
569 switch (STMT_VINFO_TYPE (stmt_info))
570 {
571 case load_vec_info_type:
572 return vect_get_stmt_cost (scalar_load);
573 case store_vec_info_type:
574 return vect_get_stmt_cost (scalar_store);
575 case op_vec_info_type:
576 case condition_vec_info_type:
577 case assignment_vec_info_type:
578 case reduc_vec_info_type:
579 case induc_vec_info_type:
580 case type_promotion_vec_info_type:
581 case type_demotion_vec_info_type:
582 case type_conversion_vec_info_type:
583 case call_vec_info_type:
584 return vect_get_stmt_cost (scalar_stmt);
585 case undef_vec_info_type:
586 default:
587 gcc_unreachable ();
588 }
589 }
590
591 /* Function vect_model_simple_cost.
592
593 Models cost for simple operations, i.e. those that only emit ncopies of a
594 single op. Right now, this does not account for multiple insns that could
595 be generated for the single vector op. We will handle that shortly. */
596
597 void
598 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
599 enum vect_def_type *dt, slp_tree slp_node)
600 {
601 int i;
602 int inside_cost = 0, outside_cost = 0;
603
604 /* The SLP costs were already calculated during SLP tree build. */
605 if (PURE_SLP_STMT (stmt_info))
606 return;
607
608 inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);
609
 610 /* FORNOW: Assuming maximum 2 args per stmt.  */
611 for (i = 0; i < 2; i++)
612 {
613 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
614 outside_cost += vect_get_stmt_cost (vector_stmt);
615 }
616
617 if (vect_print_dump_info (REPORT_COST))
618 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
619 "outside_cost = %d .", inside_cost, outside_cost);
620
621 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
622 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
623 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
624 }
625
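/* Worked example (assuming a target whose vector_stmt cost is 1): for a
   statement "x = y + 3" vectorized with ncopies = 2, where y is defined in
   the loop and 3 is a constant, the inside cost is 2 * 1 = 2 and the outside
   cost is 1, for the single statement that builds the invariant vector
   {3,3,...,3} in the preheader.  */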
626
627 /* Function vect_cost_strided_group_size
628
629 For strided load or store, return the group_size only if it is the first
630 load or store of a group, else return 1. This ensures that group size is
631 only returned once per group. */
632
633 static int
634 vect_cost_strided_group_size (stmt_vec_info stmt_info)
635 {
636 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
637
638 if (first_stmt == STMT_VINFO_STMT (stmt_info))
639 return DR_GROUP_SIZE (stmt_info);
640
641 return 1;
642 }
643
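/* For instance, in an interleaved group of three stores the first store of
   the group reports a group_size of 3 and the other two report 1, so the
   group overhead added in the cost models below is accounted for exactly
   once per group.  */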
644
645 /* Function vect_model_store_cost
646
647 Models cost for stores. In the case of strided accesses, one access
648 has the overhead of the strided access attributed to it. */
649
650 void
651 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
652 enum vect_def_type dt, slp_tree slp_node)
653 {
654 int group_size;
655 unsigned int inside_cost = 0, outside_cost = 0;
656 struct data_reference *first_dr;
657 gimple first_stmt;
658
659 /* The SLP costs were already calculated during SLP tree build. */
660 if (PURE_SLP_STMT (stmt_info))
661 return;
662
663 if (dt == vect_constant_def || dt == vect_external_def)
664 outside_cost = vect_get_stmt_cost (scalar_to_vec);
665
666 /* Strided access? */
667 if (DR_GROUP_FIRST_DR (stmt_info))
668 {
669 if (slp_node)
670 {
671 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
672 group_size = 1;
673 }
674 else
675 {
676 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
677 group_size = vect_cost_strided_group_size (stmt_info);
678 }
679
680 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
681 }
682 /* Not a strided access. */
683 else
684 {
685 group_size = 1;
686 first_dr = STMT_VINFO_DATA_REF (stmt_info);
687 }
688
689 /* Is this an access in a group of stores, which provide strided access?
690 If so, add in the cost of the permutes. */
691 if (group_size > 1)
692 {
693 /* Uses a high and low interleave operation for each needed permute. */
694 inside_cost = ncopies * exact_log2(group_size) * group_size
695 * vect_get_stmt_cost (vector_stmt);
696
697 if (vect_print_dump_info (REPORT_COST))
698 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
699 group_size);
700
701 }
702
703 /* Costs of the stores. */
704 vect_get_store_cost (first_dr, ncopies, &inside_cost);
705
706 if (vect_print_dump_info (REPORT_COST))
707 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
708 "outside_cost = %d .", inside_cost, outside_cost);
709
710 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
711 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
712 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
713 }
714
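/* Worked illustration (hypothetical loop, unit vector_stmt cost assumed): an
   interleaved store group such as

     for (i = 0; i < n; i++)
       {
         out[2*i]     = a[i];
         out[2*i + 1] = b[i];
       }

   has group_size = 2, so with ncopies = 1 the permute cost computed above is
   1 * log2(2) * 2 * 1 = 2 interleave stmts, in addition to the cost of the
   vector stores themselves.  */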
715
716 /* Calculate cost of DR's memory access. */
717 void
718 vect_get_store_cost (struct data_reference *dr, int ncopies,
719 unsigned int *inside_cost)
720 {
721 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
722
723 switch (alignment_support_scheme)
724 {
725 case dr_aligned:
726 {
727 *inside_cost += ncopies * vect_get_stmt_cost (vector_store);
728
729 if (vect_print_dump_info (REPORT_COST))
730 fprintf (vect_dump, "vect_model_store_cost: aligned.");
731
732 break;
733 }
734
735 case dr_unaligned_supported:
736 {
737 gimple stmt = DR_STMT (dr);
738 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
739 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
740
741 /* Here, we assign an additional cost for the unaligned store. */
742 *inside_cost += ncopies
743 * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
744 vectype, DR_MISALIGNMENT (dr));
745
746 if (vect_print_dump_info (REPORT_COST))
747 fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
748 "hardware.");
749
750 break;
751 }
752
753 default:
754 gcc_unreachable ();
755 }
756 }
757
758
759 /* Function vect_model_load_cost
760
 761 Models cost for loads.  In the case of strided accesses, one access in the
 762 group has the overhead of the strided access attributed to it.  Since unaligned
763 accesses are supported for loads, we also account for the costs of the
764 access scheme chosen. */
765
766 void
767 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
768
769 {
770 int group_size;
771 gimple first_stmt;
772 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
773 unsigned int inside_cost = 0, outside_cost = 0;
774
775 /* The SLP costs were already calculated during SLP tree build. */
776 if (PURE_SLP_STMT (stmt_info))
777 return;
778
779 /* Strided accesses? */
780 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
781 if (first_stmt && !slp_node)
782 {
783 group_size = vect_cost_strided_group_size (stmt_info);
784 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
785 }
786 /* Not a strided access. */
787 else
788 {
789 group_size = 1;
790 first_dr = dr;
791 }
792
793 /* Is this an access in a group of loads providing strided access?
794 If so, add in the cost of the permutes. */
795 if (group_size > 1)
796 {
 797 /* Uses an even and an odd extract operation for each needed permute.  */
798 inside_cost = ncopies * exact_log2(group_size) * group_size
799 * vect_get_stmt_cost (vector_stmt);
800
801 if (vect_print_dump_info (REPORT_COST))
802 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
803 group_size);
804 }
805
806 /* The loads themselves. */
807 vect_get_load_cost (first_dr, ncopies,
808 ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node),
809 &inside_cost, &outside_cost);
810
811 if (vect_print_dump_info (REPORT_COST))
812 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
813 "outside_cost = %d .", inside_cost, outside_cost);
814
815 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
816 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
817 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
818 }
819
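/* Worked illustration (hypothetical loop, unit vector_stmt cost assumed): a
   strided load group of size 2, e.g.

     for (i = 0; i < n; i++)
       {
         re = c[2*i];
         im = c[2*i + 1];
         ... = re + im;
       }

   with ncopies = 1 adds a permute cost of 1 * log2(2) * 2 * 1 = 2 extract
   stmts, on top of the cost of the vector loads computed by
   vect_get_load_cost.  */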
820
821 /* Calculate cost of DR's memory access. */
822 void
823 vect_get_load_cost (struct data_reference *dr, int ncopies,
824 bool add_realign_cost, unsigned int *inside_cost,
825 unsigned int *outside_cost)
826 {
827 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
828
829 switch (alignment_support_scheme)
830 {
831 case dr_aligned:
832 {
833 *inside_cost += ncopies * vect_get_stmt_cost (vector_load);
834
835 if (vect_print_dump_info (REPORT_COST))
836 fprintf (vect_dump, "vect_model_load_cost: aligned.");
837
838 break;
839 }
840 case dr_unaligned_supported:
841 {
842 gimple stmt = DR_STMT (dr);
843 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
844 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
845
846 /* Here, we assign an additional cost for the unaligned load. */
847 *inside_cost += ncopies
848 * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
849 vectype, DR_MISALIGNMENT (dr));
850 if (vect_print_dump_info (REPORT_COST))
851 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
852 "hardware.");
853
854 break;
855 }
856 case dr_explicit_realign:
857 {
858 *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
859 + vect_get_stmt_cost (vector_stmt));
860
861 /* FIXME: If the misalignment remains fixed across the iterations of
862 the containing loop, the following cost should be added to the
863 outside costs. */
864 if (targetm.vectorize.builtin_mask_for_load)
865 *inside_cost += vect_get_stmt_cost (vector_stmt);
866
867 break;
868 }
869 case dr_explicit_realign_optimized:
870 {
871 if (vect_print_dump_info (REPORT_COST))
872 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
873 "pipelined.");
874
875 /* Unaligned software pipeline has a load of an address, an initial
876 load, and possibly a mask operation to "prime" the loop. However,
877 if this is an access in a group of loads, which provide strided
878 access, then the above cost should only be considered for one
879 access in the group. Inside the loop, there is a load op
880 and a realignment op. */
881
882 if (add_realign_cost)
883 {
884 *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
885 if (targetm.vectorize.builtin_mask_for_load)
886 *outside_cost += vect_get_stmt_cost (vector_stmt);
887 }
888
889 *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
890 + vect_get_stmt_cost (vector_stmt));
891 break;
892 }
893
894 default:
895 gcc_unreachable ();
896 }
897 }
898
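/* Worked example (unit costs assumed): with ncopies = 2, dr_aligned costs 2
   vector loads; dr_explicit_realign costs 2 * (2 loads + 1 realign stmt) = 6
   inside the loop, plus one mask-building stmt if the target provides
   builtin_mask_for_load; dr_explicit_realign_optimized costs
   2 * (1 load + 1 realign stmt) = 4 inside the loop, plus the one-off
   prologue cost accounted in *outside_cost when add_realign_cost is set.  */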
899
900 /* Function vect_init_vector.
901
902 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
 903 the vector elements of VECTOR_VAR.  Place the initialization at GSI if it
904 is not NULL. Otherwise, place the initialization at the loop preheader.
905 Return the DEF of INIT_STMT.
906 It will be used in the vectorization of STMT. */
907
908 tree
909 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
910 gimple_stmt_iterator *gsi)
911 {
912 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
913 tree new_var;
914 gimple init_stmt;
915 tree vec_oprnd;
916 edge pe;
917 tree new_temp;
918 basic_block new_bb;
919
920 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
921 add_referenced_var (new_var);
922 init_stmt = gimple_build_assign (new_var, vector_var);
923 new_temp = make_ssa_name (new_var, init_stmt);
924 gimple_assign_set_lhs (init_stmt, new_temp);
925
926 if (gsi)
927 vect_finish_stmt_generation (stmt, init_stmt, gsi);
928 else
929 {
930 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
931
932 if (loop_vinfo)
933 {
934 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
935
936 if (nested_in_vect_loop_p (loop, stmt))
937 loop = loop->inner;
938
939 pe = loop_preheader_edge (loop);
940 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
941 gcc_assert (!new_bb);
942 }
943 else
944 {
945 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
946 basic_block bb;
947 gimple_stmt_iterator gsi_bb_start;
948
949 gcc_assert (bb_vinfo);
950 bb = BB_VINFO_BB (bb_vinfo);
951 gsi_bb_start = gsi_after_labels (bb);
952 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
953 }
954 }
955
956 if (vect_print_dump_info (REPORT_DETAILS))
957 {
958 fprintf (vect_dump, "created new init_stmt: ");
959 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
960 }
961
962 vec_oprnd = gimple_assign_lhs (init_stmt);
963 return vec_oprnd;
964 }
965
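/* For example (illustrative GIMPLE; the exact name depends on
   vect_get_new_vect_var): initializing a four-element integer constant
   vector would emit something like

     cst_.7 = { 3, 3, 3, 3 };

   in the loop preheader (or right before GSI when one is given) and return
   the ssa name cst_.7, which the caller then uses as a vector operand.  */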
966
967 /* Function vect_get_vec_def_for_operand.
968
969 OP is an operand in STMT. This function returns a (vector) def that will be
970 used in the vectorized stmt for STMT.
971
972 In the case that OP is an SSA_NAME which is defined in the loop, then
973 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
974
975 In case OP is an invariant or constant, a new stmt that creates a vector def
976 needs to be introduced. */
977
978 tree
979 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
980 {
981 tree vec_oprnd;
982 gimple vec_stmt;
983 gimple def_stmt;
984 stmt_vec_info def_stmt_info = NULL;
985 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
986 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
987 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
988 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
989 tree vec_inv;
990 tree vec_cst;
991 tree t = NULL_TREE;
992 tree def;
993 int i;
994 enum vect_def_type dt;
995 bool is_simple_use;
996 tree vector_type;
997
998 if (vect_print_dump_info (REPORT_DETAILS))
999 {
1000 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
1001 print_generic_expr (vect_dump, op, TDF_SLIM);
1002 }
1003
1004 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
1005 &dt);
1006 gcc_assert (is_simple_use);
1007 if (vect_print_dump_info (REPORT_DETAILS))
1008 {
1009 if (def)
1010 {
1011 fprintf (vect_dump, "def = ");
1012 print_generic_expr (vect_dump, def, TDF_SLIM);
1013 }
1014 if (def_stmt)
1015 {
1016 fprintf (vect_dump, " def_stmt = ");
1017 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1018 }
1019 }
1020
1021 switch (dt)
1022 {
1023 /* Case 1: operand is a constant. */
1024 case vect_constant_def:
1025 {
1026 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1027 gcc_assert (vector_type);
1028
1029 if (scalar_def)
1030 *scalar_def = op;
1031
1032 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1033 if (vect_print_dump_info (REPORT_DETAILS))
1034 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
1035
1036 for (i = nunits - 1; i >= 0; --i)
1037 {
1038 t = tree_cons (NULL_TREE, op, t);
1039 }
1040 vec_cst = build_vector (vector_type, t);
1041 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
1042 }
1043
1044 /* Case 2: operand is defined outside the loop - loop invariant. */
1045 case vect_external_def:
1046 {
1047 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1048 gcc_assert (vector_type);
1049 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1050
1051 if (scalar_def)
1052 *scalar_def = def;
1053
1054 /* Create 'vec_inv = {inv,inv,..,inv}' */
1055 if (vect_print_dump_info (REPORT_DETAILS))
1056 fprintf (vect_dump, "Create vector_inv.");
1057
1058 for (i = nunits - 1; i >= 0; --i)
1059 {
1060 t = tree_cons (NULL_TREE, def, t);
1061 }
1062
1063 /* FIXME: use build_constructor directly. */
1064 vec_inv = build_constructor_from_list (vector_type, t);
1065 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
1066 }
1067
1068 /* Case 3: operand is defined inside the loop. */
1069 case vect_internal_def:
1070 {
1071 if (scalar_def)
1072 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1073
1074 /* Get the def from the vectorized stmt. */
1075 def_stmt_info = vinfo_for_stmt (def_stmt);
1076 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1077 gcc_assert (vec_stmt);
1078 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1079 vec_oprnd = PHI_RESULT (vec_stmt);
1080 else if (is_gimple_call (vec_stmt))
1081 vec_oprnd = gimple_call_lhs (vec_stmt);
1082 else
1083 vec_oprnd = gimple_assign_lhs (vec_stmt);
1084 return vec_oprnd;
1085 }
1086
1087 /* Case 4: operand is defined by a loop header phi - reduction */
1088 case vect_reduction_def:
1089 case vect_double_reduction_def:
1090 case vect_nested_cycle:
1091 {
1092 struct loop *loop;
1093
1094 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1095 loop = (gimple_bb (def_stmt))->loop_father;
1096
1097 /* Get the def before the loop */
1098 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1099 return get_initial_def_for_reduction (stmt, op, scalar_def);
1100 }
1101
1102 /* Case 5: operand is defined by loop-header phi - induction. */
1103 case vect_induction_def:
1104 {
1105 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1106
1107 /* Get the def from the vectorized stmt. */
1108 def_stmt_info = vinfo_for_stmt (def_stmt);
1109 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1110 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1111 vec_oprnd = PHI_RESULT (vec_stmt);
1112 return vec_oprnd;
1113 }
1114
1115 default:
1116 gcc_unreachable ();
1117 }
1118 }
1119
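/* For instance (hypothetical stmts): given "t_1 = a[i]; x_2 = t_1 + 3",
   when vectorizing the addition the operand t_1 is a vect_internal_def, so
   its vector def is the lhs of the STMT_VINFO_VEC_STMT of the vectorized
   load (case 3), while the constant 3 is a vect_constant_def for which a
   {3,3,...,3} vector is built via vect_init_vector (case 1).  */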
1120
1121 /* Function vect_get_vec_def_for_stmt_copy
1122
1123 Return a vector-def for an operand. This function is used when the
1124 vectorized stmt to be created (by the caller to this function) is a "copy"
1125 created in case the vectorized result cannot fit in one vector, and several
1126 copies of the vector-stmt are required. In this case the vector-def is
1127 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1128 of the stmt that defines VEC_OPRND.
1129 DT is the type of the vector def VEC_OPRND.
1130
1131 Context:
1132 In case the vectorization factor (VF) is bigger than the number
1133 of elements that can fit in a vectype (nunits), we have to generate
1134 more than one vector stmt to vectorize the scalar stmt. This situation
1135 arises when there are multiple data-types operated upon in the loop; the
1136 smallest data-type determines the VF, and as a result, when vectorizing
1137 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1138 vector stmt (each computing a vector of 'nunits' results, and together
1139 computing 'VF' results in each iteration). This function is called when
1140 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1141 which VF=16 and nunits=4, so the number of copies required is 4):
1142
1143 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1144
1145 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1146 VS1.1: vx.1 = memref1 VS1.2
1147 VS1.2: vx.2 = memref2 VS1.3
1148 VS1.3: vx.3 = memref3
1149
1150 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1151 VSnew.1: vz1 = vx.1 + ... VSnew.2
1152 VSnew.2: vz2 = vx.2 + ... VSnew.3
1153 VSnew.3: vz3 = vx.3 + ...
1154
1155 The vectorization of S1 is explained in vectorizable_load.
1156 The vectorization of S2:
1157 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1158 the function 'vect_get_vec_def_for_operand' is called to
1159 get the relevant vector-def for each operand of S2. For operand x it
1160 returns the vector-def 'vx.0'.
1161
1162 To create the remaining copies of the vector-stmt (VSnew.j), this
1163 function is called to get the relevant vector-def for each operand. It is
1164 obtained from the respective VS1.j stmt, which is recorded in the
1165 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1166
1167 For example, to obtain the vector-def 'vx.1' in order to create the
1168 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1169 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1170 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1171 and return its def ('vx.1').
1172 Overall, to create the above sequence this function will be called 3 times:
1173 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1174 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1175 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1176
1177 tree
1178 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1179 {
1180 gimple vec_stmt_for_operand;
1181 stmt_vec_info def_stmt_info;
1182
1183 /* Do nothing; can reuse same def. */
1184 if (dt == vect_external_def || dt == vect_constant_def )
1185 return vec_oprnd;
1186
1187 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1188 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1189 gcc_assert (def_stmt_info);
1190 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1191 gcc_assert (vec_stmt_for_operand);
1192 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1193 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1194 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1195 else
1196 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1197 return vec_oprnd;
1198 }
1199
1200
1201 /* Get vectorized definitions for the operands to create a copy of an original
1202 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1203
1204 static void
1205 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1206 VEC(tree,heap) **vec_oprnds0,
1207 VEC(tree,heap) **vec_oprnds1)
1208 {
1209 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1210
1211 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1212 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1213
1214 if (vec_oprnds1 && *vec_oprnds1)
1215 {
1216 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1217 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1218 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1219 }
1220 }
1221
1222
1223 /* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
1224 NULL. */
1225
1226 static void
1227 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1228 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1229 slp_tree slp_node)
1230 {
1231 if (slp_node)
1232 vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1, -1);
1233 else
1234 {
1235 tree vec_oprnd;
1236
1237 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1238 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1239 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1240
1241 if (op1)
1242 {
1243 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1244 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1245 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1246 }
1247 }
1248 }
1249
1250
1251 /* Function vect_finish_stmt_generation.
1252
1253 Insert a new stmt. */
1254
1255 void
1256 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1257 gimple_stmt_iterator *gsi)
1258 {
1259 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1260 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1261 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1262
1263 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1264
1265 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1266
1267 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1268 bb_vinfo));
1269
1270 if (vect_print_dump_info (REPORT_DETAILS))
1271 {
1272 fprintf (vect_dump, "add new stmt: ");
1273 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1274 }
1275
1276 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1277 }
1278
1279 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1280 a function declaration if the target has a vectorized version
1281 of the function, or NULL_TREE if the function cannot be vectorized. */
1282
1283 tree
1284 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1285 {
1286 tree fndecl = gimple_call_fndecl (call);
1287
1288 /* We only handle functions that do not read or clobber memory -- i.e.
1289 const or novops ones. */
1290 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1291 return NULL_TREE;
1292
1293 if (!fndecl
1294 || TREE_CODE (fndecl) != FUNCTION_DECL
1295 || !DECL_BUILT_IN (fndecl))
1296 return NULL_TREE;
1297
1298 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1299 vectype_in);
1300 }
1301
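/* For example (target dependent): a loop like

     for (i = 0; i < n; i++)
       b[i] = __builtin_sqrt (a[i]);

   is vectorizable only if the target hook maps the sqrt builtin to a
   vector-sqrt builtin for the given (vectype_out, vectype_in) pair;
   otherwise NULL_TREE is returned here and vectorizable_call fails.  */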
1302 /* Function vectorizable_call.
1303
1304 Check if STMT performs a function call that can be vectorized.
1305 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1306 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1307 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1308
1309 static bool
1310 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1311 {
1312 tree vec_dest;
1313 tree scalar_dest;
1314 tree op, type;
1315 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1316 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1317 tree vectype_out, vectype_in;
1318 int nunits_in;
1319 int nunits_out;
1320 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1321 tree fndecl, new_temp, def, rhs_type;
1322 gimple def_stmt;
1323 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1324 gimple new_stmt = NULL;
1325 int ncopies, j;
1326 VEC(tree, heap) *vargs = NULL;
1327 enum { NARROW, NONE, WIDEN } modifier;
1328 size_t i, nargs;
1329
1330 /* FORNOW: unsupported in basic block SLP. */
1331 gcc_assert (loop_vinfo);
1332
1333 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1334 return false;
1335
1336 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1337 return false;
1338
1339 /* FORNOW: SLP not supported. */
1340 if (STMT_SLP_TYPE (stmt_info))
1341 return false;
1342
1343 /* Is STMT a vectorizable call? */
1344 if (!is_gimple_call (stmt))
1345 return false;
1346
1347 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1348 return false;
1349
1350 if (stmt_could_throw_p (stmt))
1351 return false;
1352
1353 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1354
1355 /* Process function arguments. */
1356 rhs_type = NULL_TREE;
1357 vectype_in = NULL_TREE;
1358 nargs = gimple_call_num_args (stmt);
1359
 1360 /* Bail out if the function has more than two arguments; we do not have
 1361 interesting builtin functions to vectorize with more than two arguments.
 1362 A call with no arguments is not handled either.  */
1363 if (nargs == 0 || nargs > 2)
1364 return false;
1365
1366 for (i = 0; i < nargs; i++)
1367 {
1368 tree opvectype;
1369
1370 op = gimple_call_arg (stmt, i);
1371
1372 /* We can only handle calls with arguments of the same type. */
1373 if (rhs_type
1374 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
1375 {
1376 if (vect_print_dump_info (REPORT_DETAILS))
1377 fprintf (vect_dump, "argument types differ.");
1378 return false;
1379 }
1380 if (!rhs_type)
1381 rhs_type = TREE_TYPE (op);
1382
1383 if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
1384 &def_stmt, &def, &dt[i], &opvectype))
1385 {
1386 if (vect_print_dump_info (REPORT_DETAILS))
1387 fprintf (vect_dump, "use not simple.");
1388 return false;
1389 }
1390
1391 if (!vectype_in)
1392 vectype_in = opvectype;
1393 else if (opvectype
1394 && opvectype != vectype_in)
1395 {
1396 if (vect_print_dump_info (REPORT_DETAILS))
1397 fprintf (vect_dump, "argument vector types differ.");
1398 return false;
1399 }
1400 }
1401 /* If all arguments are external or constant defs use a vector type with
1402 the same size as the output vector type. */
1403 if (!vectype_in)
1404 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1405 if (vec_stmt)
1406 gcc_assert (vectype_in);
1407 if (!vectype_in)
1408 {
1409 if (vect_print_dump_info (REPORT_DETAILS))
1410 {
1411 fprintf (vect_dump, "no vectype for scalar type ");
1412 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1413 }
1414
1415 return false;
1416 }
1417
1418 /* FORNOW */
1419 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1420 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1421 if (nunits_in == nunits_out / 2)
1422 modifier = NARROW;
1423 else if (nunits_out == nunits_in)
1424 modifier = NONE;
1425 else if (nunits_out == nunits_in / 2)
1426 modifier = WIDEN;
1427 else
1428 return false;
1429
1430 /* For now, we only vectorize functions if a target specific builtin
1431 is available. TODO -- in some cases, it might be profitable to
1432 insert the calls for pieces of the vector, in order to be able
1433 to vectorize other operations in the loop. */
1434 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1435 if (fndecl == NULL_TREE)
1436 {
1437 if (vect_print_dump_info (REPORT_DETAILS))
1438 fprintf (vect_dump, "function is not vectorizable.");
1439
1440 return false;
1441 }
1442
1443 gcc_assert (!gimple_vuse (stmt));
1444
1445 if (modifier == NARROW)
1446 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1447 else
1448 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1449
1450 /* Sanity check: make sure that at least one copy of the vectorized stmt
1451 needs to be generated. */
1452 gcc_assert (ncopies >= 1);
1453
1454 if (!vec_stmt) /* transformation not required. */
1455 {
1456 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1457 if (vect_print_dump_info (REPORT_DETAILS))
1458 fprintf (vect_dump, "=== vectorizable_call ===");
1459 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1460 return true;
1461 }
1462
1463 /** Transform. **/
1464
1465 if (vect_print_dump_info (REPORT_DETAILS))
1466 fprintf (vect_dump, "transform operation.");
1467
1468 /* Handle def. */
1469 scalar_dest = gimple_call_lhs (stmt);
1470 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1471
1472 prev_stmt_info = NULL;
1473 switch (modifier)
1474 {
1475 case NONE:
1476 for (j = 0; j < ncopies; ++j)
1477 {
1478 /* Build argument list for the vectorized call. */
1479 if (j == 0)
1480 vargs = VEC_alloc (tree, heap, nargs);
1481 else
1482 VEC_truncate (tree, vargs, 0);
1483
1484 for (i = 0; i < nargs; i++)
1485 {
1486 op = gimple_call_arg (stmt, i);
1487 if (j == 0)
1488 vec_oprnd0
1489 = vect_get_vec_def_for_operand (op, stmt, NULL);
1490 else
1491 {
1492 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1493 vec_oprnd0
1494 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1495 }
1496
1497 VEC_quick_push (tree, vargs, vec_oprnd0);
1498 }
1499
1500 new_stmt = gimple_build_call_vec (fndecl, vargs);
1501 new_temp = make_ssa_name (vec_dest, new_stmt);
1502 gimple_call_set_lhs (new_stmt, new_temp);
1503
1504 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1505 mark_symbols_for_renaming (new_stmt);
1506
1507 if (j == 0)
1508 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1509 else
1510 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1511
1512 prev_stmt_info = vinfo_for_stmt (new_stmt);
1513 }
1514
1515 break;
1516
1517 case NARROW:
1518 for (j = 0; j < ncopies; ++j)
1519 {
1520 /* Build argument list for the vectorized call. */
1521 if (j == 0)
1522 vargs = VEC_alloc (tree, heap, nargs * 2);
1523 else
1524 VEC_truncate (tree, vargs, 0);
1525
1526 for (i = 0; i < nargs; i++)
1527 {
1528 op = gimple_call_arg (stmt, i);
1529 if (j == 0)
1530 {
1531 vec_oprnd0
1532 = vect_get_vec_def_for_operand (op, stmt, NULL);
1533 vec_oprnd1
1534 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1535 }
1536 else
1537 {
1538 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1539 vec_oprnd0
1540 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1541 vec_oprnd1
1542 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1543 }
1544
1545 VEC_quick_push (tree, vargs, vec_oprnd0);
1546 VEC_quick_push (tree, vargs, vec_oprnd1);
1547 }
1548
1549 new_stmt = gimple_build_call_vec (fndecl, vargs);
1550 new_temp = make_ssa_name (vec_dest, new_stmt);
1551 gimple_call_set_lhs (new_stmt, new_temp);
1552
1553 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1554 mark_symbols_for_renaming (new_stmt);
1555
1556 if (j == 0)
1557 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1558 else
1559 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1560
1561 prev_stmt_info = vinfo_for_stmt (new_stmt);
1562 }
1563
1564 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1565
1566 break;
1567
1568 case WIDEN:
1569 /* No current target implements this case. */
1570 return false;
1571 }
1572
1573 VEC_free (tree, heap, vargs);
1574
1575 /* Update the exception handling table with the vector stmt if necessary. */
1576 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1577 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1578
1579 /* The call in STMT might prevent it from being removed in dce.
1580 We however cannot remove it here, due to the way the ssa name
1581 it defines is mapped to the new definition. So just replace
 1582 the rhs of the statement with something harmless.  */
1583
1584 type = TREE_TYPE (scalar_dest);
1585 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1586 fold_convert (type, integer_zero_node));
1587 set_vinfo_for_stmt (new_stmt, stmt_info);
1588 set_vinfo_for_stmt (stmt, NULL);
1589 STMT_VINFO_STMT (stmt_info) = new_stmt;
1590 gsi_replace (gsi, new_stmt, false);
1591 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1592
1593 return true;
1594 }
1595
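/* Worked illustration of the modifiers above (vector sizes hypothetical):
   for a call whose argument vectors hold 2 elements (nunits_in = 2) and
   whose result vector holds 4 (nunits_out = 4), the modifier is NARROW and
   each vectorized call consumes two argument vectors per scalar argument,
   which is why vargs is built with nargs * 2 entries; with VF = 4 this
   needs ncopies = VF / nunits_out = 1 call.  */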
1596
1597 /* Function vect_gen_widened_results_half
1598
 1599 Create a vector stmt whose code, number of arguments, and result
 1600 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
1601 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
1602 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1603 needs to be created (DECL is a function-decl of a target-builtin).
1604 STMT is the original scalar stmt that we are vectorizing. */
1605
1606 static gimple
1607 vect_gen_widened_results_half (enum tree_code code,
1608 tree decl,
1609 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1610 tree vec_dest, gimple_stmt_iterator *gsi,
1611 gimple stmt)
1612 {
1613 gimple new_stmt;
1614 tree new_temp;
1615
1616 /* Generate half of the widened result: */
1617 if (code == CALL_EXPR)
1618 {
1619 /* Target specific support */
1620 if (op_type == binary_op)
1621 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1622 else
1623 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1624 new_temp = make_ssa_name (vec_dest, new_stmt);
1625 gimple_call_set_lhs (new_stmt, new_temp);
1626 }
1627 else
1628 {
1629 /* Generic support */
1630 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1631 if (op_type != binary_op)
1632 vec_oprnd1 = NULL;
1633 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1634 vec_oprnd1);
1635 new_temp = make_ssa_name (vec_dest, new_stmt);
1636 gimple_assign_set_lhs (new_stmt, new_temp);
1637 }
1638 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1639
1640 return new_stmt;
1641 }
1642
1643
1644 /* Check if STMT performs a conversion operation, that can be vectorized.
1645 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1646 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1647 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1648
1649 static bool
1650 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1651 gimple *vec_stmt, slp_tree slp_node)
1652 {
1653 tree vec_dest;
1654 tree scalar_dest;
1655 tree op0;
1656 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1657 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1658 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1659 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1660 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1661 tree new_temp;
1662 tree def;
1663 gimple def_stmt;
1664 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1665 gimple new_stmt = NULL;
1666 stmt_vec_info prev_stmt_info;
1667 int nunits_in;
1668 int nunits_out;
1669 tree vectype_out, vectype_in;
1670 int ncopies, j;
1671 tree rhs_type;
1672 tree builtin_decl;
1673 enum { NARROW, NONE, WIDEN } modifier;
1674 int i;
1675 VEC(tree,heap) *vec_oprnds0 = NULL;
1676 tree vop0;
1677 VEC(tree,heap) *dummy = NULL;
1678 int dummy_int;
1679
1680 /* Is STMT a vectorizable conversion? */
1681
1682 /* FORNOW: unsupported in basic block SLP. */
1683 gcc_assert (loop_vinfo);
1684
1685 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1686 return false;
1687
1688 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1689 return false;
1690
1691 if (!is_gimple_assign (stmt))
1692 return false;
1693
1694 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1695 return false;
1696
1697 code = gimple_assign_rhs_code (stmt);
1698 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1699 return false;
1700
1701 /* Check types of lhs and rhs. */
1702 scalar_dest = gimple_assign_lhs (stmt);
1703 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1704
1705 op0 = gimple_assign_rhs1 (stmt);
1706 rhs_type = TREE_TYPE (op0);
1707 /* Check the operands of the operation. */
1708 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1709 &def_stmt, &def, &dt[0], &vectype_in))
1710 {
1711 if (vect_print_dump_info (REPORT_DETAILS))
1712 fprintf (vect_dump, "use not simple.");
1713 return false;
1714 }
1715 /* If op0 is an external or constant defs use a vector type of
1716 the same size as the output vector type. */
1717 if (!vectype_in)
1718 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1719 if (vec_stmt)
1720 gcc_assert (vectype_in);
1721 if (!vectype_in)
1722 {
1723 if (vect_print_dump_info (REPORT_DETAILS))
1724 {
1725 fprintf (vect_dump, "no vectype for scalar type ");
1726 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1727 }
1728
1729 return false;
1730 }
1731
1732 /* FORNOW */
1733 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1734 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1735 if (nunits_in == nunits_out / 2)
1736 modifier = NARROW;
1737 else if (nunits_out == nunits_in)
1738 modifier = NONE;
1739 else if (nunits_out == nunits_in / 2)
1740 modifier = WIDEN;
1741 else
1742 return false;
1743
1744 if (modifier == NARROW)
1745 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1746 else
1747 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1748
1749 /* Multiple types in SLP are handled by creating the appropriate number of
1750 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1751 case of SLP. */
1752 if (slp_node)
1753 ncopies = 1;
1754
1755 /* Sanity check: make sure that at least one copy of the vectorized stmt
1756 needs to be generated. */
1757 gcc_assert (ncopies >= 1);
1758
1759 /* Supportable by target? */
1760 if ((modifier == NONE
1761 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1762 || (modifier == WIDEN
1763 && !supportable_widening_operation (code, stmt,
1764 vectype_out, vectype_in,
1765 &decl1, &decl2,
1766 &code1, &code2,
1767 &dummy_int, &dummy))
1768 || (modifier == NARROW
1769 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1770 &code1, &dummy_int, &dummy)))
1771 {
1772 if (vect_print_dump_info (REPORT_DETAILS))
1773 fprintf (vect_dump, "conversion not supported by target.");
1774 return false;
1775 }
1776
1777 if (modifier != NONE)
1778 {
1779 /* FORNOW: SLP not supported. */
1780 if (STMT_SLP_TYPE (stmt_info))
1781 return false;
1782 }
1783
1784 if (!vec_stmt) /* transformation not required. */
1785 {
1786 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1787 return true;
1788 }
1789
1790 /** Transform. **/
1791 if (vect_print_dump_info (REPORT_DETAILS))
1792 fprintf (vect_dump, "transform conversion.");
1793
1794 /* Handle def. */
1795 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1796
1797 if (modifier == NONE && !slp_node)
1798 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1799
1800 prev_stmt_info = NULL;
1801 switch (modifier)
1802 {
1803 case NONE:
1804 for (j = 0; j < ncopies; j++)
1805 {
1806 if (j == 0)
1807 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1808 else
1809 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1810
1811 builtin_decl =
1812 targetm.vectorize.builtin_conversion (code,
1813 vectype_out, vectype_in);
1814 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
1815 {
1816 /* Arguments are ready. Create the new vector stmt. */
1817 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1818 new_temp = make_ssa_name (vec_dest, new_stmt);
1819 gimple_call_set_lhs (new_stmt, new_temp);
1820 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1821 if (slp_node)
1822 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1823 }
1824
1825 if (j == 0)
1826 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1827 else
1828 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1829 prev_stmt_info = vinfo_for_stmt (new_stmt);
1830 }
1831 break;
1832
1833 case WIDEN:
1834 /* In case the vectorization factor (VF) is bigger than the number
1835 of elements that we can fit in a vectype (nunits), we have to
1836 generate more than one vector stmt - i.e - we need to "unroll"
1837 the vector stmt by a factor VF/nunits. */
1838 for (j = 0; j < ncopies; j++)
1839 {
1840 if (j == 0)
1841 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1842 else
1843 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1844
1845 /* Generate first half of the widened result: */
1846 new_stmt
1847 = vect_gen_widened_results_half (code1, decl1,
1848 vec_oprnd0, vec_oprnd1,
1849 unary_op, vec_dest, gsi, stmt);
1850 if (j == 0)
1851 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1852 else
1853 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1854 prev_stmt_info = vinfo_for_stmt (new_stmt);
1855
1856 /* Generate second half of the widened result: */
1857 new_stmt
1858 = vect_gen_widened_results_half (code2, decl2,
1859 vec_oprnd0, vec_oprnd1,
1860 unary_op, vec_dest, gsi, stmt);
1861 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1862 prev_stmt_info = vinfo_for_stmt (new_stmt);
1863 }
1864 break;
1865
1866 case NARROW:
1867 /* In case the vectorization factor (VF) is bigger than the number
1868 of elements that we can fit in a vectype (nunits), we have to
1869 generate more than one vector stmt - i.e - we need to "unroll"
1870 the vector stmt by a factor VF/nunits. */
1871 for (j = 0; j < ncopies; j++)
1872 {
1873 /* Handle uses. */
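  /* Each narrowing stmt consumes two consecutive input vectors.  Chain
     the defs so that the next copy (j > 0) continues from where this
     one stopped.  */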
1874 if (j == 0)
1875 {
1876 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1877 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1878 }
1879 else
1880 {
1881 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1882 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1883 }
1884
1885 /* Arguments are ready. Create the new vector stmt. */
1886 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1887 vec_oprnd1);
1888 new_temp = make_ssa_name (vec_dest, new_stmt);
1889 gimple_assign_set_lhs (new_stmt, new_temp);
1890 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1891
1892 if (j == 0)
1893 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1894 else
1895 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1896
1897 prev_stmt_info = vinfo_for_stmt (new_stmt);
1898 }
1899
1900 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1901 }
1902
1903 if (vec_oprnds0)
1904 VEC_free (tree, heap, vec_oprnds0);
1905
1906 return true;
1907 }
1908
1909
1910 /* Function vectorizable_assignment.
1911
1912 Check if STMT performs an assignment (copy) that can be vectorized.
1913 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1914 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1915 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1916
1917 static bool
1918 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1919 gimple *vec_stmt, slp_tree slp_node)
1920 {
1921 tree vec_dest;
1922 tree scalar_dest;
1923 tree op;
1924 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1925 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1926 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1927 tree new_temp;
1928 tree def;
1929 gimple def_stmt;
1930 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1931 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1932 int ncopies;
1933 int i, j;
1934 VEC(tree,heap) *vec_oprnds = NULL;
1935 tree vop;
1936 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1937 gimple new_stmt = NULL;
1938 stmt_vec_info prev_stmt_info = NULL;
1939 enum tree_code code;
1940 tree vectype_in;
1941
1942 /* Multiple types in SLP are handled by creating the appropriate number of
1943 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1944 case of SLP. */
1945 if (slp_node)
1946 ncopies = 1;
1947 else
1948 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1949
1950 gcc_assert (ncopies >= 1);
1951
1952 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1953 return false;
1954
1955 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1956 return false;
1957
1958 /* Is vectorizable assignment? */
1959 if (!is_gimple_assign (stmt))
1960 return false;
1961
1962 scalar_dest = gimple_assign_lhs (stmt);
1963 if (TREE_CODE (scalar_dest) != SSA_NAME)
1964 return false;
1965
1966 code = gimple_assign_rhs_code (stmt);
1967 if (gimple_assign_single_p (stmt)
1968 || code == PAREN_EXPR
1969 || CONVERT_EXPR_CODE_P (code))
1970 op = gimple_assign_rhs1 (stmt);
1971 else
1972 return false;
1973
1974 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
1975 &def_stmt, &def, &dt[0], &vectype_in))
1976 {
1977 if (vect_print_dump_info (REPORT_DETAILS))
1978 fprintf (vect_dump, "use not simple.");
1979 return false;
1980 }
1981
1982 /* We can handle NOP_EXPR conversions that do not change the number
1983 of elements or the vector size. */
1984 if (CONVERT_EXPR_CODE_P (code)
1985 && (!vectype_in
1986 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
1987 || (GET_MODE_SIZE (TYPE_MODE (vectype))
1988 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
1989 return false;
1990
1991 if (!vec_stmt) /* transformation not required. */
1992 {
1993 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1994 if (vect_print_dump_info (REPORT_DETAILS))
1995 fprintf (vect_dump, "=== vectorizable_assignment ===");
1996 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1997 return true;
1998 }
1999
2000 /** Transform. **/
2001 if (vect_print_dump_info (REPORT_DETAILS))
2002 fprintf (vect_dump, "transform assignment.");
2003
2004 /* Handle def. */
2005 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2006
2007 /* Handle use. */
2008 for (j = 0; j < ncopies; j++)
2009 {
2010 /* Handle uses. */
2011 if (j == 0)
2012 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2013 else
2014 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2015
2016 /* Arguments are ready. Create the new vector stmt. */
2017 FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
2018 {
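  /* For the NOP conversions accepted above the element count and vector
     size are unchanged, so the conversion reduces to reinterpreting the
     bits of the operand.  */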
2019 if (CONVERT_EXPR_CODE_P (code))
2020 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2021 new_stmt = gimple_build_assign (vec_dest, vop);
2022 new_temp = make_ssa_name (vec_dest, new_stmt);
2023 gimple_assign_set_lhs (new_stmt, new_temp);
2024 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2025 if (slp_node)
2026 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2027 }
2028
2029 if (slp_node)
2030 continue;
2031
2032 if (j == 0)
2033 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2034 else
2035 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2036
2037 prev_stmt_info = vinfo_for_stmt (new_stmt);
2038 }
2039
2040 VEC_free (tree, heap, vec_oprnds);
2041 return true;
2042 }
2043
2044 /* Function vectorizable_operation.
2045
2046 Check if STMT performs a binary or unary operation that can be vectorized.
2047 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2048 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2049 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2050
2051 static bool
2052 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2053 gimple *vec_stmt, slp_tree slp_node)
2054 {
2055 tree vec_dest;
2056 tree scalar_dest;
2057 tree op0, op1 = NULL;
2058 tree vec_oprnd1 = NULL_TREE;
2059 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2060 tree vectype;
2061 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2062 enum tree_code code;
2063 enum machine_mode vec_mode;
2064 tree new_temp;
2065 int op_type;
2066 optab optab;
2067 int icode;
2068 enum machine_mode optab_op2_mode;
2069 tree def;
2070 gimple def_stmt;
2071 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2072 gimple new_stmt = NULL;
2073 stmt_vec_info prev_stmt_info;
2074 int nunits_in;
2075 int nunits_out;
2076 tree vectype_out;
2077 int ncopies;
2078 int j, i;
2079 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2080 tree vop0, vop1;
2081 unsigned int k;
2082 bool scalar_shift_arg = false;
2083 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2084 int vf;
2085
2086 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2087 return false;
2088
2089 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2090 return false;
2091
2092 /* Is STMT a vectorizable binary/unary operation? */
2093 if (!is_gimple_assign (stmt))
2094 return false;
2095
2096 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2097 return false;
2098
2099 code = gimple_assign_rhs_code (stmt);
2100
2101 /* For pointer addition, we should use the normal plus for
2102 the vector addition. */
2103 if (code == POINTER_PLUS_EXPR)
2104 code = PLUS_EXPR;
2105
2106 /* Support only unary or binary operations. */
2107 op_type = TREE_CODE_LENGTH (code);
2108 if (op_type != unary_op && op_type != binary_op)
2109 {
2110 if (vect_print_dump_info (REPORT_DETAILS))
2111 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
2112 return false;
2113 }
2114
2115 scalar_dest = gimple_assign_lhs (stmt);
2116 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2117
2118 op0 = gimple_assign_rhs1 (stmt);
2119 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2120 &def_stmt, &def, &dt[0], &vectype))
2121 {
2122 if (vect_print_dump_info (REPORT_DETAILS))
2123 fprintf (vect_dump, "use not simple.");
2124 return false;
2125 }
2126 /* If op0 is an external or constant def use a vector type with
2127 the same size as the output vector type. */
2128 if (!vectype)
2129 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2130 if (vec_stmt)
2131 gcc_assert (vectype);
2132 if (!vectype)
2133 {
2134 if (vect_print_dump_info (REPORT_DETAILS))
2135 {
2136 fprintf (vect_dump, "no vectype for scalar type ");
2137 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2138 }
2139
2140 return false;
2141 }
2142
2143 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2144 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2145 if (nunits_out != nunits_in)
2146 return false;
2147
2148 if (op_type == binary_op)
2149 {
2150 op1 = gimple_assign_rhs2 (stmt);
2151 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2152 &dt[1]))
2153 {
2154 if (vect_print_dump_info (REPORT_DETAILS))
2155 fprintf (vect_dump, "use not simple.");
2156 return false;
2157 }
2158 }
2159
2160 if (loop_vinfo)
2161 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2162 else
2163 vf = 1;
2164
2165 /* Multiple types in SLP are handled by creating the appropriate number of
2166 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2167 case of SLP. */
2168 if (slp_node)
2169 ncopies = 1;
2170 else
2171 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2172
2173 gcc_assert (ncopies >= 1);
2174
2175 /* If this is a shift/rotate, determine whether the shift amount is a vector,
2176 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2177 shift optabs. */
2178 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2179 || code == RROTATE_EXPR)
2180 {
2181 /* vector shifted by vector */
2182 if (dt[1] == vect_internal_def)
2183 {
2184 optab = optab_for_tree_code (code, vectype, optab_vector);
2185 if (vect_print_dump_info (REPORT_DETAILS))
2186 fprintf (vect_dump, "vector/vector shift/rotate found.");
2187 }
2188
2189 /* See if the machine has a vector-by-scalar shift insn, and if not,
2190 then see if it has a vector-by-vector shift insn. */
2191 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2192 {
2193 optab = optab_for_tree_code (code, vectype, optab_scalar);
2194 if (optab
2195 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2196 {
2197 scalar_shift_arg = true;
2198 if (vect_print_dump_info (REPORT_DETAILS))
2199 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2200 }
2201 else
2202 {
2203 optab = optab_for_tree_code (code, vectype, optab_vector);
2204 if (optab
2205 && (optab_handler (optab, TYPE_MODE (vectype))
2206 != CODE_FOR_nothing))
2207 {
2208 if (vect_print_dump_info (REPORT_DETAILS))
2209 fprintf (vect_dump, "vector/vector shift/rotate found.");
2210
2211 /* Unlike the other binary operators, shifts/rotates have an rhs of
2212 integer type rather than the same type as the lhs, so make sure
2213 the scalar is of the right type if we are dealing with vectors
2214 of short/char. */
2215 if (dt[1] == vect_constant_def)
2216 op1 = fold_convert (TREE_TYPE (vectype), op1);
2217 }
2218 }
2219 }
2220
2221 else
2222 {
2223 if (vect_print_dump_info (REPORT_DETAILS))
2224 fprintf (vect_dump, "operand mode requires invariant argument.");
2225 return false;
2226 }
2227 }
2228 else
2229 optab = optab_for_tree_code (code, vectype, optab_default);
2230
2231 /* Supportable by target? */
2232 if (!optab)
2233 {
2234 if (vect_print_dump_info (REPORT_DETAILS))
2235 fprintf (vect_dump, "no optab.");
2236 return false;
2237 }
2238 vec_mode = TYPE_MODE (vectype);
2239 icode = (int) optab_handler (optab, vec_mode);
2240 if (icode == CODE_FOR_nothing)
2241 {
2242 if (vect_print_dump_info (REPORT_DETAILS))
2243 fprintf (vect_dump, "op not supported by target.");
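  /* There is no vector insn for this operation, but when the whole vector
     fits in a machine word the operation can still be carried out on a
     word-sized integer.  */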
2244 /* Check only during analysis. */
2245 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2246 || (vf < vect_min_worthwhile_factor (code)
2247 && !vec_stmt))
2248 return false;
2249 if (vect_print_dump_info (REPORT_DETAILS))
2250 fprintf (vect_dump, "proceeding using word mode.");
2251 }
2252
2253 /* Worthwhile without SIMD support? Check only during analysis. */
2254 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2255 && vf < vect_min_worthwhile_factor (code)
2256 && !vec_stmt)
2257 {
2258 if (vect_print_dump_info (REPORT_DETAILS))
2259 fprintf (vect_dump, "not worthwhile without SIMD support.");
2260 return false;
2261 }
2262
2263 if (!vec_stmt) /* transformation not required. */
2264 {
2265 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2266 if (vect_print_dump_info (REPORT_DETAILS))
2267 fprintf (vect_dump, "=== vectorizable_operation ===");
2268 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2269 return true;
2270 }
2271
2272 /** Transform. **/
2273
2274 if (vect_print_dump_info (REPORT_DETAILS))
2275 fprintf (vect_dump, "transform binary/unary operation.");
2276
2277 /* Handle def. */
2278 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2279
2280 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2281 created in the previous stages of the recursion, so no allocation is
2282 needed, except for the case of shift with scalar shift argument. In that
2283 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2284 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2285 In case of loop-based vectorization we allocate VECs of size 1. We
2286 allocate VEC_OPRNDS1 only in case of binary operation. */
2287 if (!slp_node)
2288 {
2289 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2290 if (op_type == binary_op)
2291 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2292 }
2293 else if (scalar_shift_arg)
2294 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2295
2296 /* In case the vectorization factor (VF) is bigger than the number
2297 of elements that we can fit in a vectype (nunits), we have to generate
2298 more than one vector stmt - i.e - we need to "unroll" the
2299 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2300 from one copy of the vector stmt to the next, in the field
2301 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2302 stages to find the correct vector defs to be used when vectorizing
2303 stmts that use the defs of the current stmt. The example below
2304 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
2305 we need to create 4 vectorized stmts):
2306
2307 before vectorization:
2308 RELATED_STMT VEC_STMT
2309 S1: x = memref - -
2310 S2: z = x + 1 - -
2311
2312 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2313 there):
2314 RELATED_STMT VEC_STMT
2315 VS1_0: vx0 = memref0 VS1_1 -
2316 VS1_1: vx1 = memref1 VS1_2 -
2317 VS1_2: vx2 = memref2 VS1_3 -
2318 VS1_3: vx3 = memref3 - -
2319 S1: x = load - VS1_0
2320 S2: z = x + 1 - -
2321
2322 step2: vectorize stmt S2 (done here):
2323 To vectorize stmt S2 we first need to find the relevant vector
2324 def for the first operand 'x'. This is, as usual, obtained from
2325 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2326 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2327 relevant vector def 'vx0'. Having found 'vx0' we can generate
2328 the vector stmt VS2_0, and as usual, record it in the
2329 STMT_VINFO_VEC_STMT of stmt S2.
2330 When creating the second copy (VS2_1), we obtain the relevant vector
2331 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2332 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2333 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2334 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2335 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2336 chain of stmts and pointers:
2337 RELATED_STMT VEC_STMT
2338 VS1_0: vx0 = memref0 VS1_1 -
2339 VS1_1: vx1 = memref1 VS1_2 -
2340 VS1_2: vx2 = memref2 VS1_3 -
2341 VS1_3: vx3 = memref3 - -
2342 S1: x = load - VS1_0
2343 VS2_0: vz0 = vx0 + v1 VS2_1 -
2344 VS2_1: vz1 = vx1 + v1 VS2_2 -
2345 VS2_2: vz2 = vx2 + v1 VS2_3 -
2346 VS2_3: vz3 = vx3 + v1 - -
2347 S2: z = x + 1 - VS2_0 */
2348
2349 prev_stmt_info = NULL;
2350 for (j = 0; j < ncopies; j++)
2351 {
2352 /* Handle uses. */
2353 if (j == 0)
2354 {
2355 if (op_type == binary_op && scalar_shift_arg)
2356 {
2357 /* Vector shl and shr insn patterns can be defined with scalar
2358 operand 2 (shift operand). In this case, use constant or loop
2359 invariant op1 directly, without extending it to vector mode
2360 first. */
2361 optab_op2_mode = insn_data[icode].operand[2].mode;
2362 if (!VECTOR_MODE_P (optab_op2_mode))
2363 {
2364 if (vect_print_dump_info (REPORT_DETAILS))
2365 fprintf (vect_dump, "operand 1 using scalar mode.");
2366 vec_oprnd1 = op1;
2367 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2368 if (slp_node)
2369 {
2370 /* Store vec_oprnd1 for every vector stmt to be created
2371 for SLP_NODE. We check during the analysis that all
2372 the shift arguments are the same.
2373 TODO: Allow different constants for different vector
2374 stmts generated for an SLP instance. */
2375 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2376 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2377 }
2378 }
2379 }
2380
2381 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2382 (a special case for certain kinds of vector shifts); otherwise,
2383 operand 1 should be of a vector type (the usual case). */
2384 if (op_type == binary_op && !vec_oprnd1)
2385 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2386 slp_node);
2387 else
2388 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2389 slp_node);
2390 }
2391 else
2392 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2393
2394 /* Arguments are ready. Create the new vector stmt. */
2395 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2396 {
2397 vop1 = ((op_type == binary_op)
2398 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2399 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2400 new_temp = make_ssa_name (vec_dest, new_stmt);
2401 gimple_assign_set_lhs (new_stmt, new_temp);
2402 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2403 if (slp_node)
2404 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2405 }
2406
2407 if (slp_node)
2408 continue;
2409
2410 if (j == 0)
2411 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2412 else
2413 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2414 prev_stmt_info = vinfo_for_stmt (new_stmt);
2415 }
2416
2417 VEC_free (tree, heap, vec_oprnds0);
2418 if (vec_oprnds1)
2419 VEC_free (tree, heap, vec_oprnds1);
2420
2421 return true;
2422 }
2423
2424
2425 /* Get vectorized definitions for loop-based vectorization. For the first
2426 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2427 scalar operand), and for the rest we get a copy with
2428 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2429 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2430 The vectors are collected into VEC_OPRNDS. */
2431
2432 static void
2433 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2434 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2435 {
2436 tree vec_oprnd;
2437
2438 /* Get first vector operand. */
2439 /* All the vector operands except the very first one (that is, the scalar oprnd)
2440 are stmt copies. */
2441 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2442 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2443 else
2444 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2445
2446 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2447
2448 /* Get second vector operand. */
2449 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2450 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2451
2452 *oprnd = vec_oprnd;
2453
2454 /* For conversion in multiple steps, continue to get operands
2455 recursively. */
2456 if (multi_step_cvt)
2457 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2458 }
2459
2460
2461 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2462 For multi-step conversions store the resulting vectors and call the function
2463 recursively. */
2464
2465 static void
2466 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2467 int multi_step_cvt, gimple stmt,
2468 VEC (tree, heap) *vec_dsts,
2469 gimple_stmt_iterator *gsi,
2470 slp_tree slp_node, enum tree_code code,
2471 stmt_vec_info *prev_stmt_info)
2472 {
2473 unsigned int i;
2474 tree vop0, vop1, new_tmp, vec_dest;
2475 gimple new_stmt;
2476 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2477
2478 vec_dest = VEC_pop (tree, vec_dsts);
2479
2480 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2481 {
2482 /* Create demotion operation. */
2483 vop0 = VEC_index (tree, *vec_oprnds, i);
2484 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2485 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2486 new_tmp = make_ssa_name (vec_dest, new_stmt);
2487 gimple_assign_set_lhs (new_stmt, new_tmp);
2488 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2489
2490 if (multi_step_cvt)
2491 /* Store the resulting vector for next recursive call. */
2492 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2493 else
2494 {
2495 /* This is the last step of the conversion sequence. Store the
2496 vectors in SLP_NODE or in vector info of the scalar statement
2497 (or in STMT_VINFO_RELATED_STMT chain). */
2498 if (slp_node)
2499 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2500 else
2501 {
2502 if (!*prev_stmt_info)
2503 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2504 else
2505 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2506
2507 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2508 }
2509 }
2510 }
2511
2512 /* For multi-step demotion operations we first generate demotion operations
2513 from the source type to the intermediate types, and then combine the
2514 results (stored in VEC_OPRNDS) in a demotion operation to the destination
2515 type. */
2516 if (multi_step_cvt)
2517 {
2518 /* At each level of recursion we have half of the operands we had at the
2519 previous level. */
2520 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2521 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2522 stmt, vec_dsts, gsi, slp_node,
2523 code, prev_stmt_info);
2524 }
2525 }
2526
2527
2528 /* Function vectorizable_type_demotion
2529
2530 Check if STMT performs a binary or unary operation that involves
2531 type demotion, and if it can be vectorized.
2532 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2533 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2534 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2535
2536 static bool
2537 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2538 gimple *vec_stmt, slp_tree slp_node)
2539 {
2540 tree vec_dest;
2541 tree scalar_dest;
2542 tree op0;
2543 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2544 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2545 enum tree_code code, code1 = ERROR_MARK;
2546 tree def;
2547 gimple def_stmt;
2548 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2549 stmt_vec_info prev_stmt_info;
2550 int nunits_in;
2551 int nunits_out;
2552 tree vectype_out;
2553 int ncopies;
2554 int j, i;
2555 tree vectype_in;
2556 int multi_step_cvt = 0;
2557 VEC (tree, heap) *vec_oprnds0 = NULL;
2558 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2559 tree last_oprnd, intermediate_type;
2560
2561 /* FORNOW: not supported by basic block SLP vectorization. */
2562 gcc_assert (loop_vinfo);
2563
2564 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2565 return false;
2566
2567 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2568 return false;
2569
2570 /* Is STMT a vectorizable type-demotion operation? */
2571 if (!is_gimple_assign (stmt))
2572 return false;
2573
2574 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2575 return false;
2576
2577 code = gimple_assign_rhs_code (stmt);
2578 if (!CONVERT_EXPR_CODE_P (code))
2579 return false;
2580
2581 scalar_dest = gimple_assign_lhs (stmt);
2582 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2583
2584 /* Check the operands of the operation. */
2585 op0 = gimple_assign_rhs1 (stmt);
2586 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2587 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2588 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2589 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2590 && CONVERT_EXPR_CODE_P (code))))
2591 return false;
2592 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2593 &def_stmt, &def, &dt[0], &vectype_in))
2594 {
2595 if (vect_print_dump_info (REPORT_DETAILS))
2596 fprintf (vect_dump, "use not simple.");
2597 return false;
2598 }
2599 /* If op0 is an external def use a vector type with the
2600 same size as the output vector type if possible. */
2601 if (!vectype_in)
2602 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2603 if (vec_stmt)
2604 gcc_assert (vectype_in);
2605 if (!vectype_in)
2606 {
2607 if (vect_print_dump_info (REPORT_DETAILS))
2608 {
2609 fprintf (vect_dump, "no vectype for scalar type ");
2610 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2611 }
2612
2613 return false;
2614 }
2615
2616 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2617 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2618 if (nunits_in >= nunits_out)
2619 return false;
2620
2621 /* Multiple types in SLP are handled by creating the appropriate number of
2622 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2623 case of SLP. */
2624 if (slp_node)
2625 ncopies = 1;
2626 else
2627 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2628 gcc_assert (ncopies >= 1);
2629
2630 /* Supportable by target? */
2631 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2632 &code1, &multi_step_cvt, &interm_types))
2633 return false;
2634
2635 if (!vec_stmt) /* transformation not required. */
2636 {
2637 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2638 if (vect_print_dump_info (REPORT_DETAILS))
2639 fprintf (vect_dump, "=== vectorizable_demotion ===");
2640 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2641 return true;
2642 }
2643
2644 /** Transform. **/
2645 if (vect_print_dump_info (REPORT_DETAILS))
2646 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2647 ncopies);
2648
2649 /* In case of multi-step demotion, we first generate demotion operations to
2650 the intermediate types, and then from those types to the final one.
2651 We create vector destinations for the intermediate type (TYPES) received
2652 from supportable_narrowing_operation, and store them in the correct order
2653 for future use in vect_create_vectorized_demotion_stmts(). */
2654 if (multi_step_cvt)
2655 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2656 else
2657 vec_dsts = VEC_alloc (tree, heap, 1);
2658
2659 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2660 VEC_quick_push (tree, vec_dsts, vec_dest);
2661
2662 if (multi_step_cvt)
2663 {
2664 for (i = VEC_length (tree, interm_types) - 1;
2665 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2666 {
2667 vec_dest = vect_create_destination_var (scalar_dest,
2668 intermediate_type);
2669 VEC_quick_push (tree, vec_dsts, vec_dest);
2670 }
2671 }
2672
2673 /* In case the vectorization factor (VF) is bigger than the number
2674 of elements that we can fit in a vectype (nunits), we have to generate
2675 more than one vector stmt - i.e - we need to "unroll" the
2676 vector stmt by a factor VF/nunits. */
2677 last_oprnd = op0;
2678 prev_stmt_info = NULL;
2679 for (j = 0; j < ncopies; j++)
2680 {
2681 /* Handle uses. */
2682 if (slp_node)
2683 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL, -1);
2684 else
2685 {
2686 VEC_free (tree, heap, vec_oprnds0);
2687 vec_oprnds0 = VEC_alloc (tree, heap,
2688 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
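  /* Collect the input vector defs needed for one copy: two for a
     single-step demotion, and 2 * 2^multi_step_cvt for a multi-step one
     (vect_get_loop_based_defs pushes two defs per invocation).  */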
2689 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2690 vect_pow2 (multi_step_cvt) - 1);
2691 }
2692
2693 /* Arguments are ready. Create the new vector stmts. */
2694 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2695 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2696 multi_step_cvt, stmt, tmp_vec_dsts,
2697 gsi, slp_node, code1,
2698 &prev_stmt_info);
2699 }
2700
2701 VEC_free (tree, heap, vec_oprnds0);
2702 VEC_free (tree, heap, vec_dsts);
2703 VEC_free (tree, heap, tmp_vec_dsts);
2704 VEC_free (tree, heap, interm_types);
2705
2706 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2707 return true;
2708 }
2709
2710
2711 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2712 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2713 the resulting vectors and call the function recursively. */
2714
2715 static void
2716 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2717 VEC (tree, heap) **vec_oprnds1,
2718 int multi_step_cvt, gimple stmt,
2719 VEC (tree, heap) *vec_dsts,
2720 gimple_stmt_iterator *gsi,
2721 slp_tree slp_node, enum tree_code code1,
2722 enum tree_code code2, tree decl1,
2723 tree decl2, int op_type,
2724 stmt_vec_info *prev_stmt_info)
2725 {
2726 int i;
2727 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2728 gimple new_stmt1, new_stmt2;
2729 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2730 VEC (tree, heap) *vec_tmp;
2731
2732 vec_dest = VEC_pop (tree, vec_dsts);
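  /* Each input vector yields two result vectors (the low and high halves of
     the widened elements), so reserve twice as many slots.  */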
2733 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2734
2735 FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
2736 {
2737 if (op_type == binary_op)
2738 vop1 = VEC_index (tree, *vec_oprnds1, i);
2739 else
2740 vop1 = NULL_TREE;
2741
2742 /* Generate the two halves of the promotion operation. */
2743 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2744 op_type, vec_dest, gsi, stmt);
2745 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2746 op_type, vec_dest, gsi, stmt);
2747 if (is_gimple_call (new_stmt1))
2748 {
2749 new_tmp1 = gimple_call_lhs (new_stmt1);
2750 new_tmp2 = gimple_call_lhs (new_stmt2);
2751 }
2752 else
2753 {
2754 new_tmp1 = gimple_assign_lhs (new_stmt1);
2755 new_tmp2 = gimple_assign_lhs (new_stmt2);
2756 }
2757
2758 if (multi_step_cvt)
2759 {
2760 /* Store the results for the recursive call. */
2761 VEC_quick_push (tree, vec_tmp, new_tmp1);
2762 VEC_quick_push (tree, vec_tmp, new_tmp2);
2763 }
2764 else
2765 {
2766 /* Last step of the promotion sequence - store the results. */
2767 if (slp_node)
2768 {
2769 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2770 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2771 }
2772 else
2773 {
2774 if (!*prev_stmt_info)
2775 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2776 else
2777 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2778
2779 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2780 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2781 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2782 }
2783 }
2784 }
2785
2786 if (multi_step_cvt)
2787 {
2788 /* For a multi-step promotion operation we call the function recursively for
2789 every stage. We start from the input type,
2790 create promotion operations to the intermediate types, and then
2791 create promotions to the output type. */
2792 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2793 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2794 multi_step_cvt - 1, stmt,
2795 vec_dsts, gsi, slp_node, code1,
2796 code2, decl1, decl2, op_type,
2797 prev_stmt_info);
2798 }
2799
2800 VEC_free (tree, heap, vec_tmp);
2801 }
2802
2803
2804 /* Function vectorizable_type_promotion
2805
2806 Check if STMT performs a binary or unary operation that involves
2807 type promotion, and if it can be vectorized.
2808 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2809 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2810 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2811
2812 static bool
2813 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2814 gimple *vec_stmt, slp_tree slp_node)
2815 {
2816 tree vec_dest;
2817 tree scalar_dest;
2818 tree op0, op1 = NULL;
2819 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
2820 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2821 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2822 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2823 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2824 int op_type;
2825 tree def;
2826 gimple def_stmt;
2827 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2828 stmt_vec_info prev_stmt_info;
2829 int nunits_in;
2830 int nunits_out;
2831 tree vectype_out;
2832 int ncopies;
2833 int j, i;
2834 tree vectype_in;
2835 tree intermediate_type = NULL_TREE;
2836 int multi_step_cvt = 0;
2837 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2838 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2839
2840 /* FORNOW: not supported by basic block SLP vectorization. */
2841 gcc_assert (loop_vinfo);
2842
2843 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2844 return false;
2845
2846 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2847 return false;
2848
2849 /* Is STMT a vectorizable type-promotion operation? */
2850 if (!is_gimple_assign (stmt))
2851 return false;
2852
2853 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2854 return false;
2855
2856 code = gimple_assign_rhs_code (stmt);
2857 if (!CONVERT_EXPR_CODE_P (code)
2858 && code != WIDEN_MULT_EXPR)
2859 return false;
2860
2861 scalar_dest = gimple_assign_lhs (stmt);
2862 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2863
2864 /* Check the operands of the operation. */
2865 op0 = gimple_assign_rhs1 (stmt);
2866 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2867 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2868 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2869 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2870 && CONVERT_EXPR_CODE_P (code))))
2871 return false;
2872 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2873 &def_stmt, &def, &dt[0], &vectype_in))
2874 {
2875 if (vect_print_dump_info (REPORT_DETAILS))
2876 fprintf (vect_dump, "use not simple.");
2877 return false;
2878 }
2879 /* If op0 is an external or constant def use a vector type with
2880 the same size as the output vector type. */
2881 if (!vectype_in)
2882 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2883 if (vec_stmt)
2884 gcc_assert (vectype_in);
2885 if (!vectype_in)
2886 {
2887 if (vect_print_dump_info (REPORT_DETAILS))
2888 {
2889 fprintf (vect_dump, "no vectype for scalar type ");
2890 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2891 }
2892
2893 return false;
2894 }
2895
2896 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2897 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2898 if (nunits_in <= nunits_out)
2899 return false;
2900
2901 /* Multiple types in SLP are handled by creating the appropriate number of
2902 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2903 case of SLP. */
2904 if (slp_node)
2905 ncopies = 1;
2906 else
2907 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2908
2909 gcc_assert (ncopies >= 1);
2910
2911 op_type = TREE_CODE_LENGTH (code);
2912 if (op_type == binary_op)
2913 {
2914 op1 = gimple_assign_rhs2 (stmt);
2915 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2916 {
2917 if (vect_print_dump_info (REPORT_DETAILS))
2918 fprintf (vect_dump, "use not simple.");
2919 return false;
2920 }
2921 }
2922
2923 /* Supportable by target? */
2924 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
2925 &decl1, &decl2, &code1, &code2,
2926 &multi_step_cvt, &interm_types))
2927 return false;
2928
2929 /* Binary widening operation can only be supported directly by the
2930 architecture. */
2931 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2932
2933 if (!vec_stmt) /* transformation not required. */
2934 {
2935 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2936 if (vect_print_dump_info (REPORT_DETAILS))
2937 fprintf (vect_dump, "=== vectorizable_promotion ===");
2938 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2939 return true;
2940 }
2941
2942 /** Transform. **/
2943
2944 if (vect_print_dump_info (REPORT_DETAILS))
2945 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2946 ncopies);
2947
2948 /* Handle def. */
2949 /* In case of multi-step promotion, we first generate promotion operations
2950 to the intermediate types, and then from those types to the final one.
2951 We store the vector destinations in VEC_DSTS in the correct order for
2952 recursive creation of promotion operations in
2953 vect_create_vectorized_promotion_stmts(). Vector destinations are created
2954 according to TYPES received from supportable_widening_operation(). */
2955 if (multi_step_cvt)
2956 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2957 else
2958 vec_dsts = VEC_alloc (tree, heap, 1);
2959
2960 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2961 VEC_quick_push (tree, vec_dsts, vec_dest);
2962
2963 if (multi_step_cvt)
2964 {
2965 for (i = VEC_length (tree, interm_types) - 1;
2966 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2967 {
2968 vec_dest = vect_create_destination_var (scalar_dest,
2969 intermediate_type);
2970 VEC_quick_push (tree, vec_dsts, vec_dest);
2971 }
2972 }
2973
2974 if (!slp_node)
2975 {
2976 vec_oprnds0 = VEC_alloc (tree, heap,
2977 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2978 if (op_type == binary_op)
2979 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2980 }
2981
2982 /* In case the vectorization factor (VF) is bigger than the number
2983 of elements that we can fit in a vectype (nunits), we have to generate
2984 more than one vector stmt - i.e - we need to "unroll" the
2985 vector stmt by a factor VF/nunits. */
2986
2987 prev_stmt_info = NULL;
2988 for (j = 0; j < ncopies; j++)
2989 {
2990 /* Handle uses. */
2991 if (j == 0)
2992 {
2993 if (slp_node)
2994 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
2995 else
2996 {
2997 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2998 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2999 if (op_type == binary_op)
3000 {
3001 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3002 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
3003 }
3004 }
3005 }
3006 else
3007 {
3008 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3009 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
3010 if (op_type == binary_op)
3011 {
3012 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3013 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3014 }
3015 }
3016
3017 /* Arguments are ready. Create the new vector stmts. */
3018 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3019 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
3020 multi_step_cvt, stmt,
3021 tmp_vec_dsts,
3022 gsi, slp_node, code1, code2,
3023 decl1, decl2, op_type,
3024 &prev_stmt_info);
3025 }
3026
3027 VEC_free (tree, heap, vec_dsts);
3028 VEC_free (tree, heap, tmp_vec_dsts);
3029 VEC_free (tree, heap, interm_types);
3030 VEC_free (tree, heap, vec_oprnds0);
3031 VEC_free (tree, heap, vec_oprnds1);
3032
3033 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3034 return true;
3035 }
3036
3037
3038 /* Function vectorizable_store.
3039
3040 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
3041 can be vectorized.
3042 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3043 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3044 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3045
3046 static bool
3047 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3048 slp_tree slp_node)
3049 {
3050 tree scalar_dest;
3051 tree data_ref;
3052 tree op;
3053 tree vec_oprnd = NULL_TREE;
3054 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3055 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3056 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3057 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3058 struct loop *loop = NULL;
3059 enum machine_mode vec_mode;
3060 tree dummy;
3061 enum dr_alignment_support alignment_support_scheme;
3062 tree def;
3063 gimple def_stmt;
3064 enum vect_def_type dt;
3065 stmt_vec_info prev_stmt_info = NULL;
3066 tree dataref_ptr = NULL_TREE;
3067 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3068 int ncopies;
3069 int j;
3070 gimple next_stmt, first_stmt = NULL;
3071 bool strided_store = false;
3072 unsigned int group_size, i;
3073 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3074 bool inv_p;
3075 VEC(tree,heap) *vec_oprnds = NULL;
3076 bool slp = (slp_node != NULL);
3077 unsigned int vec_num;
3078 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3079
3080 if (loop_vinfo)
3081 loop = LOOP_VINFO_LOOP (loop_vinfo);
3082
3083 /* Multiple types in SLP are handled by creating the appropriate number of
3084 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3085 case of SLP. */
3086 if (slp)
3087 ncopies = 1;
3088 else
3089 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3090
3091 gcc_assert (ncopies >= 1);
3092
3093 /* FORNOW. This restriction should be relaxed. */
3094 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3095 {
3096 if (vect_print_dump_info (REPORT_DETAILS))
3097 fprintf (vect_dump, "multiple types in nested loop.");
3098 return false;
3099 }
3100
3101 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3102 return false;
3103
3104 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3105 return false;
3106
3107 /* Is vectorizable store? */
3108
3109 if (!is_gimple_assign (stmt))
3110 return false;
3111
3112 scalar_dest = gimple_assign_lhs (stmt);
3113 if (TREE_CODE (scalar_dest) != ARRAY_REF
3114 && TREE_CODE (scalar_dest) != INDIRECT_REF
3115 && TREE_CODE (scalar_dest) != COMPONENT_REF
3116 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3117 && TREE_CODE (scalar_dest) != REALPART_EXPR
3118 && TREE_CODE (scalar_dest) != MEM_REF)
3119 return false;
3120
3121 gcc_assert (gimple_assign_single_p (stmt));
3122 op = gimple_assign_rhs1 (stmt);
3123 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3124 {
3125 if (vect_print_dump_info (REPORT_DETAILS))
3126 fprintf (vect_dump, "use not simple.");
3127 return false;
3128 }
3129
3130 /* The scalar rhs type needs to be trivially convertible to the vector
3131 component type. This should always be the case. */
3132 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
3133 {
3134 if (vect_print_dump_info (REPORT_DETAILS))
3135 fprintf (vect_dump, "??? operands of different types");
3136 return false;
3137 }
3138
3139 vec_mode = TYPE_MODE (vectype);
3140 /* FORNOW. In some cases we can vectorize even if the data-type is not
3141 supported (e.g. array initialization with 0). */
3142 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3143 return false;
3144
3145 if (!STMT_VINFO_DATA_REF (stmt_info))
3146 return false;
3147
3148 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
3149 {
3150 if (vect_print_dump_info (REPORT_DETAILS))
3151 fprintf (vect_dump, "negative step for store.");
3152 return false;
3153 }
3154
3155 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3156 {
3157 strided_store = true;
3158 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3159 if (!vect_strided_store_supported (vectype)
3160 && !PURE_SLP_STMT (stmt_info) && !slp)
3161 return false;
3162
3163 if (first_stmt == stmt)
3164 {
3165 /* STMT is the leader of the group. Check the operands of all the
3166 stmts of the group. */
3167 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3168 while (next_stmt)
3169 {
3170 gcc_assert (gimple_assign_single_p (next_stmt));
3171 op = gimple_assign_rhs1 (next_stmt);
3172 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3173 &def, &dt))
3174 {
3175 if (vect_print_dump_info (REPORT_DETAILS))
3176 fprintf (vect_dump, "use not simple.");
3177 return false;
3178 }
3179 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3180 }
3181 }
3182 }
3183
3184 if (!vec_stmt) /* transformation not required. */
3185 {
3186 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3187 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3188 return true;
3189 }
3190
3191 /** Transform. **/
3192
3193 if (strided_store)
3194 {
3195 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3196 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3197
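  /* Record that one more store of this interleaving group has been
     processed; the group is vectorized only when its last store is
     reached (checked below).  */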
3198 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3199
3200 /* FORNOW */
3201 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3202
3203 /* We vectorize all the stmts of the interleaving group when we
3204 reach the last stmt in the group. */
3205 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3206 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3207 && !slp)
3208 {
3209 *vec_stmt = NULL;
3210 return true;
3211 }
3212
3213 if (slp)
3214 {
3215 strided_store = false;
3216 /* VEC_NUM is the number of vect stmts to be created for this
3217 group. */
3218 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3219 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3220 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3221 }
3222 else
3223 /* VEC_NUM is the number of vect stmts to be created for this
3224 group. */
3225 vec_num = group_size;
3226 }
3227 else
3228 {
3229 first_stmt = stmt;
3230 first_dr = dr;
3231 group_size = vec_num = 1;
3232 }
3233
3234 if (vect_print_dump_info (REPORT_DETAILS))
3235 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3236
3237 dr_chain = VEC_alloc (tree, heap, group_size);
3238 oprnds = VEC_alloc (tree, heap, group_size);
3239
3240 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3241 gcc_assert (alignment_support_scheme);
3242
3243 /* In case the vectorization factor (VF) is bigger than the number
3244 of elements that we can fit in a vectype (nunits), we have to generate
3245 more than one vector stmt - i.e - we need to "unroll" the
3246 vector stmt by a factor VF/nunits. For more details see documentation in
3247 vect_get_vec_def_for_copy_stmt. */
3248
3249 /* In case of interleaving (non-unit strided access):
3250
3251 S1: &base + 2 = x2
3252 S2: &base = x0
3253 S3: &base + 1 = x1
3254 S4: &base + 3 = x3
3255
3256 We create vectorized stores starting from the base address (the access of
3257 the first stmt in the chain, S2 in the above example), when the last store stmt
3258 of the chain (S4) is reached:
3259
3260 VS1: &base = vx2
3261 VS2: &base + vec_size*1 = vx0
3262 VS3: &base + vec_size*2 = vx1
3263 VS4: &base + vec_size*3 = vx3
3264
3265 Then permutation statements are generated:
3266
3267 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3268 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3269 ...
3270
3271 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3272 (the order of the data-refs in the output of vect_permute_store_chain
3273 corresponds to the order of scalar stmts in the interleaving chain - see
3274 the documentation of vect_permute_store_chain()).
3275
3276 In case of both multiple types and interleaving, above vector stores and
3277 permutation stmts are created for every copy. The result vector stmts are
3278 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3279 STMT_VINFO_RELATED_STMT for the next copies.
3280 */
3281
3282 prev_stmt_info = NULL;
3283 for (j = 0; j < ncopies; j++)
3284 {
3285 gimple new_stmt;
3286 gimple ptr_incr;
3287
3288 if (j == 0)
3289 {
3290 if (slp)
3291 {
3292 /* Get vectorized arguments for SLP_NODE. */
3293 vect_get_slp_defs (slp_node, &vec_oprnds, NULL, -1);
3294
3295 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3296 }
3297 else
3298 {
3299 /* For interleaved stores we collect vectorized defs for all the
3300 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3301 used as an input to vect_permute_store_chain(), and OPRNDS as
3302 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3303
3304 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3305 OPRNDS are of size 1. */
3306 next_stmt = first_stmt;
3307 for (i = 0; i < group_size; i++)
3308 {
3309 /* Since gaps are not supported for interleaved stores,
3310 GROUP_SIZE is the exact number of stmts in the chain.
3311 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3312 there is no interleaving, GROUP_SIZE is 1, and only one
3313 iteration of the loop will be executed. */
3314 gcc_assert (next_stmt
3315 && gimple_assign_single_p (next_stmt));
3316 op = gimple_assign_rhs1 (next_stmt);
3317
3318 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3319 NULL);
3320 VEC_quick_push(tree, dr_chain, vec_oprnd);
3321 VEC_quick_push(tree, oprnds, vec_oprnd);
3322 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3323 }
3324 }
3325
3326 /* We should have caught mismatched types earlier. */
3327 gcc_assert (useless_type_conversion_p (vectype,
3328 TREE_TYPE (vec_oprnd)));
3329 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3330 &dummy, &ptr_incr, false,
3331 &inv_p);
3332 gcc_assert (bb_vinfo || !inv_p);
3333 }
3334 else
3335 {
3336 /* For interleaved stores we created vectorized defs for all the
3337 defs stored in OPRNDS in the previous iteration (previous copy).
3338 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3339 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3340 next copy.
3341 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3342 OPRNDS are of size 1. */
3343 for (i = 0; i < group_size; i++)
3344 {
3345 op = VEC_index (tree, oprnds, i);
3346 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3347 &dt);
3348 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3349 VEC_replace(tree, dr_chain, i, vec_oprnd);
3350 VEC_replace(tree, oprnds, i, vec_oprnd);
3351 }
3352 dataref_ptr =
3353 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3354 }
3355
3356 if (strided_store)
3357 {
3358 result_chain = VEC_alloc (tree, heap, group_size);
3359 /* Permute. */
3360 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3361 &result_chain))
3362 return false;
3363 }
3364
3365 next_stmt = first_stmt;
3366 for (i = 0; i < vec_num; i++)
3367 {
3368 struct ptr_info_def *pi;
3369
3370 if (i > 0)
3371 /* Bump the vector pointer. */
3372 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3373 NULL_TREE);
3374
3375 if (slp)
3376 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3377 else if (strided_store)
3378 /* For strided stores vectorized defs are interleaved in
3379 vect_permute_store_chain(). */
3380 vec_oprnd = VEC_index (tree, result_chain, i);
3381
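  /* Build the memory reference for this vector store and record the
     pointer alignment: a known-aligned access keeps the full vector
     alignment, an access with unknown misalignment is demoted to element
     alignment, and a known misalignment is recorded in the pointer info.  */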
3382 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
3383 build_int_cst (reference_alias_ptr_type
3384 (DR_REF (first_dr)), 0));
3385 pi = get_ptr_info (dataref_ptr);
3386 pi->align = TYPE_ALIGN_UNIT (vectype);
3387 if (aligned_access_p (first_dr))
3388 pi->misalign = 0;
3389 else if (DR_MISALIGNMENT (first_dr) == -1)
3390 {
3391 TREE_TYPE (data_ref)
3392 = build_aligned_type (TREE_TYPE (data_ref),
3393 TYPE_ALIGN (TREE_TYPE (vectype)));
3394 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
3395 pi->misalign = 0;
3396 }
3397 else
3398 {
3399 TREE_TYPE (data_ref)
3400 = build_aligned_type (TREE_TYPE (data_ref),
3401 TYPE_ALIGN (TREE_TYPE (vectype)));
3402 pi->misalign = DR_MISALIGNMENT (first_dr);
3403 }
3404
3405 /* Arguments are ready. Create the new vector stmt. */
3406 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3407 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3408 mark_symbols_for_renaming (new_stmt);
3409
3410 if (slp)
3411 continue;
3412
3413 if (j == 0)
3414 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3415 else
3416 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3417
3418 prev_stmt_info = vinfo_for_stmt (new_stmt);
3419 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3420 if (!next_stmt)
3421 break;
3422 }
3423 }
3424
3425 VEC_free (tree, heap, dr_chain);
3426 VEC_free (tree, heap, oprnds);
3427 if (result_chain)
3428 VEC_free (tree, heap, result_chain);
3429 if (vec_oprnds)
3430 VEC_free (tree, heap, vec_oprnds);
3431
3432 return true;
3433 }
3434
3435 /* Given a vector type VECTYPE, return a builtin DECL to be used
3436 for vector permutation, and store a mask into *MASK that implements
3437 reversal of the vector elements. If that is impossible to do,
3438 return NULL (and leave *MASK unchanged). */
3439
3440 static tree
3441 perm_mask_for_reverse (tree vectype, tree *mask)
3442 {
3443 tree builtin_decl;
3444 tree mask_element_type, mask_type;
3445 tree mask_vec = NULL;
3446 int i;
3447 int nunits;
3448 if (!targetm.vectorize.builtin_vec_perm)
3449 return NULL;
3450
3451 builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
3452 &mask_element_type);
3453 if (!builtin_decl || !mask_element_type)
3454 return NULL;
3455
3456 mask_type = get_vectype_for_scalar_type (mask_element_type);
3457 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3458 if (!mask_type
3459 || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
3460 return NULL;
3461
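  /* Build the permutation mask {nunits-1, ..., 1, 0}: tree_cons prepends,
     so pushing the constants 0 .. nunits-1 in increasing order leaves them
     reversed in the final mask vector.  */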
3462 for (i = 0; i < nunits; i++)
3463 mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
3464 mask_vec = build_vector (mask_type, mask_vec);
3465
3466 if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
3467 return NULL;
3468 if (mask)
3469 *mask = mask_vec;
3470 return builtin_decl;
3471 }
3472
3473 /* Given a vector variable X that was generated for the scalar LHS of
3474 STMT, generate instructions to reverse the vector elements of X,
3475 insert them at *GSI and return the permuted vector variable. */
3476
3477 static tree
3478 reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
3479 {
3480 tree vectype = TREE_TYPE (x);
3481 tree mask_vec, builtin_decl;
3482 tree perm_dest, data_ref;
3483 gimple perm_stmt;
3484
3485 builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);
3486
3487 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3488
3489 /* Generate the permute statement. */
3490 perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
3491 data_ref = make_ssa_name (perm_dest, perm_stmt);
3492 gimple_call_set_lhs (perm_stmt, data_ref);
3493 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3494
3495 return data_ref;
3496 }
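/* Illustrative sketch (hypothetical SSA names): the statement generated above
   has roughly the shape
      vect_perm.7 = <target vec_perm builtin> (x, x, mask);
   and the returned SSA name is then used in place of the forward-order
   vector value.  */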
3497
3498 /* vectorizable_load.
3499
3500 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3501 can be vectorized.
3502 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3503 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3504 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3505
3506 static bool
3507 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3508 slp_tree slp_node, slp_instance slp_node_instance)
3509 {
3510 tree scalar_dest;
3511 tree vec_dest = NULL;
3512 tree data_ref = NULL;
3513 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3514 stmt_vec_info prev_stmt_info;
3515 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3516 struct loop *loop = NULL;
3517 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3518 bool nested_in_vect_loop = false;
3519 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3520 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3521 tree new_temp;
3522 enum machine_mode mode;
3523 gimple new_stmt = NULL;
3524 tree dummy;
3525 enum dr_alignment_support alignment_support_scheme;
3526 tree dataref_ptr = NULL_TREE;
3527 gimple ptr_incr;
3528 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3529 int ncopies;
3530 int i, j, group_size;
3531 tree msq = NULL_TREE, lsq;
3532 tree offset = NULL_TREE;
3533 tree realignment_token = NULL_TREE;
3534 gimple phi = NULL;
3535 VEC(tree,heap) *dr_chain = NULL;
3536 bool strided_load = false;
3537 gimple first_stmt;
3538 tree scalar_type;
3539 bool inv_p;
3540 bool negative;
3541 bool compute_in_loop = false;
3542 struct loop *at_loop;
3543 int vec_num;
3544 bool slp = (slp_node != NULL);
3545 bool slp_perm = false;
3546 enum tree_code code;
3547 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3548 int vf;
3549
3550 if (loop_vinfo)
3551 {
3552 loop = LOOP_VINFO_LOOP (loop_vinfo);
3553 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3554 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3555 }
3556 else
3557 vf = 1;
3558
3559 /* Multiple types in SLP are handled by creating the appropriate number of
3560 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3561 case of SLP. */
3562 if (slp)
3563 ncopies = 1;
3564 else
3565 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3566
3567 gcc_assert (ncopies >= 1);
3568
3569 /* FORNOW. This restriction should be relaxed. */
3570 if (nested_in_vect_loop && ncopies > 1)
3571 {
3572 if (vect_print_dump_info (REPORT_DETAILS))
3573 fprintf (vect_dump, "multiple types in nested loop.");
3574 return false;
3575 }
3576
3577 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3578 return false;
3579
3580 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3581 return false;
3582
3583 /* Is vectorizable load? */
3584 if (!is_gimple_assign (stmt))
3585 return false;
3586
3587 scalar_dest = gimple_assign_lhs (stmt);
3588 if (TREE_CODE (scalar_dest) != SSA_NAME)
3589 return false;
3590
3591 code = gimple_assign_rhs_code (stmt);
3592 if (code != ARRAY_REF
3593 && code != INDIRECT_REF
3594 && code != COMPONENT_REF
3595 && code != IMAGPART_EXPR
3596 && code != REALPART_EXPR
3597 && code != MEM_REF)
3598 return false;
3599
3600 if (!STMT_VINFO_DATA_REF (stmt_info))
3601 return false;
3602
3603 negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
3604 if (negative && ncopies > 1)
3605 {
3606 if (vect_print_dump_info (REPORT_DETAILS))
3607 fprintf (vect_dump, "multiple types with negative step.");
3608 return false;
3609 }
3610
3611 scalar_type = TREE_TYPE (DR_REF (dr));
3612 mode = TYPE_MODE (vectype);
3613
3614 /* FORNOW. In some cases we can vectorize even if the data-type is not
3615 supported (e.g. - data copies). */
3616 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
3617 {
3618 if (vect_print_dump_info (REPORT_DETAILS))
3619 fprintf (vect_dump, "Aligned load, but unsupported type.");
3620 return false;
3621 }
3622
3623 /* The vector component type needs to be trivially convertible to the
3624 scalar lhs. This should always be the case. */
3625 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3626 {
3627 if (vect_print_dump_info (REPORT_DETAILS))
3628 fprintf (vect_dump, "??? operands of different types");
3629 return false;
3630 }
3631
3632 /* Check if the load is a part of an interleaving chain. */
3633 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3634 {
3635 strided_load = true;
3636 /* FORNOW */
3637 gcc_assert (! nested_in_vect_loop);
3638
3639 /* Check if interleaving is supported. */
3640 if (!vect_strided_load_supported (vectype)
3641 && !PURE_SLP_STMT (stmt_info) && !slp)
3642 return false;
3643 }
3644
3645 if (negative)
3646 {
3647 gcc_assert (!strided_load);
3648 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
3649 if (alignment_support_scheme != dr_aligned
3650 && alignment_support_scheme != dr_unaligned_supported)
3651 {
3652 if (vect_print_dump_info (REPORT_DETAILS))
3653 fprintf (vect_dump, "negative step but alignment required.");
3654 return false;
3655 }
3656 if (!perm_mask_for_reverse (vectype, NULL))
3657 {
3658 if (vect_print_dump_info (REPORT_DETAILS))
3659 fprintf (vect_dump, "negative step and reversing not supported.");
3660 return false;
3661 }
3662 }
3663
3664 if (!vec_stmt) /* transformation not required. */
3665 {
3666 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3667 vect_model_load_cost (stmt_info, ncopies, NULL);
3668 return true;
3669 }
3670
3671 if (vect_print_dump_info (REPORT_DETAILS))
3672 fprintf (vect_dump, "transform load.");
3673
3674 /** Transform. **/
3675
3676 if (strided_load)
3677 {
3678 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3679 /* Check if the chain of loads is already vectorized. */
3680 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3681 {
3682 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3683 return true;
3684 }
3685 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3686 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3687
3688 /* VEC_NUM is the number of vect stmts to be created for this group. */
3689 if (slp)
3690 {
3691 strided_load = false;
3692 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3693 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3694 slp_perm = true;
3695 }
3696 else
3697 vec_num = group_size;
3698
3699 dr_chain = VEC_alloc (tree, heap, vec_num);
3700 }
3701 else
3702 {
3703 first_stmt = stmt;
3704 first_dr = dr;
3705 group_size = vec_num = 1;
3706 }
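  /* Illustrative (hypothetical sizes): for a non-SLP interleaved load group
     of size 4 the code above keeps strided_load true and sets
     vec_num = group_size = 4, so four vector loads are emitted per copy and
     later permuted by vect_transform_strided_load (); a plain load gets
     vec_num = 1.  */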
3707
3708 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3709 gcc_assert (alignment_support_scheme);
3710
3711 /* In case the vectorization factor (VF) is bigger than the number
3712 of elements that we can fit in a vectype (nunits), we have to generate
3713 more than one vector stmt - i.e - we need to "unroll" the
3714 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3715 from one copy of the vector stmt to the next, in the field
3716 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3717 stages to find the correct vector defs to be used when vectorizing
3718 stmts that use the defs of the current stmt. The example below
3719 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
3720 need to create 4 vectorized stmts):
3721
3722 before vectorization:
3723 RELATED_STMT VEC_STMT
3724 S1: x = memref - -
3725 S2: z = x + 1 - -
3726
3727 step 1: vectorize stmt S1:
3728 We first create the vector stmt VS1_0, and, as usual, record a
3729 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3730 Next, we create the vector stmt VS1_1, and record a pointer to
3731 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3732 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3733 stmts and pointers:
3734 RELATED_STMT VEC_STMT
3735 VS1_0: vx0 = memref0 VS1_1 -
3736 VS1_1: vx1 = memref1 VS1_2 -
3737 VS1_2: vx2 = memref2 VS1_3 -
3738 VS1_3: vx3 = memref3 - -
3739 S1: x = memref - VS1_0
3740 S2: z = x + 1 - -
3741
3742 See the documentation of vect_get_vec_def_for_stmt_copy for how the
3743 information we recorded in the RELATED_STMT field is used to vectorize
3744 stmt S2. */
3745
3746 /* In case of interleaving (non-unit strided access):
3747
3748 S1: x2 = &base + 2
3749 S2: x0 = &base
3750 S3: x1 = &base + 1
3751 S4: x3 = &base + 3
3752
3753 Vectorized loads are created in the order of memory accesses
3754 starting from the access of the first stmt of the chain:
3755
3756 VS1: vx0 = &base
3757 VS2: vx1 = &base + vec_size*1
3758 VS3: vx2 = &base + vec_size*2
3759 VS4: vx3 = &base + vec_size*3
3760
3761 Then permutation statements are generated:
3762
3763 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3764 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3765 ...
3766
3767 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3768 (the order of the data-refs in the output of vect_permute_load_chain
3769 corresponds to the order of scalar stmts in the interleaving chain - see
3770 the documentation of vect_permute_load_chain()).
3771 The generation of permutation stmts and recording them in
3772 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3773
3774 In case of both multiple types and interleaving, the vector loads and
3775 permutation stmts above are created for every copy. The result vector
3776 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
3777 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
3778
3779 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3780 on a target that supports unaligned accesses (dr_unaligned_supported)
3781 we generate the following code:
3782 p = initial_addr;
3783 indx = 0;
3784 loop {
3785 p = p + indx * vectype_size;
3786 vec_dest = *(p);
3787 indx = indx + 1;
3788 }
3789
3790 Otherwise, the data reference is potentially unaligned on a target that
3791 does not support unaligned accesses (dr_explicit_realign_optimized) -
3792 then generate the following code, in which the data in each iteration is
3793 obtained by two vector loads, one from the previous iteration, and one
3794 from the current iteration:
3795 p1 = initial_addr;
3796 msq_init = *(floor(p1))
3797 p2 = initial_addr + VS - 1;
3798 realignment_token = call target_builtin;
3799 indx = 0;
3800 loop {
3801 p2 = p2 + indx * vectype_size
3802 lsq = *(floor(p2))
3803 vec_dest = realign_load (msq, lsq, realignment_token)
3804 indx = indx + 1;
3805 msq = lsq;
3806 } */
3807
3808 /* If the misalignment remains the same throughout the execution of the
3809 loop, we can create the init_addr and permutation mask at the loop
3810 preheader. Otherwise, they need to be created inside the loop.
3811 This can only occur when vectorizing memory accesses in the inner-loop
3812 nested within an outer-loop that is being vectorized. */
3813
3814 if (loop && nested_in_vect_loop_p (loop, stmt)
3815 && (TREE_INT_CST_LOW (DR_STEP (dr))
3816 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3817 {
3818 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3819 compute_in_loop = true;
3820 }
3821
3822 if ((alignment_support_scheme == dr_explicit_realign_optimized
3823 || alignment_support_scheme == dr_explicit_realign)
3824 && !compute_in_loop)
3825 {
3826 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3827 alignment_support_scheme, NULL_TREE,
3828 &at_loop);
3829 if (alignment_support_scheme == dr_explicit_realign_optimized)
3830 {
3831 phi = SSA_NAME_DEF_STMT (msq);
3832 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3833 }
3834 }
3835 else
3836 at_loop = loop;
3837
3838 if (negative)
3839 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
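  /* Illustrative sketch (nunits == 4, scalar step -1): the offset above makes
     the vector pointer start nunits - 1 elements before the current access,
     so the load reads {a[i-3], a[i-2], a[i-1], a[i]} and reverse_vec_elements
     later restores the scalar order {a[i], a[i-1], a[i-2], a[i-3]}.  */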
3840
3841 prev_stmt_info = NULL;
3842 for (j = 0; j < ncopies; j++)
3843 {
3844 /* 1. Create the vector pointer update chain. */
3845 if (j == 0)
3846 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
3847 at_loop, offset,
3848 &dummy, &ptr_incr, false,
3849 &inv_p);
3850 else
3851 dataref_ptr =
3852 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3853
3854 for (i = 0; i < vec_num; i++)
3855 {
3856 if (i > 0)
3857 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3858 NULL_TREE);
3859
3860 /* 2. Create the vector-load in the loop. */
3861 switch (alignment_support_scheme)
3862 {
3863 case dr_aligned:
3864 case dr_unaligned_supported:
3865 {
3866 struct ptr_info_def *pi;
3867 data_ref
3868 = build2 (MEM_REF, vectype, dataref_ptr,
3869 build_int_cst (reference_alias_ptr_type
3870 (DR_REF (first_dr)), 0));
3871 pi = get_ptr_info (dataref_ptr);
3872 pi->align = TYPE_ALIGN_UNIT (vectype);
3873 if (alignment_support_scheme == dr_aligned)
3874 {
3875 gcc_assert (aligned_access_p (first_dr));
3876 pi->misalign = 0;
3877 }
3878 else if (DR_MISALIGNMENT (first_dr) == -1)
3879 {
3880 TREE_TYPE (data_ref)
3881 = build_aligned_type (TREE_TYPE (data_ref),
3882 TYPE_ALIGN (TREE_TYPE (vectype)));
3883 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
3884 pi->misalign = 0;
3885 }
3886 else
3887 {
3888 TREE_TYPE (data_ref)
3889 = build_aligned_type (TREE_TYPE (data_ref),
3890 TYPE_ALIGN (TREE_TYPE (vectype)));
3891 pi->misalign = DR_MISALIGNMENT (first_dr);
3892 }
3893 break;
3894 }
3895 case dr_explicit_realign:
3896 {
3897 tree ptr, bump;
3898 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3899
3900 if (compute_in_loop)
3901 msq = vect_setup_realignment (first_stmt, gsi,
3902 &realignment_token,
3903 dr_explicit_realign,
3904 dataref_ptr, NULL);
3905
3906 new_stmt = gimple_build_assign_with_ops
3907 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
3908 build_int_cst
3909 (TREE_TYPE (dataref_ptr),
3910 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3911 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
3912 gimple_assign_set_lhs (new_stmt, ptr);
3913 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3914 data_ref
3915 = build2 (MEM_REF, vectype, ptr,
3916 build_int_cst (reference_alias_ptr_type
3917 (DR_REF (first_dr)), 0));
3918 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3919 new_stmt = gimple_build_assign (vec_dest, data_ref);
3920 new_temp = make_ssa_name (vec_dest, new_stmt);
3921 gimple_assign_set_lhs (new_stmt, new_temp);
3922 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3923 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
3924 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3925 msq = new_temp;
3926
3927 bump = size_binop (MULT_EXPR, vs_minus_1,
3928 TYPE_SIZE_UNIT (scalar_type));
3929 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3930 new_stmt = gimple_build_assign_with_ops
3931 (BIT_AND_EXPR, NULL_TREE, ptr,
3932 build_int_cst
3933 (TREE_TYPE (ptr),
3934 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3935 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
3936 gimple_assign_set_lhs (new_stmt, ptr);
3937 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3938 data_ref
3939 = build2 (MEM_REF, vectype, ptr,
3940 build_int_cst (reference_alias_ptr_type
3941 (DR_REF (first_dr)), 0));
3942 break;
3943 }
3944 case dr_explicit_realign_optimized:
3945 new_stmt = gimple_build_assign_with_ops
3946 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
3947 build_int_cst
3948 (TREE_TYPE (dataref_ptr),
3949 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3950 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
3951 gimple_assign_set_lhs (new_stmt, new_temp);
3952 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3953 data_ref
3954 = build2 (MEM_REF, vectype, new_temp,
3955 build_int_cst (reference_alias_ptr_type
3956 (DR_REF (first_dr)), 0));
3957 break;
3958 default:
3959 gcc_unreachable ();
3960 }
3961 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3962 new_stmt = gimple_build_assign (vec_dest, data_ref);
3963 new_temp = make_ssa_name (vec_dest, new_stmt);
3964 gimple_assign_set_lhs (new_stmt, new_temp);
3965 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3966 mark_symbols_for_renaming (new_stmt);
3967
3968 /* 3. Handle explicit realignment if necessary/supported. Create in
3969 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3970 if (alignment_support_scheme == dr_explicit_realign_optimized
3971 || alignment_support_scheme == dr_explicit_realign)
3972 {
3973 tree tmp;
3974
3975 lsq = gimple_assign_lhs (new_stmt);
3976 if (!realignment_token)
3977 realignment_token = dataref_ptr;
3978 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3979 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3980 realignment_token);
3981 new_stmt = gimple_build_assign (vec_dest, tmp);
3982 new_temp = make_ssa_name (vec_dest, new_stmt);
3983 gimple_assign_set_lhs (new_stmt, new_temp);
3984 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3985
3986 if (alignment_support_scheme == dr_explicit_realign_optimized)
3987 {
3988 gcc_assert (phi);
3989 if (i == vec_num - 1 && j == ncopies - 1)
3990 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
3991 UNKNOWN_LOCATION);
3992 msq = lsq;
3993 }
3994 }
3995
3996 /* 4. Handle invariant-load. */
3997 if (inv_p && !bb_vinfo)
3998 {
3999 gcc_assert (!strided_load);
4000 gcc_assert (nested_in_vect_loop_p (loop, stmt));
4001 if (j == 0)
4002 {
4003 int k;
4004 tree t = NULL_TREE;
4005 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
4006
4007 /* CHECKME: bitpos depends on endianness? */
4008 bitpos = bitsize_zero_node;
4009 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
4010 bitsize, bitpos);
4011 vec_dest =
4012 vect_create_destination_var (scalar_dest, NULL_TREE);
4013 new_stmt = gimple_build_assign (vec_dest, vec_inv);
4014 new_temp = make_ssa_name (vec_dest, new_stmt);
4015 gimple_assign_set_lhs (new_stmt, new_temp);
4016 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4017
4018 for (k = nunits - 1; k >= 0; --k)
4019 t = tree_cons (NULL_TREE, new_temp, t);
4020 /* FIXME: use build_constructor directly. */
4021 vec_inv = build_constructor_from_list (vectype, t);
4022 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
4023 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4024 }
4025 else
4026 gcc_unreachable (); /* FORNOW. */
4027 }
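	      /* Illustrative sketch of the invariant-load handling above
		 (nunits == 4, hypothetical SSA names):
		    s.1 = BIT_FIELD_REF <vx.2, bitsize, 0>;
		    vinv.3 = { s.1, s.1, s.1, s.1 };
		 i.e. the invariant scalar is extracted once and replicated
		 into every lane of the vector.  */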
4028
4029 if (negative)
4030 {
4031 new_temp = reverse_vec_elements (new_temp, stmt, gsi);
4032 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4033 }
4034
4035 /* Collect vector loads and later create their permutation in
4036 vect_transform_strided_load (). */
4037 if (strided_load || slp_perm)
4038 VEC_quick_push (tree, dr_chain, new_temp);
4039
4040 /* Store vector loads in the corresponding SLP_NODE. */
4041 if (slp && !slp_perm)
4042 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4043 }
4044
4045 if (slp && !slp_perm)
4046 continue;
4047
4048 if (slp_perm)
4049 {
4050 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
4051 slp_node_instance, false))
4052 {
4053 VEC_free (tree, heap, dr_chain);
4054 return false;
4055 }
4056 }
4057 else
4058 {
4059 if (strided_load)
4060 {
4061 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
4062 return false;
4063
4064 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4065 VEC_free (tree, heap, dr_chain);
4066 dr_chain = VEC_alloc (tree, heap, group_size);
4067 }
4068 else
4069 {
4070 if (j == 0)
4071 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4072 else
4073 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4074 prev_stmt_info = vinfo_for_stmt (new_stmt);
4075 }
4076 }
4077 }
4078
4079 if (dr_chain)
4080 VEC_free (tree, heap, dr_chain);
4081
4082 return true;
4083 }
4084
4085 /* Function vect_is_simple_cond.
4086
4087 Input:
4088 LOOP - the loop that is being vectorized.
4089 COND - Condition that is checked for simple use.
4090
4091 Returns whether a COND can be vectorized. Checks whether
4092 condition operands are supportable using vect_is_simple_use. */
4093
4094 static bool
4095 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
4096 {
4097 tree lhs, rhs;
4098 tree def;
4099 enum vect_def_type dt;
4100
4101 if (!COMPARISON_CLASS_P (cond))
4102 return false;
4103
4104 lhs = TREE_OPERAND (cond, 0);
4105 rhs = TREE_OPERAND (cond, 1);
4106
4107 if (TREE_CODE (lhs) == SSA_NAME)
4108 {
4109 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
4110 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
4111 &dt))
4112 return false;
4113 }
4114 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
4115 && TREE_CODE (lhs) != FIXED_CST)
4116 return false;
4117
4118 if (TREE_CODE (rhs) == SSA_NAME)
4119 {
4120 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
4121 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
4122 &dt))
4123 return false;
4124 }
4125 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
4126 && TREE_CODE (rhs) != FIXED_CST)
4127 return false;
4128
4129 return true;
4130 }
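/* Illustrative (hypothetical loop body): for
      tmp = b[i] < limit ? x : y;
   the condition b[i] < limit is "simple" when the SSA name holding b[i] has
   a vectorizable definition (checked via vect_is_simple_use) and limit is
   either such an SSA name or an integer/real/fixed-point constant.  */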
4131
4132 /* vectorizable_condition.
4133
4134 Check if STMT is conditional modify expression that can be vectorized.
4135 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4136 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4137 at GSI.
4138
4139 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
4140 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
4141 the else clause if it is 2).
4142
4143 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4144
4145 bool
4146 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4147 gimple *vec_stmt, tree reduc_def, int reduc_index)
4148 {
4149 tree scalar_dest = NULL_TREE;
4150 tree vec_dest = NULL_TREE;
4151 tree op = NULL_TREE;
4152 tree cond_expr, then_clause, else_clause;
4153 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4154 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4155 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
4156 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
4157 tree vec_compare, vec_cond_expr;
4158 tree new_temp;
4159 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4160 enum machine_mode vec_mode;
4161 tree def;
4162 enum vect_def_type dt, dts[4];
4163 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4164 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4165 enum tree_code code;
4166 stmt_vec_info prev_stmt_info = NULL;
4167 int j;
4168
4169 /* FORNOW: unsupported in basic block SLP. */
4170 gcc_assert (loop_vinfo);
4171
4172 gcc_assert (ncopies >= 1);
4173 if (reduc_index && ncopies > 1)
4174 return false; /* FORNOW */
4175
4176 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4177 return false;
4178
4179 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4180 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4181 && reduc_def))
4182 return false;
4183
4184 /* FORNOW: SLP not supported. */
4185 if (STMT_SLP_TYPE (stmt_info))
4186 return false;
4187
4188 /* FORNOW: not yet supported. */
4189 if (STMT_VINFO_LIVE_P (stmt_info))
4190 {
4191 if (vect_print_dump_info (REPORT_DETAILS))
4192 fprintf (vect_dump, "value used after loop.");
4193 return false;
4194 }
4195
4196 /* Is vectorizable conditional operation? */
4197 if (!is_gimple_assign (stmt))
4198 return false;
4199
4200 code = gimple_assign_rhs_code (stmt);
4201
4202 if (code != COND_EXPR)
4203 return false;
4204
4205 gcc_assert (gimple_assign_single_p (stmt));
4206 op = gimple_assign_rhs1 (stmt);
4207 cond_expr = TREE_OPERAND (op, 0);
4208 then_clause = TREE_OPERAND (op, 1);
4209 else_clause = TREE_OPERAND (op, 2);
4210
4211 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4212 return false;
4213
4214 /* We do not handle two different vector types for the condition
4215 and the values. */
4216 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4217 TREE_TYPE (vectype)))
4218 return false;
4219
4220 if (TREE_CODE (then_clause) == SSA_NAME)
4221 {
4222 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
4223 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
4224 &then_def_stmt, &def, &dt))
4225 return false;
4226 }
4227 else if (TREE_CODE (then_clause) != INTEGER_CST
4228 && TREE_CODE (then_clause) != REAL_CST
4229 && TREE_CODE (then_clause) != FIXED_CST)
4230 return false;
4231
4232 if (TREE_CODE (else_clause) == SSA_NAME)
4233 {
4234 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
4235 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
4236 &else_def_stmt, &def, &dt))
4237 return false;
4238 }
4239 else if (TREE_CODE (else_clause) != INTEGER_CST
4240 && TREE_CODE (else_clause) != REAL_CST
4241 && TREE_CODE (else_clause) != FIXED_CST)
4242 return false;
4243
4244
4245 vec_mode = TYPE_MODE (vectype);
4246
4247 if (!vec_stmt)
4248 {
4249 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
4250 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
4251 }
4252
4253 /* Transform */
4254
4255 /* Handle def. */
4256 scalar_dest = gimple_assign_lhs (stmt);
4257 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4258
4259 /* Handle cond expr. */
4260 for (j = 0; j < ncopies; j++)
4261 {
4262 gimple new_stmt;
4263 if (j == 0)
4264 {
4265 gimple gtemp;
4266 vec_cond_lhs =
4267 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
4268 stmt, NULL);
4269 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
4270 NULL, &gtemp, &def, &dts[0]);
4271 vec_cond_rhs =
4272 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
4273 stmt, NULL);
4274 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
4275 NULL, &gtemp, &def, &dts[1]);
4276 if (reduc_index == 1)
4277 vec_then_clause = reduc_def;
4278 else
4279 {
4280 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
4281 stmt, NULL);
4282 vect_is_simple_use (then_clause, loop_vinfo,
4283 NULL, &gtemp, &def, &dts[2]);
4284 }
4285 if (reduc_index == 2)
4286 vec_else_clause = reduc_def;
4287 else
4288 {
4289 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
4290 stmt, NULL);
4291 vect_is_simple_use (else_clause, loop_vinfo,
4292 NULL, &gtemp, &def, &dts[3]);
4293 }
4294 }
4295 else
4296 {
4297 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
4298 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
4299 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
4300 vec_then_clause);
4301 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
4302 vec_else_clause);
4303 }
4304
4305 /* Arguments are ready. Create the new vector stmt. */
4306 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4307 vec_cond_lhs, vec_cond_rhs);
4308 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4309 vec_compare, vec_then_clause, vec_else_clause);
4310
4311 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4312 new_temp = make_ssa_name (vec_dest, new_stmt);
4313 gimple_assign_set_lhs (new_stmt, new_temp);
4314 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4315 if (j == 0)
4316 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4317 else
4318 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4319
4320 prev_stmt_info = vinfo_for_stmt (new_stmt);
4321 }
4322
4323 return true;
4324 }
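/* Illustrative sketch of the code generated by vectorizable_condition
   (one copy, hypothetical names): a scalar statement
      x = a < b ? c : d;
   becomes
      vect_x = VEC_COND_EXPR <va < vb, vc, vd>;
   with one such vector statement per copy, chained through RELATED_STMT.  */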
4325
4326
4327 /* Make sure the statement is vectorizable. */
4328
4329 bool
4330 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4331 {
4332 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4333 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4334 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4335 bool ok;
4336 tree scalar_type, vectype;
4337
4338 if (vect_print_dump_info (REPORT_DETAILS))
4339 {
4340 fprintf (vect_dump, "==> examining statement: ");
4341 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4342 }
4343
4344 if (gimple_has_volatile_ops (stmt))
4345 {
4346 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4347 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4348
4349 return false;
4350 }
4351
4352 /* Skip stmts that do not need to be vectorized. In loops this is expected
4353 to include:
4354 - the COND_EXPR which is the loop exit condition
4355 - any LABEL_EXPRs in the loop
4356 - computations that are used only for array indexing or loop control.
4357 In basic blocks we only analyze statements that are a part of some SLP
4358 instance, therefore, all the statements are relevant. */
4359
4360 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4361 && !STMT_VINFO_LIVE_P (stmt_info))
4362 {
4363 if (vect_print_dump_info (REPORT_DETAILS))
4364 fprintf (vect_dump, "irrelevant.");
4365
4366 return true;
4367 }
4368
4369 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4370 {
4371 case vect_internal_def:
4372 break;
4373
4374 case vect_reduction_def:
4375 case vect_nested_cycle:
4376 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4377 || relevance == vect_used_in_outer_by_reduction
4378 || relevance == vect_unused_in_scope));
4379 break;
4380
4381 case vect_induction_def:
4382 case vect_constant_def:
4383 case vect_external_def:
4384 case vect_unknown_def_type:
4385 default:
4386 gcc_unreachable ();
4387 }
4388
4389 if (bb_vinfo)
4390 {
4391 gcc_assert (PURE_SLP_STMT (stmt_info));
4392
4393 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4394 if (vect_print_dump_info (REPORT_DETAILS))
4395 {
4396 fprintf (vect_dump, "get vectype for scalar type: ");
4397 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4398 }
4399
4400 vectype = get_vectype_for_scalar_type (scalar_type);
4401 if (!vectype)
4402 {
4403 if (vect_print_dump_info (REPORT_DETAILS))
4404 {
4405 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4406 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4407 }
4408 return false;
4409 }
4410
4411 if (vect_print_dump_info (REPORT_DETAILS))
4412 {
4413 fprintf (vect_dump, "vectype: ");
4414 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4415 }
4416
4417 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4418 }
4419
4420 if (STMT_VINFO_RELEVANT_P (stmt_info))
4421 {
4422 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4423 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4424 *need_to_vectorize = true;
4425 }
4426
4427 ok = true;
4428 if (!bb_vinfo
4429 && (STMT_VINFO_RELEVANT_P (stmt_info)
4430 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4431 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4432 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4433 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4434 || vectorizable_operation (stmt, NULL, NULL, NULL)
4435 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4436 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4437 || vectorizable_call (stmt, NULL, NULL)
4438 || vectorizable_store (stmt, NULL, NULL, NULL)
4439 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4440 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4441 else
4442 {
4443 if (bb_vinfo)
4444 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4445 || vectorizable_assignment (stmt, NULL, NULL, node)
4446 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4447 || vectorizable_store (stmt, NULL, NULL, node));
4448 }
4449
4450 if (!ok)
4451 {
4452 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4453 {
4454 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4455 fprintf (vect_dump, "supported: ");
4456 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4457 }
4458
4459 return false;
4460 }
4461
4462 if (bb_vinfo)
4463 return true;
4464
4465 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
4466 need extra handling, except for vectorizable reductions. */
4467 if (STMT_VINFO_LIVE_P (stmt_info)
4468 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4469 ok = vectorizable_live_operation (stmt, NULL, NULL);
4470
4471 if (!ok)
4472 {
4473 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4474 {
4475 fprintf (vect_dump, "not vectorized: live stmt not ");
4476 fprintf (vect_dump, "supported: ");
4477 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4478 }
4479
4480 return false;
4481 }
4482
4483 if (!PURE_SLP_STMT (stmt_info))
4484 {
4485 /* Groups of strided accesses whose size is not a power of 2 are not
4486 vectorizable yet using loop-vectorization. Therefore, if this stmt
4487 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4488 loop-based vectorized), the loop cannot be vectorized. */
4489 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4490 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4491 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4492 {
4493 if (vect_print_dump_info (REPORT_DETAILS))
4494 {
4495 fprintf (vect_dump, "not vectorized: the size of group "
4496 "of strided accesses is not a power of 2");
4497 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4498 }
4499
4500 return false;
4501 }
4502 }
4503
4504 return true;
4505 }
4506
4507
4508 /* Function vect_transform_stmt.
4509
4510 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4511
4512 bool
4513 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4514 bool *strided_store, slp_tree slp_node,
4515 slp_instance slp_node_instance)
4516 {
4517 bool is_store = false;
4518 gimple vec_stmt = NULL;
4519 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4520 gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
4521 bool done;
4522
4523 switch (STMT_VINFO_TYPE (stmt_info))
4524 {
4525 case type_demotion_vec_info_type:
4526 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4527 gcc_assert (done);
4528 break;
4529
4530 case type_promotion_vec_info_type:
4531 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4532 gcc_assert (done);
4533 break;
4534
4535 case type_conversion_vec_info_type:
4536 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4537 gcc_assert (done);
4538 break;
4539
4540 case induc_vec_info_type:
4541 gcc_assert (!slp_node);
4542 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4543 gcc_assert (done);
4544 break;
4545
4546 case op_vec_info_type:
4547 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4548 gcc_assert (done);
4549 break;
4550
4551 case assignment_vec_info_type:
4552 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4553 gcc_assert (done);
4554 break;
4555
4556 case load_vec_info_type:
4557 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4558 slp_node_instance);
4559 gcc_assert (done);
4560 break;
4561
4562 case store_vec_info_type:
4563 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4564 gcc_assert (done);
4565 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4566 {
4567 /* In case of interleaving, the whole chain is vectorized when the
4568 last store in the chain is reached. Store stmts before the last
4569 one are skipped, and their stmt_vec_info shouldn't be freed
4570 in the meantime. */
4571 *strided_store = true;
4572 if (STMT_VINFO_VEC_STMT (stmt_info))
4573 is_store = true;
4574 }
4575 else
4576 is_store = true;
4577 break;
4578
4579 case condition_vec_info_type:
4580 gcc_assert (!slp_node);
4581 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4582 gcc_assert (done);
4583 break;
4584
4585 case call_vec_info_type:
4586 gcc_assert (!slp_node);
4587 done = vectorizable_call (stmt, gsi, &vec_stmt);
4588 stmt = gsi_stmt (*gsi);
4589 break;
4590
4591 case reduc_vec_info_type:
4592 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
4593 gcc_assert (done);
4594 break;
4595
4596 default:
4597 if (!STMT_VINFO_LIVE_P (stmt_info))
4598 {
4599 if (vect_print_dump_info (REPORT_DETAILS))
4600 fprintf (vect_dump, "stmt not supported.");
4601 gcc_unreachable ();
4602 }
4603 }
4604
4605 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4606 is being vectorized, but outside the immediately enclosing loop. */
4607 if (vec_stmt
4608 && STMT_VINFO_LOOP_VINFO (stmt_info)
4609 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4610 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4611 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4612 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4613 || STMT_VINFO_RELEVANT (stmt_info) ==
4614 vect_used_in_outer_by_reduction))
4615 {
4616 struct loop *innerloop = LOOP_VINFO_LOOP (
4617 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4618 imm_use_iterator imm_iter;
4619 use_operand_p use_p;
4620 tree scalar_dest;
4621 gimple exit_phi;
4622
4623 if (vect_print_dump_info (REPORT_DETAILS))
4624 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4625
4626 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4627 (to be used when vectorizing outer-loop stmts that use the DEF of
4628 STMT). */
4629 if (gimple_code (stmt) == GIMPLE_PHI)
4630 scalar_dest = PHI_RESULT (stmt);
4631 else
4632 scalar_dest = gimple_assign_lhs (stmt);
4633
4634 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4635 {
4636 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4637 {
4638 exit_phi = USE_STMT (use_p);
4639 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4640 }
4641 }
4642 }
4643
4644 /* Handle stmts whose DEF is used outside the loop-nest that is
4645 being vectorized. */
4646 if (STMT_VINFO_LIVE_P (stmt_info)
4647 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4648 {
4649 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4650 gcc_assert (done);
4651 }
4652
4653 if (vec_stmt)
4654 {
4655 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4656 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4657 if (orig_stmt_in_pattern)
4658 {
4659 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4660 /* STMT was inserted by the vectorizer to replace a computation idiom.
4661 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4662 computed this idiom. We need to record a pointer to VEC_STMT in
4663 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4664 documentation of vect_pattern_recog. */
4665 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4666 {
4667 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
4668 == orig_scalar_stmt);
4669 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4670 }
4671 }
4672 }
4673
4674 return is_store;
4675 }
4676
4677
4678 /* Remove a group of stores (for SLP or interleaving) and free their
4679 stmt_vec_info. */
4680
4681 void
4682 vect_remove_stores (gimple first_stmt)
4683 {
4684 gimple next = first_stmt;
4685 gimple tmp;
4686 gimple_stmt_iterator next_si;
4687
4688 while (next)
4689 {
4690 /* Free the attached stmt_vec_info and remove the stmt. */
4691 next_si = gsi_for_stmt (next);
4692 gsi_remove (&next_si, true);
4693 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4694 free_stmt_vec_info (next);
4695 next = tmp;
4696 }
4697 }
4698
4699
4700 /* Function new_stmt_vec_info.
4701
4702 Create and initialize a new stmt_vec_info struct for STMT. */
4703
4704 stmt_vec_info
4705 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4706 bb_vec_info bb_vinfo)
4707 {
4708 stmt_vec_info res;
4709 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4710
4711 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4712 STMT_VINFO_STMT (res) = stmt;
4713 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4714 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4715 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4716 STMT_VINFO_LIVE_P (res) = false;
4717 STMT_VINFO_VECTYPE (res) = NULL;
4718 STMT_VINFO_VEC_STMT (res) = NULL;
4719 STMT_VINFO_VECTORIZABLE (res) = true;
4720 STMT_VINFO_IN_PATTERN_P (res) = false;
4721 STMT_VINFO_RELATED_STMT (res) = NULL;
4722 STMT_VINFO_DATA_REF (res) = NULL;
4723
4724 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4725 STMT_VINFO_DR_OFFSET (res) = NULL;
4726 STMT_VINFO_DR_INIT (res) = NULL;
4727 STMT_VINFO_DR_STEP (res) = NULL;
4728 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4729
4730 if (gimple_code (stmt) == GIMPLE_PHI
4731 && is_loop_header_bb_p (gimple_bb (stmt)))
4732 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4733 else
4734 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4735
4736 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4737 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4738 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4739 STMT_SLP_TYPE (res) = loop_vect;
4740 DR_GROUP_FIRST_DR (res) = NULL;
4741 DR_GROUP_NEXT_DR (res) = NULL;
4742 DR_GROUP_SIZE (res) = 0;
4743 DR_GROUP_STORE_COUNT (res) = 0;
4744 DR_GROUP_GAP (res) = 0;
4745 DR_GROUP_SAME_DR_STMT (res) = NULL;
4746 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4747
4748 return res;
4749 }
4750
4751
4752 /* Create a vector for stmt_vec_info structs. */
4753
4754 void
4755 init_stmt_vec_info_vec (void)
4756 {
4757 gcc_assert (!stmt_vec_info_vec);
4758 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4759 }
4760
4761
4762 /* Free the vector of stmt_vec_info structs. */
4763
4764 void
4765 free_stmt_vec_info_vec (void)
4766 {
4767 gcc_assert (stmt_vec_info_vec);
4768 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4769 }
4770
4771
4772 /* Free stmt vectorization related info. */
4773
4774 void
4775 free_stmt_vec_info (gimple stmt)
4776 {
4777 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4778
4779 if (!stmt_info)
4780 return;
4781
4782 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4783 set_vinfo_for_stmt (stmt, NULL);
4784 free (stmt_info);
4785 }
4786
4787
4788 /* Function get_vectype_for_scalar_type_and_size.
4789
4790 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
4791 by the target. */
4792
4793 static tree
4794 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
4795 {
4796 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
4797 enum machine_mode simd_mode;
4798 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
4799 int nunits;
4800 tree vectype;
4801
4802 if (nbytes == 0)
4803 return NULL_TREE;
4804
4805 /* We can't build a vector type of elements with alignment bigger than
4806 their size. */
4807 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
4808 return NULL_TREE;
4809
4810 /* If we'd build a vector type of elements whose mode precision doesn't
4811 match their type's precision we'll get mismatched types on vector
4812 extracts via BIT_FIELD_REFs. This effectively means we disable
4813 vectorization of bool and/or enum types in some languages. */
4814 if (INTEGRAL_TYPE_P (scalar_type)
4815 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
4816 return NULL_TREE;
4817
4818 if (GET_MODE_CLASS (inner_mode) != MODE_INT
4819 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
4820 return NULL_TREE;
4821
4822 /* If no size was supplied use the mode the target prefers. Otherwise
4823 lookup a vector mode of the specified size. */
4824 if (size == 0)
4825 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
4826 else
4827 simd_mode = mode_for_vector (inner_mode, size / nbytes);
4828 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
4829 if (nunits <= 1)
4830 return NULL_TREE;
4831
4832 vectype = build_vector_type (scalar_type, nunits);
4833 if (vect_print_dump_info (REPORT_DETAILS))
4834 {
4835 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4836 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4837 }
4838
4839 if (!vectype)
4840 return NULL_TREE;
4841
4842 if (vect_print_dump_info (REPORT_DETAILS))
4843 {
4844 fprintf (vect_dump, "vectype: ");
4845 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4846 }
4847
4848 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4849 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4850 {
4851 if (vect_print_dump_info (REPORT_DETAILS))
4852 fprintf (vect_dump, "mode not supported by target.");
4853 return NULL_TREE;
4854 }
4855
4856 return vectype;
4857 }
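/* Illustrative (target-dependent): for a 4-byte integer scalar type and a
   preferred or requested 16-byte SIMD mode, the function above computes
   nunits = 16 / 4 = 4 and builds a 4-element vector type; it returns
   NULL_TREE when, for example, the element's alignment exceeds its size or
   only a single element would fit.  */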
4858
4859 unsigned int current_vector_size;
4860
4861 /* Function get_vectype_for_scalar_type.
4862
4863 Returns the vector type corresponding to SCALAR_TYPE as supported
4864 by the target. */
4865
4866 tree
4867 get_vectype_for_scalar_type (tree scalar_type)
4868 {
4869 tree vectype;
4870 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
4871 current_vector_size);
4872 if (vectype
4873 && current_vector_size == 0)
4874 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
4875 return vectype;
4876 }
4877
4878 /* Function get_same_sized_vectype
4879
4880 Returns a vector type corresponding to SCALAR_TYPE with the same size
4881 as VECTOR_TYPE, if supported by the target.
4882
4883 tree
4884 get_same_sized_vectype (tree scalar_type, tree vector_type)
4885 {
4886 return get_vectype_for_scalar_type_and_size
4887 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
4888 }
4889
4890 /* Function vect_is_simple_use.
4891
4892 Input:
4893 LOOP_VINFO - the vect info of the loop that is being vectorized.
4894 BB_VINFO - the vect info of the basic block that is being vectorized.
4895 OPERAND - operand of a stmt in the loop or bb.
4896 DEF_STMT/DEF - the defining stmt and the value it defines, in case OPERAND is an SSA_NAME.
4897
4898 Returns whether a stmt with OPERAND can be vectorized.
4899 For loops, supportable operands are constants, loop invariants, and operands
4900 that are defined by the current iteration of the loop. Unsupportable
4901 operands are those that are defined by a previous iteration of the loop (as
4902 is the case in reduction/induction computations).
4903 For basic blocks, supportable operands are constants and bb invariants.
4904 For now, operands defined outside the basic block are not supported. */
4905
4906 bool
4907 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
4908 bb_vec_info bb_vinfo, gimple *def_stmt,
4909 tree *def, enum vect_def_type *dt)
4910 {
4911 basic_block bb;
4912 stmt_vec_info stmt_vinfo;
4913 struct loop *loop = NULL;
4914
4915 if (loop_vinfo)
4916 loop = LOOP_VINFO_LOOP (loop_vinfo);
4917
4918 *def_stmt = NULL;
4919 *def = NULL_TREE;
4920
4921 if (vect_print_dump_info (REPORT_DETAILS))
4922 {
4923 fprintf (vect_dump, "vect_is_simple_use: operand ");
4924 print_generic_expr (vect_dump, operand, TDF_SLIM);
4925 }
4926
4927 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4928 {
4929 *dt = vect_constant_def;
4930 return true;
4931 }
4932
4933 if (is_gimple_min_invariant (operand))
4934 {
4935 *def = operand;
4936 *dt = vect_external_def;
4937 return true;
4938 }
4939
4940 if (TREE_CODE (operand) == PAREN_EXPR)
4941 {
4942 if (vect_print_dump_info (REPORT_DETAILS))
4943 fprintf (vect_dump, "non-associatable copy.");
4944 operand = TREE_OPERAND (operand, 0);
4945 }
4946
4947 if (TREE_CODE (operand) != SSA_NAME)
4948 {
4949 if (vect_print_dump_info (REPORT_DETAILS))
4950 fprintf (vect_dump, "not ssa-name.");
4951 return false;
4952 }
4953
4954 *def_stmt = SSA_NAME_DEF_STMT (operand);
4955 if (*def_stmt == NULL)
4956 {
4957 if (vect_print_dump_info (REPORT_DETAILS))
4958 fprintf (vect_dump, "no def_stmt.");
4959 return false;
4960 }
4961
4962 if (vect_print_dump_info (REPORT_DETAILS))
4963 {
4964 fprintf (vect_dump, "def_stmt: ");
4965 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4966 }
4967
4968 /* Empty stmt is expected only in case of a function argument.
4969 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
4970 if (gimple_nop_p (*def_stmt))
4971 {
4972 *def = operand;
4973 *dt = vect_external_def;
4974 return true;
4975 }
4976
4977 bb = gimple_bb (*def_stmt);
4978
4979 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4980 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
4981 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
4982 *dt = vect_external_def;
4983 else
4984 {
4985 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4986 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4987 }
4988
4989 if (*dt == vect_unknown_def_type)
4990 {
4991 if (vect_print_dump_info (REPORT_DETAILS))
4992 fprintf (vect_dump, "Unsupported pattern.");
4993 return false;
4994 }
4995
4996 if (vect_print_dump_info (REPORT_DETAILS))
4997 fprintf (vect_dump, "type of def: %d.",*dt);
4998
4999 switch (gimple_code (*def_stmt))
5000 {
5001 case GIMPLE_PHI:
5002 *def = gimple_phi_result (*def_stmt);
5003 break;
5004
5005 case GIMPLE_ASSIGN:
5006 *def = gimple_assign_lhs (*def_stmt);
5007 break;
5008
5009 case GIMPLE_CALL:
5010 *def = gimple_call_lhs (*def_stmt);
5011 if (*def != NULL)
5012 break;
5013 /* FALLTHRU */
5014 default:
5015 if (vect_print_dump_info (REPORT_DETAILS))
5016 fprintf (vect_dump, "unsupported defining stmt: ");
5017 return false;
5018 }
5019
5020 return true;
5021 }
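/* Illustrative classification (hypothetical loop "a[i] = b[i] + c"):
   an integer constant operand                  -> vect_constant_def,
   the loop-invariant c defined before the loop -> vect_external_def,
   the SSA name holding b[i] loaded in the loop -> vect_internal_def.  */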
5022
5023 /* Function vect_is_simple_use_1.
5024
5025 Same as vect_is_simple_use but also determines the vector operand
5026 type of OPERAND and stores it to *VECTYPE. If the definition of
5027 OPERAND is vect_uninitialized_def, vect_constant_def or
5028 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
5029 is responsible for computing the best suited vector type for the
5030 scalar operand. */
5031
5032 bool
5033 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
5034 bb_vec_info bb_vinfo, gimple *def_stmt,
5035 tree *def, enum vect_def_type *dt, tree *vectype)
5036 {
5037 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
5038 return false;
5039
5040 /* Now get a vector type if the def is internal, otherwise supply
5041 NULL_TREE and leave it up to the caller to figure out a proper
5042 type for the use stmt. */
5043 if (*dt == vect_internal_def
5044 || *dt == vect_induction_def
5045 || *dt == vect_reduction_def
5046 || *dt == vect_double_reduction_def
5047 || *dt == vect_nested_cycle)
5048 {
5049 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
5050 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
5051 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
5052 *vectype = STMT_VINFO_VECTYPE (stmt_info);
5053 gcc_assert (*vectype != NULL_TREE);
5054 }
5055 else if (*dt == vect_uninitialized_def
5056 || *dt == vect_constant_def
5057 || *dt == vect_external_def)
5058 *vectype = NULL_TREE;
5059 else
5060 gcc_unreachable ();
5061
5062 return true;
5063 }
5064
5065
5066 /* Function supportable_widening_operation
5067
5068 Check whether an operation represented by the code CODE is a
5069 widening operation that is supported by the target platform in
5070 vector form (i.e., when operating on arguments of type VECTYPE_IN
5071 producing a result of type VECTYPE_OUT).
5072
5073 Widening operations we currently support are NOP (CONVERT), FLOAT
5074 and WIDEN_MULT. This function checks if these operations are supported
5075 by the target platform either directly (via vector tree-codes), or via
5076 target builtins.
5077
5078 Output:
5079 - CODE1 and CODE2 are codes of vector operations to be used when
5080 vectorizing the operation, if available.
5081 - DECL1 and DECL2 are decls of target builtin functions to be used
5082 when vectorizing the operation, if available. In this case,
5083 CODE1 and CODE2 are CALL_EXPR.
5084 - MULTI_STEP_CVT determines the number of required intermediate steps in
5085 case of multi-step conversion (like char->short->int - in that case
5086 MULTI_STEP_CVT will be 1).
5087 - INTERM_TYPES contains the intermediate type required to perform the
5088 widening operation (short in the above example). */
5089
5090 bool
5091 supportable_widening_operation (enum tree_code code, gimple stmt,
5092 tree vectype_out, tree vectype_in,
5093 tree *decl1, tree *decl2,
5094 enum tree_code *code1, enum tree_code *code2,
5095 int *multi_step_cvt,
5096 VEC (tree, heap) **interm_types)
5097 {
5098 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5099 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
5100 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
5101 bool ordered_p;
5102 enum machine_mode vec_mode;
5103 enum insn_code icode1, icode2;
5104 optab optab1, optab2;
5105 tree vectype = vectype_in;
5106 tree wide_vectype = vectype_out;
5107 enum tree_code c1, c2;
5108
5109 /* The result of a vectorized widening operation usually requires two vectors
5110 (because the widened results do not fit in one vector). The generated
5111 vector results would normally be expected to be generated in the same
5112 order as in the original scalar computation, i.e. if 8 results are
5113 generated in each vector iteration, they are to be organized as follows:
5114 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
5115
5116 However, in the special case that the result of the widening operation is
5117 used in a reduction computation only, the order doesn't matter (because
5118 when vectorizing a reduction we change the order of the computation).
5119 Some targets can take advantage of this and generate more efficient code.
5120 For example, targets like Altivec, that support widen_mult using a sequence
5121 of {mult_even,mult_odd} generate the following vectors:
5122 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
5123
5124 When vectorizing outer-loops, we execute the inner-loop sequentially
5125 (each vectorized inner-loop iteration contributes to VF outer-loop
5126 iterations in parallel). We therefore don't allow changing the order
5127 of the computation in the inner-loop during outer-loop vectorization. */
5128
5129 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
5130 && !nested_in_vect_loop_p (vect_loop, stmt))
5131 ordered_p = false;
5132 else
5133 ordered_p = true;
5134
5135 if (!ordered_p
5136 && code == WIDEN_MULT_EXPR
5137 && targetm.vectorize.builtin_mul_widen_even
5138 && targetm.vectorize.builtin_mul_widen_even (vectype)
5139 && targetm.vectorize.builtin_mul_widen_odd
5140 && targetm.vectorize.builtin_mul_widen_odd (vectype))
5141 {
5142 if (vect_print_dump_info (REPORT_DETAILS))
5143 fprintf (vect_dump, "Unordered widening operation detected.");
5144
5145 *code1 = *code2 = CALL_EXPR;
5146 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
5147 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
5148 return true;
5149 }
5150
5151 switch (code)
5152 {
5153 case WIDEN_MULT_EXPR:
5154 if (BYTES_BIG_ENDIAN)
5155 {
5156 c1 = VEC_WIDEN_MULT_HI_EXPR;
5157 c2 = VEC_WIDEN_MULT_LO_EXPR;
5158 }
5159 else
5160 {
5161 c2 = VEC_WIDEN_MULT_HI_EXPR;
5162 c1 = VEC_WIDEN_MULT_LO_EXPR;
5163 }
5164 break;
5165
5166 CASE_CONVERT:
5167 if (BYTES_BIG_ENDIAN)
5168 {
5169 c1 = VEC_UNPACK_HI_EXPR;
5170 c2 = VEC_UNPACK_LO_EXPR;
5171 }
5172 else
5173 {
5174 c2 = VEC_UNPACK_HI_EXPR;
5175 c1 = VEC_UNPACK_LO_EXPR;
5176 }
5177 break;
5178
5179 case FLOAT_EXPR:
5180 if (BYTES_BIG_ENDIAN)
5181 {
5182 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
5183 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
5184 }
5185 else
5186 {
5187 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
5188 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
5189 }
5190 break;
5191
5192 case FIX_TRUNC_EXPR:
5193 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
5194 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
5195 computing the operation. */
5196 return false;
5197
5198 default:
5199 gcc_unreachable ();
5200 }
5201
5202 if (code == FIX_TRUNC_EXPR)
5203 {
5204 /* The signedness is determined from output operand. */
5205 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5206 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
5207 }
5208 else
5209 {
5210 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5211 optab2 = optab_for_tree_code (c2, vectype, optab_default);
5212 }
5213
5214 if (!optab1 || !optab2)
5215 return false;
5216
5217 vec_mode = TYPE_MODE (vectype);
5218 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
5219 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
5220 return false;
5221
5222 /* Check if it's a multi-step conversion that can be done using intermediate
5223 types. */
5224 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
5225 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
5226 {
5227 int i;
5228 tree prev_type = vectype, intermediate_type;
5229 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5230 optab optab3, optab4;
5231
5232 if (!CONVERT_EXPR_CODE_P (code))
5233 return false;
5234
5235 *code1 = c1;
5236 *code2 = c2;
5237
5238 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5239 intermediate steps in the promotion sequence. We try
5240 MAX_INTERM_CVT_STEPS steps to get to the wide vector type (VECTYPE_OUT),
5241 and fail if we do not. */
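      /* Illustrative (target-dependent): promoting V16QI operands towards a
	 V4SI result might step V16QI -> V8HI -> V4SI, pushing the
	 intermediate short vector type into *INTERM_TYPES and leaving
	 *MULTI_STEP_CVT == 1.  */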
5242 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5243 for (i = 0; i < 3; i++)
5244 {
5245 intermediate_mode = insn_data[icode1].operand[0].mode;
5246 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5247 TYPE_UNSIGNED (prev_type));
5248 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
5249 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
5250
5251 if (!optab3 || !optab4
5252 || ((icode1 = optab_handler (optab1, prev_mode))
5253 == CODE_FOR_nothing)
5254 || insn_data[icode1].operand[0].mode != intermediate_mode
5255 || ((icode2 = optab_handler (optab2, prev_mode))
5256 == CODE_FOR_nothing)
5257 || insn_data[icode2].operand[0].mode != intermediate_mode
5258 || ((icode1 = optab_handler (optab3, intermediate_mode))
5259 == CODE_FOR_nothing)
5260 || ((icode2 = optab_handler (optab4, intermediate_mode))
5261 == CODE_FOR_nothing))
5262 return false;
5263
5264 VEC_quick_push (tree, *interm_types, intermediate_type);
5265 (*multi_step_cvt)++;
5266
5267 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
5268 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
5269 return true;
5270
5271 prev_type = intermediate_type;
5272 prev_mode = intermediate_mode;
5273 }
5274
5275 return false;
5276 }
5277
5278 *code1 = c1;
5279 *code2 = c2;
5280 return true;
5281 }
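/* A worked example of the multi-step promotion path above (a sketch;
   the exact modes depend on the target): a widening conversion from
   V16QI operands to V4SI results cannot be done with a single hi/lo
   unpack pair, so the loop first unpacks V16QI into two V8HI vectors
   (the V8HI vector type is the intermediate type pushed onto
   *INTERM_TYPES), and each V8HI is then unpacked into two V4SI
   vectors.  On return *MULTI_STEP_CVT is 1 and *CODE1/*CODE2 are the
   unpack hi/lo codes for the original vector type.  */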
5282
5283
5284 /* Function supportable_narrowing_operation
5285
5286 Check whether an operation represented by the code CODE is a
5287 narrowing operation that is supported by the target platform in
5288 vector form (i.e., when operating on arguments of type VECTYPE_IN
5289 and producing a result of type VECTYPE_OUT).
5290
5291 Narrowing operations we currently support are NOP (CONVERT) and
5292 FIX_TRUNC. This function checks if these operations are supported by
5293 the target platform directly via vector tree-codes.
5294
5295 Output:
5296 - CODE1 is the code of a vector operation to be used when
5297 vectorizing the operation, if available.
5298 - MULTI_STEP_CVT determines the number of required intermediate steps in
5299 case of multi-step conversion (like int->short->char - in that case
5300 MULTI_STEP_CVT will be 1).
5301 - INTERM_TYPES contains the intermediate type required to perform the
5302 narrowing operation (short in the above example). */
5303
5304 bool
5305 supportable_narrowing_operation (enum tree_code code,
5306 tree vectype_out, tree vectype_in,
5307 enum tree_code *code1, int *multi_step_cvt,
5308 VEC (tree, heap) **interm_types)
5309 {
5310 enum machine_mode vec_mode;
5311 enum insn_code icode1;
5312 optab optab1, interm_optab;
5313 tree vectype = vectype_in;
5314 tree narrow_vectype = vectype_out;
5315 enum tree_code c1;
5316 tree intermediate_type, prev_type;
5317 int i;
5318
5319 switch (code)
5320 {
5321 CASE_CONVERT:
5322 c1 = VEC_PACK_TRUNC_EXPR;
5323 break;
5324
5325 case FIX_TRUNC_EXPR:
5326 c1 = VEC_PACK_FIX_TRUNC_EXPR;
5327 break;
5328
5329 case FLOAT_EXPR:
5330 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5331 tree code and optabs used for computing the operation. */
5332 return false;
5333
5334 default:
5335 gcc_unreachable ();
5336 }
5337
5338 if (code == FIX_TRUNC_EXPR)
5339 /* The signedness is determined from the output operand. */
5340 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5341 else
5342 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5343
5344 if (!optab1)
5345 return false;
5346
5347 vec_mode = TYPE_MODE (vectype);
5348 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
5349 return false;
5350
5351 /* Check if it's a multi-step conversion that can be done using intermediate
5352 types. */
5353 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5354 {
5355 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5356
5357 *code1 = c1;
5358 prev_type = vectype;
5359 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5360 intermediate steps in the narrowing sequence. We try
5361 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
5362 not. */
5363 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5364 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5365 {
5366 intermediate_mode = insn_data[icode1].operand[0].mode;
5367 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5368 TYPE_UNSIGNED (prev_type));
5369 interm_optab = optab_for_tree_code (c1, intermediate_type,
5370 optab_default);
5371 if (!interm_optab
5372 || ((icode1 = optab_handler (optab1, prev_mode))
5373 == CODE_FOR_nothing)
5374 || insn_data[icode1].operand[0].mode != intermediate_mode
5375 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
5376 == CODE_FOR_nothing))
5377 return false;
5378
5379 VEC_quick_push (tree, *interm_types, intermediate_type);
5380 (*multi_step_cvt)++;
5381
5382 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5383 return true;
5384
5385 prev_type = intermediate_type;
5386 prev_mode = intermediate_mode;
5387 }
5388
5389 return false;
5390 }
5391
5392 *code1 = c1;
5393 return true;
5394 }
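/* A minimal sketch of how a caller typically invokes this routine
   (the local variable names are illustrative only):

     enum tree_code code1;
     int multi_step_cvt = 0;
     VEC (tree, heap) *interm_types = NULL;

     if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
                                           &code1, &multi_step_cvt,
                                           &interm_types))
       return false;

   On success CODE1 is the vector pack code to emit.  For a two-step
   narrowing such as int->char, MULTI_STEP_CVT is 1 and INTERM_TYPES
   holds the intermediate (short-element) vector type.  Note that the
   caller must initialize MULTI_STEP_CVT to zero, since this routine
   only increments it.  */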