gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "tree-pretty-print.h"
32 #include "gimple-pretty-print.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "cfglayout.h"
37 #include "expr.h"
38 #include "recog.h"
39 #include "optabs.h"
40 #include "diagnostic-core.h"
41 #include "toplev.h"
42 #include "tree-vectorizer.h"
43 #include "langhooks.h"
44
45
46 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
47
48 /* Function vect_mark_relevant.
49
50 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
51
52 static void
53 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
54 enum vect_relevant relevant, bool live_p)
55 {
56 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
57 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
58 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
59
60 if (vect_print_dump_info (REPORT_DETAILS))
61 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
62
63 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
64 {
65 gimple pattern_stmt;
66
67 /* This is the last stmt in a sequence that was detected as a
68 pattern that can potentially be vectorized. Don't mark the stmt
69 as relevant/live because it's not going to be vectorized.
70 Instead mark the pattern-stmt that replaces it. */
71
72 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
73
74 if (vect_print_dump_info (REPORT_DETAILS))
75 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
76 stmt_info = vinfo_for_stmt (pattern_stmt);
77 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
78 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
79 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
80 stmt = pattern_stmt;
81 }
82
83 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
84 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
85 STMT_VINFO_RELEVANT (stmt_info) = relevant;
86
87 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
88 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
89 {
90 if (vect_print_dump_info (REPORT_DETAILS))
91 fprintf (vect_dump, "already marked relevant/live.");
92 return;
93 }
94
95 VEC_safe_push (gimple, heap, *worklist, stmt);
96 }
97
98
99 /* Function vect_stmt_relevant_p.
100
101 Return true if STMT, in the loop represented by LOOP_VINFO, is
102 "relevant for vectorization".
103
104 A stmt is considered "relevant for vectorization" if:
105 - it has uses outside the loop.
106 - it has vdefs (it alters memory).
107 - it is a control stmt in the loop (except for the exit condition).
108
109 CHECKME: what other side effects would the vectorizer allow? */
110
111 static bool
112 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
113 enum vect_relevant *relevant, bool *live_p)
114 {
115 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
116 ssa_op_iter op_iter;
117 imm_use_iterator imm_iter;
118 use_operand_p use_p;
119 def_operand_p def_p;
120
121 *relevant = vect_unused_in_scope;
122 *live_p = false;
123
124 /* cond stmt other than loop exit cond. */
125 if (is_ctrl_stmt (stmt)
126 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
127 != loop_exit_ctrl_vec_info_type)
128 *relevant = vect_used_in_scope;
129
130 /* changing memory. */
131 if (gimple_code (stmt) != GIMPLE_PHI)
132 if (gimple_vdef (stmt))
133 {
134 if (vect_print_dump_info (REPORT_DETAILS))
135 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
136 *relevant = vect_used_in_scope;
137 }
138
139 /* uses outside the loop. */
140 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
141 {
142 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
143 {
144 basic_block bb = gimple_bb (USE_STMT (use_p));
145 if (!flow_bb_inside_loop_p (loop, bb))
146 {
147 if (vect_print_dump_info (REPORT_DETAILS))
148 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
149
150 if (is_gimple_debug (USE_STMT (use_p)))
151 continue;
152
153 /* We expect all such uses to be in the loop exit phis
154 (because of loop closed form) */
155 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
156 gcc_assert (bb == single_exit (loop)->dest);
157
158 *live_p = true;
159 }
160 }
161 }
162
163 return (*live_p || *relevant);
164 }
165
166
167 /* Function exist_non_indexing_operands_for_use_p
168
169 USE is one of the uses attached to STMT. Check if USE is
170 used in STMT for anything other than indexing an array. */
171
172 static bool
173 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
174 {
175 tree operand;
176 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
177
178 /* USE corresponds to some operand in STMT. If there is no data
179 reference in STMT, then any operand that corresponds to USE
180 is not indexing an array. */
181 if (!STMT_VINFO_DATA_REF (stmt_info))
182 return true;
183
184 /* STMT has a data_ref.  FORNOW this means that it's one of
185 the following forms:
186 -1- ARRAY_REF = var
187 -2- var = ARRAY_REF
188 (This should have been verified in analyze_data_refs).
189
190 'var' in the second case corresponds to a def, not a use,
191 so USE cannot correspond to any operands that are not used
192 for array indexing.
193
194 Therefore, all we need to check is if STMT falls into the
195 first case, and whether var corresponds to USE. */
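/* For instance, given the stmt 'a[i_7] = x_3', the answer is true when USE
   is x_3 but false when USE is i_7, which only feeds the address
   computation.  (The SSA names i_7 and x_3 are illustrative only.)  */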
196
197 if (!gimple_assign_copy_p (stmt))
198 return false;
199 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
200 return false;
201 operand = gimple_assign_rhs1 (stmt);
202 if (TREE_CODE (operand) != SSA_NAME)
203 return false;
204
205 if (operand == use)
206 return true;
207
208 return false;
209 }
210
211
212 /*
213 Function process_use.
214
215 Inputs:
216 - a USE in STMT in a loop represented by LOOP_VINFO
217 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
218 that defined USE. This is done by calling mark_relevant and passing it
219 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
220
221 Outputs:
222 Generally, LIVE_P and RELEVANT are used to define the liveness and
223 relevance info of the DEF_STMT of this USE:
224 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
225 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
226 Exceptions:
227 - case 1: If USE is used only for address computations (e.g. array indexing),
228 which does not need to be directly vectorized, then the liveness/relevance
229 of the respective DEF_STMT is left unchanged.
230 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
231 skip DEF_STMT because it has already been processed.
232 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
233 be modified accordingly.
234
235 Return true if everything is as expected. Return false otherwise. */
236
237 static bool
238 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
239 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
240 {
241 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
242 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
243 stmt_vec_info dstmt_vinfo;
244 basic_block bb, def_bb;
245 tree def;
246 gimple def_stmt;
247 enum vect_def_type dt;
248
249 /* case 1: we are only interested in uses that need to be vectorized. Uses
250 that are used for address computation are not considered relevant. */
251 if (!exist_non_indexing_operands_for_use_p (use, stmt))
252 return true;
253
254 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
255 {
256 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
257 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
258 return false;
259 }
260
261 if (!def_stmt || gimple_nop_p (def_stmt))
262 return true;
263
264 def_bb = gimple_bb (def_stmt);
265 if (!flow_bb_inside_loop_p (loop, def_bb))
266 {
267 if (vect_print_dump_info (REPORT_DETAILS))
268 fprintf (vect_dump, "def_stmt is out of loop.");
269 return true;
270 }
271
272 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
273 DEF_STMT must have already been processed, because this should be the
274 only way that STMT, which is a reduction-phi, was put in the worklist,
275 as there should be no other uses for DEF_STMT in the loop. So we just
276 check that everything is as expected, and we are done. */
277 dstmt_vinfo = vinfo_for_stmt (def_stmt);
278 bb = gimple_bb (stmt);
279 if (gimple_code (stmt) == GIMPLE_PHI
280 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
281 && gimple_code (def_stmt) != GIMPLE_PHI
282 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
283 && bb->loop_father == def_bb->loop_father)
284 {
285 if (vect_print_dump_info (REPORT_DETAILS))
286 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
287 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
288 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
289 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
290 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
291 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
292 return true;
293 }
294
295 /* case 3a: outer-loop stmt defining an inner-loop stmt:
296 outer-loop-header-bb:
297 d = def_stmt
298 inner-loop:
299 stmt # use (d)
300 outer-loop-tail-bb:
301 ... */
302 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
303 {
304 if (vect_print_dump_info (REPORT_DETAILS))
305 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
306
307 switch (relevant)
308 {
309 case vect_unused_in_scope:
310 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
311 vect_used_in_scope : vect_unused_in_scope;
312 break;
313
314 case vect_used_in_outer_by_reduction:
315 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
316 relevant = vect_used_by_reduction;
317 break;
318
319 case vect_used_in_outer:
320 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
321 relevant = vect_used_in_scope;
322 break;
323
324 case vect_used_in_scope:
325 break;
326
327 default:
328 gcc_unreachable ();
329 }
330 }
331
332 /* case 3b: inner-loop stmt defining an outer-loop stmt:
333 outer-loop-header-bb:
334 ...
335 inner-loop:
336 d = def_stmt
337 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
338 stmt # use (d) */
339 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
340 {
341 if (vect_print_dump_info (REPORT_DETAILS))
342 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
343
344 switch (relevant)
345 {
346 case vect_unused_in_scope:
347 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
348 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
349 vect_used_in_outer_by_reduction : vect_unused_in_scope;
350 break;
351
352 case vect_used_by_reduction:
353 relevant = vect_used_in_outer_by_reduction;
354 break;
355
356 case vect_used_in_scope:
357 relevant = vect_used_in_outer;
358 break;
359
360 default:
361 gcc_unreachable ();
362 }
363 }
364
365 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
366 return true;
367 }
368
369
370 /* Function vect_mark_stmts_to_be_vectorized.
371
372 Not all stmts in the loop need to be vectorized. For example:
373
374 for i...
375 for j...
376 1. T0 = i + j
377 2. T1 = a[T0]
378
379 3. j = j + 1
380
381 Stmts 1 and 3 do not need to be vectorized, because loop control and
382 addressing of vectorized data-refs are handled differently.
383
384 This pass detects such stmts. */
385
386 bool
387 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
388 {
389 VEC(gimple,heap) *worklist;
390 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
391 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
392 unsigned int nbbs = loop->num_nodes;
393 gimple_stmt_iterator si;
394 gimple stmt;
395 unsigned int i;
396 stmt_vec_info stmt_vinfo;
397 basic_block bb;
398 gimple phi;
399 bool live_p;
400 enum vect_relevant relevant, tmp_relevant;
401 enum vect_def_type def_type;
402
403 if (vect_print_dump_info (REPORT_DETAILS))
404 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
405
406 worklist = VEC_alloc (gimple, heap, 64);
407
408 /* 1. Init worklist. */
409 for (i = 0; i < nbbs; i++)
410 {
411 bb = bbs[i];
412 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
413 {
414 phi = gsi_stmt (si);
415 if (vect_print_dump_info (REPORT_DETAILS))
416 {
417 fprintf (vect_dump, "init: phi relevant? ");
418 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
419 }
420
421 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
422 vect_mark_relevant (&worklist, phi, relevant, live_p);
423 }
424 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
425 {
426 stmt = gsi_stmt (si);
427 if (vect_print_dump_info (REPORT_DETAILS))
428 {
429 fprintf (vect_dump, "init: stmt relevant? ");
430 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
431 }
432
433 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
434 vect_mark_relevant (&worklist, stmt, relevant, live_p);
435 }
436 }
437
438 /* 2. Process_worklist */
439 while (VEC_length (gimple, worklist) > 0)
440 {
441 use_operand_p use_p;
442 ssa_op_iter iter;
443
444 stmt = VEC_pop (gimple, worklist);
445 if (vect_print_dump_info (REPORT_DETAILS))
446 {
447 fprintf (vect_dump, "worklist: examine stmt: ");
448 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
449 }
450
451 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
452 (DEF_STMT) as relevant/irrelevant and live/dead according to the
453 liveness and relevance properties of STMT. */
454 stmt_vinfo = vinfo_for_stmt (stmt);
455 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
456 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
457
458 /* Generally, the liveness and relevance properties of STMT are
459 propagated as is to the DEF_STMTs of its USEs:
460 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
461 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
462
463 One exception is when STMT has been identified as defining a reduction
464 variable; in this case we set the liveness/relevance as follows:
465 live_p = false
466 relevant = vect_used_by_reduction
467 This is because we distinguish between two kinds of relevant stmts -
468 those that are used by a reduction computation, and those that are
469 (also) used by a regular computation. This allows us later on to
470 identify stmts that are used solely by a reduction, and therefore the
471 order of the results that they produce does not have to be kept. */
472
473 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
474 tmp_relevant = relevant;
475 switch (def_type)
476 {
477 case vect_reduction_def:
478 switch (tmp_relevant)
479 {
480 case vect_unused_in_scope:
481 relevant = vect_used_by_reduction;
482 break;
483
484 case vect_used_by_reduction:
485 if (gimple_code (stmt) == GIMPLE_PHI)
486 break;
487 /* fall through */
488
489 default:
490 if (vect_print_dump_info (REPORT_DETAILS))
491 fprintf (vect_dump, "unsupported use of reduction.");
492
493 VEC_free (gimple, heap, worklist);
494 return false;
495 }
496
497 live_p = false;
498 break;
499
500 case vect_nested_cycle:
501 if (tmp_relevant != vect_unused_in_scope
502 && tmp_relevant != vect_used_in_outer_by_reduction
503 && tmp_relevant != vect_used_in_outer)
504 {
505 if (vect_print_dump_info (REPORT_DETAILS))
506 fprintf (vect_dump, "unsupported use of nested cycle.");
507
508 VEC_free (gimple, heap, worklist);
509 return false;
510 }
511
512 live_p = false;
513 break;
514
515 case vect_double_reduction_def:
516 if (tmp_relevant != vect_unused_in_scope
517 && tmp_relevant != vect_used_by_reduction)
518 {
519 if (vect_print_dump_info (REPORT_DETAILS))
520 fprintf (vect_dump, "unsupported use of double reduction.");
521
522 VEC_free (gimple, heap, worklist);
523 return false;
524 }
525
526 live_p = false;
527 break;
528
529 default:
530 break;
531 }
532
533 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
534 {
535 tree op = USE_FROM_PTR (use_p);
536 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
537 {
538 VEC_free (gimple, heap, worklist);
539 return false;
540 }
541 }
542 } /* while worklist */
543
544 VEC_free (gimple, heap, worklist);
545 return true;
546 }
547
548
549 /* Get cost by calling the target's vectorization cost builtin.  */
550
551 static inline
552 int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
553 {
554 tree dummy_type = NULL;
555 int dummy = 0;
556
557 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
558 dummy_type, dummy);
559 }
560
561
562 /* Get cost for STMT. */
563
564 int
565 cost_for_stmt (gimple stmt)
566 {
567 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
568
569 switch (STMT_VINFO_TYPE (stmt_info))
570 {
571 case load_vec_info_type:
572 return vect_get_stmt_cost (scalar_load);
573 case store_vec_info_type:
574 return vect_get_stmt_cost (scalar_store);
575 case op_vec_info_type:
576 case condition_vec_info_type:
577 case assignment_vec_info_type:
578 case reduc_vec_info_type:
579 case induc_vec_info_type:
580 case type_promotion_vec_info_type:
581 case type_demotion_vec_info_type:
582 case type_conversion_vec_info_type:
583 case call_vec_info_type:
584 return vect_get_stmt_cost (scalar_stmt);
585 case undef_vec_info_type:
586 default:
587 gcc_unreachable ();
588 }
589 }
590
591 /* Function vect_model_simple_cost.
592
593 Models cost for simple operations, i.e. those that only emit ncopies of a
594 single op. Right now, this does not account for multiple insns that could
595 be generated for the single vector op. We will handle that shortly. */
596
597 void
598 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
599 enum vect_def_type *dt, slp_tree slp_node)
600 {
601 int i;
602 int inside_cost = 0, outside_cost = 0;
603
604 /* The SLP costs were already calculated during SLP tree build. */
605 if (PURE_SLP_STMT (stmt_info))
606 return;
607
608 inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);
609
610 /* FORNOW: Assuming a maximum of 2 args per stmt.  */
611 for (i = 0; i < 2; i++)
612 {
613 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
614 outside_cost += vect_get_stmt_cost (vector_stmt);
615 }
616
617 if (vect_print_dump_info (REPORT_COST))
618 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
619 "outside_cost = %d .", inside_cost, outside_cost);
620
621 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
622 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
623 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
624 }
625
626
627 /* Function vect_cost_strided_group_size
628
629 For strided load or store, return the group_size only if it is the first
630 load or store of a group, else return 1. This ensures that group size is
631 only returned once per group. */
632
633 static int
634 vect_cost_strided_group_size (stmt_vec_info stmt_info)
635 {
636 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
637
638 if (first_stmt == STMT_VINFO_STMT (stmt_info))
639 return DR_GROUP_SIZE (stmt_info);
640
641 return 1;
642 }
643
644
645 /* Function vect_model_store_cost
646
647 Models cost for stores. In the case of strided accesses, one access
648 has the overhead of the strided access attributed to it. */
649
650 void
651 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
652 enum vect_def_type dt, slp_tree slp_node)
653 {
654 int group_size;
655 unsigned int inside_cost = 0, outside_cost = 0;
656 struct data_reference *first_dr;
657 gimple first_stmt;
658
659 /* The SLP costs were already calculated during SLP tree build. */
660 if (PURE_SLP_STMT (stmt_info))
661 return;
662
663 if (dt == vect_constant_def || dt == vect_external_def)
664 outside_cost = vect_get_stmt_cost (scalar_to_vec);
665
666 /* Strided access? */
667 if (DR_GROUP_FIRST_DR (stmt_info))
668 {
669 if (slp_node)
670 {
671 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
672 group_size = 1;
673 }
674 else
675 {
676 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
677 group_size = vect_cost_strided_group_size (stmt_info);
678 }
679
680 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
681 }
682 /* Not a strided access. */
683 else
684 {
685 group_size = 1;
686 first_dr = STMT_VINFO_DATA_REF (stmt_info);
687 }
688
689 /* Is this an access in a group of stores, which provide strided access?
690 If so, add in the cost of the permutes. */
691 if (group_size > 1)
692 {
693 /* Uses a high and low interleave operation for each needed permute. */
694 inside_cost = ncopies * exact_log2(group_size) * group_size
695 * vect_get_stmt_cost (vector_stmt);
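/* For instance, with ncopies == 1 and group_size == 4 this accounts for
   exact_log2 (4) * 4 == 8 vector interleave stmts to permute one group.  */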
696
697 if (vect_print_dump_info (REPORT_COST))
698 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
699 group_size);
700
701 }
702
703 /* Costs of the stores. */
704 vect_get_store_cost (first_dr, ncopies, &inside_cost);
705
706 if (vect_print_dump_info (REPORT_COST))
707 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
708 "outside_cost = %d .", inside_cost, outside_cost);
709
710 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
711 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
712 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
713 }
714
715
716 /* Calculate cost of DR's memory access. */
717 void
718 vect_get_store_cost (struct data_reference *dr, int ncopies,
719 unsigned int *inside_cost)
720 {
721 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
722
723 switch (alignment_support_scheme)
724 {
725 case dr_aligned:
726 {
727 *inside_cost += ncopies * vect_get_stmt_cost (vector_store);
728
729 if (vect_print_dump_info (REPORT_COST))
730 fprintf (vect_dump, "vect_model_store_cost: aligned.");
731
732 break;
733 }
734
735 case dr_unaligned_supported:
736 {
737 gimple stmt = DR_STMT (dr);
738 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
739 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
740
741 /* Here, we assign an additional cost for the unaligned store. */
742 *inside_cost += ncopies
743 * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
744 vectype, DR_MISALIGNMENT (dr));
745
746 if (vect_print_dump_info (REPORT_COST))
747 fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
748 "hardware.");
749
750 break;
751 }
752
753 default:
754 gcc_unreachable ();
755 }
756 }
757
758
759 /* Function vect_model_load_cost
760
761 Models cost for loads. In the case of strided accesses, the last access
762 has the overhead of the strided access attributed to it. Since unaligned
763 accesses are supported for loads, we also account for the costs of the
764 access scheme chosen. */
765
766 void
767 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
768
769 {
770 int group_size;
771 gimple first_stmt;
772 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
773 unsigned int inside_cost = 0, outside_cost = 0;
774
775 /* The SLP costs were already calculated during SLP tree build. */
776 if (PURE_SLP_STMT (stmt_info))
777 return;
778
779 /* Strided accesses? */
780 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
781 if (first_stmt && !slp_node)
782 {
783 group_size = vect_cost_strided_group_size (stmt_info);
784 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
785 }
786 /* Not a strided access. */
787 else
788 {
789 group_size = 1;
790 first_dr = dr;
791 }
792
793 /* Is this an access in a group of loads providing strided access?
794 If so, add in the cost of the permutes. */
795 if (group_size > 1)
796 {
797 /* Uses even and odd extract operations for each needed permute.  */
798 inside_cost = ncopies * exact_log2(group_size) * group_size
799 * vect_get_stmt_cost (vector_stmt);
800
801 if (vect_print_dump_info (REPORT_COST))
802 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
803 group_size);
804 }
805
806 /* The loads themselves. */
807 vect_get_load_cost (first_dr, ncopies,
808 ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node),
809 &inside_cost, &outside_cost);
810
811 if (vect_print_dump_info (REPORT_COST))
812 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
813 "outside_cost = %d .", inside_cost, outside_cost);
814
815 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
816 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
817 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
818 }
819
820
821 /* Calculate cost of DR's memory access. */
822 void
823 vect_get_load_cost (struct data_reference *dr, int ncopies,
824 bool add_realign_cost, unsigned int *inside_cost,
825 unsigned int *outside_cost)
826 {
827 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
828
829 switch (alignment_support_scheme)
830 {
831 case dr_aligned:
832 {
833 *inside_cost += ncopies * vect_get_stmt_cost (vector_load);
834
835 if (vect_print_dump_info (REPORT_COST))
836 fprintf (vect_dump, "vect_model_load_cost: aligned.");
837
838 break;
839 }
840 case dr_unaligned_supported:
841 {
842 gimple stmt = DR_STMT (dr);
843 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
844 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
845
846 /* Here, we assign an additional cost for the unaligned load. */
847 *inside_cost += ncopies
848 * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
849 vectype, DR_MISALIGNMENT (dr));
850 if (vect_print_dump_info (REPORT_COST))
851 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
852 "hardware.");
853
854 break;
855 }
856 case dr_explicit_realign:
857 {
858 *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
859 + vect_get_stmt_cost (vector_stmt));
860
861 /* FIXME: If the misalignment remains fixed across the iterations of
862 the containing loop, the following cost should be added to the
863 outside costs. */
864 if (targetm.vectorize.builtin_mask_for_load)
865 *inside_cost += vect_get_stmt_cost (vector_stmt);
866
867 break;
868 }
869 case dr_explicit_realign_optimized:
870 {
871 if (vect_print_dump_info (REPORT_COST))
872 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
873 "pipelined.");
874
875 /* Unaligned software pipeline has a load of an address, an initial
876 load, and possibly a mask operation to "prime" the loop. However,
877 if this is an access in a group of loads, which provide strided
878 access, then the above cost should only be considered for one
879 access in the group. Inside the loop, there is a load op
880 and a realignment op. */
881
882 if (add_realign_cost)
883 {
884 *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
885 if (targetm.vectorize.builtin_mask_for_load)
886 *outside_cost += vect_get_stmt_cost (vector_stmt);
887 }
888
889 *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
890 + vect_get_stmt_cost (vector_stmt));
891 break;
892 }
893
894 default:
895 gcc_unreachable ();
896 }
897 }
898
899
900 /* Function vect_init_vector.
901
902 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
903 the vector elements of VECTOR_VAR.  Place the initialization at GSI if it
904 is not NULL. Otherwise, place the initialization at the loop preheader.
905 Return the DEF of INIT_STMT.
906 It will be used in the vectorization of STMT. */
907
908 tree
909 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
910 gimple_stmt_iterator *gsi)
911 {
912 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
913 tree new_var;
914 gimple init_stmt;
915 tree vec_oprnd;
916 edge pe;
917 tree new_temp;
918 basic_block new_bb;
919
920 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
921 add_referenced_var (new_var);
922 init_stmt = gimple_build_assign (new_var, vector_var);
923 new_temp = make_ssa_name (new_var, init_stmt);
924 gimple_assign_set_lhs (init_stmt, new_temp);
925
926 if (gsi)
927 vect_finish_stmt_generation (stmt, init_stmt, gsi);
928 else
929 {
930 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
931
932 if (loop_vinfo)
933 {
934 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
935
936 if (nested_in_vect_loop_p (loop, stmt))
937 loop = loop->inner;
938
939 pe = loop_preheader_edge (loop);
940 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
941 gcc_assert (!new_bb);
942 }
943 else
944 {
945 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
946 basic_block bb;
947 gimple_stmt_iterator gsi_bb_start;
948
949 gcc_assert (bb_vinfo);
950 bb = BB_VINFO_BB (bb_vinfo);
951 gsi_bb_start = gsi_after_labels (bb);
952 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
953 }
954 }
955
956 if (vect_print_dump_info (REPORT_DETAILS))
957 {
958 fprintf (vect_dump, "created new init_stmt: ");
959 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
960 }
961
962 vec_oprnd = gimple_assign_lhs (init_stmt);
963 return vec_oprnd;
964 }
965
966
967 /* Function vect_get_vec_def_for_operand.
968
969 OP is an operand in STMT. This function returns a (vector) def that will be
970 used in the vectorized stmt for STMT.
971
972 In the case that OP is an SSA_NAME which is defined in the loop, then
973 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
974
975 In case OP is an invariant or constant, a new stmt that creates a vector def
976 needs to be introduced. */
977
978 tree
979 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
980 {
981 tree vec_oprnd;
982 gimple vec_stmt;
983 gimple def_stmt;
984 stmt_vec_info def_stmt_info = NULL;
985 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
986 unsigned int nunits;
987 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
988 tree vec_inv;
989 tree vec_cst;
990 tree def;
991 enum vect_def_type dt;
992 bool is_simple_use;
993 tree vector_type;
994
995 if (vect_print_dump_info (REPORT_DETAILS))
996 {
997 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
998 print_generic_expr (vect_dump, op, TDF_SLIM);
999 }
1000
1001 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
1002 &dt);
1003 gcc_assert (is_simple_use);
1004 if (vect_print_dump_info (REPORT_DETAILS))
1005 {
1006 if (def)
1007 {
1008 fprintf (vect_dump, "def = ");
1009 print_generic_expr (vect_dump, def, TDF_SLIM);
1010 }
1011 if (def_stmt)
1012 {
1013 fprintf (vect_dump, " def_stmt = ");
1014 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1015 }
1016 }
1017
1018 switch (dt)
1019 {
1020 /* Case 1: operand is a constant. */
1021 case vect_constant_def:
1022 {
1023 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1024 gcc_assert (vector_type);
1025 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1026
1027 if (scalar_def)
1028 *scalar_def = op;
1029
1030 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1031 if (vect_print_dump_info (REPORT_DETAILS))
1032 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
1033
1034 vec_cst = build_vector_from_val (vector_type, op);
1035 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
1036 }
1037
1038 /* Case 2: operand is defined outside the loop - loop invariant. */
1039 case vect_external_def:
1040 {
1041 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1042 gcc_assert (vector_type);
1043 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1044
1045 if (scalar_def)
1046 *scalar_def = def;
1047
1048 /* Create 'vec_inv = {inv,inv,..,inv}' */
1049 if (vect_print_dump_info (REPORT_DETAILS))
1050 fprintf (vect_dump, "Create vector_inv.");
1051
1052 vec_inv = build_vector_from_val (vector_type, def);
1053 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
1054 }
1055
1056 /* Case 3: operand is defined inside the loop. */
1057 case vect_internal_def:
1058 {
1059 if (scalar_def)
1060 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1061
1062 /* Get the def from the vectorized stmt. */
1063 def_stmt_info = vinfo_for_stmt (def_stmt);
1064 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1065 gcc_assert (vec_stmt);
1066 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1067 vec_oprnd = PHI_RESULT (vec_stmt);
1068 else if (is_gimple_call (vec_stmt))
1069 vec_oprnd = gimple_call_lhs (vec_stmt);
1070 else
1071 vec_oprnd = gimple_assign_lhs (vec_stmt);
1072 return vec_oprnd;
1073 }
1074
1075 /* Case 4: operand is defined by a loop header phi - reduction */
1076 case vect_reduction_def:
1077 case vect_double_reduction_def:
1078 case vect_nested_cycle:
1079 {
1080 struct loop *loop;
1081
1082 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1083 loop = (gimple_bb (def_stmt))->loop_father;
1084
1085 /* Get the def before the loop */
1086 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1087 return get_initial_def_for_reduction (stmt, op, scalar_def);
1088 }
1089
1090 /* Case 5: operand is defined by loop-header phi - induction. */
1091 case vect_induction_def:
1092 {
1093 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1094
1095 /* Get the def from the vectorized stmt. */
1096 def_stmt_info = vinfo_for_stmt (def_stmt);
1097 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1098 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1099 vec_oprnd = PHI_RESULT (vec_stmt);
1100 return vec_oprnd;
1101 }
1102
1103 default:
1104 gcc_unreachable ();
1105 }
1106 }
1107
1108
1109 /* Function vect_get_vec_def_for_stmt_copy
1110
1111 Return a vector-def for an operand. This function is used when the
1112 vectorized stmt to be created (by the caller to this function) is a "copy"
1113 created in case the vectorized result cannot fit in one vector, and several
1114 copies of the vector-stmt are required. In this case the vector-def is
1115 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1116 of the stmt that defines VEC_OPRND.
1117 DT is the type of the vector def VEC_OPRND.
1118
1119 Context:
1120 In case the vectorization factor (VF) is bigger than the number
1121 of elements that can fit in a vectype (nunits), we have to generate
1122 more than one vector stmt to vectorize the scalar stmt. This situation
1123 arises when there are multiple data-types operated upon in the loop; the
1124 smallest data-type determines the VF, and as a result, when vectorizing
1125 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1126 vector stmt (each computing a vector of 'nunits' results, and together
1127 computing 'VF' results in each iteration). This function is called when
1128 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1129 which VF=16 and nunits=4, so the number of copies required is 4):
1130
1131 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1132
1133 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1134 VS1.1: vx.1 = memref1 VS1.2
1135 VS1.2: vx.2 = memref2 VS1.3
1136 VS1.3: vx.3 = memref3
1137
1138 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1139 VSnew.1: vz1 = vx.1 + ... VSnew.2
1140 VSnew.2: vz2 = vx.2 + ... VSnew.3
1141 VSnew.3: vz3 = vx.3 + ...
1142
1143 The vectorization of S1 is explained in vectorizable_load.
1144 The vectorization of S2:
1145 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1146 the function 'vect_get_vec_def_for_operand' is called to
1147 get the relevant vector-def for each operand of S2. For operand x it
1148 returns the vector-def 'vx.0'.
1149
1150 To create the remaining copies of the vector-stmt (VSnew.j), this
1151 function is called to get the relevant vector-def for each operand. It is
1152 obtained from the respective VS1.j stmt, which is recorded in the
1153 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1154
1155 For example, to obtain the vector-def 'vx.1' in order to create the
1156 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1157 Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
1158 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1159 and return its def ('vx.1').
1160 Overall, to create the above sequence this function will be called 3 times:
1161 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1162 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1163 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1164
1165 tree
1166 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1167 {
1168 gimple vec_stmt_for_operand;
1169 stmt_vec_info def_stmt_info;
1170
1171 /* Do nothing; can reuse same def. */
1172 if (dt == vect_external_def || dt == vect_constant_def )
1173 return vec_oprnd;
1174
1175 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1176 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1177 gcc_assert (def_stmt_info);
1178 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1179 gcc_assert (vec_stmt_for_operand);
1180 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1181 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1182 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1183 else
1184 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1185 return vec_oprnd;
1186 }
1187
1188
1189 /* Get vectorized definitions for the operands to create a copy of an original
1190 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1191
1192 static void
1193 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1194 VEC(tree,heap) **vec_oprnds0,
1195 VEC(tree,heap) **vec_oprnds1)
1196 {
1197 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1198
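/* The def used by the previous copy of the vectorized stmt was just popped;
   chain to the def for the next copy and push it back in its place.  */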
1199 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1200 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1201
1202 if (vec_oprnds1 && *vec_oprnds1)
1203 {
1204 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1205 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1206 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1207 }
1208 }
1209
1210
1211 /* Get vectorized definitions for OP0 and OP1, either from SLP_NODE if it is
1212 not NULL, or by computing a vector def for each operand.  */
1213
1214 static void
1215 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1216 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1217 slp_tree slp_node)
1218 {
1219 if (slp_node)
1220 vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
1221 else
1222 {
1223 tree vec_oprnd;
1224
1225 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1226 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1227 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1228
1229 if (op1)
1230 {
1231 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1232 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1233 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1234 }
1235 }
1236 }
1237
1238
1239 /* Function vect_finish_stmt_generation.
1240
1241 Insert a new stmt. */
1242
1243 void
1244 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1245 gimple_stmt_iterator *gsi)
1246 {
1247 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1248 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1249 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1250
1251 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1252
1253 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1254
1255 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1256 bb_vinfo));
1257
1258 if (vect_print_dump_info (REPORT_DETAILS))
1259 {
1260 fprintf (vect_dump, "add new stmt: ");
1261 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1262 }
1263
1264 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1265 }
1266
1267 /* Checks if CALL can be vectorized with types VECTYPE_OUT and VECTYPE_IN.  Returns
1268 a function declaration if the target has a vectorized version
1269 of the function, or NULL_TREE if the function cannot be vectorized. */
1270
1271 tree
1272 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1273 {
1274 tree fndecl = gimple_call_fndecl (call);
1275
1276 /* We only handle functions that do not read or clobber memory -- i.e.
1277 const or novops ones. */
1278 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1279 return NULL_TREE;
1280
1281 if (!fndecl
1282 || TREE_CODE (fndecl) != FUNCTION_DECL
1283 || !DECL_BUILT_IN (fndecl))
1284 return NULL_TREE;
1285
1286 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1287 vectype_in);
1288 }
1289
1290 /* Function vectorizable_call.
1291
1292 Check if STMT performs a function call that can be vectorized.
1293 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1294 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1295 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1296
1297 static bool
1298 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1299 {
1300 tree vec_dest;
1301 tree scalar_dest;
1302 tree op, type;
1303 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1304 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1305 tree vectype_out, vectype_in;
1306 int nunits_in;
1307 int nunits_out;
1308 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1309 tree fndecl, new_temp, def, rhs_type;
1310 gimple def_stmt;
1311 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1312 gimple new_stmt = NULL;
1313 int ncopies, j;
1314 VEC(tree, heap) *vargs = NULL;
1315 enum { NARROW, NONE, WIDEN } modifier;
1316 size_t i, nargs;
1317
1318 /* FORNOW: unsupported in basic block SLP. */
1319 gcc_assert (loop_vinfo);
1320
1321 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1322 return false;
1323
1324 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1325 return false;
1326
1327 /* FORNOW: SLP not supported. */
1328 if (STMT_SLP_TYPE (stmt_info))
1329 return false;
1330
1331 /* Is STMT a vectorizable call? */
1332 if (!is_gimple_call (stmt))
1333 return false;
1334
1335 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1336 return false;
1337
1338 if (stmt_could_throw_p (stmt))
1339 return false;
1340
1341 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1342
1343 /* Process function arguments. */
1344 rhs_type = NULL_TREE;
1345 vectype_in = NULL_TREE;
1346 nargs = gimple_call_num_args (stmt);
1347
1348 /* Bail out if the function has more than three arguments; we do not have
1349 interesting builtin functions to vectorize with more than two arguments
1350 except for fma.  A call with no arguments is not interesting either.
1351 if (nargs == 0 || nargs > 3)
1352 return false;
1353
1354 for (i = 0; i < nargs; i++)
1355 {
1356 tree opvectype;
1357
1358 op = gimple_call_arg (stmt, i);
1359
1360 /* We can only handle calls with arguments of the same type. */
1361 if (rhs_type
1362 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
1363 {
1364 if (vect_print_dump_info (REPORT_DETAILS))
1365 fprintf (vect_dump, "argument types differ.");
1366 return false;
1367 }
1368 if (!rhs_type)
1369 rhs_type = TREE_TYPE (op);
1370
1371 if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
1372 &def_stmt, &def, &dt[i], &opvectype))
1373 {
1374 if (vect_print_dump_info (REPORT_DETAILS))
1375 fprintf (vect_dump, "use not simple.");
1376 return false;
1377 }
1378
1379 if (!vectype_in)
1380 vectype_in = opvectype;
1381 else if (opvectype
1382 && opvectype != vectype_in)
1383 {
1384 if (vect_print_dump_info (REPORT_DETAILS))
1385 fprintf (vect_dump, "argument vector types differ.");
1386 return false;
1387 }
1388 }
1389 /* If all arguments are external or constant defs, use a vector type with
1390 the same size as the output vector type. */
1391 if (!vectype_in)
1392 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1393 if (vec_stmt)
1394 gcc_assert (vectype_in);
1395 if (!vectype_in)
1396 {
1397 if (vect_print_dump_info (REPORT_DETAILS))
1398 {
1399 fprintf (vect_dump, "no vectype for scalar type ");
1400 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1401 }
1402
1403 return false;
1404 }
1405
1406 /* FORNOW */
1407 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1408 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1409 if (nunits_in == nunits_out / 2)
1410 modifier = NARROW;
1411 else if (nunits_out == nunits_in)
1412 modifier = NONE;
1413 else if (nunits_out == nunits_in / 2)
1414 modifier = WIDEN;
1415 else
1416 return false;
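/* For example, a call whose input vectors hold 4 elements and whose output
   vector holds 8 elements (nunits_in == nunits_out / 2) is handled as
   NARROW: two input vectors are consumed per argument of each vectorized
   call.  */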
1417
1418 /* For now, we only vectorize functions if a target specific builtin
1419 is available. TODO -- in some cases, it might be profitable to
1420 insert the calls for pieces of the vector, in order to be able
1421 to vectorize other operations in the loop. */
1422 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1423 if (fndecl == NULL_TREE)
1424 {
1425 if (vect_print_dump_info (REPORT_DETAILS))
1426 fprintf (vect_dump, "function is not vectorizable.");
1427
1428 return false;
1429 }
1430
1431 gcc_assert (!gimple_vuse (stmt));
1432
1433 if (modifier == NARROW)
1434 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1435 else
1436 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
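/* E.g. with a vectorization factor of 8 and 4 elements per input vector,
   ncopies == 2, so two copies of the vectorized call are generated for
   each scalar call.  */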
1437
1438 /* Sanity check: make sure that at least one copy of the vectorized stmt
1439 needs to be generated. */
1440 gcc_assert (ncopies >= 1);
1441
1442 if (!vec_stmt) /* transformation not required. */
1443 {
1444 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1445 if (vect_print_dump_info (REPORT_DETAILS))
1446 fprintf (vect_dump, "=== vectorizable_call ===");
1447 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1448 return true;
1449 }
1450
1451 /** Transform. **/
1452
1453 if (vect_print_dump_info (REPORT_DETAILS))
1454 fprintf (vect_dump, "transform operation.");
1455
1456 /* Handle def. */
1457 scalar_dest = gimple_call_lhs (stmt);
1458 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1459
1460 prev_stmt_info = NULL;
1461 switch (modifier)
1462 {
1463 case NONE:
1464 for (j = 0; j < ncopies; ++j)
1465 {
1466 /* Build argument list for the vectorized call. */
1467 if (j == 0)
1468 vargs = VEC_alloc (tree, heap, nargs);
1469 else
1470 VEC_truncate (tree, vargs, 0);
1471
1472 for (i = 0; i < nargs; i++)
1473 {
1474 op = gimple_call_arg (stmt, i);
1475 if (j == 0)
1476 vec_oprnd0
1477 = vect_get_vec_def_for_operand (op, stmt, NULL);
1478 else
1479 {
1480 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1481 vec_oprnd0
1482 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1483 }
1484
1485 VEC_quick_push (tree, vargs, vec_oprnd0);
1486 }
1487
1488 new_stmt = gimple_build_call_vec (fndecl, vargs);
1489 new_temp = make_ssa_name (vec_dest, new_stmt);
1490 gimple_call_set_lhs (new_stmt, new_temp);
1491
1492 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1493 mark_symbols_for_renaming (new_stmt);
1494
1495 if (j == 0)
1496 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1497 else
1498 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1499
1500 prev_stmt_info = vinfo_for_stmt (new_stmt);
1501 }
1502
1503 break;
1504
1505 case NARROW:
1506 for (j = 0; j < ncopies; ++j)
1507 {
1508 /* Build argument list for the vectorized call. */
1509 if (j == 0)
1510 vargs = VEC_alloc (tree, heap, nargs * 2);
1511 else
1512 VEC_truncate (tree, vargs, 0);
1513
1514 for (i = 0; i < nargs; i++)
1515 {
1516 op = gimple_call_arg (stmt, i);
1517 if (j == 0)
1518 {
1519 vec_oprnd0
1520 = vect_get_vec_def_for_operand (op, stmt, NULL);
1521 vec_oprnd1
1522 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1523 }
1524 else
1525 {
1526 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1527 vec_oprnd0
1528 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1529 vec_oprnd1
1530 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1531 }
1532
1533 VEC_quick_push (tree, vargs, vec_oprnd0);
1534 VEC_quick_push (tree, vargs, vec_oprnd1);
1535 }
1536
1537 new_stmt = gimple_build_call_vec (fndecl, vargs);
1538 new_temp = make_ssa_name (vec_dest, new_stmt);
1539 gimple_call_set_lhs (new_stmt, new_temp);
1540
1541 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1542 mark_symbols_for_renaming (new_stmt);
1543
1544 if (j == 0)
1545 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1546 else
1547 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1548
1549 prev_stmt_info = vinfo_for_stmt (new_stmt);
1550 }
1551
1552 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1553
1554 break;
1555
1556 case WIDEN:
1557 /* No current target implements this case. */
1558 return false;
1559 }
1560
1561 VEC_free (tree, heap, vargs);
1562
1563 /* Update the exception handling table with the vector stmt if necessary. */
1564 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1565 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1566
1567 /* The call in STMT might prevent it from being removed in dce.
1568 We however cannot remove it here, due to the way the ssa name
1569 it defines is mapped to the new definition. So just replace
1570 the rhs of the statement with something harmless.  */
1571
1572 type = TREE_TYPE (scalar_dest);
1573 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1574 fold_convert (type, integer_zero_node));
1575 set_vinfo_for_stmt (new_stmt, stmt_info);
1576 set_vinfo_for_stmt (stmt, NULL);
1577 STMT_VINFO_STMT (stmt_info) = new_stmt;
1578 gsi_replace (gsi, new_stmt, false);
1579 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1580
1581 return true;
1582 }
1583
1584
1585 /* Function vect_gen_widened_results_half
1586
1587 Create a vector stmt whose code is CODE, whose number of operands is given
1588 by OP_TYPE, and whose result variable is VEC_DEST; its arguments are
1589 VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at GSI.
1590 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1591 needs to be created (DECL is a function-decl of a target-builtin).
1592 STMT is the original scalar stmt that we are vectorizing. */
1593
1594 static gimple
1595 vect_gen_widened_results_half (enum tree_code code,
1596 tree decl,
1597 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1598 tree vec_dest, gimple_stmt_iterator *gsi,
1599 gimple stmt)
1600 {
1601 gimple new_stmt;
1602 tree new_temp;
1603
1604 /* Generate half of the widened result: */
1605 if (code == CALL_EXPR)
1606 {
1607 /* Target specific support */
1608 if (op_type == binary_op)
1609 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1610 else
1611 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1612 new_temp = make_ssa_name (vec_dest, new_stmt);
1613 gimple_call_set_lhs (new_stmt, new_temp);
1614 }
1615 else
1616 {
1617 /* Generic support */
1618 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1619 if (op_type != binary_op)
1620 vec_oprnd1 = NULL;
1621 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1622 vec_oprnd1);
1623 new_temp = make_ssa_name (vec_dest, new_stmt);
1624 gimple_assign_set_lhs (new_stmt, new_temp);
1625 }
1626 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1627
1628 return new_stmt;
1629 }
1630
1631
1632 /* Check if STMT performs a conversion operation, that can be vectorized.
1633 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1634 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1635 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1636
1637 static bool
1638 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1639 gimple *vec_stmt, slp_tree slp_node)
1640 {
1641 tree vec_dest;
1642 tree scalar_dest;
1643 tree op0;
1644 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1645 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1646 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1647 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1648 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1649 tree new_temp;
1650 tree def;
1651 gimple def_stmt;
1652 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1653 gimple new_stmt = NULL;
1654 stmt_vec_info prev_stmt_info;
1655 int nunits_in;
1656 int nunits_out;
1657 tree vectype_out, vectype_in;
1658 int ncopies, j;
1659 tree rhs_type;
1660 tree builtin_decl;
1661 enum { NARROW, NONE, WIDEN } modifier;
1662 int i;
1663 VEC(tree,heap) *vec_oprnds0 = NULL;
1664 tree vop0;
1665 VEC(tree,heap) *dummy = NULL;
1666 int dummy_int;
1667
1668 /* Is STMT a vectorizable conversion? */
1669
1670 /* FORNOW: unsupported in basic block SLP. */
1671 gcc_assert (loop_vinfo);
1672
1673 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1674 return false;
1675
1676 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1677 return false;
1678
1679 if (!is_gimple_assign (stmt))
1680 return false;
1681
1682 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1683 return false;
1684
1685 code = gimple_assign_rhs_code (stmt);
1686 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1687 return false;
1688
1689 /* Check types of lhs and rhs. */
1690 scalar_dest = gimple_assign_lhs (stmt);
1691 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1692
1693 op0 = gimple_assign_rhs1 (stmt);
1694 rhs_type = TREE_TYPE (op0);
1695 /* Check the operands of the operation. */
1696 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1697 &def_stmt, &def, &dt[0], &vectype_in))
1698 {
1699 if (vect_print_dump_info (REPORT_DETAILS))
1700 fprintf (vect_dump, "use not simple.");
1701 return false;
1702 }
1703 /* If op0 is an external or constant def, use a vector type of
1704 the same size as the output vector type. */
1705 if (!vectype_in)
1706 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1707 if (vec_stmt)
1708 gcc_assert (vectype_in);
1709 if (!vectype_in)
1710 {
1711 if (vect_print_dump_info (REPORT_DETAILS))
1712 {
1713 fprintf (vect_dump, "no vectype for scalar type ");
1714 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1715 }
1716
1717 return false;
1718 }
1719
1720 /* FORNOW */
1721 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1722 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1723 if (nunits_in == nunits_out / 2)
1724 modifier = NARROW;
1725 else if (nunits_out == nunits_in)
1726 modifier = NONE;
1727 else if (nunits_out == nunits_in / 2)
1728 modifier = WIDEN;
1729 else
1730 return false;
1731
1732 if (modifier == NARROW)
1733 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1734 else
1735 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1736
1737 /* Multiple types in SLP are handled by creating the appropriate number of
1738 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1739 case of SLP. */
1740 if (slp_node)
1741 ncopies = 1;
1742
1743 /* Sanity check: make sure that at least one copy of the vectorized stmt
1744 needs to be generated. */
1745 gcc_assert (ncopies >= 1);
1746
1747 /* Supportable by target? */
1748 if ((modifier == NONE
1749 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1750 || (modifier == WIDEN
1751 && !supportable_widening_operation (code, stmt,
1752 vectype_out, vectype_in,
1753 &decl1, &decl2,
1754 &code1, &code2,
1755 &dummy_int, &dummy))
1756 || (modifier == NARROW
1757 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1758 &code1, &dummy_int, &dummy)))
1759 {
1760 if (vect_print_dump_info (REPORT_DETAILS))
1761 fprintf (vect_dump, "conversion not supported by target.");
1762 return false;
1763 }
1764
1765 if (modifier != NONE)
1766 {
1767 /* FORNOW: SLP not supported. */
1768 if (STMT_SLP_TYPE (stmt_info))
1769 return false;
1770 }
1771
1772 if (!vec_stmt) /* transformation not required. */
1773 {
1774 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1775 return true;
1776 }
1777
1778 /** Transform. **/
1779 if (vect_print_dump_info (REPORT_DETAILS))
1780 fprintf (vect_dump, "transform conversion.");
1781
1782 /* Handle def. */
1783 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1784
1785 if (modifier == NONE && !slp_node)
1786 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1787
1788 prev_stmt_info = NULL;
1789 switch (modifier)
1790 {
1791 case NONE:
1792 for (j = 0; j < ncopies; j++)
1793 {
1794 if (j == 0)
1795 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1796 else
1797 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1798
1799 builtin_decl =
1800 targetm.vectorize.builtin_conversion (code,
1801 vectype_out, vectype_in);
1802 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
1803 {
1804 /* Arguments are ready. Create the new vector stmt. */
1805 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1806 new_temp = make_ssa_name (vec_dest, new_stmt);
1807 gimple_call_set_lhs (new_stmt, new_temp);
1808 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1809 if (slp_node)
1810 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1811 }
1812
1813 if (j == 0)
1814 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1815 else
1816 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1817 prev_stmt_info = vinfo_for_stmt (new_stmt);
1818 }
1819 break;
1820
1821 case WIDEN:
1822 /* In case the vectorization factor (VF) is bigger than the number
1823 of elements that we can fit in a vectype (nunits), we have to
1824 generate more than one vector stmt - i.e - we need to "unroll"
1825 the vector stmt by a factor VF/nunits. */
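/* E.g. (128-bit vectors assumed) a FLOAT_EXPR widening V4SI to V2DF turns
   each input vector into two result vectors: one half-conversion uses
   CODE1/DECL1 and the other CODE2/DECL2 (which half comes first is a
   target detail), and the pair is chained through STMT_VINFO_RELATED_STMT
   below.  */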
1826 for (j = 0; j < ncopies; j++)
1827 {
1828 if (j == 0)
1829 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1830 else
1831 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1832
1833 /* Generate first half of the widened result: */
1834 new_stmt
1835 = vect_gen_widened_results_half (code1, decl1,
1836 vec_oprnd0, vec_oprnd1,
1837 unary_op, vec_dest, gsi, stmt);
1838 if (j == 0)
1839 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1840 else
1841 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1842 prev_stmt_info = vinfo_for_stmt (new_stmt);
1843
1844 /* Generate second half of the widened result: */
1845 new_stmt
1846 = vect_gen_widened_results_half (code2, decl2,
1847 vec_oprnd0, vec_oprnd1,
1848 unary_op, vec_dest, gsi, stmt);
1849 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1850 prev_stmt_info = vinfo_for_stmt (new_stmt);
1851 }
1852 break;
1853
1854 case NARROW:
1855 /* In case the vectorization factor (VF) is bigger than the number
1856 of elements that we can fit in a vectype (nunits), we have to
1857 generate more than one vector stmt - i.e - we need to "unroll"
1858 the vector stmt by a factor VF/nunits. */
1859 for (j = 0; j < ncopies; j++)
1860 {
1861 /* Handle uses. */
1862 if (j == 0)
1863 {
1864 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1865 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1866 }
1867 else
1868 {
1869 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1870 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1871 }
1872
1873 /* Arguments are ready. Create the new vector stmt. */
1874 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1875 vec_oprnd1);
1876 new_temp = make_ssa_name (vec_dest, new_stmt);
1877 gimple_assign_set_lhs (new_stmt, new_temp);
1878 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1879
1880 if (j == 0)
1881 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1882 else
1883 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1884
1885 prev_stmt_info = vinfo_for_stmt (new_stmt);
1886 }
1887
1888 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1889 }
1890
1891 if (vec_oprnds0)
1892 VEC_free (tree, heap, vec_oprnds0);
1893
1894 return true;
1895 }
1896
1897
1898 /* Function vectorizable_assignment.
1899
1900 Check if STMT performs an assignment (copy) that can be vectorized.
1901 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1902 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1903 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1904
1905 static bool
1906 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1907 gimple *vec_stmt, slp_tree slp_node)
1908 {
1909 tree vec_dest;
1910 tree scalar_dest;
1911 tree op;
1912 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1913 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1914 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1915 tree new_temp;
1916 tree def;
1917 gimple def_stmt;
1918 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1919 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1920 int ncopies;
1921 int i, j;
1922 VEC(tree,heap) *vec_oprnds = NULL;
1923 tree vop;
1924 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1925 gimple new_stmt = NULL;
1926 stmt_vec_info prev_stmt_info = NULL;
1927 enum tree_code code;
1928 tree vectype_in;
1929
1930 /* Multiple types in SLP are handled by creating the appropriate number of
1931 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1932 case of SLP. */
1933 if (slp_node)
1934 ncopies = 1;
1935 else
1936 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1937
1938 gcc_assert (ncopies >= 1);
1939
1940 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1941 return false;
1942
1943 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1944 return false;
1945
1946 /* Is vectorizable assignment? */
1947 if (!is_gimple_assign (stmt))
1948 return false;
1949
1950 scalar_dest = gimple_assign_lhs (stmt);
1951 if (TREE_CODE (scalar_dest) != SSA_NAME)
1952 return false;
1953
1954 code = gimple_assign_rhs_code (stmt);
1955 if (gimple_assign_single_p (stmt)
1956 || code == PAREN_EXPR
1957 || CONVERT_EXPR_CODE_P (code))
1958 op = gimple_assign_rhs1 (stmt);
1959 else
1960 return false;
1961
1962 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
1963 &def_stmt, &def, &dt[0], &vectype_in))
1964 {
1965 if (vect_print_dump_info (REPORT_DETAILS))
1966 fprintf (vect_dump, "use not simple.");
1967 return false;
1968 }
1969
1970 /* We can handle NOP_EXPR conversions that do not change the number
1971 of elements or the vector size. */
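/* E.g. a conversion between int and unsigned int vectors keeps both the
   number of elements and the vector size, so it is handled here as a plain
   VIEW_CONVERT_EXPR copy; an int to short conversion changes the number of
   elements per vector and is left to the type demotion/promotion code
   instead.  */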
1972 if (CONVERT_EXPR_CODE_P (code)
1973 && (!vectype_in
1974 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
1975 || (GET_MODE_SIZE (TYPE_MODE (vectype))
1976 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
1977 return false;
1978
1979 if (!vec_stmt) /* transformation not required. */
1980 {
1981 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1982 if (vect_print_dump_info (REPORT_DETAILS))
1983 fprintf (vect_dump, "=== vectorizable_assignment ===");
1984 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1985 return true;
1986 }
1987
1988 /** Transform. **/
1989 if (vect_print_dump_info (REPORT_DETAILS))
1990 fprintf (vect_dump, "transform assignment.");
1991
1992 /* Handle def. */
1993 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1994
1995 /* Handle use. */
1996 for (j = 0; j < ncopies; j++)
1997 {
1998 /* Handle uses. */
1999 if (j == 0)
2000 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2001 else
2002 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2003
2004 /* Arguments are ready. Create the new vector stmt. */
2005 FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
2006 {
2007 if (CONVERT_EXPR_CODE_P (code))
2008 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2009 new_stmt = gimple_build_assign (vec_dest, vop);
2010 new_temp = make_ssa_name (vec_dest, new_stmt);
2011 gimple_assign_set_lhs (new_stmt, new_temp);
2012 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2013 if (slp_node)
2014 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2015 }
2016
2017 if (slp_node)
2018 continue;
2019
2020 if (j == 0)
2021 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2022 else
2023 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2024
2025 prev_stmt_info = vinfo_for_stmt (new_stmt);
2026 }
2027
2028 VEC_free (tree, heap, vec_oprnds);
2029 return true;
2030 }
2031
2032
2033 /* Function vectorizable_shift.
2034
2035 Check if STMT performs a shift operation that can be vectorized.
2036 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2037 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2038 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2039
2040 static bool
2041 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
2042 gimple *vec_stmt, slp_tree slp_node)
2043 {
2044 tree vec_dest;
2045 tree scalar_dest;
2046 tree op0, op1 = NULL;
2047 tree vec_oprnd1 = NULL_TREE;
2048 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2049 tree vectype;
2050 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2051 enum tree_code code;
2052 enum machine_mode vec_mode;
2053 tree new_temp;
2054 optab optab;
2055 int icode;
2056 enum machine_mode optab_op2_mode;
2057 tree def;
2058 gimple def_stmt;
2059 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2060 gimple new_stmt = NULL;
2061 stmt_vec_info prev_stmt_info;
2062 int nunits_in;
2063 int nunits_out;
2064 tree vectype_out;
2065 int ncopies;
2066 int j, i;
2067 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2068 tree vop0, vop1;
2069 unsigned int k;
2070 bool scalar_shift_arg = false;
2071 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2072 int vf;
2073
2074 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2075 return false;
2076
2077 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2078 return false;
2079
2080 /* Is STMT a vectorizable binary/unary operation? */
2081 if (!is_gimple_assign (stmt))
2082 return false;
2083
2084 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2085 return false;
2086
2087 code = gimple_assign_rhs_code (stmt);
2088
2089 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2090 || code == RROTATE_EXPR))
2091 return false;
2092
2093 scalar_dest = gimple_assign_lhs (stmt);
2094 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2095
2096 op0 = gimple_assign_rhs1 (stmt);
2097 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2098 &def_stmt, &def, &dt[0], &vectype))
2099 {
2100 if (vect_print_dump_info (REPORT_DETAILS))
2101 fprintf (vect_dump, "use not simple.");
2102 return false;
2103 }
2104 /* If op0 is an external or constant def use a vector type with
2105 the same size as the output vector type. */
2106 if (!vectype)
2107 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2108 if (vec_stmt)
2109 gcc_assert (vectype);
2110 if (!vectype)
2111 {
2112 if (vect_print_dump_info (REPORT_DETAILS))
2113 {
2114 fprintf (vect_dump, "no vectype for scalar type ");
2115 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2116 }
2117
2118 return false;
2119 }
2120
2121 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2122 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2123 if (nunits_out != nunits_in)
2124 return false;
2125
2126 op1 = gimple_assign_rhs2 (stmt);
2127 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
2128 {
2129 if (vect_print_dump_info (REPORT_DETAILS))
2130 fprintf (vect_dump, "use not simple.");
2131 return false;
2132 }
2133
2134 if (loop_vinfo)
2135 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2136 else
2137 vf = 1;
2138
2139 /* Multiple types in SLP are handled by creating the appropriate number of
2140 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2141 case of SLP. */
2142 if (slp_node)
2143 ncopies = 1;
2144 else
2145 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2146
2147 gcc_assert (ncopies >= 1);
2148
2149 /* Determine whether the shift amount is a vector or a scalar. If the
2150 shift/rotate amount is a vector, use the vector/vector shift optabs. */
2151
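/* As an illustration:

     for (i = 0; i < n; i++)          for (i = 0; i < n; i++)
       a[i] = b[i] << 3;                a[i] = b[i] << c[i];

   On the left the shift amount is a constant, so the vector/scalar form
   (one count applied to all elements) is tried first; on the right the
   amount varies per element, so only the vector/vector form can be
   used.  */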
2152 /* Vector shifted by vector. */
2153 if (dt[1] == vect_internal_def)
2154 {
2155 optab = optab_for_tree_code (code, vectype, optab_vector);
2156 if (vect_print_dump_info (REPORT_DETAILS))
2157 fprintf (vect_dump, "vector/vector shift/rotate found.");
2158 }
2159 /* See if the machine has a vector shifted by scalar insn and if not
2160 then see if it has a vector shifted by vector insn. */
2161 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2162 {
2163 optab = optab_for_tree_code (code, vectype, optab_scalar);
2164 if (optab
2165 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2166 {
2167 scalar_shift_arg = true;
2168 if (vect_print_dump_info (REPORT_DETAILS))
2169 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2170 }
2171 else
2172 {
2173 optab = optab_for_tree_code (code, vectype, optab_vector);
2174 if (optab
2175 && (optab_handler (optab, TYPE_MODE (vectype))
2176 != CODE_FOR_nothing))
2177 {
2178 if (vect_print_dump_info (REPORT_DETAILS))
2179 fprintf (vect_dump, "vector/vector shift/rotate found.");
2180
2181 /* Unlike the other binary operators, shifts/rotates have
2182 the rhs being int, instead of the same type as the lhs,
2183 so make sure the scalar is the right type if we are
2184 dealing with vectors of short/char. */
2185 if (dt[1] == vect_constant_def)
2186 op1 = fold_convert (TREE_TYPE (vectype), op1);
2187 }
2188 }
2189 }
2190 else
2191 {
2192 if (vect_print_dump_info (REPORT_DETAILS))
2193 fprintf (vect_dump, "operand mode requires invariant argument.");
2194 return false;
2195 }
2196
2197 /* Supportable by target? */
2198 if (!optab)
2199 {
2200 if (vect_print_dump_info (REPORT_DETAILS))
2201 fprintf (vect_dump, "no optab.");
2202 return false;
2203 }
2204 vec_mode = TYPE_MODE (vectype);
2205 icode = (int) optab_handler (optab, vec_mode);
2206 if (icode == CODE_FOR_nothing)
2207 {
2208 if (vect_print_dump_info (REPORT_DETAILS))
2209 fprintf (vect_dump, "op not supported by target.");
2210 /* Check only during analysis. */
2211 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2212 || (vf < vect_min_worthwhile_factor (code)
2213 && !vec_stmt))
2214 return false;
2215 if (vect_print_dump_info (REPORT_DETAILS))
2216 fprintf (vect_dump, "proceeding using word mode.");
2217 }
2218
2219 /* Worthwhile without SIMD support? Check only during analysis. */
2220 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2221 && vf < vect_min_worthwhile_factor (code)
2222 && !vec_stmt)
2223 {
2224 if (vect_print_dump_info (REPORT_DETAILS))
2225 fprintf (vect_dump, "not worthwhile without SIMD support.");
2226 return false;
2227 }
2228
2229 if (!vec_stmt) /* transformation not required. */
2230 {
2231 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
2232 if (vect_print_dump_info (REPORT_DETAILS))
2233 fprintf (vect_dump, "=== vectorizable_shift ===");
2234 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2235 return true;
2236 }
2237
2238 /** Transform. **/
2239
2240 if (vect_print_dump_info (REPORT_DETAILS))
2241 fprintf (vect_dump, "transform binary/unary operation.");
2242
2243 /* Handle def. */
2244 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2245
2246 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2247 created in the previous stages of the recursion, so no allocation is
2248 needed, except for the case of shift with scalar shift argument. In that
2249 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2250 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2251 In case of loop-based vectorization we allocate VECs of size 1. We
2252 allocate VEC_OPRNDS1 only in case of binary operation. */
2253 if (!slp_node)
2254 {
2255 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2256 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2257 }
2258 else if (scalar_shift_arg)
2259 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2260
2261 prev_stmt_info = NULL;
2262 for (j = 0; j < ncopies; j++)
2263 {
2264 /* Handle uses. */
2265 if (j == 0)
2266 {
2267 if (scalar_shift_arg)
2268 {
2269 /* Vector shl and shr insn patterns can be defined with scalar
2270 operand 2 (shift operand). In this case, use constant or loop
2271 invariant op1 directly, without extending it to vector mode
2272 first. */
2273 optab_op2_mode = insn_data[icode].operand[2].mode;
2274 if (!VECTOR_MODE_P (optab_op2_mode))
2275 {
2276 if (vect_print_dump_info (REPORT_DETAILS))
2277 fprintf (vect_dump, "operand 1 using scalar mode.");
2278 vec_oprnd1 = op1;
2279 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2280 if (slp_node)
2281 {
2282 /* Store vec_oprnd1 for every vector stmt to be created
2283 for SLP_NODE. We check during the analysis that all
2284 the shift arguments are the same.
2285 TODO: Allow different constants for different vector
2286 stmts generated for an SLP instance. */
2287 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2288 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2289 }
2290 }
2291 }
2292
2293 /* vec_oprnd1 is available if operand 1 should be of a scalar type
2294 (a special case for certain kinds of vector shifts); otherwise,
2295 operand 1 should be of a vector type (the usual case). */
2296 if (vec_oprnd1)
2297 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2298 slp_node);
2299 else
2300 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2301 slp_node);
2302 }
2303 else
2304 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2305
2306 /* Arguments are ready. Create the new vector stmt. */
2307 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2308 {
2309 vop1 = VEC_index (tree, vec_oprnds1, i);
2310 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2311 new_temp = make_ssa_name (vec_dest, new_stmt);
2312 gimple_assign_set_lhs (new_stmt, new_temp);
2313 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2314 if (slp_node)
2315 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2316 }
2317
2318 if (slp_node)
2319 continue;
2320
2321 if (j == 0)
2322 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2323 else
2324 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2325 prev_stmt_info = vinfo_for_stmt (new_stmt);
2326 }
2327
2328 VEC_free (tree, heap, vec_oprnds0);
2329 VEC_free (tree, heap, vec_oprnds1);
2330
2331 return true;
2332 }
2333
2334
2335 /* Function vectorizable_operation.
2336
2337 Check if STMT performs a binary or unary operation that can be vectorized.
2338 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2339 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2340 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2341
2342 static bool
2343 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2344 gimple *vec_stmt, slp_tree slp_node)
2345 {
2346 tree vec_dest;
2347 tree scalar_dest;
2348 tree op0, op1 = NULL;
2349 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2350 tree vectype;
2351 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2352 enum tree_code code;
2353 enum machine_mode vec_mode;
2354 tree new_temp;
2355 int op_type;
2356 optab optab;
2357 int icode;
2358 tree def;
2359 gimple def_stmt;
2360 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2361 gimple new_stmt = NULL;
2362 stmt_vec_info prev_stmt_info;
2363 int nunits_in;
2364 int nunits_out;
2365 tree vectype_out;
2366 int ncopies;
2367 int j, i;
2368 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2369 tree vop0, vop1;
2370 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2371 int vf;
2372
2373 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2374 return false;
2375
2376 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2377 return false;
2378
2379 /* Is STMT a vectorizable binary/unary operation? */
2380 if (!is_gimple_assign (stmt))
2381 return false;
2382
2383 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2384 return false;
2385
2386 code = gimple_assign_rhs_code (stmt);
2387
2388 /* For pointer addition, we should use the normal plus for
2389 the vector addition. */
2390 if (code == POINTER_PLUS_EXPR)
2391 code = PLUS_EXPR;
2392
2393 /* Support only unary or binary operations. */
2394 op_type = TREE_CODE_LENGTH (code);
2395 if (op_type != unary_op && op_type != binary_op)
2396 {
2397 if (vect_print_dump_info (REPORT_DETAILS))
2398 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
2399 return false;
2400 }
2401
2402 scalar_dest = gimple_assign_lhs (stmt);
2403 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2404
2405 op0 = gimple_assign_rhs1 (stmt);
2406 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2407 &def_stmt, &def, &dt[0], &vectype))
2408 {
2409 if (vect_print_dump_info (REPORT_DETAILS))
2410 fprintf (vect_dump, "use not simple.");
2411 return false;
2412 }
2413 /* If op0 is an external or constant def use a vector type with
2414 the same size as the output vector type. */
2415 if (!vectype)
2416 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2417 if (vec_stmt)
2418 gcc_assert (vectype);
2419 if (!vectype)
2420 {
2421 if (vect_print_dump_info (REPORT_DETAILS))
2422 {
2423 fprintf (vect_dump, "no vectype for scalar type ");
2424 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2425 }
2426
2427 return false;
2428 }
2429
2430 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2431 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2432 if (nunits_out != nunits_in)
2433 return false;
2434
2435 if (op_type == binary_op)
2436 {
2437 op1 = gimple_assign_rhs2 (stmt);
2438 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2439 &dt[1]))
2440 {
2441 if (vect_print_dump_info (REPORT_DETAILS))
2442 fprintf (vect_dump, "use not simple.");
2443 return false;
2444 }
2445 }
2446
2447 if (loop_vinfo)
2448 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2449 else
2450 vf = 1;
2451
2452 /* Multiple types in SLP are handled by creating the appropriate number of
2453 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2454 case of SLP. */
2455 if (slp_node)
2456 ncopies = 1;
2457 else
2458 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2459
2460 gcc_assert (ncopies >= 1);
2461
2462 /* Shifts are handled in vectorizable_shift (). */
2463 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2464 || code == RROTATE_EXPR)
2465 return false;
2466
2467 optab = optab_for_tree_code (code, vectype, optab_default);
2468
2469 /* Supportable by target? */
2470 if (!optab)
2471 {
2472 if (vect_print_dump_info (REPORT_DETAILS))
2473 fprintf (vect_dump, "no optab.");
2474 return false;
2475 }
2476 vec_mode = TYPE_MODE (vectype);
2477 icode = (int) optab_handler (optab, vec_mode);
2478 if (icode == CODE_FOR_nothing)
2479 {
2480 if (vect_print_dump_info (REPORT_DETAILS))
2481 fprintf (vect_dump, "op not supported by target.");
2482 /* Check only during analysis. */
2483 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2484 || (vf < vect_min_worthwhile_factor (code)
2485 && !vec_stmt))
2486 return false;
2487 if (vect_print_dump_info (REPORT_DETAILS))
2488 fprintf (vect_dump, "proceeding using word mode.");
2489 }
2490
2491 /* Worthwhile without SIMD support? Check only during analysis. */
2492 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2493 && vf < vect_min_worthwhile_factor (code)
2494 && !vec_stmt)
2495 {
2496 if (vect_print_dump_info (REPORT_DETAILS))
2497 fprintf (vect_dump, "not worthwhile without SIMD support.");
2498 return false;
2499 }
2500
2501 if (!vec_stmt) /* transformation not required. */
2502 {
2503 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2504 if (vect_print_dump_info (REPORT_DETAILS))
2505 fprintf (vect_dump, "=== vectorizable_operation ===");
2506 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2507 return true;
2508 }
2509
2510 /** Transform. **/
2511
2512 if (vect_print_dump_info (REPORT_DETAILS))
2513 fprintf (vect_dump, "transform binary/unary operation.");
2514
2515 /* Handle def. */
2516 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2517
2518 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2519 created in the previous stages of the recursion, so no allocation is
2520 needed. (Shifts, which would need the scalar shift operand replicated
2521 for every vector stmt of the SLP group, are rejected above and are
2522 handled in vectorizable_shift instead.)
2523 In case of loop-based vectorization we allocate VECs of size 1. We
2524 allocate VEC_OPRNDS1 only in case of binary operation. */
2525 if (!slp_node)
2526 {
2527 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2528 if (op_type == binary_op)
2529 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2530 }
2531
2532 /* In case the vectorization factor (VF) is bigger than the number
2533 of elements that we can fit in a vectype (nunits), we have to generate
2534 more than one vector stmt - i.e - we need to "unroll" the
2535 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2536 from one copy of the vector stmt to the next, in the field
2537 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2538 stages to find the correct vector defs to be used when vectorizing
2539 stmts that use the defs of the current stmt. The example below
2540 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
2541 we need to create 4 vectorized stmts):
2542
2543 before vectorization:
2544 RELATED_STMT VEC_STMT
2545 S1: x = memref - -
2546 S2: z = x + 1 - -
2547
2548 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2549 there):
2550 RELATED_STMT VEC_STMT
2551 VS1_0: vx0 = memref0 VS1_1 -
2552 VS1_1: vx1 = memref1 VS1_2 -
2553 VS1_2: vx2 = memref2 VS1_3 -
2554 VS1_3: vx3 = memref3 - -
2555 S1: x = load - VS1_0
2556 S2: z = x + 1 - -
2557
2558 step2: vectorize stmt S2 (done here):
2559 To vectorize stmt S2 we first need to find the relevant vector
2560 def for the first operand 'x'. This is, as usual, obtained from
2561 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2562 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2563 relevant vector def 'vx0'. Having found 'vx0' we can generate
2564 the vector stmt VS2_0, and as usual, record it in the
2565 STMT_VINFO_VEC_STMT of stmt S2.
2566 When creating the second copy (VS2_1), we obtain the relevant vector
2567 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2568 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2569 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2570 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2571 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2572 chain of stmts and pointers:
2573 RELATED_STMT VEC_STMT
2574 VS1_0: vx0 = memref0 VS1_1 -
2575 VS1_1: vx1 = memref1 VS1_2 -
2576 VS1_2: vx2 = memref2 VS1_3 -
2577 VS1_3: vx3 = memref3 - -
2578 S1: x = load - VS1_0
2579 VS2_0: vz0 = vx0 + v1 VS2_1 -
2580 VS2_1: vz1 = vx1 + v1 VS2_2 -
2581 VS2_2: vz2 = vx2 + v1 VS2_3 -
2582 VS2_3: vz3 = vx3 + v1 - -
2583 S2: z = x + 1 - VS2_0 */
2584
2585 prev_stmt_info = NULL;
2586 for (j = 0; j < ncopies; j++)
2587 {
2588 /* Handle uses. */
2589 if (j == 0)
2590 {
2591 if (op_type == binary_op)
2592 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2593 slp_node);
2594 else
2595 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2596 slp_node);
2597 }
2598 else
2599 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2600
2601 /* Arguments are ready. Create the new vector stmt. */
2602 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2603 {
2604 vop1 = ((op_type == binary_op)
2605 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2606 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2607 new_temp = make_ssa_name (vec_dest, new_stmt);
2608 gimple_assign_set_lhs (new_stmt, new_temp);
2609 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2610 if (slp_node)
2611 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2612 }
2613
2614 if (slp_node)
2615 continue;
2616
2617 if (j == 0)
2618 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2619 else
2620 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2621 prev_stmt_info = vinfo_for_stmt (new_stmt);
2622 }
2623
2624 VEC_free (tree, heap, vec_oprnds0);
2625 if (vec_oprnds1)
2626 VEC_free (tree, heap, vec_oprnds1);
2627
2628 return true;
2629 }
2630
2631
2632 /* Get vectorized definitions for loop-based vectorization. For the first
2633 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2634 scalar operand), and for the rest we get a copy with
2635 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2636 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2637 The vectors are collected into VEC_OPRNDS. */
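/* E.g. for a two-step demotion (one intermediate type) the caller requests
   one level of recursion, so four vector defs are collected per copy: two
   pushed at this level and two more by the recursive call - exactly the
   number of source vectors that get packed into one final destination
   vector.  */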
2638
2639 static void
2640 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2641 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2642 {
2643 tree vec_oprnd;
2644
2645 /* Get first vector operand. */
2646 /* All the vector operands except the very first one (which is the scalar
2647 OPRND) are stmt copies. */
2648 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2649 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2650 else
2651 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2652
2653 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2654
2655 /* Get second vector operand. */
2656 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2657 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2658
2659 *oprnd = vec_oprnd;
2660
2661 /* For conversion in multiple steps, continue to get operands
2662 recursively. */
2663 if (multi_step_cvt)
2664 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2665 }
2666
2667
2668 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2669 For multi-step conversions store the resulting vectors and call the function
2670 recursively. */
2671
2672 static void
2673 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2674 int multi_step_cvt, gimple stmt,
2675 VEC (tree, heap) *vec_dsts,
2676 gimple_stmt_iterator *gsi,
2677 slp_tree slp_node, enum tree_code code,
2678 stmt_vec_info *prev_stmt_info)
2679 {
2680 unsigned int i;
2681 tree vop0, vop1, new_tmp, vec_dest;
2682 gimple new_stmt;
2683 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2684
2685 vec_dest = VEC_pop (tree, vec_dsts);
2686
2687 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2688 {
2689 /* Create demotion operation. */
2690 vop0 = VEC_index (tree, *vec_oprnds, i);
2691 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2692 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2693 new_tmp = make_ssa_name (vec_dest, new_stmt);
2694 gimple_assign_set_lhs (new_stmt, new_tmp);
2695 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2696
2697 if (multi_step_cvt)
2698 /* Store the resulting vector for next recursive call. */
2699 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2700 else
2701 {
2702 /* This is the last step of the conversion sequence. Store the
2703 vectors in SLP_NODE or in the vector info of the scalar statement
2704 (or in the STMT_VINFO_RELATED_STMT chain). */
2705 if (slp_node)
2706 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2707 else
2708 {
2709 if (!*prev_stmt_info)
2710 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2711 else
2712 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2713
2714 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2715 }
2716 }
2717 }
2718
2719 /* For multi-step demotion operations we first generate demotion operations
2720 from the source type to the intermediate types, and then combine the
2721 results (stored in VEC_OPRNDS) in a demotion operation to the destination
2722 type. */
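/* E.g. demoting int to char (128-bit vectors assumed) goes through short:
   the first pass packs pairs of V4SI operands into V8HI vectors, and the
   recursive call then packs pairs of those V8HI vectors into the final
   V16QI result.  */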
2723 if (multi_step_cvt)
2724 {
2725 /* At each level of recursion we have half of the operands we had at the
2726 previous level. */
2727 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2728 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2729 stmt, vec_dsts, gsi, slp_node,
2730 code, prev_stmt_info);
2731 }
2732 }
2733
2734
2735 /* Function vectorizable_type_demotion
2736
2737 Check if STMT performs a binary or unary operation that involves
2738 type demotion, and if it can be vectorized.
2739 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2740 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2741 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2742
2743 static bool
2744 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2745 gimple *vec_stmt, slp_tree slp_node)
2746 {
2747 tree vec_dest;
2748 tree scalar_dest;
2749 tree op0;
2750 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2751 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2752 enum tree_code code, code1 = ERROR_MARK;
2753 tree def;
2754 gimple def_stmt;
2755 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2756 stmt_vec_info prev_stmt_info;
2757 int nunits_in;
2758 int nunits_out;
2759 tree vectype_out;
2760 int ncopies;
2761 int j, i;
2762 tree vectype_in;
2763 int multi_step_cvt = 0;
2764 VEC (tree, heap) *vec_oprnds0 = NULL;
2765 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2766 tree last_oprnd, intermediate_type;
2767
2768 /* FORNOW: not supported by basic block SLP vectorization. */
2769 gcc_assert (loop_vinfo);
2770
2771 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2772 return false;
2773
2774 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2775 return false;
2776
2777 /* Is STMT a vectorizable type-demotion operation? */
2778 if (!is_gimple_assign (stmt))
2779 return false;
2780
2781 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2782 return false;
2783
2784 code = gimple_assign_rhs_code (stmt);
2785 if (!CONVERT_EXPR_CODE_P (code))
2786 return false;
2787
2788 scalar_dest = gimple_assign_lhs (stmt);
2789 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2790
2791 /* Check the operands of the operation. */
2792 op0 = gimple_assign_rhs1 (stmt);
2793 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2794 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2795 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2796 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2797 && CONVERT_EXPR_CODE_P (code))))
2798 return false;
2799 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2800 &def_stmt, &def, &dt[0], &vectype_in))
2801 {
2802 if (vect_print_dump_info (REPORT_DETAILS))
2803 fprintf (vect_dump, "use not simple.");
2804 return false;
2805 }
2806 /* If op0 is an external def use a vector type with the
2807 same size as the output vector type if possible. */
2808 if (!vectype_in)
2809 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2810 if (vec_stmt)
2811 gcc_assert (vectype_in);
2812 if (!vectype_in)
2813 {
2814 if (vect_print_dump_info (REPORT_DETAILS))
2815 {
2816 fprintf (vect_dump, "no vectype for scalar type ");
2817 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2818 }
2819
2820 return false;
2821 }
2822
2823 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2824 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2825 if (nunits_in >= nunits_out)
2826 return false;
2827
2828 /* Multiple types in SLP are handled by creating the appropriate number of
2829 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2830 case of SLP. */
2831 if (slp_node)
2832 ncopies = 1;
2833 else
2834 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2835 gcc_assert (ncopies >= 1);
2836
2837 /* Supportable by target? */
2838 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2839 &code1, &multi_step_cvt, &interm_types))
2840 return false;
2841
2842 if (!vec_stmt) /* transformation not required. */
2843 {
2844 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2845 if (vect_print_dump_info (REPORT_DETAILS))
2846 fprintf (vect_dump, "=== vectorizable_demotion ===");
2847 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2848 return true;
2849 }
2850
2851 /** Transform. **/
2852 if (vect_print_dump_info (REPORT_DETAILS))
2853 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2854 ncopies);
2855
2856 /* In case of multi-step demotion, we first generate demotion operations to
2857 the intermediate types, and then from those types to the final one.
2858 We create vector destinations for the intermediate type (TYPES) received
2859 from supportable_narrowing_operation, and store them in the correct order
2860 for future use in vect_create_vectorized_demotion_stmts(). */
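/* E.g. for an int -> char demotion via short, the final V16QI destination
   is pushed first (and hence popped last), and the intermediate V8HI
   destination is pushed on top of it, so the recursion in
   vect_create_vectorized_demotion_stmts() pops the destinations in
   source-to-destination order.  */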
2861 if (multi_step_cvt)
2862 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2863 else
2864 vec_dsts = VEC_alloc (tree, heap, 1);
2865
2866 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2867 VEC_quick_push (tree, vec_dsts, vec_dest);
2868
2869 if (multi_step_cvt)
2870 {
2871 for (i = VEC_length (tree, interm_types) - 1;
2872 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2873 {
2874 vec_dest = vect_create_destination_var (scalar_dest,
2875 intermediate_type);
2876 VEC_quick_push (tree, vec_dsts, vec_dest);
2877 }
2878 }
2879
2880 /* In case the vectorization factor (VF) is bigger than the number
2881 of elements that we can fit in a vectype (nunits), we have to generate
2882 more than one vector stmt - i.e - we need to "unroll" the
2883 vector stmt by a factor VF/nunits. */
2884 last_oprnd = op0;
2885 prev_stmt_info = NULL;
2886 for (j = 0; j < ncopies; j++)
2887 {
2888 /* Handle uses. */
2889 if (slp_node)
2890 vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
2891 else
2892 {
2893 VEC_free (tree, heap, vec_oprnds0);
2894 vec_oprnds0 = VEC_alloc (tree, heap,
2895 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2896 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2897 vect_pow2 (multi_step_cvt) - 1);
2898 }
2899
2900 /* Arguments are ready. Create the new vector stmts. */
2901 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2902 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2903 multi_step_cvt, stmt, tmp_vec_dsts,
2904 gsi, slp_node, code1,
2905 &prev_stmt_info);
2906 }
2907
2908 VEC_free (tree, heap, vec_oprnds0);
2909 VEC_free (tree, heap, vec_dsts);
2910 VEC_free (tree, heap, tmp_vec_dsts);
2911 VEC_free (tree, heap, interm_types);
2912
2913 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2914 return true;
2915 }
2916
2917
2918 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2919 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2920 the resulting vectors and call the function recursively. */
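/* E.g. widening V16QI operands to V8HI: each input vector yields two result
   vectors (a "low" and a "high" half, produced with CODE1/DECL1 and
   CODE2/DECL2 respectively - which is which depends on the target), so each
   promotion step doubles the number of vectors; in the multi-step case the
   doubled set is what is passed on to the next recursion level.  */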
2921
2922 static void
2923 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2924 VEC (tree, heap) **vec_oprnds1,
2925 int multi_step_cvt, gimple stmt,
2926 VEC (tree, heap) *vec_dsts,
2927 gimple_stmt_iterator *gsi,
2928 slp_tree slp_node, enum tree_code code1,
2929 enum tree_code code2, tree decl1,
2930 tree decl2, int op_type,
2931 stmt_vec_info *prev_stmt_info)
2932 {
2933 int i;
2934 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2935 gimple new_stmt1, new_stmt2;
2936 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2937 VEC (tree, heap) *vec_tmp;
2938
2939 vec_dest = VEC_pop (tree, vec_dsts);
2940 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2941
2942 FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
2943 {
2944 if (op_type == binary_op)
2945 vop1 = VEC_index (tree, *vec_oprnds1, i);
2946 else
2947 vop1 = NULL_TREE;
2948
2949 /* Generate the two halves of promotion operation. */
2950 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2951 op_type, vec_dest, gsi, stmt);
2952 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2953 op_type, vec_dest, gsi, stmt);
2954 if (is_gimple_call (new_stmt1))
2955 {
2956 new_tmp1 = gimple_call_lhs (new_stmt1);
2957 new_tmp2 = gimple_call_lhs (new_stmt2);
2958 }
2959 else
2960 {
2961 new_tmp1 = gimple_assign_lhs (new_stmt1);
2962 new_tmp2 = gimple_assign_lhs (new_stmt2);
2963 }
2964
2965 if (multi_step_cvt)
2966 {
2967 /* Store the results for the recursive call. */
2968 VEC_quick_push (tree, vec_tmp, new_tmp1);
2969 VEC_quick_push (tree, vec_tmp, new_tmp2);
2970 }
2971 else
2972 {
2973 /* Last step of the promotion sequence - store the results. */
2974 if (slp_node)
2975 {
2976 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2977 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2978 }
2979 else
2980 {
2981 if (!*prev_stmt_info)
2982 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2983 else
2984 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2985
2986 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2987 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2988 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2989 }
2990 }
2991 }
2992
2993 if (multi_step_cvt)
2994 {
2995 /* For a multi-step promotion operation we call the function recursively
2996 for every stage. We start from the input type, create promotion
2997 operations to the intermediate types, and then create promotions
2998 to the output type. */
2999 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
3000 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
3001 multi_step_cvt - 1, stmt,
3002 vec_dsts, gsi, slp_node, code1,
3003 code2, decl1, decl2, op_type,
3004 prev_stmt_info);
3005 }
3006
3007 VEC_free (tree, heap, vec_tmp);
3008 }
3009
3010
3011 /* Function vectorizable_type_promotion
3012
3013 Check if STMT performs a binary or unary operation that involves
3014 type promotion, and if it can be vectorized.
3015 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3016 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3017 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3018
3019 static bool
3020 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
3021 gimple *vec_stmt, slp_tree slp_node)
3022 {
3023 tree vec_dest;
3024 tree scalar_dest;
3025 tree op0, op1 = NULL;
3026 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
3027 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3028 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3029 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3030 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3031 int op_type;
3032 tree def;
3033 gimple def_stmt;
3034 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3035 stmt_vec_info prev_stmt_info;
3036 int nunits_in;
3037 int nunits_out;
3038 tree vectype_out;
3039 int ncopies;
3040 int j, i;
3041 tree vectype_in;
3042 tree intermediate_type = NULL_TREE;
3043 int multi_step_cvt = 0;
3044 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
3045 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
3046
3047 /* FORNOW: not supported by basic block SLP vectorization. */
3048 gcc_assert (loop_vinfo);
3049
3050 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3051 return false;
3052
3053 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3054 return false;
3055
3056 /* Is STMT a vectorizable type-promotion operation? */
3057 if (!is_gimple_assign (stmt))
3058 return false;
3059
3060 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3061 return false;
3062
3063 code = gimple_assign_rhs_code (stmt);
3064 if (!CONVERT_EXPR_CODE_P (code)
3065 && code != WIDEN_MULT_EXPR)
3066 return false;
3067
3068 scalar_dest = gimple_assign_lhs (stmt);
3069 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3070
3071 /* Check the operands of the operation. */
3072 op0 = gimple_assign_rhs1 (stmt);
3073 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
3074 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3075 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
3076 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
3077 && CONVERT_EXPR_CODE_P (code))))
3078 return false;
3079 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
3080 &def_stmt, &def, &dt[0], &vectype_in))
3081 {
3082 if (vect_print_dump_info (REPORT_DETAILS))
3083 fprintf (vect_dump, "use not simple.");
3084 return false;
3085 }
3086 /* If op0 is an external or constant def use a vector type with
3087 the same size as the output vector type. */
3088 if (!vectype_in)
3089 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
3090 if (vec_stmt)
3091 gcc_assert (vectype_in);
3092 if (!vectype_in)
3093 {
3094 if (vect_print_dump_info (REPORT_DETAILS))
3095 {
3096 fprintf (vect_dump, "no vectype for scalar type ");
3097 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
3098 }
3099
3100 return false;
3101 }
3102
3103 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3104 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3105 if (nunits_in <= nunits_out)
3106 return false;
3107
3108 /* Multiple types in SLP are handled by creating the appropriate number of
3109 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3110 case of SLP. */
3111 if (slp_node)
3112 ncopies = 1;
3113 else
3114 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3115
3116 gcc_assert (ncopies >= 1);
3117
3118 op_type = TREE_CODE_LENGTH (code);
3119 if (op_type == binary_op)
3120 {
3121 op1 = gimple_assign_rhs2 (stmt);
3122 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
3123 {
3124 if (vect_print_dump_info (REPORT_DETAILS))
3125 fprintf (vect_dump, "use not simple.");
3126 return false;
3127 }
3128 }
3129
3130 /* Supportable by target? */
3131 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3132 &decl1, &decl2, &code1, &code2,
3133 &multi_step_cvt, &interm_types))
3134 return false;
3135
3136 /* Binary widening operation can only be supported directly by the
3137 architecture. */
3138 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3139
3140 if (!vec_stmt) /* transformation not required. */
3141 {
3142 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3143 if (vect_print_dump_info (REPORT_DETAILS))
3144 fprintf (vect_dump, "=== vectorizable_promotion ===");
3145 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
3146 return true;
3147 }
3148
3149 /** Transform. **/
3150
3151 if (vect_print_dump_info (REPORT_DETAILS))
3152 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
3153 ncopies);
3154
3155 /* Handle def. */
3156 /* In case of multi-step promotion, we first generate promotion operations
3157 to the intermediate types, and then from those types to the final one.
3158 We store vector destination in VEC_DSTS in the correct order for
3159 recursive creation of promotion operations in
3160 vect_create_vectorized_promotion_stmts(). Vector destinations are created
3161 according to TYPES received from supportable_widening_operation(). */
3162 if (multi_step_cvt)
3163 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3164 else
3165 vec_dsts = VEC_alloc (tree, heap, 1);
3166
3167 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3168 VEC_quick_push (tree, vec_dsts, vec_dest);
3169
3170 if (multi_step_cvt)
3171 {
3172 for (i = VEC_length (tree, interm_types) - 1;
3173 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3174 {
3175 vec_dest = vect_create_destination_var (scalar_dest,
3176 intermediate_type);
3177 VEC_quick_push (tree, vec_dsts, vec_dest);
3178 }
3179 }
3180
3181 if (!slp_node)
3182 {
3183 vec_oprnds0 = VEC_alloc (tree, heap,
3184 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3185 if (op_type == binary_op)
3186 vec_oprnds1 = VEC_alloc (tree, heap, 1);
3187 }
3188
3189 /* In case the vectorization factor (VF) is bigger than the number
3190 of elements that we can fit in a vectype (nunits), we have to generate
3191 more than one vector stmt - i.e - we need to "unroll" the
3192 vector stmt by a factor VF/nunits. */
3193
3194 prev_stmt_info = NULL;
3195 for (j = 0; j < ncopies; j++)
3196 {
3197 /* Handle uses. */
3198 if (j == 0)
3199 {
3200 if (slp_node)
3201 vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
3202 &vec_oprnds1, -1);
3203 else
3204 {
3205 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3206 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
3207 if (op_type == binary_op)
3208 {
3209 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3210 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
3211 }
3212 }
3213 }
3214 else
3215 {
3216 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3217 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
3218 if (op_type == binary_op)
3219 {
3220 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3221 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3222 }
3223 }
3224
3225 /* Arguments are ready. Create the new vector stmts. */
3226 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3227 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
3228 multi_step_cvt, stmt,
3229 tmp_vec_dsts,
3230 gsi, slp_node, code1, code2,
3231 decl1, decl2, op_type,
3232 &prev_stmt_info);
3233 }
3234
3235 VEC_free (tree, heap, vec_dsts);
3236 VEC_free (tree, heap, tmp_vec_dsts);
3237 VEC_free (tree, heap, interm_types);
3238 VEC_free (tree, heap, vec_oprnds0);
3239 VEC_free (tree, heap, vec_oprnds1);
3240
3241 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3242 return true;
3243 }
3244
3245
3246 /* Function vectorizable_store.
3247
3248 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
3249 can be vectorized.
3250 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3251 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3252 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3253
3254 static bool
3255 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3256 slp_tree slp_node)
3257 {
3258 tree scalar_dest;
3259 tree data_ref;
3260 tree op;
3261 tree vec_oprnd = NULL_TREE;
3262 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3263 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3264 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3265 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3266 struct loop *loop = NULL;
3267 enum machine_mode vec_mode;
3268 tree dummy;
3269 enum dr_alignment_support alignment_support_scheme;
3270 tree def;
3271 gimple def_stmt;
3272 enum vect_def_type dt;
3273 stmt_vec_info prev_stmt_info = NULL;
3274 tree dataref_ptr = NULL_TREE;
3275 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3276 int ncopies;
3277 int j;
3278 gimple next_stmt, first_stmt = NULL;
3279 bool strided_store = false;
3280 unsigned int group_size, i;
3281 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3282 bool inv_p;
3283 VEC(tree,heap) *vec_oprnds = NULL;
3284 bool slp = (slp_node != NULL);
3285 unsigned int vec_num;
3286 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3287
3288 if (loop_vinfo)
3289 loop = LOOP_VINFO_LOOP (loop_vinfo);
3290
3291 /* Multiple types in SLP are handled by creating the appropriate number of
3292 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3293 case of SLP. */
3294 if (slp)
3295 ncopies = 1;
3296 else
3297 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3298
3299 gcc_assert (ncopies >= 1);
3300
3301 /* FORNOW. This restriction should be relaxed. */
3302 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3303 {
3304 if (vect_print_dump_info (REPORT_DETAILS))
3305 fprintf (vect_dump, "multiple types in nested loop.");
3306 return false;
3307 }
3308
3309 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3310 return false;
3311
3312 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3313 return false;
3314
3315 /* Is vectorizable store? */
3316
3317 if (!is_gimple_assign (stmt))
3318 return false;
3319
3320 scalar_dest = gimple_assign_lhs (stmt);
3321 if (TREE_CODE (scalar_dest) != ARRAY_REF
3322 && TREE_CODE (scalar_dest) != INDIRECT_REF
3323 && TREE_CODE (scalar_dest) != COMPONENT_REF
3324 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3325 && TREE_CODE (scalar_dest) != REALPART_EXPR
3326 && TREE_CODE (scalar_dest) != MEM_REF)
3327 return false;
3328
3329 gcc_assert (gimple_assign_single_p (stmt));
3330 op = gimple_assign_rhs1 (stmt);
3331 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3332 {
3333 if (vect_print_dump_info (REPORT_DETAILS))
3334 fprintf (vect_dump, "use not simple.");
3335 return false;
3336 }
3337
3338 /* The scalar rhs type needs to be trivially convertible to the vector
3339 component type. This should always be the case. */
3340 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
3341 {
3342 if (vect_print_dump_info (REPORT_DETAILS))
3343 fprintf (vect_dump, "??? operands of different types");
3344 return false;
3345 }
3346
3347 vec_mode = TYPE_MODE (vectype);
3348 /* FORNOW. In some cases we can vectorize even if the data-type is not
3349 supported (e.g. - array initialization with 0). */
3350 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3351 return false;
3352
3353 if (!STMT_VINFO_DATA_REF (stmt_info))
3354 return false;
3355
3356 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
3357 {
3358 if (vect_print_dump_info (REPORT_DETAILS))
3359 fprintf (vect_dump, "negative step for store.");
3360 return false;
3361 }
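/* E.g. a store a[i] = ... in a loop that walks I downwards has a negative
   DR_STEP and is rejected above; only non-negative steps are handled for
   stores here.  */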
3362
3363 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3364 {
3365 strided_store = true;
3366 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3367 if (!vect_strided_store_supported (vectype)
3368 && !PURE_SLP_STMT (stmt_info) && !slp)
3369 return false;
3370
3371 if (first_stmt == stmt)
3372 {
3373 /* STMT is the leader of the group. Check the operands of all the
3374 stmts of the group. */
3375 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3376 while (next_stmt)
3377 {
3378 gcc_assert (gimple_assign_single_p (next_stmt));
3379 op = gimple_assign_rhs1 (next_stmt);
3380 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3381 &def, &dt))
3382 {
3383 if (vect_print_dump_info (REPORT_DETAILS))
3384 fprintf (vect_dump, "use not simple.");
3385 return false;
3386 }
3387 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3388 }
3389 }
3390 }
3391
3392 if (!vec_stmt) /* transformation not required. */
3393 {
3394 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3395 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3396 return true;
3397 }
3398
3399 /** Transform. **/
3400
3401 if (strided_store)
3402 {
3403 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3404 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3405
3406 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3407
3408 /* FORNOW */
3409 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3410
3411 /* We vectorize all the stmts of the interleaving group when we
3412 reach the last stmt in the group. */
3413 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3414 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3415 && !slp)
3416 {
3417 *vec_stmt = NULL;
3418 return true;
3419 }
3420
3421 if (slp)
3422 {
3423 strided_store = false;
3424 /* VEC_NUM is the number of vect stmts to be created for this
3425 group. */
3426 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3427 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3428 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3429 }
3430 else
3431 /* VEC_NUM is the number of vect stmts to be created for this
3432 group. */
3433 vec_num = group_size;
3434 }
3435 else
3436 {
3437 first_stmt = stmt;
3438 first_dr = dr;
3439 group_size = vec_num = 1;
3440 }
3441
3442 if (vect_print_dump_info (REPORT_DETAILS))
3443 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3444
3445 dr_chain = VEC_alloc (tree, heap, group_size);
3446 oprnds = VEC_alloc (tree, heap, group_size);
3447
3448 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3449 gcc_assert (alignment_support_scheme);
3450
3451 /* In case the vectorization factor (VF) is bigger than the number
3452 of elements that we can fit in a vectype (nunits), we have to generate
3453 more than one vector stmt - i.e - we need to "unroll" the
3454 vector stmt by a factor VF/nunits. For more details see documentation in
3455 vect_get_vec_def_for_copy_stmt. */
3456
3457 /* In case of interleaving (non-unit strided access):
3458
3459 S1: &base + 2 = x2
3460 S2: &base = x0
3461 S3: &base + 1 = x1
3462 S4: &base + 3 = x3
3463
3464 We create vectorized stores starting from base address (the access of the
3465 first stmt in the chain (S2 in the above example), when the last store stmt
3466 of the chain (S4) is reached:
3467
3468 VS1: &base = vx2
3469 VS2: &base + vec_size*1 = vx0
3470 VS3: &base + vec_size*2 = vx1
3471 VS4: &base + vec_size*3 = vx3
3472
3473 Then permutation statements are generated:
3474
3475 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3476 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3477 ...
3478
3479 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3480 (the order of the data-refs in the output of vect_permute_store_chain
3481 corresponds to the order of scalar stmts in the interleaving chain - see
3482 the documentation of vect_permute_store_chain()).
3483
3484 In case of both multiple types and interleaving, above vector stores and
3485 permutation stmts are created for every copy. The result vector stmts are
3486 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3487 STMT_VINFO_RELATED_STMT for the next copies.
3488 */
3489
3490 prev_stmt_info = NULL;
3491 for (j = 0; j < ncopies; j++)
3492 {
3493 gimple new_stmt;
3494 gimple ptr_incr;
3495
3496 if (j == 0)
3497 {
3498 if (slp)
3499 {
3500 /* Get vectorized arguments for SLP_NODE. */
3501 vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
3502 NULL, -1);
3503
3504 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3505 }
3506 else
3507 {
3508 /* For interleaved stores we collect vectorized defs for all the
3509 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3510 used as an input to vect_permute_store_chain(), and OPRNDS as
3511 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3512
3513 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3514 OPRNDS are of size 1. */
3515 next_stmt = first_stmt;
3516 for (i = 0; i < group_size; i++)
3517 {
3518 /* Since gaps are not supported for interleaved stores,
3519 GROUP_SIZE is the exact number of stmts in the chain.
3520 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3521 there is no interleaving, GROUP_SIZE is 1, and only one
3522 iteration of the loop will be executed. */
3523 gcc_assert (next_stmt
3524 && gimple_assign_single_p (next_stmt));
3525 op = gimple_assign_rhs1 (next_stmt);
3526
3527 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3528 NULL);
3529 VEC_quick_push(tree, dr_chain, vec_oprnd);
3530 VEC_quick_push(tree, oprnds, vec_oprnd);
3531 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3532 }
3533 }
3534
3535 /* We should have caught mismatched types earlier. */
3536 gcc_assert (useless_type_conversion_p (vectype,
3537 TREE_TYPE (vec_oprnd)));
3538 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3539 &dummy, &ptr_incr, false,
3540 &inv_p);
3541 gcc_assert (bb_vinfo || !inv_p);
3542 }
3543 else
3544 {
3545 /* For interleaved stores we created vectorized defs for all the
3546 defs stored in OPRNDS in the previous iteration (previous copy).
3547 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3548 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3549 next copy.
3550 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3551 OPRNDS are of size 1. */
3552 for (i = 0; i < group_size; i++)
3553 {
3554 op = VEC_index (tree, oprnds, i);
3555 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3556 &dt);
3557 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3558 VEC_replace(tree, dr_chain, i, vec_oprnd);
3559 VEC_replace(tree, oprnds, i, vec_oprnd);
3560 }
3561 dataref_ptr =
3562 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3563 }
3564
3565 if (strided_store)
3566 {
3567 result_chain = VEC_alloc (tree, heap, group_size);
3568 /* Permute. */
3569 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3570 &result_chain))
3571 return false;
3572 }
3573
3574 next_stmt = first_stmt;
3575 for (i = 0; i < vec_num; i++)
3576 {
3577 struct ptr_info_def *pi;
3578
3579 if (i > 0)
3580 /* Bump the vector pointer. */
3581 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3582 NULL_TREE);
3583
3584 if (slp)
3585 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3586 else if (strided_store)
3587 /* For strided stores vectorized defs are interleaved in
3588 vect_permute_store_chain(). */
3589 vec_oprnd = VEC_index (tree, result_chain, i);
3590
3591 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
3592 build_int_cst (reference_alias_ptr_type
3593 (DR_REF (first_dr)), 0));
3594 pi = get_ptr_info (dataref_ptr);
3595 pi->align = TYPE_ALIGN_UNIT (vectype);
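/* Record the access's alignment on the pointer: a fully aligned access keeps
   vector alignment with zero misalignment; an access with unknown
   misalignment (DR_MISALIGNMENT == -1) is demoted to element alignment;
   otherwise the known misalignment is recorded.  */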
3596 if (aligned_access_p (first_dr))
3597 pi->misalign = 0;
3598 else if (DR_MISALIGNMENT (first_dr) == -1)
3599 {
3600 TREE_TYPE (data_ref)
3601 = build_aligned_type (TREE_TYPE (data_ref),
3602 TYPE_ALIGN (TREE_TYPE (vectype)));
3603 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
3604 pi->misalign = 0;
3605 }
3606 else
3607 {
3608 TREE_TYPE (data_ref)
3609 = build_aligned_type (TREE_TYPE (data_ref),
3610 TYPE_ALIGN (TREE_TYPE (vectype)));
3611 pi->misalign = DR_MISALIGNMENT (first_dr);
3612 }
3613
3614 /* Arguments are ready. Create the new vector stmt. */
3615 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3616 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3617 mark_symbols_for_renaming (new_stmt);
3618
3619 if (slp)
3620 continue;
3621
3622 if (j == 0)
3623 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3624 else
3625 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3626
3627 prev_stmt_info = vinfo_for_stmt (new_stmt);
3628 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3629 if (!next_stmt)
3630 break;
3631 }
3632 }
3633
3634 VEC_free (tree, heap, dr_chain);
3635 VEC_free (tree, heap, oprnds);
3636 if (result_chain)
3637 VEC_free (tree, heap, result_chain);
3638 if (vec_oprnds)
3639 VEC_free (tree, heap, vec_oprnds);
3640
3641 return true;
3642 }
3643
3644 /* Given a vector type VECTYPE returns a builtin DECL to be used
3645 for vector permutation and stores a mask into *MASK that implements
3646 reversal of the vector elements. If that is impossible to do
3647 returns NULL (and *MASK is unchanged). */
3648
3649 static tree
3650 perm_mask_for_reverse (tree vectype, tree *mask)
3651 {
3652 tree builtin_decl;
3653 tree mask_element_type, mask_type;
3654 tree mask_vec = NULL;
3655 int i;
3656 int nunits;
3657 if (!targetm.vectorize.builtin_vec_perm)
3658 return NULL;
3659
3660 builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
3661 &mask_element_type);
3662 if (!builtin_decl || !mask_element_type)
3663 return NULL;
3664
3665 mask_type = get_vectype_for_scalar_type (mask_element_type);
3666 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3667 if (!mask_type
3668 || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
3669 return NULL;
3670
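/* tree_cons prepends, so the element list built below ends up as
   {nunits-1, ..., 1, 0}; per the function comment above, this is the mask
   that reverses the vector elements.  */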
3671 for (i = 0; i < nunits; i++)
3672 mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
3673 mask_vec = build_vector (mask_type, mask_vec);
3674
3675 if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
3676 return NULL;
3677 if (mask)
3678 *mask = mask_vec;
3679 return builtin_decl;
3680 }
3681
3682 /* Given a vector variable X that was generated for the scalar LHS of
3683 STMT, generate instructions to reverse the vector elements of X,
3684 insert them at *GSI and return the permuted vector variable. */
3685
3686 static tree
3687 reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
3688 {
3689 tree vectype = TREE_TYPE (x);
3690 tree mask_vec, builtin_decl;
3691 tree perm_dest, data_ref;
3692 gimple perm_stmt;
3693
3694 builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);
3695
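/* The analysis phase (vectorizable_load) already verified that a reversal
   mask exists for VECTYPE, so BUILTIN_DECL and MASK_VEC are valid here.  */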
3696 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3697
3698 /* Generate the permute statement. */
3699 perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
3700 data_ref = make_ssa_name (perm_dest, perm_stmt);
3701 gimple_call_set_lhs (perm_stmt, data_ref);
3702 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3703
3704 return data_ref;
3705 }
3706
3707 /* vectorizable_load.
3708
3709 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3710 can be vectorized.
3711 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3712 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3713 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3714
3715 static bool
3716 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3717 slp_tree slp_node, slp_instance slp_node_instance)
3718 {
3719 tree scalar_dest;
3720 tree vec_dest = NULL;
3721 tree data_ref = NULL;
3722 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3723 stmt_vec_info prev_stmt_info;
3724 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3725 struct loop *loop = NULL;
3726 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3727 bool nested_in_vect_loop = false;
3728 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3729 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3730 tree new_temp;
3731 enum machine_mode mode;
3732 gimple new_stmt = NULL;
3733 tree dummy;
3734 enum dr_alignment_support alignment_support_scheme;
3735 tree dataref_ptr = NULL_TREE;
3736 gimple ptr_incr;
3737 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3738 int ncopies;
3739 int i, j, group_size;
3740 tree msq = NULL_TREE, lsq;
3741 tree offset = NULL_TREE;
3742 tree realignment_token = NULL_TREE;
3743 gimple phi = NULL;
3744 VEC(tree,heap) *dr_chain = NULL;
3745 bool strided_load = false;
3746 gimple first_stmt;
3747 tree scalar_type;
3748 bool inv_p;
3749 bool negative;
3750 bool compute_in_loop = false;
3751 struct loop *at_loop;
3752 int vec_num;
3753 bool slp = (slp_node != NULL);
3754 bool slp_perm = false;
3755 enum tree_code code;
3756 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3757 int vf;
3758
3759 if (loop_vinfo)
3760 {
3761 loop = LOOP_VINFO_LOOP (loop_vinfo);
3762 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3763 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3764 }
3765 else
3766 vf = 1;
3767
3768 /* Multiple types in SLP are handled by creating the appropriate number of
3769 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3770 case of SLP. */
3771 if (slp)
3772 ncopies = 1;
3773 else
3774 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3775
3776 gcc_assert (ncopies >= 1);
3777
3778 /* FORNOW. This restriction should be relaxed. */
3779 if (nested_in_vect_loop && ncopies > 1)
3780 {
3781 if (vect_print_dump_info (REPORT_DETAILS))
3782 fprintf (vect_dump, "multiple types in nested loop.");
3783 return false;
3784 }
3785
3786 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3787 return false;
3788
3789 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3790 return false;
3791
3792 /* Is vectorizable load? */
3793 if (!is_gimple_assign (stmt))
3794 return false;
3795
3796 scalar_dest = gimple_assign_lhs (stmt);
3797 if (TREE_CODE (scalar_dest) != SSA_NAME)
3798 return false;
3799
3800 code = gimple_assign_rhs_code (stmt);
3801 if (code != ARRAY_REF
3802 && code != INDIRECT_REF
3803 && code != COMPONENT_REF
3804 && code != IMAGPART_EXPR
3805 && code != REALPART_EXPR
3806 && code != MEM_REF)
3807 return false;
3808
3809 if (!STMT_VINFO_DATA_REF (stmt_info))
3810 return false;
3811
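/* A negative DR_STEP means the scalar accesses walk the data-ref backwards;
   such loads are vectorized by loading a full vector and reversing its
   elements (see reverse_vec_elements below), and only a single copy is
   supported.  */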
3812 negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
3813 if (negative && ncopies > 1)
3814 {
3815 if (vect_print_dump_info (REPORT_DETAILS))
3816 fprintf (vect_dump, "multiple types with negative step.");
3817 return false;
3818 }
3819
3820 scalar_type = TREE_TYPE (DR_REF (dr));
3821 mode = TYPE_MODE (vectype);
3822
3823 /* FORNOW. In some cases we can vectorize even if the data-type is not
3824 supported (e.g. - data copies). */
3825 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
3826 {
3827 if (vect_print_dump_info (REPORT_DETAILS))
3828 fprintf (vect_dump, "Aligned load, but unsupported type.");
3829 return false;
3830 }
3831
3832 /* The vector component type needs to be trivially convertible to the
3833 scalar lhs. This should always be the case. */
3834 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3835 {
3836 if (vect_print_dump_info (REPORT_DETAILS))
3837 fprintf (vect_dump, "??? operands of different types");
3838 return false;
3839 }
3840
3841 /* Check if the load is a part of an interleaving chain. */
3842 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3843 {
3844 strided_load = true;
3845 /* FORNOW */
3846 gcc_assert (! nested_in_vect_loop);
3847
3848 /* Check if interleaving is supported. */
3849 if (!vect_strided_load_supported (vectype)
3850 && !PURE_SLP_STMT (stmt_info) && !slp)
3851 return false;
3852 }
3853
3854 if (negative)
3855 {
3856 gcc_assert (!strided_load);
3857 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
3858 if (alignment_support_scheme != dr_aligned
3859 && alignment_support_scheme != dr_unaligned_supported)
3860 {
3861 if (vect_print_dump_info (REPORT_DETAILS))
3862 fprintf (vect_dump, "negative step but alignment required.");
3863 return false;
3864 }
3865 if (!perm_mask_for_reverse (vectype, NULL))
3866 {
3867 if (vect_print_dump_info (REPORT_DETAILS))
3868 fprintf (vect_dump, "negative step and reversing not supported.");
3869 return false;
3870 }
3871 }
3872
3873 if (!vec_stmt) /* transformation not required. */
3874 {
3875 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3876 vect_model_load_cost (stmt_info, ncopies, NULL);
3877 return true;
3878 }
3879
3880 if (vect_print_dump_info (REPORT_DETAILS))
3881 fprintf (vect_dump, "transform load.");
3882
3883 /** Transform. **/
3884
3885 if (strided_load)
3886 {
3887 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3888 /* Check if the chain of loads is already vectorized. */
3889 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3890 {
3891 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3892 return true;
3893 }
3894 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3895 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3896
3897 /* VEC_NUM is the number of vect stmts to be created for this group. */
3898 if (slp)
3899 {
3900 strided_load = false;
3901 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3902 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3903 slp_perm = true;
3904 }
3905 else
3906 vec_num = group_size;
3907
3908 dr_chain = VEC_alloc (tree, heap, vec_num);
3909 }
3910 else
3911 {
3912 first_stmt = stmt;
3913 first_dr = dr;
3914 group_size = vec_num = 1;
3915 }
3916
3917 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3918 gcc_assert (alignment_support_scheme);
3919
3920 /* In case the vectorization factor (VF) is bigger than the number
3921 of elements that we can fit in a vectype (nunits), we have to generate
3922 more than one vector stmt - i.e. - we need to "unroll" the
3923 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3924 from one copy of the vector stmt to the next, in the field
3925 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3926 stages to find the correct vector defs to be used when vectorizing
3927 stmts that use the defs of the current stmt. The example below
3928 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
3929 need to create 4 vectorized stmts):
3930
3931 before vectorization:
3932 RELATED_STMT VEC_STMT
3933 S1: x = memref - -
3934 S2: z = x + 1 - -
3935
3936 step 1: vectorize stmt S1:
3937 We first create the vector stmt VS1_0, and, as usual, record a
3938 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3939 Next, we create the vector stmt VS1_1, and record a pointer to
3940 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3941 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3942 stmts and pointers:
3943 RELATED_STMT VEC_STMT
3944 VS1_0: vx0 = memref0 VS1_1 -
3945 VS1_1: vx1 = memref1 VS1_2 -
3946 VS1_2: vx2 = memref2 VS1_3 -
3947 VS1_3: vx3 = memref3 - -
3948 S1: x = load - VS1_0
3949 S2: z = x + 1 - -
3950
3951 See in documentation in vect_get_vec_def_for_stmt_copy for how the
3952 information we recorded in RELATED_STMT field is used to vectorize
3953 stmt S2. */
3954
3955 /* In case of interleaving (non-unit strided access):
3956
3957 S1: x2 = &base + 2
3958 S2: x0 = &base
3959 S3: x1 = &base + 1
3960 S4: x3 = &base + 3
3961
3962 Vectorized loads are created in the order of memory accesses
3963 starting from the access of the first stmt of the chain:
3964
3965 VS1: vx0 = &base
3966 VS2: vx1 = &base + vec_size*1
3967 VS3: vx2 = &base + vec_size*2
3968 VS4: vx3 = &base + vec_size*3
3969
3970 Then permutation statements are generated:
3971
3972 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3973 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3974 ...
3975
3976 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3977 (the order of the data-refs in the output of vect_permute_load_chain
3978 corresponds to the order of scalar stmts in the interleaving chain - see
3979 the documentation of vect_permute_load_chain()).
3980 The generation of permutation stmts and recording them in
3981 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3982
3983 In case of both multiple types and interleaving, the vector loads and
3984 permutation stmts above are created for every copy. The result vector
3985 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
3986 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
3987
3988 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3989 on a target that supports unaligned accesses (dr_unaligned_supported)
3990 we generate the following code:
3991 p = initial_addr;
3992 indx = 0;
3993 loop {
3994 p = p + indx * vectype_size;
3995 vec_dest = *(p);
3996 indx = indx + 1;
3997 }
3998
3999 Otherwise, the data reference is potentially unaligned on a target that
4000 does not support unaligned accesses (dr_explicit_realign_optimized) -
4001 then generate the following code, in which the data in each iteration is
4002 obtained by two vector loads, one from the previous iteration, and one
4003 from the current iteration:
4004 p1 = initial_addr;
4005 msq_init = *(floor(p1))
4006 p2 = initial_addr + VS - 1;
4007 realignment_token = call target_builtin;
4008 indx = 0;
4009 loop {
4010 p2 = p2 + indx * vectype_size
4011 lsq = *(floor(p2))
4012 vec_dest = realign_load (msq, lsq, realignment_token)
4013 indx = indx + 1;
4014 msq = lsq;
4015 } */
4016
4017 /* If the misalignment remains the same throughout the execution of the
4018 loop, we can create the init_addr and permutation mask at the loop
4019 preheader. Otherwise, it needs to be created inside the loop.
4020 This can only occur when vectorizing memory accesses in the inner-loop
4021 nested within an outer-loop that is being vectorized. */
4022
4023 if (loop && nested_in_vect_loop_p (loop, stmt)
4024 && (TREE_INT_CST_LOW (DR_STEP (dr))
4025 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
4026 {
4027 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
4028 compute_in_loop = true;
4029 }
4030
4031 if ((alignment_support_scheme == dr_explicit_realign_optimized
4032 || alignment_support_scheme == dr_explicit_realign)
4033 && !compute_in_loop)
4034 {
4035 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
4036 alignment_support_scheme, NULL_TREE,
4037 &at_loop);
4038 if (alignment_support_scheme == dr_explicit_realign_optimized)
4039 {
4040 phi = SSA_NAME_DEF_STMT (msq);
4041 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4042 }
4043 }
4044 else
4045 at_loop = loop;
4046
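/* For a reversed access the load is offset by -(nunits-1) elements; the
   loaded vector is then put back into scalar order by reverse_vec_elements
   below.  */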
4047 if (negative)
4048 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
4049
4050 prev_stmt_info = NULL;
4051 for (j = 0; j < ncopies; j++)
4052 {
4053 /* 1. Create the vector pointer update chain. */
4054 if (j == 0)
4055 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
4056 at_loop, offset,
4057 &dummy, &ptr_incr, false,
4058 &inv_p);
4059 else
4060 dataref_ptr =
4061 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
4062
4063 for (i = 0; i < vec_num; i++)
4064 {
4065 if (i > 0)
4066 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
4067 NULL_TREE);
4068
4069 /* 2. Create the vector-load in the loop. */
4070 switch (alignment_support_scheme)
4071 {
4072 case dr_aligned:
4073 case dr_unaligned_supported:
4074 {
4075 struct ptr_info_def *pi;
4076 data_ref
4077 = build2 (MEM_REF, vectype, dataref_ptr,
4078 build_int_cst (reference_alias_ptr_type
4079 (DR_REF (first_dr)), 0));
4080 pi = get_ptr_info (dataref_ptr);
4081 pi->align = TYPE_ALIGN_UNIT (vectype);
4082 if (alignment_support_scheme == dr_aligned)
4083 {
4084 gcc_assert (aligned_access_p (first_dr));
4085 pi->misalign = 0;
4086 }
4087 else if (DR_MISALIGNMENT (first_dr) == -1)
4088 {
4089 TREE_TYPE (data_ref)
4090 = build_aligned_type (TREE_TYPE (data_ref),
4091 TYPE_ALIGN (TREE_TYPE (vectype)));
4092 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
4093 pi->misalign = 0;
4094 }
4095 else
4096 {
4097 TREE_TYPE (data_ref)
4098 = build_aligned_type (TREE_TYPE (data_ref),
4099 TYPE_ALIGN (TREE_TYPE (vectype)));
4100 pi->misalign = DR_MISALIGNMENT (first_dr);
4101 }
4102 break;
4103 }
4104 case dr_explicit_realign:
4105 {
4106 tree ptr, bump;
4107 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4108
4109 if (compute_in_loop)
4110 msq = vect_setup_realignment (first_stmt, gsi,
4111 &realignment_token,
4112 dr_explicit_realign,
4113 dataref_ptr, NULL);
4114
4115 new_stmt = gimple_build_assign_with_ops
4116 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4117 build_int_cst
4118 (TREE_TYPE (dataref_ptr),
4119 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4120 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4121 gimple_assign_set_lhs (new_stmt, ptr);
4122 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4123 data_ref
4124 = build2 (MEM_REF, vectype, ptr,
4125 build_int_cst (reference_alias_ptr_type
4126 (DR_REF (first_dr)), 0));
4127 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4128 new_stmt = gimple_build_assign (vec_dest, data_ref);
4129 new_temp = make_ssa_name (vec_dest, new_stmt);
4130 gimple_assign_set_lhs (new_stmt, new_temp);
4131 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
4132 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
4133 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4134 msq = new_temp;
4135
4136 bump = size_binop (MULT_EXPR, vs_minus_1,
4137 TYPE_SIZE_UNIT (scalar_type));
4138 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
4139 new_stmt = gimple_build_assign_with_ops
4140 (BIT_AND_EXPR, NULL_TREE, ptr,
4141 build_int_cst
4142 (TREE_TYPE (ptr),
4143 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4144 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4145 gimple_assign_set_lhs (new_stmt, ptr);
4146 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4147 data_ref
4148 = build2 (MEM_REF, vectype, ptr,
4149 build_int_cst (reference_alias_ptr_type
4150 (DR_REF (first_dr)), 0));
4151 break;
4152 }
4153 case dr_explicit_realign_optimized:
4154 new_stmt = gimple_build_assign_with_ops
4155 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4156 build_int_cst
4157 (TREE_TYPE (dataref_ptr),
4158 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4159 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4160 gimple_assign_set_lhs (new_stmt, new_temp);
4161 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4162 data_ref
4163 = build2 (MEM_REF, vectype, new_temp,
4164 build_int_cst (reference_alias_ptr_type
4165 (DR_REF (first_dr)), 0));
4166 break;
4167 default:
4168 gcc_unreachable ();
4169 }
4170 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4171 new_stmt = gimple_build_assign (vec_dest, data_ref);
4172 new_temp = make_ssa_name (vec_dest, new_stmt);
4173 gimple_assign_set_lhs (new_stmt, new_temp);
4174 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4175 mark_symbols_for_renaming (new_stmt);
4176
4177 /* 3. Handle explicit realignment if necessary/supported. Create in
4178 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
4179 if (alignment_support_scheme == dr_explicit_realign_optimized
4180 || alignment_support_scheme == dr_explicit_realign)
4181 {
4182 tree tmp;
4183
4184 lsq = gimple_assign_lhs (new_stmt);
4185 if (!realignment_token)
4186 realignment_token = dataref_ptr;
4187 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4188 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
4189 realignment_token);
4190 new_stmt = gimple_build_assign (vec_dest, tmp);
4191 new_temp = make_ssa_name (vec_dest, new_stmt);
4192 gimple_assign_set_lhs (new_stmt, new_temp);
4193 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4194
4195 if (alignment_support_scheme == dr_explicit_realign_optimized)
4196 {
4197 gcc_assert (phi);
4198 if (i == vec_num - 1 && j == ncopies - 1)
4199 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
4200 UNKNOWN_LOCATION);
4201 msq = lsq;
4202 }
4203 }
4204
4205 /* 4. Handle invariant-load. */
4206 if (inv_p && !bb_vinfo)
4207 {
4208 gcc_assert (!strided_load);
4209 gcc_assert (nested_in_vect_loop_p (loop, stmt));
4210 if (j == 0)
4211 {
4212 int k;
4213 tree t = NULL_TREE;
4214 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
4215
4216 /* CHECKME: bitpos depends on endianness? */
4217 bitpos = bitsize_zero_node;
4218 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
4219 bitsize, bitpos);
4220 vec_dest =
4221 vect_create_destination_var (scalar_dest, NULL_TREE);
4222 new_stmt = gimple_build_assign (vec_dest, vec_inv);
4223 new_temp = make_ssa_name (vec_dest, new_stmt);
4224 gimple_assign_set_lhs (new_stmt, new_temp);
4225 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4226
4227 for (k = nunits - 1; k >= 0; --k)
4228 t = tree_cons (NULL_TREE, new_temp, t);
4229 /* FIXME: use build_constructor directly. */
4230 vec_inv = build_constructor_from_list (vectype, t);
4231 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
4232 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4233 }
4234 else
4235 gcc_unreachable (); /* FORNOW. */
4236 }
4237
4238 if (negative)
4239 {
4240 new_temp = reverse_vec_elements (new_temp, stmt, gsi);
4241 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4242 }
4243
4244 /* Collect vector loads and later create their permutation in
4245 vect_transform_strided_load (). */
4246 if (strided_load || slp_perm)
4247 VEC_quick_push (tree, dr_chain, new_temp);
4248
4249 /* Store vector loads in the corresponding SLP_NODE. */
4250 if (slp && !slp_perm)
4251 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4252 }
4253
4254 if (slp && !slp_perm)
4255 continue;
4256
4257 if (slp_perm)
4258 {
4259 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
4260 slp_node_instance, false))
4261 {
4262 VEC_free (tree, heap, dr_chain);
4263 return false;
4264 }
4265 }
4266 else
4267 {
4268 if (strided_load)
4269 {
4270 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
4271 return false;
4272
4273 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4274 VEC_free (tree, heap, dr_chain);
4275 dr_chain = VEC_alloc (tree, heap, group_size);
4276 }
4277 else
4278 {
4279 if (j == 0)
4280 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4281 else
4282 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4283 prev_stmt_info = vinfo_for_stmt (new_stmt);
4284 }
4285 }
4286 }
4287
4288 if (dr_chain)
4289 VEC_free (tree, heap, dr_chain);
4290
4291 return true;
4292 }
4293
4294 /* Function vect_is_simple_cond.
4295
4296 Input:
4297 LOOP - the loop that is being vectorized.
4298 COND - Condition that is checked for simple use.
4299
4300 Returns whether a COND can be vectorized. Checks whether
4301 condition operands are supportable using vect_is_simple_use. */
4302
4303 static bool
4304 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
4305 {
4306 tree lhs, rhs;
4307 tree def;
4308 enum vect_def_type dt;
4309
4310 if (!COMPARISON_CLASS_P (cond))
4311 return false;
4312
4313 lhs = TREE_OPERAND (cond, 0);
4314 rhs = TREE_OPERAND (cond, 1);
4315
4316 if (TREE_CODE (lhs) == SSA_NAME)
4317 {
4318 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
4319 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
4320 &dt))
4321 return false;
4322 }
4323 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
4324 && TREE_CODE (lhs) != FIXED_CST)
4325 return false;
4326
4327 if (TREE_CODE (rhs) == SSA_NAME)
4328 {
4329 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
4330 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
4331 &dt))
4332 return false;
4333 }
4334 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
4335 && TREE_CODE (rhs) != FIXED_CST)
4336 return false;
4337
4338 return true;
4339 }
4340
4341 /* vectorizable_condition.
4342
4343 Check if STMT is conditional modify expression that can be vectorized.
4344 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4345 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4346 at GSI.
4347
4348 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
4349 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
4350 the else clause if it is 2).
4351
4352 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4353
4354 bool
4355 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4356 gimple *vec_stmt, tree reduc_def, int reduc_index)
4357 {
4358 tree scalar_dest = NULL_TREE;
4359 tree vec_dest = NULL_TREE;
4360 tree op = NULL_TREE;
4361 tree cond_expr, then_clause, else_clause;
4362 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4363 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4364 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
4365 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
4366 tree vec_compare, vec_cond_expr;
4367 tree new_temp;
4368 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4369 enum machine_mode vec_mode;
4370 tree def;
4371 enum vect_def_type dt, dts[4];
4372 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4373 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4374 enum tree_code code;
4375 stmt_vec_info prev_stmt_info = NULL;
4376 int j;
4377
4378 /* FORNOW: unsupported in basic block SLP. */
4379 gcc_assert (loop_vinfo);
4380
4381 gcc_assert (ncopies >= 1);
4382 if (reduc_index && ncopies > 1)
4383 return false; /* FORNOW */
4384
4385 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4386 return false;
4387
4388 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4389 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4390 && reduc_def))
4391 return false;
4392
4393 /* FORNOW: SLP not supported. */
4394 if (STMT_SLP_TYPE (stmt_info))
4395 return false;
4396
4397 /* FORNOW: not yet supported. */
4398 if (STMT_VINFO_LIVE_P (stmt_info))
4399 {
4400 if (vect_print_dump_info (REPORT_DETAILS))
4401 fprintf (vect_dump, "value used after loop.");
4402 return false;
4403 }
4404
4405 /* Is vectorizable conditional operation? */
4406 if (!is_gimple_assign (stmt))
4407 return false;
4408
4409 code = gimple_assign_rhs_code (stmt);
4410
4411 if (code != COND_EXPR)
4412 return false;
4413
4414 gcc_assert (gimple_assign_single_p (stmt));
4415 op = gimple_assign_rhs1 (stmt);
4416 cond_expr = TREE_OPERAND (op, 0);
4417 then_clause = TREE_OPERAND (op, 1);
4418 else_clause = TREE_OPERAND (op, 2);
4419
4420 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4421 return false;
4422
4423 /* We do not handle two different vector types for the condition
4424 and the values. */
4425 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4426 TREE_TYPE (vectype)))
4427 return false;
4428
4429 if (TREE_CODE (then_clause) == SSA_NAME)
4430 {
4431 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
4432 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
4433 &then_def_stmt, &def, &dt))
4434 return false;
4435 }
4436 else if (TREE_CODE (then_clause) != INTEGER_CST
4437 && TREE_CODE (then_clause) != REAL_CST
4438 && TREE_CODE (then_clause) != FIXED_CST)
4439 return false;
4440
4441 if (TREE_CODE (else_clause) == SSA_NAME)
4442 {
4443 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
4444 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
4445 &else_def_stmt, &def, &dt))
4446 return false;
4447 }
4448 else if (TREE_CODE (else_clause) != INTEGER_CST
4449 && TREE_CODE (else_clause) != REAL_CST
4450 && TREE_CODE (else_clause) != FIXED_CST)
4451 return false;
4452
4453
4454 vec_mode = TYPE_MODE (vectype);
4455
4456 if (!vec_stmt)
4457 {
4458 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
4459 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
4460 }
4461
4462 /* Transform */
4463
4464 /* Handle def. */
4465 scalar_dest = gimple_assign_lhs (stmt);
4466 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4467
4468 /* Handle cond expr. */
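/* For the first copy (j == 0) the vector defs of the two comparison operands
   and the then/else values are created from scratch and their def-types are
   recorded in DTS[]; the remaining copies use DTS[] with
   vect_get_vec_def_for_stmt_copy to chain to the defs of the previous copy.  */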
4469 for (j = 0; j < ncopies; j++)
4470 {
4471 gimple new_stmt;
4472 if (j == 0)
4473 {
4474 gimple gtemp;
4475 vec_cond_lhs =
4476 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
4477 stmt, NULL);
4478 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
4479 NULL, &gtemp, &def, &dts[0]);
4480 vec_cond_rhs =
4481 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
4482 stmt, NULL);
4483 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
4484 NULL, &gtemp, &def, &dts[1]);
4485 if (reduc_index == 1)
4486 vec_then_clause = reduc_def;
4487 else
4488 {
4489 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
4490 stmt, NULL);
4491 vect_is_simple_use (then_clause, loop_vinfo,
4492 NULL, &gtemp, &def, &dts[2]);
4493 }
4494 if (reduc_index == 2)
4495 vec_else_clause = reduc_def;
4496 else
4497 {
4498 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
4499 stmt, NULL);
4500 vect_is_simple_use (else_clause, loop_vinfo,
4501 NULL, &gtemp, &def, &dts[3]);
4502 }
4503 }
4504 else
4505 {
4506 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
4507 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
4508 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
4509 vec_then_clause);
4510 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
4511 vec_else_clause);
4512 }
4513
4514 /* Arguments are ready. Create the new vector stmt. */
4515 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4516 vec_cond_lhs, vec_cond_rhs);
4517 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4518 vec_compare, vec_then_clause, vec_else_clause);
4519
4520 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4521 new_temp = make_ssa_name (vec_dest, new_stmt);
4522 gimple_assign_set_lhs (new_stmt, new_temp);
4523 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4524 if (j == 0)
4525 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4526 else
4527 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4528
4529 prev_stmt_info = vinfo_for_stmt (new_stmt);
4530 }
4531
4532 return true;
4533 }
4534
4535
4536 /* Make sure the statement is vectorizable. */
4537
4538 bool
4539 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4540 {
4541 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4542 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4543 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4544 bool ok;
4545 tree scalar_type, vectype;
4546
4547 if (vect_print_dump_info (REPORT_DETAILS))
4548 {
4549 fprintf (vect_dump, "==> examining statement: ");
4550 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4551 }
4552
4553 if (gimple_has_volatile_ops (stmt))
4554 {
4555 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4556 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4557
4558 return false;
4559 }
4560
4561 /* Skip stmts that do not need to be vectorized. In loops this is expected
4562 to include:
4563 - the COND_EXPR which is the loop exit condition
4564 - any LABEL_EXPRs in the loop
4565 - computations that are used only for array indexing or loop control.
4566 In basic blocks we only analyze statements that are a part of some SLP
4567 instance, therefore, all the statements are relevant. */
4568
4569 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4570 && !STMT_VINFO_LIVE_P (stmt_info))
4571 {
4572 if (vect_print_dump_info (REPORT_DETAILS))
4573 fprintf (vect_dump, "irrelevant.");
4574
4575 return true;
4576 }
4577
4578 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4579 {
4580 case vect_internal_def:
4581 break;
4582
4583 case vect_reduction_def:
4584 case vect_nested_cycle:
4585 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4586 || relevance == vect_used_in_outer_by_reduction
4587 || relevance == vect_unused_in_scope));
4588 break;
4589
4590 case vect_induction_def:
4591 case vect_constant_def:
4592 case vect_external_def:
4593 case vect_unknown_def_type:
4594 default:
4595 gcc_unreachable ();
4596 }
4597
4598 if (bb_vinfo)
4599 {
4600 gcc_assert (PURE_SLP_STMT (stmt_info));
4601
4602 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4603 if (vect_print_dump_info (REPORT_DETAILS))
4604 {
4605 fprintf (vect_dump, "get vectype for scalar type: ");
4606 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4607 }
4608
4609 vectype = get_vectype_for_scalar_type (scalar_type);
4610 if (!vectype)
4611 {
4612 if (vect_print_dump_info (REPORT_DETAILS))
4613 {
4614 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4615 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4616 }
4617 return false;
4618 }
4619
4620 if (vect_print_dump_info (REPORT_DETAILS))
4621 {
4622 fprintf (vect_dump, "vectype: ");
4623 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4624 }
4625
4626 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4627 }
4628
4629 if (STMT_VINFO_RELEVANT_P (stmt_info))
4630 {
4631 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4632 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4633 *need_to_vectorize = true;
4634 }
4635
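/* Dispatch to the vectorizable_* analyses.  For loop vectorization every
   kind of stmt is tried; for basic-block SLP only the subset that supports
   SLP operation is tried, and the SLP node is passed along.  */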
4636 ok = true;
4637 if (!bb_vinfo
4638 && (STMT_VINFO_RELEVANT_P (stmt_info)
4639 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4640 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4641 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4642 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4643 || vectorizable_shift (stmt, NULL, NULL, NULL)
4644 || vectorizable_operation (stmt, NULL, NULL, NULL)
4645 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4646 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4647 || vectorizable_call (stmt, NULL, NULL)
4648 || vectorizable_store (stmt, NULL, NULL, NULL)
4649 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4650 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4651 else
4652 {
4653 if (bb_vinfo)
4654 ok = (vectorizable_shift (stmt, NULL, NULL, node)
4655 || vectorizable_operation (stmt, NULL, NULL, node)
4656 || vectorizable_assignment (stmt, NULL, NULL, node)
4657 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4658 || vectorizable_store (stmt, NULL, NULL, node));
4659 }
4660
4661 if (!ok)
4662 {
4663 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4664 {
4665 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4666 fprintf (vect_dump, "supported: ");
4667 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4668 }
4669
4670 return false;
4671 }
4672
4673 if (bb_vinfo)
4674 return true;
4675
4676 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
4677 need extra handling, except for vectorizable reductions. */
4678 if (STMT_VINFO_LIVE_P (stmt_info)
4679 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4680 ok = vectorizable_live_operation (stmt, NULL, NULL);
4681
4682 if (!ok)
4683 {
4684 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4685 {
4686 fprintf (vect_dump, "not vectorized: live stmt not ");
4687 fprintf (vect_dump, "supported: ");
4688 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4689 }
4690
4691 return false;
4692 }
4693
4694 if (!PURE_SLP_STMT (stmt_info))
4695 {
4696 /* Groups of strided accesses whose size is not a power of 2 are not
4697 vectorizable yet using loop-vectorization. Therefore, if this stmt
4698 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4699 loop-based vectorized), the loop cannot be vectorized. */
4700 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4701 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4702 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4703 {
4704 if (vect_print_dump_info (REPORT_DETAILS))
4705 {
4706 fprintf (vect_dump, "not vectorized: the size of group "
4707 "of strided accesses is not a power of 2");
4708 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4709 }
4710
4711 return false;
4712 }
4713 }
4714
4715 return true;
4716 }
4717
4718
4719 /* Function vect_transform_stmt.
4720
4721 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4722
4723 bool
4724 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4725 bool *strided_store, slp_tree slp_node,
4726 slp_instance slp_node_instance)
4727 {
4728 bool is_store = false;
4729 gimple vec_stmt = NULL;
4730 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4731 gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
4732 bool done;
4733
4734 switch (STMT_VINFO_TYPE (stmt_info))
4735 {
4736 case type_demotion_vec_info_type:
4737 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4738 gcc_assert (done);
4739 break;
4740
4741 case type_promotion_vec_info_type:
4742 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4743 gcc_assert (done);
4744 break;
4745
4746 case type_conversion_vec_info_type:
4747 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4748 gcc_assert (done);
4749 break;
4750
4751 case induc_vec_info_type:
4752 gcc_assert (!slp_node);
4753 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4754 gcc_assert (done);
4755 break;
4756
4757 case shift_vec_info_type:
4758 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
4759 gcc_assert (done);
4760 break;
4761
4762 case op_vec_info_type:
4763 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4764 gcc_assert (done);
4765 break;
4766
4767 case assignment_vec_info_type:
4768 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4769 gcc_assert (done);
4770 break;
4771
4772 case load_vec_info_type:
4773 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4774 slp_node_instance);
4775 gcc_assert (done);
4776 break;
4777
4778 case store_vec_info_type:
4779 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4780 gcc_assert (done);
4781 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4782 {
4783 /* In case of interleaving, the whole chain is vectorized when the
4784 last store in the chain is reached. Store stmts before the last
4785 one are skipped, and their vec_stmt_info shouldn't be freed
4786 meanwhile. */
4787 *strided_store = true;
4788 if (STMT_VINFO_VEC_STMT (stmt_info))
4789 is_store = true;
4790 }
4791 else
4792 is_store = true;
4793 break;
4794
4795 case condition_vec_info_type:
4796 gcc_assert (!slp_node);
4797 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4798 gcc_assert (done);
4799 break;
4800
4801 case call_vec_info_type:
4802 gcc_assert (!slp_node);
4803 done = vectorizable_call (stmt, gsi, &vec_stmt);
4804 stmt = gsi_stmt (*gsi);
4805 break;
4806
4807 case reduc_vec_info_type:
4808 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
4809 gcc_assert (done);
4810 break;
4811
4812 default:
4813 if (!STMT_VINFO_LIVE_P (stmt_info))
4814 {
4815 if (vect_print_dump_info (REPORT_DETAILS))
4816 fprintf (vect_dump, "stmt not supported.");
4817 gcc_unreachable ();
4818 }
4819 }
4820
4821 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4822 is being vectorized, but outside the immediately enclosing loop. */
4823 if (vec_stmt
4824 && STMT_VINFO_LOOP_VINFO (stmt_info)
4825 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4826 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4827 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4828 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4829 || STMT_VINFO_RELEVANT (stmt_info) ==
4830 vect_used_in_outer_by_reduction))
4831 {
4832 struct loop *innerloop = LOOP_VINFO_LOOP (
4833 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4834 imm_use_iterator imm_iter;
4835 use_operand_p use_p;
4836 tree scalar_dest;
4837 gimple exit_phi;
4838
4839 if (vect_print_dump_info (REPORT_DETAILS))
4840 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4841
4842 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4843 (to be used when vectorizing outer-loop stmts that use the DEF of
4844 STMT). */
4845 if (gimple_code (stmt) == GIMPLE_PHI)
4846 scalar_dest = PHI_RESULT (stmt);
4847 else
4848 scalar_dest = gimple_assign_lhs (stmt);
4849
4850 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4851 {
4852 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4853 {
4854 exit_phi = USE_STMT (use_p);
4855 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4856 }
4857 }
4858 }
4859
4860 /* Handle stmts whose DEF is used outside the loop-nest that is
4861 being vectorized. */
4862 if (STMT_VINFO_LIVE_P (stmt_info)
4863 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4864 {
4865 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4866 gcc_assert (done);
4867 }
4868
4869 if (vec_stmt)
4870 {
4871 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4872 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4873 if (orig_stmt_in_pattern)
4874 {
4875 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4876 /* STMT was inserted by the vectorizer to replace a computation idiom.
4877 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4878 computed this idiom. We need to record a pointer to VEC_STMT in
4879 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4880 documentation of vect_pattern_recog. */
4881 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4882 {
4883 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
4884 == orig_scalar_stmt);
4885 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4886 }
4887 }
4888 }
4889
4890 return is_store;
4891 }
4892
4893
4894 /* Remove a group of stores (for SLP or interleaving), free their
4895 stmt_vec_info. */
4896
4897 void
4898 vect_remove_stores (gimple first_stmt)
4899 {
4900 gimple next = first_stmt;
4901 gimple tmp;
4902 gimple_stmt_iterator next_si;
4903
4904 while (next)
4905 {
4906 /* Free the attached stmt_vec_info and remove the stmt. */
4907 next_si = gsi_for_stmt (next);
4908 gsi_remove (&next_si, true);
4909 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4910 free_stmt_vec_info (next);
4911 next = tmp;
4912 }
4913 }
4914
4915
4916 /* Function new_stmt_vec_info.
4917
4918 Create and initialize a new stmt_vec_info struct for STMT. */
4919
4920 stmt_vec_info
4921 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4922 bb_vec_info bb_vinfo)
4923 {
4924 stmt_vec_info res;
4925 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4926
4927 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4928 STMT_VINFO_STMT (res) = stmt;
4929 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4930 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4931 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4932 STMT_VINFO_LIVE_P (res) = false;
4933 STMT_VINFO_VECTYPE (res) = NULL;
4934 STMT_VINFO_VEC_STMT (res) = NULL;
4935 STMT_VINFO_VECTORIZABLE (res) = true;
4936 STMT_VINFO_IN_PATTERN_P (res) = false;
4937 STMT_VINFO_RELATED_STMT (res) = NULL;
4938 STMT_VINFO_DATA_REF (res) = NULL;
4939
4940 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4941 STMT_VINFO_DR_OFFSET (res) = NULL;
4942 STMT_VINFO_DR_INIT (res) = NULL;
4943 STMT_VINFO_DR_STEP (res) = NULL;
4944 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4945
4946 if (gimple_code (stmt) == GIMPLE_PHI
4947 && is_loop_header_bb_p (gimple_bb (stmt)))
4948 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4949 else
4950 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4951
4952 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4953 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4954 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4955 STMT_SLP_TYPE (res) = loop_vect;
4956 DR_GROUP_FIRST_DR (res) = NULL;
4957 DR_GROUP_NEXT_DR (res) = NULL;
4958 DR_GROUP_SIZE (res) = 0;
4959 DR_GROUP_STORE_COUNT (res) = 0;
4960 DR_GROUP_GAP (res) = 0;
4961 DR_GROUP_SAME_DR_STMT (res) = NULL;
4962 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4963
4964 return res;
4965 }
4966
4967
4968 /* Create the vector that holds the stmt_vec_info structs. */
4969
4970 void
4971 init_stmt_vec_info_vec (void)
4972 {
4973 gcc_assert (!stmt_vec_info_vec);
4974 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4975 }
4976
4977
4978 /* Free the vector that holds the stmt_vec_info structs. */
4979
4980 void
4981 free_stmt_vec_info_vec (void)
4982 {
4983 gcc_assert (stmt_vec_info_vec);
4984 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4985 }
4986
4987
4988 /* Free stmt vectorization related info. */
4989
4990 void
4991 free_stmt_vec_info (gimple stmt)
4992 {
4993 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4994
4995 if (!stmt_info)
4996 return;
4997
4998 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4999 set_vinfo_for_stmt (stmt, NULL);
5000 free (stmt_info);
5001 }
5002
5003
5004 /* Function get_vectype_for_scalar_type_and_size.
5005
5006 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
5007 by the target. */
5008
5009 static tree
5010 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
5011 {
5012 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
5013 enum machine_mode simd_mode;
5014 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
5015 int nunits;
5016 tree vectype;
5017
5018 if (nbytes == 0)
5019 return NULL_TREE;
5020
5021 /* We can't build a vector type of elements with alignment bigger than
5022 their size. */
5023 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
5024 return NULL_TREE;
5025
5026 /* If we'd build a vector type of elements whose mode precision doesn't
5027 match their type's precision we'll get mismatched types on vector
5028 extracts via BIT_FIELD_REFs. This effectively means we disable
5029 vectorization of bool and/or enum types in some languages. */
5030 if (INTEGRAL_TYPE_P (scalar_type)
5031 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
5032 return NULL_TREE;
5033
5034 if (GET_MODE_CLASS (inner_mode) != MODE_INT
5035 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
5036 return NULL_TREE;
5037
5038 /* If no size was supplied use the mode the target prefers. Otherwise
5039 lookup a vector mode of the specified size. */
5040 if (size == 0)
5041 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
5042 else
5043 simd_mode = mode_for_vector (inner_mode, size / nbytes);
5044 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
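/* A mode that holds at most one element of SCALAR_TYPE offers no
   vectorization opportunity; give up.  */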
5045 if (nunits <= 1)
5046 return NULL_TREE;
5047
5048 vectype = build_vector_type (scalar_type, nunits);
5049 if (vect_print_dump_info (REPORT_DETAILS))
5050 {
5051 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
5052 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
5053 }
5054
5055 if (!vectype)
5056 return NULL_TREE;
5057
5058 if (vect_print_dump_info (REPORT_DETAILS))
5059 {
5060 fprintf (vect_dump, "vectype: ");
5061 print_generic_expr (vect_dump, vectype, TDF_SLIM);
5062 }
5063
5064 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
5065 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
5066 {
5067 if (vect_print_dump_info (REPORT_DETAILS))
5068 fprintf (vect_dump, "mode not supported by target.");
5069 return NULL_TREE;
5070 }
5071
5072 return vectype;
5073 }
5074
5075 unsigned int current_vector_size;
5076
5077 /* Function get_vectype_for_scalar_type.
5078
5079 Returns the vector type corresponding to SCALAR_TYPE as supported
5080 by the target. */
5081
5082 tree
5083 get_vectype_for_scalar_type (tree scalar_type)
5084 {
5085 tree vectype;
5086 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
5087 current_vector_size);
5088 if (vectype
5089 && current_vector_size == 0)
5090 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
5091 return vectype;
5092 }
5093
5094 /* Function get_same_sized_vectype
5095
5096 Returns a vector type corresponding to SCALAR_TYPE with the same
5097 size as VECTOR_TYPE, if supported by the target. */
5098
5099 tree
5100 get_same_sized_vectype (tree scalar_type, tree vector_type)
5101 {
5102 return get_vectype_for_scalar_type_and_size
5103 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
5104 }
5105
5106 /* Function vect_is_simple_use.
5107
5108 Input:
5109 LOOP_VINFO - the vect info of the loop that is being vectorized.
5110 BB_VINFO - the vect info of the basic block that is being vectorized.
5111 OPERAND - operand of a stmt in the loop or bb.
5112 DEF - the defining stmt in case OPERAND is an SSA_NAME.
5113
5114 Returns whether a stmt with OPERAND can be vectorized.
5115 For loops, supportable operands are constants, loop invariants, and operands
5116 that are defined by the current iteration of the loop. Unsupportable
5117 operands are those that are defined by a previous iteration of the loop (as
5118 is the case in reduction/induction computations).
5119 For basic blocks, supportable operands are constants and bb invariants.
5120 For now, operands defined outside the basic block are not supported. */
5121
5122 bool
5123 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
5124 bb_vec_info bb_vinfo, gimple *def_stmt,
5125 tree *def, enum vect_def_type *dt)
5126 {
5127 basic_block bb;
5128 stmt_vec_info stmt_vinfo;
5129 struct loop *loop = NULL;
5130
5131 if (loop_vinfo)
5132 loop = LOOP_VINFO_LOOP (loop_vinfo);
5133
5134 *def_stmt = NULL;
5135 *def = NULL_TREE;
5136
5137 if (vect_print_dump_info (REPORT_DETAILS))
5138 {
5139 fprintf (vect_dump, "vect_is_simple_use: operand ");
5140 print_generic_expr (vect_dump, operand, TDF_SLIM);
5141 }
5142
5143 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
5144 {
5145 *dt = vect_constant_def;
5146 return true;
5147 }
5148
5149 if (is_gimple_min_invariant (operand))
5150 {
5151 *def = operand;
5152 *dt = vect_external_def;
5153 return true;
5154 }
5155
5156 if (TREE_CODE (operand) == PAREN_EXPR)
5157 {
5158 if (vect_print_dump_info (REPORT_DETAILS))
5159 fprintf (vect_dump, "non-associatable copy.");
5160 operand = TREE_OPERAND (operand, 0);
5161 }
5162
5163 if (TREE_CODE (operand) != SSA_NAME)
5164 {
5165 if (vect_print_dump_info (REPORT_DETAILS))
5166 fprintf (vect_dump, "not ssa-name.");
5167 return false;
5168 }
5169
5170 *def_stmt = SSA_NAME_DEF_STMT (operand);
5171 if (*def_stmt == NULL)
5172 {
5173 if (vect_print_dump_info (REPORT_DETAILS))
5174 fprintf (vect_dump, "no def_stmt.");
5175 return false;
5176 }
5177
5178 if (vect_print_dump_info (REPORT_DETAILS))
5179 {
5180 fprintf (vect_dump, "def_stmt: ");
5181 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
5182 }
5183
5184 /* Empty stmt is expected only in case of a function argument.
5185 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
5186 if (gimple_nop_p (*def_stmt))
5187 {
5188 *def = operand;
5189 *dt = vect_external_def;
5190 return true;
5191 }
5192
5193 bb = gimple_bb (*def_stmt);
5194
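/* A def outside the loop (or, for basic-block vectorization, outside the
   block or defined by a PHI) is treated as external to the vectorized
   region.  */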
5195 if ((loop && !flow_bb_inside_loop_p (loop, bb))
5196 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
5197 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
5198 *dt = vect_external_def;
5199 else
5200 {
5201 stmt_vinfo = vinfo_for_stmt (*def_stmt);
5202 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
5203 }
5204
5205 if (*dt == vect_unknown_def_type)
5206 {
5207 if (vect_print_dump_info (REPORT_DETAILS))
5208 fprintf (vect_dump, "Unsupported pattern.");
5209 return false;
5210 }
5211
5212 if (vect_print_dump_info (REPORT_DETAILS))
5213 fprintf (vect_dump, "type of def: %d.",*dt);
5214
5215 switch (gimple_code (*def_stmt))
5216 {
5217 case GIMPLE_PHI:
5218 *def = gimple_phi_result (*def_stmt);
5219 break;
5220
5221 case GIMPLE_ASSIGN:
5222 *def = gimple_assign_lhs (*def_stmt);
5223 break;
5224
5225 case GIMPLE_CALL:
5226 *def = gimple_call_lhs (*def_stmt);
5227 if (*def != NULL)
5228 break;
5229 /* FALLTHRU */
5230 default:
5231 if (vect_print_dump_info (REPORT_DETAILS))
5232 fprintf (vect_dump, "unsupported defining stmt: ");
5233 return false;
5234 }
5235
5236 return true;
5237 }
5238
5239 /* Function vect_is_simple_use_1.
5240
5241 Same as vect_is_simple_use but also determines the vector operand
5242 type of OPERAND and stores it to *VECTYPE. If the definition of
5243 OPERAND is vect_uninitialized_def, vect_constant_def or
5244 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
5245 is responsible for computing the best suited vector type for the
5246 scalar operand. */
5247
5248 bool
5249 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
5250 bb_vec_info bb_vinfo, gimple *def_stmt,
5251 tree *def, enum vect_def_type *dt, tree *vectype)
5252 {
5253 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
5254 return false;
5255
5256 /* Now get a vector type if the def is internal, otherwise supply
5257 NULL_TREE and leave it up to the caller to figure out a proper
5258 type for the use stmt. */
5259 if (*dt == vect_internal_def
5260 || *dt == vect_induction_def
5261 || *dt == vect_reduction_def
5262 || *dt == vect_double_reduction_def
5263 || *dt == vect_nested_cycle)
5264 {
5265 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
5266 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
5267 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
5268 *vectype = STMT_VINFO_VECTYPE (stmt_info);
5269 gcc_assert (*vectype != NULL_TREE);
5270 }
5271 else if (*dt == vect_uninitialized_def
5272 || *dt == vect_constant_def
5273 || *dt == vect_external_def)
5274 *vectype = NULL_TREE;
5275 else
5276 gcc_unreachable ();
5277
5278 return true;
5279 }
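/* A minimal usage sketch (illustrative only; OP stands for an operand of
   the stmt being vectorized):

     gimple def_stmt;
     tree def, vectype;
     enum vect_def_type dt;

     if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
                                &def_stmt, &def, &dt, &vectype))
       return false;
     if (!vectype)
       vectype = get_vectype_for_scalar_type (TREE_TYPE (op));

   Falling back to get_vectype_for_scalar_type when *VECTYPE comes back as
   NULL_TREE is the typical pattern for callers of this function.  */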
5280
5281
5282 /* Function supportable_widening_operation
5283
5284 Check whether an operation represented by the code CODE is a
5285 widening operation that is supported by the target platform in
5286 vector form (i.e., when operating on arguments of type VECTYPE_IN
5287 producing a result of type VECTYPE_OUT).
5288
5289 Widening operations we currently support are NOP (CONVERT), FLOAT
5290 and WIDEN_MULT. This function checks if these operations are supported
5291 by the target platform either directly (via vector tree-codes), or via
5292 target builtins.
5293
5294 Output:
5295 - CODE1 and CODE2 are codes of vector operations to be used when
5296 vectorizing the operation, if available.
5297 - DECL1 and DECL2 are decls of target builtin functions to be used
5298 when vectorizing the operation, if available. In this case,
5299 CODE1 and CODE2 are CALL_EXPR.
5300 - MULTI_STEP_CVT determines the number of required intermediate steps in
5301 case of multi-step conversion (like char->short->int - in that case
5302 MULTI_STEP_CVT will be 1).
5303 - INTERM_TYPES contains the intermediate type required to perform the
5304 widening operation (short in the above example). */
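/* Worked example (illustrative, assuming a 128-bit vector target):
   widening chars to ints is the two-step promotion char->short->int.
   For a V16QI input this function would return CODE1 and CODE2 set to
   VEC_UNPACK_HI_EXPR and VEC_UNPACK_LO_EXPR (in an endian-dependent
   order), MULTI_STEP_CVT = 1 and INTERM_TYPES = { the V8HI short vector
   type }; each step unpacks one input vector into a low and a high
   result vector of the wider type.  */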
5305
5306 bool
5307 supportable_widening_operation (enum tree_code code, gimple stmt,
5308 tree vectype_out, tree vectype_in,
5309 tree *decl1, tree *decl2,
5310 enum tree_code *code1, enum tree_code *code2,
5311 int *multi_step_cvt,
5312 VEC (tree, heap) **interm_types)
5313 {
5314 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5315 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
5316 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
5317 bool ordered_p;
5318 enum machine_mode vec_mode;
5319 enum insn_code icode1, icode2;
5320 optab optab1, optab2;
5321 tree vectype = vectype_in;
5322 tree wide_vectype = vectype_out;
5323 enum tree_code c1, c2;
5324
5325 /* The result of a vectorized widening operation usually requires two vectors
5326 (because the widened results do not fit in one vector).  The vector
5327 results are normally expected to be generated in the same order as in the
5328 original scalar computation, i.e. if 8 results are generated in each
5329 vector iteration, they are to be organized as follows:
5330 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
5331
5332 However, in the special case that the result of the widening operation is
5333 used in a reduction computation only, the order doesn't matter (because
5334 when vectorizing a reduction we change the order of the computation).
5335 Some targets can take advantage of this and generate more efficient code.
5336 For example, targets such as Altivec that support widen_mult using a
5337 sequence of {mult_even,mult_odd} generate the following vectors:
5338 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
5339
5340 When vectorizing outer-loops, we execute the inner-loop sequentially
5341 (each vectorized inner-loop iteration contributes to VF outer-loop
5342 iterations in parallel).  We therefore do not allow changing the order
5343 of the computation in the inner-loop during outer-loop vectorization. */
5344
5345 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
5346 && !nested_in_vect_loop_p (vect_loop, stmt))
5347 ordered_p = false;
5348 else
5349 ordered_p = true;
5350
5351 if (!ordered_p
5352 && code == WIDEN_MULT_EXPR
5353 && targetm.vectorize.builtin_mul_widen_even
5354 && targetm.vectorize.builtin_mul_widen_even (vectype)
5355 && targetm.vectorize.builtin_mul_widen_odd
5356 && targetm.vectorize.builtin_mul_widen_odd (vectype))
5357 {
5358 if (vect_print_dump_info (REPORT_DETAILS))
5359 fprintf (vect_dump, "Unordered widening operation detected.");
5360
5361 *code1 = *code2 = CALL_EXPR;
5362 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
5363 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
5364 return true;
5365 }
5366
5367 switch (code)
5368 {
5369 case WIDEN_MULT_EXPR:
5370 if (BYTES_BIG_ENDIAN)
5371 {
5372 c1 = VEC_WIDEN_MULT_HI_EXPR;
5373 c2 = VEC_WIDEN_MULT_LO_EXPR;
5374 }
5375 else
5376 {
5377 c2 = VEC_WIDEN_MULT_HI_EXPR;
5378 c1 = VEC_WIDEN_MULT_LO_EXPR;
5379 }
5380 break;
5381
5382 CASE_CONVERT:
5383 if (BYTES_BIG_ENDIAN)
5384 {
5385 c1 = VEC_UNPACK_HI_EXPR;
5386 c2 = VEC_UNPACK_LO_EXPR;
5387 }
5388 else
5389 {
5390 c2 = VEC_UNPACK_HI_EXPR;
5391 c1 = VEC_UNPACK_LO_EXPR;
5392 }
5393 break;
5394
5395 case FLOAT_EXPR:
5396 if (BYTES_BIG_ENDIAN)
5397 {
5398 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
5399 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
5400 }
5401 else
5402 {
5403 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
5404 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
5405 }
5406 break;
5407
5408 case FIX_TRUNC_EXPR:
5409 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
5410 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
5411 computing the operation. */
5412 return false;
5413
5414 default:
5415 gcc_unreachable ();
5416 }
5417
5418 if (code == FIX_TRUNC_EXPR)
5419 {
5420 /* The signedness is determined from the output operand. */
5421 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5422 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
5423 }
5424 else
5425 {
5426 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5427 optab2 = optab_for_tree_code (c2, vectype, optab_default);
5428 }
5429
5430 if (!optab1 || !optab2)
5431 return false;
5432
5433 vec_mode = TYPE_MODE (vectype);
5434 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
5435 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
5436 return false;
5437
5438 /* Check if it's a multi-step conversion that can be done using intermediate
5439 types. */
5440 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
5441 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
5442 {
5443 int i;
5444 tree prev_type = vectype, intermediate_type;
5445 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5446 optab optab3, optab4;
5447
5448 if (!CONVERT_EXPR_CODE_P (code))
5449 return false;
5450
5451 *code1 = c1;
5452 *code2 = c2;
5453
5454 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5455 intermediate steps in the promotion sequence.  We try
5456 MAX_INTERM_CVT_STEPS intermediate types to get to WIDE_VECTYPE, and
5457 fail if we do not. */
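/* Illustrative trace (assuming a 128-bit target with QI->HI and HI->SI
   unpack support): for a char->int conversion the first iteration below
   finds the V16QI->V8HI and V8HI->V4SI unpack handlers, pushes the V8HI
   intermediate type onto *INTERM_TYPES, bumps *MULTI_STEP_CVT to 1, and
   returns true because the V8HI handlers produce results in
   TYPE_MODE (wide_vectype).  */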
5458 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5459 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5460 {
5461 intermediate_mode = insn_data[icode1].operand[0].mode;
5462 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5463 TYPE_UNSIGNED (prev_type));
5464 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
5465 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
5466
5467 if (!optab3 || !optab4
5468 || ((icode1 = optab_handler (optab1, prev_mode))
5469 == CODE_FOR_nothing)
5470 || insn_data[icode1].operand[0].mode != intermediate_mode
5471 || ((icode2 = optab_handler (optab2, prev_mode))
5472 == CODE_FOR_nothing)
5473 || insn_data[icode2].operand[0].mode != intermediate_mode
5474 || ((icode1 = optab_handler (optab3, intermediate_mode))
5475 == CODE_FOR_nothing)
5476 || ((icode2 = optab_handler (optab4, intermediate_mode))
5477 == CODE_FOR_nothing))
5478 return false;
5479
5480 VEC_quick_push (tree, *interm_types, intermediate_type);
5481 (*multi_step_cvt)++;
5482
5483 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
5484 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
5485 return true;
5486
5487 prev_type = intermediate_type;
5488 prev_mode = intermediate_mode;
5489 }
5490
5491 return false;
5492 }
5493
5494 *code1 = c1;
5495 *code2 = c2;
5496 return true;
5497 }
5498
5499
5500 /* Function supportable_narrowing_operation
5501
5502 Check whether an operation represented by the code CODE is a
5503 narrowing operation that is supported by the target platform in
5504 vector form (i.e., when operating on arguments of type VECTYPE_IN
5505 and producing a result of type VECTYPE_OUT).
5506
5507 Narrowing operations we currently support are NOP (CONVERT) and
5508 FIX_TRUNC. This function checks if these operations are supported by
5509 the target platform directly via vector tree-codes.
5510
5511 Output:
5512 - CODE1 is the code of a vector operation to be used when
5513 vectorizing the operation, if available.
5514 - MULTI_STEP_CVT determines the number of required intermediate steps in
5515 case of multi-step conversion (like int->short->char - in that case
5516 MULTI_STEP_CVT will be 1).
5517 - INTERM_TYPES contains the intermediate type required to perform the
5518 narrowing operation (short in the above example). */
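/* Worked example (illustrative, assuming a 128-bit vector target):
   narrowing ints to chars is the two-step demotion int->short->char.
   For a V4SI input this function would return CODE1 = VEC_PACK_TRUNC_EXPR,
   MULTI_STEP_CVT = 1 and INTERM_TYPES = { the V8HI short vector type };
   each step packs two input vectors into one vector of the narrower
   type.  */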
5519
5520 bool
5521 supportable_narrowing_operation (enum tree_code code,
5522 tree vectype_out, tree vectype_in,
5523 enum tree_code *code1, int *multi_step_cvt,
5524 VEC (tree, heap) **interm_types)
5525 {
5526 enum machine_mode vec_mode;
5527 enum insn_code icode1;
5528 optab optab1, interm_optab;
5529 tree vectype = vectype_in;
5530 tree narrow_vectype = vectype_out;
5531 enum tree_code c1;
5532 tree intermediate_type, prev_type;
5533 int i;
5534
5535 switch (code)
5536 {
5537 CASE_CONVERT:
5538 c1 = VEC_PACK_TRUNC_EXPR;
5539 break;
5540
5541 case FIX_TRUNC_EXPR:
5542 c1 = VEC_PACK_FIX_TRUNC_EXPR;
5543 break;
5544
5545 case FLOAT_EXPR:
5546 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5547 tree code and optabs used for computing the operation. */
5548 return false;
5549
5550 default:
5551 gcc_unreachable ();
5552 }
5553
5554 if (code == FIX_TRUNC_EXPR)
5555 /* The signedness is determined from the output operand. */
5556 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5557 else
5558 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5559
5560 if (!optab1)
5561 return false;
5562
5563 vec_mode = TYPE_MODE (vectype);
5564 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
5565 return false;
5566
5567 /* Check if it's a multi-step conversion that can be done using intermediate
5568 types. */
5569 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5570 {
5571 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5572
5573 *code1 = c1;
5574 prev_type = vectype;
5575 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5576 intermediate steps in the demotion sequence.  We try
5577 MAX_INTERM_CVT_STEPS intermediate types to get to NARROW_VECTYPE, and
5578 fail if we do not. */
5579 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5580 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5581 {
5582 intermediate_mode = insn_data[icode1].operand[0].mode;
5583 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5584 TYPE_UNSIGNED (prev_type));
5585 interm_optab = optab_for_tree_code (c1, intermediate_type,
5586 optab_default);
5587 if (!interm_optab
5588 || ((icode1 = optab_handler (optab1, prev_mode))
5589 == CODE_FOR_nothing)
5590 || insn_data[icode1].operand[0].mode != intermediate_mode
5591 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
5592 == CODE_FOR_nothing))
5593 return false;
5594
5595 VEC_quick_push (tree, *interm_types, intermediate_type);
5596 (*multi_step_cvt)++;
5597
5598 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5599 return true;
5600
5601 prev_type = intermediate_type;
5602 prev_mode = intermediate_mode;
5603 }
5604
5605 return false;
5606 }
5607
5608 *code1 = c1;
5609 return true;
5610 }