gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "tree-pretty-print.h"
32 #include "gimple-pretty-print.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "cfglayout.h"
37 #include "expr.h"
38 #include "recog.h"
39 #include "optabs.h"
40 #include "diagnostic-core.h"
41 #include "toplev.h"
42 #include "tree-vectorizer.h"
43 #include "langhooks.h"
44
45
46 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
47
48 /* Function vect_mark_relevant.
49
50 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
51
52 static void
53 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
54 enum vect_relevant relevant, bool live_p)
55 {
56 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
57 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
58 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
59
60 if (vect_print_dump_info (REPORT_DETAILS))
61 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
62
63 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
64 {
65 gimple pattern_stmt;
66
67 /* This is the last stmt in a sequence that was detected as a
68 pattern that can potentially be vectorized. Don't mark the stmt
69 as relevant/live because it's not going to be vectorized.
70 Instead mark the pattern-stmt that replaces it. */
71
72 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
73
74 if (vect_print_dump_info (REPORT_DETAILS))
75 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
76 stmt_info = vinfo_for_stmt (pattern_stmt);
77 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
78 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
79 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
80 stmt = pattern_stmt;
81 }
82
83 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
84 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
85 STMT_VINFO_RELEVANT (stmt_info) = relevant;
86
87 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
88 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
89 {
90 if (vect_print_dump_info (REPORT_DETAILS))
91 fprintf (vect_dump, "already marked relevant/live.");
92 return;
93 }
94
95 VEC_safe_push (gimple, heap, *worklist, stmt);
96 }
97
98
99 /* Function vect_stmt_relevant_p.
100
101 Return true if STMT, which appears in the loop represented by LOOP_VINFO,
102 is "relevant for vectorization".
103
104 A stmt is considered "relevant for vectorization" if:
105 - it has uses outside the loop.
106 - it has vdefs (it alters memory).
107 - it is a control stmt in the loop (except for the exit condition).
108
109 CHECKME: what other side effects would the vectorizer allow? */
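/* Illustrative example: in

     s = 0;
     for (i = 0; i < n; i++)
       s += a[i];
     ... = s;

   the stmt "s += a[i]" is "live" because its result is used after the loop
   (through the loop-closed-SSA exit phi), and a store such as "b[i] = x"
   would be "relevant" because it has a vdef.  Loads that merely feed such
   stmts are not caught here; they become relevant later through the
   worklist in vect_mark_stmts_to_be_vectorized.  */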
110
111 static bool
112 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
113 enum vect_relevant *relevant, bool *live_p)
114 {
115 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
116 ssa_op_iter op_iter;
117 imm_use_iterator imm_iter;
118 use_operand_p use_p;
119 def_operand_p def_p;
120
121 *relevant = vect_unused_in_scope;
122 *live_p = false;
123
124 /* cond stmt other than loop exit cond. */
125 if (is_ctrl_stmt (stmt)
126 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
127 != loop_exit_ctrl_vec_info_type)
128 *relevant = vect_used_in_scope;
129
130 /* changing memory. */
131 if (gimple_code (stmt) != GIMPLE_PHI)
132 if (gimple_vdef (stmt))
133 {
134 if (vect_print_dump_info (REPORT_DETAILS))
135 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
136 *relevant = vect_used_in_scope;
137 }
138
139 /* uses outside the loop. */
140 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
141 {
142 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
143 {
144 basic_block bb = gimple_bb (USE_STMT (use_p));
145 if (!flow_bb_inside_loop_p (loop, bb))
146 {
147 if (vect_print_dump_info (REPORT_DETAILS))
148 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
149
150 if (is_gimple_debug (USE_STMT (use_p)))
151 continue;
152
153 /* We expect all such uses to be in the loop exit phis
154 (because of loop-closed SSA form).  */
155 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
156 gcc_assert (bb == single_exit (loop)->dest);
157
158 *live_p = true;
159 }
160 }
161 }
162
163 return (*live_p || *relevant);
164 }
165
166
167 /* Function exist_non_indexing_operands_for_use_p
168
169 USE is one of the uses attached to STMT. Check if USE is
170 used in STMT for anything other than indexing an array. */
171
172 static bool
173 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
174 {
175 tree operand;
176 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
177
178 /* USE corresponds to some operand in STMT. If there is no data
179 reference in STMT, then any operand that corresponds to USE
180 is not indexing an array. */
181 if (!STMT_VINFO_DATA_REF (stmt_info))
182 return true;
183
184 /* STMT has a data_ref. FORNOW this means that it is of one of
185 the following forms:
186 -1- ARRAY_REF = var
187 -2- var = ARRAY_REF
188 (This should have been verified in analyze_data_refs).
189
190 'var' in the second case corresponds to a def, not a use,
191 so USE cannot correspond to any operands that are not used
192 for array indexing.
193
194 Therefore, all we need to check is if STMT falls into the
195 first case, and whether var corresponds to USE. */
196
197 if (!gimple_assign_copy_p (stmt))
198 return false;
199 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
200 return false;
201 operand = gimple_assign_rhs1 (stmt);
202 if (TREE_CODE (operand) != SSA_NAME)
203 return false;
204
205 if (operand == use)
206 return true;
207
208 return false;
209 }
210
211
212 /*
213 Function process_use.
214
215 Inputs:
216 - a USE in STMT in a loop represented by LOOP_VINFO
217 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
218 that defined USE. This is done by calling mark_relevant and passing it
219 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
220
221 Outputs:
222 Generally, LIVE_P and RELEVANT are used to define the liveness and
223 relevance info of the DEF_STMT of this USE:
224 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
225 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
226 Exceptions:
227 - case 1: If USE is used only for address computations (e.g. array indexing),
228 which does not need to be directly vectorized, then the liveness/relevance
229 of the respective DEF_STMT is left unchanged.
230 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
231 skip DEF_STMT because it has already been processed.
232 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
233 be modified accordingly.
234
235 Return true if everything is as expected. Return false otherwise. */
236
237 static bool
238 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
239 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
240 {
241 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
242 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
243 stmt_vec_info dstmt_vinfo;
244 basic_block bb, def_bb;
245 tree def;
246 gimple def_stmt;
247 enum vect_def_type dt;
248
249 /* case 1: we are only interested in uses that need to be vectorized. Uses
250 that are used for address computation are not considered relevant. */
251 if (!exist_non_indexing_operands_for_use_p (use, stmt))
252 return true;
253
254 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
255 {
256 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
257 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
258 return false;
259 }
260
261 if (!def_stmt || gimple_nop_p (def_stmt))
262 return true;
263
264 def_bb = gimple_bb (def_stmt);
265 if (!flow_bb_inside_loop_p (loop, def_bb))
266 {
267 if (vect_print_dump_info (REPORT_DETAILS))
268 fprintf (vect_dump, "def_stmt is out of loop.");
269 return true;
270 }
271
272 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
273 DEF_STMT must have already been processed, because this should be the
274 only way that STMT, which is a reduction-phi, was put in the worklist,
275 as there should be no other uses for DEF_STMT in the loop. So we just
276 check that everything is as expected, and we are done. */
277 dstmt_vinfo = vinfo_for_stmt (def_stmt);
278 bb = gimple_bb (stmt);
279 if (gimple_code (stmt) == GIMPLE_PHI
280 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
281 && gimple_code (def_stmt) != GIMPLE_PHI
282 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
283 && bb->loop_father == def_bb->loop_father)
284 {
285 if (vect_print_dump_info (REPORT_DETAILS))
286 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
287 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
288 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
289 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
290 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
291 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
292 return true;
293 }
294
295 /* case 3a: outer-loop stmt defining an inner-loop stmt:
296 outer-loop-header-bb:
297 d = def_stmt
298 inner-loop:
299 stmt # use (d)
300 outer-loop-tail-bb:
301 ... */
302 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
303 {
304 if (vect_print_dump_info (REPORT_DETAILS))
305 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
306
307 switch (relevant)
308 {
309 case vect_unused_in_scope:
310 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
311 vect_used_in_scope : vect_unused_in_scope;
312 break;
313
314 case vect_used_in_outer_by_reduction:
315 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
316 relevant = vect_used_by_reduction;
317 break;
318
319 case vect_used_in_outer:
320 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
321 relevant = vect_used_in_scope;
322 break;
323
324 case vect_used_in_scope:
325 break;
326
327 default:
328 gcc_unreachable ();
329 }
330 }
331
332 /* case 3b: inner-loop stmt defining an outer-loop stmt:
333 outer-loop-header-bb:
334 ...
335 inner-loop:
336 d = def_stmt
337 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
338 stmt # use (d) */
339 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
340 {
341 if (vect_print_dump_info (REPORT_DETAILS))
342 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
343
344 switch (relevant)
345 {
346 case vect_unused_in_scope:
347 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
348 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
349 vect_used_in_outer_by_reduction : vect_unused_in_scope;
350 break;
351
352 case vect_used_by_reduction:
353 relevant = vect_used_in_outer_by_reduction;
354 break;
355
356 case vect_used_in_scope:
357 relevant = vect_used_in_outer;
358 break;
359
360 default:
361 gcc_unreachable ();
362 }
363 }
364
365 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
366 return true;
367 }
368
369
370 /* Function vect_mark_stmts_to_be_vectorized.
371
372 Not all stmts in the loop need to be vectorized. For example:
373
374 for i...
375 for j...
376 1. T0 = i + j
377 2. T1 = a[T0]
378
379 3. j = j + 1
380
381 Stmts 1 and 3 do not need to be vectorized, because loop control and
382 addressing of vectorized data-refs are handled differently.
383
384 This pass detects such stmts. */
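/* The detection below is a standard worklist propagation: first seed the
   worklist with stmts that are relevant or live by themselves
   (vect_stmt_relevant_p), then repeatedly pop a stmt and propagate its
   liveness/relevance to the defining stmts of its uses (process_use)
   until the worklist is empty.  */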
385
386 bool
387 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
388 {
389 VEC(gimple,heap) *worklist;
390 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
391 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
392 unsigned int nbbs = loop->num_nodes;
393 gimple_stmt_iterator si;
394 gimple stmt;
395 unsigned int i;
396 stmt_vec_info stmt_vinfo;
397 basic_block bb;
398 gimple phi;
399 bool live_p;
400 enum vect_relevant relevant, tmp_relevant;
401 enum vect_def_type def_type;
402
403 if (vect_print_dump_info (REPORT_DETAILS))
404 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
405
406 worklist = VEC_alloc (gimple, heap, 64);
407
408 /* 1. Init worklist. */
409 for (i = 0; i < nbbs; i++)
410 {
411 bb = bbs[i];
412 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
413 {
414 phi = gsi_stmt (si);
415 if (vect_print_dump_info (REPORT_DETAILS))
416 {
417 fprintf (vect_dump, "init: phi relevant? ");
418 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
419 }
420
421 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
422 vect_mark_relevant (&worklist, phi, relevant, live_p);
423 }
424 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
425 {
426 stmt = gsi_stmt (si);
427 if (vect_print_dump_info (REPORT_DETAILS))
428 {
429 fprintf (vect_dump, "init: stmt relevant? ");
430 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
431 }
432
433 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
434 vect_mark_relevant (&worklist, stmt, relevant, live_p);
435 }
436 }
437
438 /* 2. Process worklist. */
439 while (VEC_length (gimple, worklist) > 0)
440 {
441 use_operand_p use_p;
442 ssa_op_iter iter;
443
444 stmt = VEC_pop (gimple, worklist);
445 if (vect_print_dump_info (REPORT_DETAILS))
446 {
447 fprintf (vect_dump, "worklist: examine stmt: ");
448 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
449 }
450
451 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
452 (DEF_STMT) as relevant/irrelevant and live/dead according to the
453 liveness and relevance properties of STMT. */
454 stmt_vinfo = vinfo_for_stmt (stmt);
455 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
456 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
457
458 /* Generally, the liveness and relevance properties of STMT are
459 propagated as is to the DEF_STMTs of its USEs:
460 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
461 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
462
463 One exception is when STMT has been identified as defining a reduction
464 variable; in this case we set the liveness/relevance as follows:
465 live_p = false
466 relevant = vect_used_by_reduction
467 This is because we distinguish between two kinds of relevant stmts -
468 those that are used by a reduction computation, and those that are
469 (also) used by a regular computation. This allows us later on to
470 identify stmts that are used solely by a reduction, and therefore the
471 order of the results that they produce does not have to be kept. */
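/* For instance, for a sum reduction "s += a[i]" the partial results may be
   combined in any order, so stmts that are only vect_used_by_reduction do
   not constrain the order in which their vectorized results are produced.  */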
472
473 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
474 tmp_relevant = relevant;
475 switch (def_type)
476 {
477 case vect_reduction_def:
478 switch (tmp_relevant)
479 {
480 case vect_unused_in_scope:
481 relevant = vect_used_by_reduction;
482 break;
483
484 case vect_used_by_reduction:
485 if (gimple_code (stmt) == GIMPLE_PHI)
486 break;
487 /* fall through */
488
489 default:
490 if (vect_print_dump_info (REPORT_DETAILS))
491 fprintf (vect_dump, "unsupported use of reduction.");
492
493 VEC_free (gimple, heap, worklist);
494 return false;
495 }
496
497 live_p = false;
498 break;
499
500 case vect_nested_cycle:
501 if (tmp_relevant != vect_unused_in_scope
502 && tmp_relevant != vect_used_in_outer_by_reduction
503 && tmp_relevant != vect_used_in_outer)
504 {
505 if (vect_print_dump_info (REPORT_DETAILS))
506 fprintf (vect_dump, "unsupported use of nested cycle.");
507
508 VEC_free (gimple, heap, worklist);
509 return false;
510 }
511
512 live_p = false;
513 break;
514
515 case vect_double_reduction_def:
516 if (tmp_relevant != vect_unused_in_scope
517 && tmp_relevant != vect_used_by_reduction)
518 {
519 if (vect_print_dump_info (REPORT_DETAILS))
520 fprintf (vect_dump, "unsupported use of double reduction.");
521
522 VEC_free (gimple, heap, worklist);
523 return false;
524 }
525
526 live_p = false;
527 break;
528
529 default:
530 break;
531 }
532
533 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
534 {
535 tree op = USE_FROM_PTR (use_p);
536 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
537 {
538 VEC_free (gimple, heap, worklist);
539 return false;
540 }
541 }
542 } /* while worklist */
543
544 VEC_free (gimple, heap, worklist);
545 return true;
546 }
547
548
549 /* Get cost by calling the target's vectorization cost builtin. */
550
551 static inline
552 int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
553 {
554 tree dummy_type = NULL;
555 int dummy = 0;
556
557 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
558 dummy_type, dummy);
559 }
560
561
562 /* Get cost for STMT. */
563
564 int
565 cost_for_stmt (gimple stmt)
566 {
567 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
568
569 switch (STMT_VINFO_TYPE (stmt_info))
570 {
571 case load_vec_info_type:
572 return vect_get_stmt_cost (scalar_load);
573 case store_vec_info_type:
574 return vect_get_stmt_cost (scalar_store);
575 case op_vec_info_type:
576 case condition_vec_info_type:
577 case assignment_vec_info_type:
578 case reduc_vec_info_type:
579 case induc_vec_info_type:
580 case type_promotion_vec_info_type:
581 case type_demotion_vec_info_type:
582 case type_conversion_vec_info_type:
583 case call_vec_info_type:
584 return vect_get_stmt_cost (scalar_stmt);
585 case undef_vec_info_type:
586 default:
587 gcc_unreachable ();
588 }
589 }
590
591 /* Function vect_model_simple_cost.
592
593 Models cost for simple operations, i.e. those that only emit ncopies of a
594 single op. Right now, this does not account for multiple insns that could
595 be generated for the single vector op. We will handle that shortly. */
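/* For example (illustrative numbers): with NCOPIES == 2 and one constant
   operand, the code below charges 2 * vector_stmt to the inside-of-loop
   cost and one vector_stmt to the outside-of-loop cost (for building the
   invariant vector once before the loop).  */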
596
597 void
598 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
599 enum vect_def_type *dt, slp_tree slp_node)
600 {
601 int i;
602 int inside_cost = 0, outside_cost = 0;
603
604 /* The SLP costs were already calculated during SLP tree build. */
605 if (PURE_SLP_STMT (stmt_info))
606 return;
607
608 inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);
609
610 /* FORNOW: Assuming maximum 2 args per stmts. */
611 for (i = 0; i < 2; i++)
612 {
613 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
614 outside_cost += vect_get_stmt_cost (vector_stmt);
615 }
616
617 if (vect_print_dump_info (REPORT_COST))
618 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
619 "outside_cost = %d .", inside_cost, outside_cost);
620
621 /* Set the costs either in STMT_INFO or SLP_NODE (if it exists). */
622 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
623 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
624 }
625
626
627 /* Function vect_cost_strided_group_size
628
629 For strided load or store, return the group_size only if it is the first
630 load or store of a group, else return 1. This ensures that group size is
631 only returned once per group. */
632
633 static int
634 vect_cost_strided_group_size (stmt_vec_info stmt_info)
635 {
636 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
637
638 if (first_stmt == STMT_VINFO_STMT (stmt_info))
639 return DR_GROUP_SIZE (stmt_info);
640
641 return 1;
642 }
643
644
645 /* Function vect_model_store_cost
646
647 Models cost for stores. In the case of strided accesses, one access
648 has the overhead of the strided access attributed to it. */
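/* For example (illustrative numbers): for an interleaved group of 4 stores
   and NCOPIES == 1, the permute cost computed below is
   1 * log2 (4) * 4 = 8 vector_stmt operations.  It is attributed to the
   first store of the group only (vect_cost_strided_group_size returns 1
   for the other members), on top of the per-copy store cost added by
   vect_get_store_cost.  */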
649
650 void
651 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
652 enum vect_def_type dt, slp_tree slp_node)
653 {
654 int group_size;
655 unsigned int inside_cost = 0, outside_cost = 0;
656 struct data_reference *first_dr;
657 gimple first_stmt;
658
659 /* The SLP costs were already calculated during SLP tree build. */
660 if (PURE_SLP_STMT (stmt_info))
661 return;
662
663 if (dt == vect_constant_def || dt == vect_external_def)
664 outside_cost = vect_get_stmt_cost (scalar_to_vec);
665
666 /* Strided access? */
667 if (DR_GROUP_FIRST_DR (stmt_info))
668 {
669 if (slp_node)
670 {
671 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
672 group_size = 1;
673 }
674 else
675 {
676 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
677 group_size = vect_cost_strided_group_size (stmt_info);
678 }
679
680 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
681 }
682 /* Not a strided access. */
683 else
684 {
685 group_size = 1;
686 first_dr = STMT_VINFO_DATA_REF (stmt_info);
687 }
688
689 /* Is this an access in a group of stores, which provide strided access?
690 If so, add in the cost of the permutes. */
691 if (group_size > 1)
692 {
693 /* Uses a high and low interleave operation for each needed permute. */
694 inside_cost = ncopies * exact_log2 (group_size) * group_size
695 * vect_get_stmt_cost (vector_stmt);
696
697 if (vect_print_dump_info (REPORT_COST))
698 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
699 group_size);
700
701 }
702
703 /* Costs of the stores. */
704 vect_get_store_cost (first_dr, ncopies, &inside_cost);
705
706 if (vect_print_dump_info (REPORT_COST))
707 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
708 "outside_cost = %d .", inside_cost, outside_cost);
709
710 /* Set the costs either in STMT_INFO or SLP_NODE (if it exists). */
711 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
712 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
713 }
714
715
716 /* Calculate cost of DR's memory access. */
717 void
718 vect_get_store_cost (struct data_reference *dr, int ncopies,
719 unsigned int *inside_cost)
720 {
721 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
722
723 switch (alignment_support_scheme)
724 {
725 case dr_aligned:
726 {
727 *inside_cost += ncopies * vect_get_stmt_cost (vector_store);
728
729 if (vect_print_dump_info (REPORT_COST))
730 fprintf (vect_dump, "vect_model_store_cost: aligned.");
731
732 break;
733 }
734
735 case dr_unaligned_supported:
736 {
737 gimple stmt = DR_STMT (dr);
738 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
739 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
740
741 /* Here, we assign an additional cost for the unaligned store. */
742 *inside_cost += ncopies
743 * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
744 vectype, DR_MISALIGNMENT (dr));
745
746 if (vect_print_dump_info (REPORT_COST))
747 fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
748 "hardware.");
749
750 break;
751 }
752
753 default:
754 gcc_unreachable ();
755 }
756 }
757
758
759 /* Function vect_model_load_cost
760
761 Models cost for loads. In the case of strided accesses, the last access
762 has the overhead of the strided access attributed to it. Since unaligned
763 accesses are supported for loads, we also account for the costs of the
764 access scheme chosen. */
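/* The permute cost mirrors the store case: an interleaved group of
   GROUP_SIZE loads is modeled as log2 (GROUP_SIZE) * GROUP_SIZE even/odd
   extract operations per copy.  The ADD_REALIGN_COST flag passed to
   vect_get_load_cost below makes sure the one-time realignment setup of
   dr_explicit_realign_optimized is counted only once per group.  */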
765
766 void
767 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
768
769 {
770 int group_size;
771 gimple first_stmt;
772 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
773 unsigned int inside_cost = 0, outside_cost = 0;
774
775 /* The SLP costs were already calculated during SLP tree build. */
776 if (PURE_SLP_STMT (stmt_info))
777 return;
778
779 /* Strided accesses? */
780 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
781 if (first_stmt && !slp_node)
782 {
783 group_size = vect_cost_strided_group_size (stmt_info);
784 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
785 }
786 /* Not a strided access. */
787 else
788 {
789 group_size = 1;
790 first_dr = dr;
791 }
792
793 /* Is this an access in a group of loads providing strided access?
794 If so, add in the cost of the permutes. */
795 if (group_size > 1)
796 {
797 /* Uses even and odd extract operations for each needed permute. */
798 inside_cost = ncopies * exact_log2 (group_size) * group_size
799 * vect_get_stmt_cost (vector_stmt);
800
801 if (vect_print_dump_info (REPORT_COST))
802 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
803 group_size);
804 }
805
806 /* The loads themselves. */
807 vect_get_load_cost (first_dr, ncopies,
808 ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node),
809 &inside_cost, &outside_cost);
810
811 if (vect_print_dump_info (REPORT_COST))
812 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
813 "outside_cost = %d .", inside_cost, outside_cost);
814
815 /* Set the costs either in STMT_INFO or SLP_NODE (if it exists). */
816 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
817 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
818 }
819
820
821 /* Calculate cost of DR's memory access. */
822 void
823 vect_get_load_cost (struct data_reference *dr, int ncopies,
824 bool add_realign_cost, unsigned int *inside_cost,
825 unsigned int *outside_cost)
826 {
827 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
828
829 switch (alignment_support_scheme)
830 {
831 case dr_aligned:
832 {
833 *inside_cost += ncopies * vect_get_stmt_cost (vector_load);
834
835 if (vect_print_dump_info (REPORT_COST))
836 fprintf (vect_dump, "vect_model_load_cost: aligned.");
837
838 break;
839 }
840 case dr_unaligned_supported:
841 {
842 gimple stmt = DR_STMT (dr);
843 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
844 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
845
846 /* Here, we assign an additional cost for the unaligned load. */
847 *inside_cost += ncopies
848 * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
849 vectype, DR_MISALIGNMENT (dr));
850 if (vect_print_dump_info (REPORT_COST))
851 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
852 "hardware.");
853
854 break;
855 }
856 case dr_explicit_realign:
857 {
858 *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
859 + vect_get_stmt_cost (vector_stmt));
860
861 /* FIXME: If the misalignment remains fixed across the iterations of
862 the containing loop, the following cost should be added to the
863 outside costs. */
864 if (targetm.vectorize.builtin_mask_for_load)
865 *inside_cost += vect_get_stmt_cost (vector_stmt);
866
867 break;
868 }
869 case dr_explicit_realign_optimized:
870 {
871 if (vect_print_dump_info (REPORT_COST))
872 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
873 "pipelined.");
874
875 /* Unaligned software pipeline has a load of an address, an initial
876 load, and possibly a mask operation to "prime" the loop. However,
877 if this is an access in a group of loads, which provide strided
878 access, then the above cost should only be considered for one
879 access in the group. Inside the loop, there is a load op
880 and a realignment op. */
881
882 if (add_realign_cost)
883 {
884 *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
885 if (targetm.vectorize.builtin_mask_for_load)
886 *outside_cost += vect_get_stmt_cost (vector_stmt);
887 }
888
889 *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
890 + vect_get_stmt_cost (vector_stmt));
891 break;
892 }
893
894 default:
895 gcc_unreachable ();
896 }
897 }
898
899
900 /* Function vect_init_vector.
901
902 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
903 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
904 is not NULL. Otherwise, place the initialization at the loop preheader.
905 Return the DEF of INIT_STMT.
906 It will be used in the vectorization of STMT. */
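/* Illustrative example (names made up): to materialize the invariant
   vector {3, 3, 3, 3} for a V4SI operand, this function emits

     vect_cst_.7 = { 3, 3, 3, 3 };

   either at GSI or on the loop preheader edge (or at the start of the
   basic block for basic-block SLP), and returns the new SSA name.  */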
907
908 tree
909 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
910 gimple_stmt_iterator *gsi)
911 {
912 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
913 tree new_var;
914 gimple init_stmt;
915 tree vec_oprnd;
916 edge pe;
917 tree new_temp;
918 basic_block new_bb;
919
920 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
921 add_referenced_var (new_var);
922 init_stmt = gimple_build_assign (new_var, vector_var);
923 new_temp = make_ssa_name (new_var, init_stmt);
924 gimple_assign_set_lhs (init_stmt, new_temp);
925
926 if (gsi)
927 vect_finish_stmt_generation (stmt, init_stmt, gsi);
928 else
929 {
930 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
931
932 if (loop_vinfo)
933 {
934 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
935
936 if (nested_in_vect_loop_p (loop, stmt))
937 loop = loop->inner;
938
939 pe = loop_preheader_edge (loop);
940 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
941 gcc_assert (!new_bb);
942 }
943 else
944 {
945 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
946 basic_block bb;
947 gimple_stmt_iterator gsi_bb_start;
948
949 gcc_assert (bb_vinfo);
950 bb = BB_VINFO_BB (bb_vinfo);
951 gsi_bb_start = gsi_after_labels (bb);
952 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
953 }
954 }
955
956 if (vect_print_dump_info (REPORT_DETAILS))
957 {
958 fprintf (vect_dump, "created new init_stmt: ");
959 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
960 }
961
962 vec_oprnd = gimple_assign_lhs (init_stmt);
963 return vec_oprnd;
964 }
965
966
967 /* Function vect_get_vec_def_for_operand.
968
969 OP is an operand in STMT. This function returns a (vector) def that will be
970 used in the vectorized stmt for STMT.
971
972 In the case that OP is an SSA_NAME which is defined in the loop, then
973 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
974
975 In case OP is an invariant or constant, a new stmt that creates a vector def
976 needs to be introduced. */
977
978 tree
979 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
980 {
981 tree vec_oprnd;
982 gimple vec_stmt;
983 gimple def_stmt;
984 stmt_vec_info def_stmt_info = NULL;
985 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
986 unsigned int nunits;
987 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
988 tree vec_inv;
989 tree vec_cst;
990 tree t = NULL_TREE;
991 tree def;
992 int i;
993 enum vect_def_type dt;
994 bool is_simple_use;
995 tree vector_type;
996
997 if (vect_print_dump_info (REPORT_DETAILS))
998 {
999 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
1000 print_generic_expr (vect_dump, op, TDF_SLIM);
1001 }
1002
1003 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
1004 &dt);
1005 gcc_assert (is_simple_use);
1006 if (vect_print_dump_info (REPORT_DETAILS))
1007 {
1008 if (def)
1009 {
1010 fprintf (vect_dump, "def = ");
1011 print_generic_expr (vect_dump, def, TDF_SLIM);
1012 }
1013 if (def_stmt)
1014 {
1015 fprintf (vect_dump, " def_stmt = ");
1016 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1017 }
1018 }
1019
1020 switch (dt)
1021 {
1022 /* Case 1: operand is a constant. */
1023 case vect_constant_def:
1024 {
1025 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1026 gcc_assert (vector_type);
1027 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1028
1029 if (scalar_def)
1030 *scalar_def = op;
1031
1032 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1033 if (vect_print_dump_info (REPORT_DETAILS))
1034 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
1035
1036 vec_cst = build_vector_from_val (vector_type, op);
1037 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
1038 }
1039
1040 /* Case 2: operand is defined outside the loop - loop invariant. */
1041 case vect_external_def:
1042 {
1043 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1044 gcc_assert (vector_type);
1045 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1046
1047 if (scalar_def)
1048 *scalar_def = def;
1049
1050 /* Create 'vec_inv = {inv,inv,..,inv}' */
1051 if (vect_print_dump_info (REPORT_DETAILS))
1052 fprintf (vect_dump, "Create vector_inv.");
1053
1054 for (i = nunits - 1; i >= 0; --i)
1055 {
1056 t = tree_cons (NULL_TREE, def, t);
1057 }
1058
1059 /* FIXME: use build_constructor directly. */
1060 vec_inv = build_constructor_from_list (vector_type, t);
1061 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
1062 }
1063
1064 /* Case 3: operand is defined inside the loop. */
1065 case vect_internal_def:
1066 {
1067 if (scalar_def)
1068 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1069
1070 /* Get the def from the vectorized stmt. */
1071 def_stmt_info = vinfo_for_stmt (def_stmt);
1072 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1073 gcc_assert (vec_stmt);
1074 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1075 vec_oprnd = PHI_RESULT (vec_stmt);
1076 else if (is_gimple_call (vec_stmt))
1077 vec_oprnd = gimple_call_lhs (vec_stmt);
1078 else
1079 vec_oprnd = gimple_assign_lhs (vec_stmt);
1080 return vec_oprnd;
1081 }
1082
1083 /* Case 4: operand is defined by a loop header phi - reduction */
1084 case vect_reduction_def:
1085 case vect_double_reduction_def:
1086 case vect_nested_cycle:
1087 {
1088 struct loop *loop;
1089
1090 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1091 loop = (gimple_bb (def_stmt))->loop_father;
1092
1093 /* Get the def before the loop */
1094 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1095 return get_initial_def_for_reduction (stmt, op, scalar_def);
1096 }
1097
1098 /* Case 5: operand is defined by loop-header phi - induction. */
1099 case vect_induction_def:
1100 {
1101 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1102
1103 /* Get the def from the vectorized stmt. */
1104 def_stmt_info = vinfo_for_stmt (def_stmt);
1105 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1106 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1107 vec_oprnd = PHI_RESULT (vec_stmt);
1108 return vec_oprnd;
1109 }
1110
1111 default:
1112 gcc_unreachable ();
1113 }
1114 }
1115
1116
1117 /* Function vect_get_vec_def_for_stmt_copy
1118
1119 Return a vector-def for an operand. This function is used when the
1120 vectorized stmt to be created (by the caller to this function) is a "copy"
1121 created in case the vectorized result cannot fit in one vector, and several
1122 copies of the vector-stmt are required. In this case the vector-def is
1123 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1124 of the stmt that defines VEC_OPRND.
1125 DT is the type of the vector def VEC_OPRND.
1126
1127 Context:
1128 In case the vectorization factor (VF) is bigger than the number
1129 of elements that can fit in a vectype (nunits), we have to generate
1130 more than one vector stmt to vectorize the scalar stmt. This situation
1131 arises when there are multiple data-types operated upon in the loop; the
1132 smallest data-type determines the VF, and as a result, when vectorizing
1133 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1134 vector stmt (each computing a vector of 'nunits' results, and together
1135 computing 'VF' results in each iteration). This function is called when
1136 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1137 which VF=16 and nunits=4, so the number of copies required is 4):
1138
1139 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1140
1141 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1142 VS1.1: vx.1 = memref1 VS1.2
1143 VS1.2: vx.2 = memref2 VS1.3
1144 VS1.3: vx.3 = memref3
1145
1146 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1147 VSnew.1: vz1 = vx.1 + ... VSnew.2
1148 VSnew.2: vz2 = vx.2 + ... VSnew.3
1149 VSnew.3: vz3 = vx.3 + ...
1150
1151 The vectorization of S1 is explained in vectorizable_load.
1152 The vectorization of S2:
1153 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1154 the function 'vect_get_vec_def_for_operand' is called to
1155 get the relevant vector-def for each operand of S2. For operand x it
1156 returns the vector-def 'vx.0'.
1157
1158 To create the remaining copies of the vector-stmt (VSnew.j), this
1159 function is called to get the relevant vector-def for each operand. It is
1160 obtained from the respective VS1.j stmt, which is recorded in the
1161 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1162
1163 For example, to obtain the vector-def 'vx.1' in order to create the
1164 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1165 Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
1166 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1167 and return its def ('vx.1').
1168 Overall, to create the above sequence this function will be called 3 times:
1169 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1170 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1171 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1172
1173 tree
1174 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1175 {
1176 gimple vec_stmt_for_operand;
1177 stmt_vec_info def_stmt_info;
1178
1179 /* Do nothing; can reuse same def. */
1180 if (dt == vect_external_def || dt == vect_constant_def)
1181 return vec_oprnd;
1182
1183 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1184 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1185 gcc_assert (def_stmt_info);
1186 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1187 gcc_assert (vec_stmt_for_operand);
1188 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1189 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1190 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1191 else
1192 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1193 return vec_oprnd;
1194 }
1195
1196
1197 /* Get vectorized definitions for the operands to create a copy of an original
1198 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1199
1200 static void
1201 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1202 VEC(tree,heap) **vec_oprnds0,
1203 VEC(tree,heap) **vec_oprnds1)
1204 {
1205 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1206
1207 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1208 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1209
1210 if (vec_oprnds1 && *vec_oprnds1)
1211 {
1212 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1213 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1214 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1215 }
1216 }
1217
1218
1219 /* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
1220 NULL. */
1221
1222 static void
1223 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1224 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1225 slp_tree slp_node)
1226 {
1227 if (slp_node)
1228 vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
1229 else
1230 {
1231 tree vec_oprnd;
1232
1233 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1234 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1235 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1236
1237 if (op1)
1238 {
1239 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1240 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1241 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1242 }
1243 }
1244 }
1245
1246
1247 /* Function vect_finish_stmt_generation.
1248
1249 Insert a new stmt (VEC_STMT) before GSI, create its stmt_vec_info, and set its location from the stmt at GSI. */
1250
1251 void
1252 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1253 gimple_stmt_iterator *gsi)
1254 {
1255 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1256 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1257 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1258
1259 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1260
1261 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1262
1263 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1264 bb_vinfo));
1265
1266 if (vect_print_dump_info (REPORT_DETAILS))
1267 {
1268 fprintf (vect_dump, "add new stmt: ");
1269 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1270 }
1271
1272 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1273 }
1274
1275 /* Checks if CALL can be vectorized with output type VECTYPE_OUT and input
1276 type VECTYPE_IN. Returns a function declaration if the target has a
1277 vectorized version of the function, or NULL_TREE otherwise. */
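/* For example, a target may map a call to __builtin_sqrt with V2DF input
   and output vector types to its own vector square-root builtin; the
   mapping is entirely target-defined, and the hook returns NULL_TREE when
   no vector counterpart exists.  (Illustrative; which builtins are handled
   depends on the target.)  */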
1278
1279 tree
1280 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1281 {
1282 tree fndecl = gimple_call_fndecl (call);
1283
1284 /* We only handle functions that do not read or clobber memory -- i.e.
1285 const or novops ones. */
1286 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1287 return NULL_TREE;
1288
1289 if (!fndecl
1290 || TREE_CODE (fndecl) != FUNCTION_DECL
1291 || !DECL_BUILT_IN (fndecl))
1292 return NULL_TREE;
1293
1294 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1295 vectype_in);
1296 }
1297
1298 /* Function vectorizable_call.
1299
1300 Check if STMT performs a function call that can be vectorized.
1301 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1302 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1303 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1304
1305 static bool
1306 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1307 {
1308 tree vec_dest;
1309 tree scalar_dest;
1310 tree op, type;
1311 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1312 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1313 tree vectype_out, vectype_in;
1314 int nunits_in;
1315 int nunits_out;
1316 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1317 tree fndecl, new_temp, def, rhs_type;
1318 gimple def_stmt;
1319 enum vect_def_type dt[3]
1320 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
1321 gimple new_stmt = NULL;
1322 int ncopies, j;
1323 VEC(tree, heap) *vargs = NULL;
1324 enum { NARROW, NONE, WIDEN } modifier;
1325 size_t i, nargs;
1326
1327 /* FORNOW: unsupported in basic block SLP. */
1328 gcc_assert (loop_vinfo);
1329
1330 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1331 return false;
1332
1333 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1334 return false;
1335
1336 /* FORNOW: SLP not supported. */
1337 if (STMT_SLP_TYPE (stmt_info))
1338 return false;
1339
1340 /* Is STMT a vectorizable call? */
1341 if (!is_gimple_call (stmt))
1342 return false;
1343
1344 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1345 return false;
1346
1347 if (stmt_could_throw_p (stmt))
1348 return false;
1349
1350 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1351
1352 /* Process function arguments. */
1353 rhs_type = NULL_TREE;
1354 vectype_in = NULL_TREE;
1355 nargs = gimple_call_num_args (stmt);
1356
1357 /* Bail out if the function has more than three arguments; we do not have
1358 interesting builtin functions to vectorize with more than two arguments
1359 except for fma. Having no arguments is not supported either. */
1360 if (nargs == 0 || nargs > 3)
1361 return false;
1362
1363 for (i = 0; i < nargs; i++)
1364 {
1365 tree opvectype;
1366
1367 op = gimple_call_arg (stmt, i);
1368
1369 /* We can only handle calls with arguments of the same type. */
1370 if (rhs_type
1371 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
1372 {
1373 if (vect_print_dump_info (REPORT_DETAILS))
1374 fprintf (vect_dump, "argument types differ.");
1375 return false;
1376 }
1377 if (!rhs_type)
1378 rhs_type = TREE_TYPE (op);
1379
1380 if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
1381 &def_stmt, &def, &dt[i], &opvectype))
1382 {
1383 if (vect_print_dump_info (REPORT_DETAILS))
1384 fprintf (vect_dump, "use not simple.");
1385 return false;
1386 }
1387
1388 if (!vectype_in)
1389 vectype_in = opvectype;
1390 else if (opvectype
1391 && opvectype != vectype_in)
1392 {
1393 if (vect_print_dump_info (REPORT_DETAILS))
1394 fprintf (vect_dump, "argument vector types differ.");
1395 return false;
1396 }
1397 }
1398 /* If all arguments are external or constant defs, use a vector type with
1399 the same size as the output vector type. */
1400 if (!vectype_in)
1401 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1402 if (vec_stmt)
1403 gcc_assert (vectype_in);
1404 if (!vectype_in)
1405 {
1406 if (vect_print_dump_info (REPORT_DETAILS))
1407 {
1408 fprintf (vect_dump, "no vectype for scalar type ");
1409 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1410 }
1411
1412 return false;
1413 }
1414
1415 /* FORNOW */
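/* NONE: the input and output vector types hold the same number of elements.
   NARROW: each vectorized call consumes two input vectors per argument and
   produces one output vector with twice as many (narrower) results.
   WIDEN: the converse; no target currently implements it here (see the
   WIDEN case below, which bails out).  */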
1416 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1417 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1418 if (nunits_in == nunits_out / 2)
1419 modifier = NARROW;
1420 else if (nunits_out == nunits_in)
1421 modifier = NONE;
1422 else if (nunits_out == nunits_in / 2)
1423 modifier = WIDEN;
1424 else
1425 return false;
1426
1427 /* For now, we only vectorize functions if a target specific builtin
1428 is available. TODO -- in some cases, it might be profitable to
1429 insert the calls for pieces of the vector, in order to be able
1430 to vectorize other operations in the loop. */
1431 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1432 if (fndecl == NULL_TREE)
1433 {
1434 if (vect_print_dump_info (REPORT_DETAILS))
1435 fprintf (vect_dump, "function is not vectorizable.");
1436
1437 return false;
1438 }
1439
1440 gcc_assert (!gimple_vuse (stmt));
1441
1442 if (modifier == NARROW)
1443 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1444 else
1445 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1446
1447 /* Sanity check: make sure that at least one copy of the vectorized stmt
1448 needs to be generated. */
1449 gcc_assert (ncopies >= 1);
1450
1451 if (!vec_stmt) /* transformation not required. */
1452 {
1453 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1454 if (vect_print_dump_info (REPORT_DETAILS))
1455 fprintf (vect_dump, "=== vectorizable_call ===");
1456 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1457 return true;
1458 }
1459
1460 /** Transform. **/
1461
1462 if (vect_print_dump_info (REPORT_DETAILS))
1463 fprintf (vect_dump, "transform operation.");
1464
1465 /* Handle def. */
1466 scalar_dest = gimple_call_lhs (stmt);
1467 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1468
1469 prev_stmt_info = NULL;
1470 switch (modifier)
1471 {
1472 case NONE:
1473 for (j = 0; j < ncopies; ++j)
1474 {
1475 /* Build argument list for the vectorized call. */
1476 if (j == 0)
1477 vargs = VEC_alloc (tree, heap, nargs);
1478 else
1479 VEC_truncate (tree, vargs, 0);
1480
1481 for (i = 0; i < nargs; i++)
1482 {
1483 op = gimple_call_arg (stmt, i);
1484 if (j == 0)
1485 vec_oprnd0
1486 = vect_get_vec_def_for_operand (op, stmt, NULL);
1487 else
1488 {
1489 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1490 vec_oprnd0
1491 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1492 }
1493
1494 VEC_quick_push (tree, vargs, vec_oprnd0);
1495 }
1496
1497 new_stmt = gimple_build_call_vec (fndecl, vargs);
1498 new_temp = make_ssa_name (vec_dest, new_stmt);
1499 gimple_call_set_lhs (new_stmt, new_temp);
1500
1501 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1502 mark_symbols_for_renaming (new_stmt);
1503
1504 if (j == 0)
1505 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1506 else
1507 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1508
1509 prev_stmt_info = vinfo_for_stmt (new_stmt);
1510 }
1511
1512 break;
1513
1514 case NARROW:
1515 for (j = 0; j < ncopies; ++j)
1516 {
1517 /* Build argument list for the vectorized call. */
1518 if (j == 0)
1519 vargs = VEC_alloc (tree, heap, nargs * 2);
1520 else
1521 VEC_truncate (tree, vargs, 0);
1522
1523 for (i = 0; i < nargs; i++)
1524 {
1525 op = gimple_call_arg (stmt, i);
1526 if (j == 0)
1527 {
1528 vec_oprnd0
1529 = vect_get_vec_def_for_operand (op, stmt, NULL);
1530 vec_oprnd1
1531 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1532 }
1533 else
1534 {
1535 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1536 vec_oprnd0
1537 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1538 vec_oprnd1
1539 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1540 }
1541
1542 VEC_quick_push (tree, vargs, vec_oprnd0);
1543 VEC_quick_push (tree, vargs, vec_oprnd1);
1544 }
1545
1546 new_stmt = gimple_build_call_vec (fndecl, vargs);
1547 new_temp = make_ssa_name (vec_dest, new_stmt);
1548 gimple_call_set_lhs (new_stmt, new_temp);
1549
1550 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1551 mark_symbols_for_renaming (new_stmt);
1552
1553 if (j == 0)
1554 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1555 else
1556 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1557
1558 prev_stmt_info = vinfo_for_stmt (new_stmt);
1559 }
1560
1561 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1562
1563 break;
1564
1565 case WIDEN:
1566 /* No current target implements this case. */
1567 return false;
1568 }
1569
1570 VEC_free (tree, heap, vargs);
1571
1572 /* Update the exception handling table with the vector stmt if necessary. */
1573 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1574 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1575
1576 /* The call in STMT might prevent it from being removed in dce.
1577 We however cannot remove it here, due to the way the ssa name
1578 it defines is mapped to the new definition. So just replace the
1579 rhs of the statement with something harmless. */
1580
1581 type = TREE_TYPE (scalar_dest);
1582 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1583 build_zero_cst (type));
1584 set_vinfo_for_stmt (new_stmt, stmt_info);
1585 set_vinfo_for_stmt (stmt, NULL);
1586 STMT_VINFO_STMT (stmt_info) = new_stmt;
1587 gsi_replace (gsi, new_stmt, false);
1588 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1589
1590 return true;
1591 }
1592
1593
1594 /* Function vect_gen_widened_results_half
1595
1596 Create a vector stmt whose code is CODE, whose number of arguments is
1597 OP_TYPE, and whose result variable is VEC_DEST; its arguments are
1598 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
1599 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1600 needs to be created (DECL is a function-decl of a target-builtin).
1601 STMT is the original scalar stmt that we are vectorizing. */
1602
1603 static gimple
1604 vect_gen_widened_results_half (enum tree_code code,
1605 tree decl,
1606 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1607 tree vec_dest, gimple_stmt_iterator *gsi,
1608 gimple stmt)
1609 {
1610 gimple new_stmt;
1611 tree new_temp;
1612
1613 /* Generate half of the widened result: */
1614 if (code == CALL_EXPR)
1615 {
1616 /* Target specific support */
1617 if (op_type == binary_op)
1618 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1619 else
1620 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1621 new_temp = make_ssa_name (vec_dest, new_stmt);
1622 gimple_call_set_lhs (new_stmt, new_temp);
1623 }
1624 else
1625 {
1626 /* Generic support */
1627 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1628 if (op_type != binary_op)
1629 vec_oprnd1 = NULL;
1630 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1631 vec_oprnd1);
1632 new_temp = make_ssa_name (vec_dest, new_stmt);
1633 gimple_assign_set_lhs (new_stmt, new_temp);
1634 }
1635 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1636
1637 return new_stmt;
1638 }
1639
1640
1641 /* Check if STMT performs a conversion operation that can be vectorized.
1642 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1643 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1644 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1645
1646 static bool
1647 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1648 gimple *vec_stmt, slp_tree slp_node)
1649 {
1650 tree vec_dest;
1651 tree scalar_dest;
1652 tree op0;
1653 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1654 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1655 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1656 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1657 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1658 tree new_temp;
1659 tree def;
1660 gimple def_stmt;
1661 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1662 gimple new_stmt = NULL;
1663 stmt_vec_info prev_stmt_info;
1664 int nunits_in;
1665 int nunits_out;
1666 tree vectype_out, vectype_in;
1667 int ncopies, j;
1668 tree rhs_type;
1669 tree builtin_decl;
1670 enum { NARROW, NONE, WIDEN } modifier;
1671 int i;
1672 VEC(tree,heap) *vec_oprnds0 = NULL;
1673 tree vop0;
1674 VEC(tree,heap) *dummy = NULL;
1675 int dummy_int;
1676
1677 /* Is STMT a vectorizable conversion? */
1678
1679 /* FORNOW: unsupported in basic block SLP. */
1680 gcc_assert (loop_vinfo);
1681
1682 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1683 return false;
1684
1685 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1686 return false;
1687
1688 if (!is_gimple_assign (stmt))
1689 return false;
1690
1691 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1692 return false;
1693
1694 code = gimple_assign_rhs_code (stmt);
1695 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1696 return false;
1697
1698 /* Check types of lhs and rhs. */
1699 scalar_dest = gimple_assign_lhs (stmt);
1700 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1701
1702 op0 = gimple_assign_rhs1 (stmt);
1703 rhs_type = TREE_TYPE (op0);
1704 /* Check the operands of the operation. */
1705 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1706 &def_stmt, &def, &dt[0], &vectype_in))
1707 {
1708 if (vect_print_dump_info (REPORT_DETAILS))
1709 fprintf (vect_dump, "use not simple.");
1710 return false;
1711 }
1712 /* If op0 is an external or constant def, use a vector type of
1713 the same size as the output vector type. */
1714 if (!vectype_in)
1715 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1716 if (vec_stmt)
1717 gcc_assert (vectype_in);
1718 if (!vectype_in)
1719 {
1720 if (vect_print_dump_info (REPORT_DETAILS))
1721 {
1722 fprintf (vect_dump, "no vectype for scalar type ");
1723 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1724 }
1725
1726 return false;
1727 }
1728
1729 /* FORNOW */
1730 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1731 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1732 if (nunits_in == nunits_out / 2)
1733 modifier = NARROW;
1734 else if (nunits_out == nunits_in)
1735 modifier = NONE;
1736 else if (nunits_out == nunits_in / 2)
1737 modifier = WIDEN;
1738 else
1739 return false;
1740
1741 if (modifier == NARROW)
1742 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1743 else
1744 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1745
1746 /* Multiple types in SLP are handled by creating the appropriate number of
1747 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1748 case of SLP. */
1749 if (slp_node)
1750 ncopies = 1;
1751
1752 /* Sanity check: make sure that at least one copy of the vectorized stmt
1753 needs to be generated. */
1754 gcc_assert (ncopies >= 1);
1755
1756 /* Supportable by target? */
1757 if ((modifier == NONE
1758 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1759 || (modifier == WIDEN
1760 && !supportable_widening_operation (code, stmt,
1761 vectype_out, vectype_in,
1762 &decl1, &decl2,
1763 &code1, &code2,
1764 &dummy_int, &dummy))
1765 || (modifier == NARROW
1766 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1767 &code1, &dummy_int, &dummy)))
1768 {
1769 if (vect_print_dump_info (REPORT_DETAILS))
1770 fprintf (vect_dump, "conversion not supported by target.");
1771 return false;
1772 }
1773
1774 if (modifier != NONE)
1775 {
1776 /* FORNOW: SLP not supported. */
1777 if (STMT_SLP_TYPE (stmt_info))
1778 return false;
1779 }
1780
1781 if (!vec_stmt) /* transformation not required. */
1782 {
1783 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1784 return true;
1785 }
1786
1787 /** Transform. **/
1788 if (vect_print_dump_info (REPORT_DETAILS))
1789 fprintf (vect_dump, "transform conversion.");
1790
1791 /* Handle def. */
1792 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1793
1794 if (modifier == NONE && !slp_node)
1795 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1796
1797 prev_stmt_info = NULL;
1798 switch (modifier)
1799 {
1800 case NONE:
1801 for (j = 0; j < ncopies; j++)
1802 {
1803 if (j == 0)
1804 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1805 else
1806 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1807
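/* The same-width conversion is carried out by a target builtin obtained from
the builtin_conversion hook; it is emitted below as a call taking the vector
operand as its single argument. */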
1808 builtin_decl =
1809 targetm.vectorize.builtin_conversion (code,
1810 vectype_out, vectype_in);
1811 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
1812 {
1813 /* Arguments are ready. Create the new vector stmt. */
1814 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1815 new_temp = make_ssa_name (vec_dest, new_stmt);
1816 gimple_call_set_lhs (new_stmt, new_temp);
1817 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1818 if (slp_node)
1819 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1820 }
1821
1822 if (j == 0)
1823 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1824 else
1825 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1826 prev_stmt_info = vinfo_for_stmt (new_stmt);
1827 }
1828 break;
1829
1830 case WIDEN:
1831 /* In case the vectorization factor (VF) is bigger than the number
1832 of elements that we can fit in a vectype (nunits), we have to
1833 generate more than one vector stmt - i.e - we need to "unroll"
1834 the vector stmt by a factor VF/nunits. */
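/* Each copy of a widening conversion produces two vector stmts (the low and
high halves of the widened result); both halves are linked into the
STMT_VINFO_RELATED_STMT chain so that later copies and consumers can find
them. */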
1835 for (j = 0; j < ncopies; j++)
1836 {
1837 if (j == 0)
1838 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1839 else
1840 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1841
1842 /* Generate first half of the widened result: */
1843 new_stmt
1844 = vect_gen_widened_results_half (code1, decl1,
1845 vec_oprnd0, vec_oprnd1,
1846 unary_op, vec_dest, gsi, stmt);
1847 if (j == 0)
1848 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1849 else
1850 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1851 prev_stmt_info = vinfo_for_stmt (new_stmt);
1852
1853 /* Generate second half of the widened result: */
1854 new_stmt
1855 = vect_gen_widened_results_half (code2, decl2,
1856 vec_oprnd0, vec_oprnd1,
1857 unary_op, vec_dest, gsi, stmt);
1858 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1859 prev_stmt_info = vinfo_for_stmt (new_stmt);
1860 }
1861 break;
1862
1863 case NARROW:
1864 /* In case the vectorization factor (VF) is bigger than the number
1865 of elements that we can fit in a vectype (nunits), we have to
1866 generate more than one vector stmt - i.e - we need to "unroll"
1867 the vector stmt by a factor VF/nunits. */
1868 for (j = 0; j < ncopies; j++)
1869 {
1870 /* Handle uses. */
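/* Each narrowing stmt combines two consecutive vector defs of the wider
input type; the second def used by one copy is the starting point for the
defs of the next copy. */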
1871 if (j == 0)
1872 {
1873 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1874 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1875 }
1876 else
1877 {
1878 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1879 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1880 }
1881
1882 /* Arguments are ready. Create the new vector stmt. */
1883 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1884 vec_oprnd1);
1885 new_temp = make_ssa_name (vec_dest, new_stmt);
1886 gimple_assign_set_lhs (new_stmt, new_temp);
1887 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1888
1889 if (j == 0)
1890 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1891 else
1892 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1893
1894 prev_stmt_info = vinfo_for_stmt (new_stmt);
1895 }
1896
1897 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1898 }
1899
1900 if (vec_oprnds0)
1901 VEC_free (tree, heap, vec_oprnds0);
1902
1903 return true;
1904 }
1905
1906
1907 /* Function vectorizable_assignment.
1908
1909 Check if STMT performs an assignment (copy) that can be vectorized.
1910 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1911 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1912 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1913
1914 static bool
1915 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1916 gimple *vec_stmt, slp_tree slp_node)
1917 {
1918 tree vec_dest;
1919 tree scalar_dest;
1920 tree op;
1921 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1922 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1923 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1924 tree new_temp;
1925 tree def;
1926 gimple def_stmt;
1927 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1928 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1929 int ncopies;
1930 int i, j;
1931 VEC(tree,heap) *vec_oprnds = NULL;
1932 tree vop;
1933 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1934 gimple new_stmt = NULL;
1935 stmt_vec_info prev_stmt_info = NULL;
1936 enum tree_code code;
1937 tree vectype_in;
1938
1939 /* Multiple types in SLP are handled by creating the appropriate number of
1940 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1941 case of SLP. */
1942 if (slp_node)
1943 ncopies = 1;
1944 else
1945 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1946
1947 gcc_assert (ncopies >= 1);
1948
1949 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1950 return false;
1951
1952 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1953 return false;
1954
1955 /* Is vectorizable assignment? */
1956 if (!is_gimple_assign (stmt))
1957 return false;
1958
1959 scalar_dest = gimple_assign_lhs (stmt);
1960 if (TREE_CODE (scalar_dest) != SSA_NAME)
1961 return false;
1962
1963 code = gimple_assign_rhs_code (stmt);
1964 if (gimple_assign_single_p (stmt)
1965 || code == PAREN_EXPR
1966 || CONVERT_EXPR_CODE_P (code))
1967 op = gimple_assign_rhs1 (stmt);
1968 else
1969 return false;
1970
1971 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
1972 &def_stmt, &def, &dt[0], &vectype_in))
1973 {
1974 if (vect_print_dump_info (REPORT_DETAILS))
1975 fprintf (vect_dump, "use not simple.");
1976 return false;
1977 }
1978
1979 /* We can handle NOP_EXPR conversions that do not change the number
1980 of elements or the vector size. */
1981 if (CONVERT_EXPR_CODE_P (code)
1982 && (!vectype_in
1983 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
1984 || (GET_MODE_SIZE (TYPE_MODE (vectype))
1985 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
1986 return false;
1987
1988 if (!vec_stmt) /* transformation not required. */
1989 {
1990 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1991 if (vect_print_dump_info (REPORT_DETAILS))
1992 fprintf (vect_dump, "=== vectorizable_assignment ===");
1993 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1994 return true;
1995 }
1996
1997 /** Transform. **/
1998 if (vect_print_dump_info (REPORT_DETAILS))
1999 fprintf (vect_dump, "transform assignment.");
2000
2001 /* Handle def. */
2002 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2003
2004 /* Handle use. */
2005 for (j = 0; j < ncopies; j++)
2006 {
2007 /* Handle uses. */
2008 if (j == 0)
2009 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2010 else
2011 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2012
2013 /* Arguments are ready. Create the new vector stmt. */
2014 FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
2015 {
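/* A conversion that changes neither the element count nor the vector size
(checked above) is a no-op at the vector level, so it can be expressed as a
VIEW_CONVERT_EXPR to the destination vector type. */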
2016 if (CONVERT_EXPR_CODE_P (code))
2017 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2018 new_stmt = gimple_build_assign (vec_dest, vop);
2019 new_temp = make_ssa_name (vec_dest, new_stmt);
2020 gimple_assign_set_lhs (new_stmt, new_temp);
2021 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2022 if (slp_node)
2023 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2024 }
2025
2026 if (slp_node)
2027 continue;
2028
2029 if (j == 0)
2030 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2031 else
2032 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2033
2034 prev_stmt_info = vinfo_for_stmt (new_stmt);
2035 }
2036
2037 VEC_free (tree, heap, vec_oprnds);
2038 return true;
2039 }
2040
2041
2042 /* Function vectorizable_shift.
2043
2044 Check if STMT performs a shift operation that can be vectorized.
2045 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2046 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2047 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2048
2049 static bool
2050 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
2051 gimple *vec_stmt, slp_tree slp_node)
2052 {
2053 tree vec_dest;
2054 tree scalar_dest;
2055 tree op0, op1 = NULL;
2056 tree vec_oprnd1 = NULL_TREE;
2057 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2058 tree vectype;
2059 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2060 enum tree_code code;
2061 enum machine_mode vec_mode;
2062 tree new_temp;
2063 optab optab;
2064 int icode;
2065 enum machine_mode optab_op2_mode;
2066 tree def;
2067 gimple def_stmt;
2068 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2069 gimple new_stmt = NULL;
2070 stmt_vec_info prev_stmt_info;
2071 int nunits_in;
2072 int nunits_out;
2073 tree vectype_out;
2074 int ncopies;
2075 int j, i;
2076 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2077 tree vop0, vop1;
2078 unsigned int k;
2079 bool scalar_shift_arg = false;
2080 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2081 int vf;
2082
2083 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2084 return false;
2085
2086 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2087 return false;
2088
2089 /* Is STMT a vectorizable binary/unary operation? */
2090 if (!is_gimple_assign (stmt))
2091 return false;
2092
2093 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2094 return false;
2095
2096 code = gimple_assign_rhs_code (stmt);
2097
2098 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2099 || code == RROTATE_EXPR))
2100 return false;
2101
2102 scalar_dest = gimple_assign_lhs (stmt);
2103 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2104
2105 op0 = gimple_assign_rhs1 (stmt);
2106 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2107 &def_stmt, &def, &dt[0], &vectype))
2108 {
2109 if (vect_print_dump_info (REPORT_DETAILS))
2110 fprintf (vect_dump, "use not simple.");
2111 return false;
2112 }
2113 /* If op0 is an external or constant def, use a vector type with
2114 the same size as the output vector type. */
2115 if (!vectype)
2116 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2117 if (vec_stmt)
2118 gcc_assert (vectype);
2119 if (!vectype)
2120 {
2121 if (vect_print_dump_info (REPORT_DETAILS))
2122 {
2123 fprintf (vect_dump, "no vectype for scalar type ");
2124 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2125 }
2126
2127 return false;
2128 }
2129
2130 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2131 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2132 if (nunits_out != nunits_in)
2133 return false;
2134
2135 op1 = gimple_assign_rhs2 (stmt);
2136 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
2137 {
2138 if (vect_print_dump_info (REPORT_DETAILS))
2139 fprintf (vect_dump, "use not simple.");
2140 return false;
2141 }
2142
2143 if (loop_vinfo)
2144 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2145 else
2146 vf = 1;
2147
2148 /* Multiple types in SLP are handled by creating the appropriate number of
2149 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2150 case of SLP. */
2151 if (slp_node)
2152 ncopies = 1;
2153 else
2154 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2155
2156 gcc_assert (ncopies >= 1);
2157
2158 /* Determine whether the shift amount is a vector, or scalar. If the
2159 shift/rotate amount is a vector, use the vector/vector shift optabs. */
2160
2161 /* Vector shifted by vector. */
2162 if (dt[1] == vect_internal_def)
2163 {
2164 optab = optab_for_tree_code (code, vectype, optab_vector);
2165 if (vect_print_dump_info (REPORT_DETAILS))
2166 fprintf (vect_dump, "vector/vector shift/rotate found.");
2167 }
2168 /* See if the machine has a vector-by-scalar shift insn, and if not,
2169 whether it has a vector-by-vector shift insn. */
2170 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2171 {
2172 optab = optab_for_tree_code (code, vectype, optab_scalar);
2173 if (optab
2174 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2175 {
2176 scalar_shift_arg = true;
2177 if (vect_print_dump_info (REPORT_DETAILS))
2178 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2179 }
2180 else
2181 {
2182 optab = optab_for_tree_code (code, vectype, optab_vector);
2183 if (optab
2184 && (optab_handler (optab, TYPE_MODE (vectype))
2185 != CODE_FOR_nothing))
2186 {
2187 if (vect_print_dump_info (REPORT_DETAILS))
2188 fprintf (vect_dump, "vector/vector shift/rotate found.");
2189
2190 /* Unlike the other binary operators, shifts/rotates have
2191 an rhs of type int rather than the same type as the lhs,
2192 so make sure the scalar is the right type if we are
2193 dealing with vectors of short/char. */
2194 if (dt[1] == vect_constant_def)
2195 op1 = fold_convert (TREE_TYPE (vectype), op1);
2196 }
2197 }
2198 }
2199 else
2200 {
2201 if (vect_print_dump_info (REPORT_DETAILS))
2202 fprintf (vect_dump, "operand mode requires invariant argument.");
2203 return false;
2204 }
2205
2206 /* Supportable by target? */
2207 if (!optab)
2208 {
2209 if (vect_print_dump_info (REPORT_DETAILS))
2210 fprintf (vect_dump, "no optab.");
2211 return false;
2212 }
2213 vec_mode = TYPE_MODE (vectype);
2214 icode = (int) optab_handler (optab, vec_mode);
2215 if (icode == CODE_FOR_nothing)
2216 {
2217 if (vect_print_dump_info (REPORT_DETAILS))
2218 fprintf (vect_dump, "op not supported by target.");
2219 /* Check only during analysis. */
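/* With no vector insn for this operation we may still proceed using word
mode, but only when the vector size equals the word size and, during
analysis, the vectorization factor makes it worthwhile. */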
2220 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2221 || (vf < vect_min_worthwhile_factor (code)
2222 && !vec_stmt))
2223 return false;
2224 if (vect_print_dump_info (REPORT_DETAILS))
2225 fprintf (vect_dump, "proceeding using word mode.");
2226 }
2227
2228 /* Worthwhile without SIMD support? Check only during analysis. */
2229 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2230 && vf < vect_min_worthwhile_factor (code)
2231 && !vec_stmt)
2232 {
2233 if (vect_print_dump_info (REPORT_DETAILS))
2234 fprintf (vect_dump, "not worthwhile without SIMD support.");
2235 return false;
2236 }
2237
2238 if (!vec_stmt) /* transformation not required. */
2239 {
2240 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
2241 if (vect_print_dump_info (REPORT_DETAILS))
2242 fprintf (vect_dump, "=== vectorizable_shift ===");
2243 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2244 return true;
2245 }
2246
2247 /** Transform. **/
2248
2249 if (vect_print_dump_info (REPORT_DETAILS))
2250 fprintf (vect_dump, "transform binary/unary operation.");
2251
2252 /* Handle def. */
2253 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2254
2255 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2256 created in the previous stages of the recursion, so no allocation is
2257 needed, except for the case of shift with scalar shift argument. In that
2258 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2259 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2260 In case of loop-based vectorization we allocate VECs of size 1 (a shift
2261 always has two operands, so both VECs are allocated). */
2262 if (!slp_node)
2263 {
2264 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2265 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2266 }
2267 else if (scalar_shift_arg)
2268 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2269
2270 prev_stmt_info = NULL;
2271 for (j = 0; j < ncopies; j++)
2272 {
2273 /* Handle uses. */
2274 if (j == 0)
2275 {
2276 if (scalar_shift_arg)
2277 {
2278 /* Vector shl and shr insn patterns can be defined with scalar
2279 operand 2 (shift operand). In this case, use constant or loop
2280 invariant op1 directly, without extending it to vector mode
2281 first. */
2282 optab_op2_mode = insn_data[icode].operand[2].mode;
2283 if (!VECTOR_MODE_P (optab_op2_mode))
2284 {
2285 if (vect_print_dump_info (REPORT_DETAILS))
2286 fprintf (vect_dump, "operand 1 using scalar mode.");
2287 vec_oprnd1 = op1;
2288 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2289 if (slp_node)
2290 {
2291 /* Store vec_oprnd1 for every vector stmt to be created
2292 for SLP_NODE. We check during the analysis that all
2293 the shift arguments are the same.
2294 TODO: Allow different constants for different vector
2295 stmts generated for an SLP instance. */
2296 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2297 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2298 }
2299 }
2300 }
2301
2302 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2303 (a special case for certain kinds of vector shifts); otherwise,
2304 operand 1 should be of a vector type (the usual case). */
2305 if (vec_oprnd1)
2306 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2307 slp_node);
2308 else
2309 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2310 slp_node);
2311 }
2312 else
2313 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2314
2315 /* Arguments are ready. Create the new vector stmt. */
2316 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2317 {
2318 vop1 = VEC_index (tree, vec_oprnds1, i);
2319 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2320 new_temp = make_ssa_name (vec_dest, new_stmt);
2321 gimple_assign_set_lhs (new_stmt, new_temp);
2322 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2323 if (slp_node)
2324 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2325 }
2326
2327 if (slp_node)
2328 continue;
2329
2330 if (j == 0)
2331 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2332 else
2333 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2334 prev_stmt_info = vinfo_for_stmt (new_stmt);
2335 }
2336
2337 VEC_free (tree, heap, vec_oprnds0);
2338 VEC_free (tree, heap, vec_oprnds1);
2339
2340 return true;
2341 }
2342
2343
2344 /* Function vectorizable_operation.
2345
2346 Check if STMT performs a binary or unary operation that can be vectorized.
2347 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2348 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2349 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2350
2351 static bool
2352 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2353 gimple *vec_stmt, slp_tree slp_node)
2354 {
2355 tree vec_dest;
2356 tree scalar_dest;
2357 tree op0, op1 = NULL;
2358 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2359 tree vectype;
2360 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2361 enum tree_code code;
2362 enum machine_mode vec_mode;
2363 tree new_temp;
2364 int op_type;
2365 optab optab;
2366 int icode;
2367 tree def;
2368 gimple def_stmt;
2369 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2370 gimple new_stmt = NULL;
2371 stmt_vec_info prev_stmt_info;
2372 int nunits_in;
2373 int nunits_out;
2374 tree vectype_out;
2375 int ncopies;
2376 int j, i;
2377 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2378 tree vop0, vop1;
2379 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2380 int vf;
2381
2382 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2383 return false;
2384
2385 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2386 return false;
2387
2388 /* Is STMT a vectorizable binary/unary operation? */
2389 if (!is_gimple_assign (stmt))
2390 return false;
2391
2392 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2393 return false;
2394
2395 code = gimple_assign_rhs_code (stmt);
2396
2397 /* For pointer addition, we should use the normal plus for
2398 the vector addition. */
2399 if (code == POINTER_PLUS_EXPR)
2400 code = PLUS_EXPR;
2401
2402 /* Support only unary or binary operations. */
2403 op_type = TREE_CODE_LENGTH (code);
2404 if (op_type != unary_op && op_type != binary_op)
2405 {
2406 if (vect_print_dump_info (REPORT_DETAILS))
2407 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
2408 return false;
2409 }
2410
2411 scalar_dest = gimple_assign_lhs (stmt);
2412 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2413
2414 op0 = gimple_assign_rhs1 (stmt);
2415 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2416 &def_stmt, &def, &dt[0], &vectype))
2417 {
2418 if (vect_print_dump_info (REPORT_DETAILS))
2419 fprintf (vect_dump, "use not simple.");
2420 return false;
2421 }
2422 /* If op0 is an external or constant def, use a vector type with
2423 the same size as the output vector type. */
2424 if (!vectype)
2425 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2426 if (vec_stmt)
2427 gcc_assert (vectype);
2428 if (!vectype)
2429 {
2430 if (vect_print_dump_info (REPORT_DETAILS))
2431 {
2432 fprintf (vect_dump, "no vectype for scalar type ");
2433 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2434 }
2435
2436 return false;
2437 }
2438
2439 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2440 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2441 if (nunits_out != nunits_in)
2442 return false;
2443
2444 if (op_type == binary_op)
2445 {
2446 op1 = gimple_assign_rhs2 (stmt);
2447 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2448 &dt[1]))
2449 {
2450 if (vect_print_dump_info (REPORT_DETAILS))
2451 fprintf (vect_dump, "use not simple.");
2452 return false;
2453 }
2454 }
2455
2456 if (loop_vinfo)
2457 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2458 else
2459 vf = 1;
2460
2461 /* Multiple types in SLP are handled by creating the appropriate number of
2462 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2463 case of SLP. */
2464 if (slp_node)
2465 ncopies = 1;
2466 else
2467 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2468
2469 gcc_assert (ncopies >= 1);
2470
2471 /* Shifts are handled in vectorizable_shift (). */
2472 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2473 || code == RROTATE_EXPR)
2474 return false;
2475
2476 optab = optab_for_tree_code (code, vectype, optab_default);
2477
2478 /* Supportable by target? */
2479 if (!optab)
2480 {
2481 if (vect_print_dump_info (REPORT_DETAILS))
2482 fprintf (vect_dump, "no optab.");
2483 return false;
2484 }
2485 vec_mode = TYPE_MODE (vectype);
2486 icode = (int) optab_handler (optab, vec_mode);
2487 if (icode == CODE_FOR_nothing)
2488 {
2489 if (vect_print_dump_info (REPORT_DETAILS))
2490 fprintf (vect_dump, "op not supported by target.");
2491 /* Check only during analysis. */
2492 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2493 || (vf < vect_min_worthwhile_factor (code)
2494 && !vec_stmt))
2495 return false;
2496 if (vect_print_dump_info (REPORT_DETAILS))
2497 fprintf (vect_dump, "proceeding using word mode.");
2498 }
2499
2500 /* Worthwhile without SIMD support? Check only during analysis. */
2501 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2502 && vf < vect_min_worthwhile_factor (code)
2503 && !vec_stmt)
2504 {
2505 if (vect_print_dump_info (REPORT_DETAILS))
2506 fprintf (vect_dump, "not worthwhile without SIMD support.");
2507 return false;
2508 }
2509
2510 if (!vec_stmt) /* transformation not required. */
2511 {
2512 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2513 if (vect_print_dump_info (REPORT_DETAILS))
2514 fprintf (vect_dump, "=== vectorizable_operation ===");
2515 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2516 return true;
2517 }
2518
2519 /** Transform. **/
2520
2521 if (vect_print_dump_info (REPORT_DETAILS))
2522 fprintf (vect_dump, "transform binary/unary operation.");
2523
2524 /* Handle def. */
2525 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2526
2527 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2528 created in the previous stages of the recursion, so no allocation is
2529 needed here (the special case of a shift with a scalar shift argument,
2530 where the scalar operand would be stored in VEC_OPRNDS1 for every vector
2531 stmt of the SLP group, is handled in vectorizable_shift, not here).
2532 In case of loop-based vectorization we allocate VECs of size 1. We
2533 allocate VEC_OPRNDS1 only in case of binary operation. */
2534 if (!slp_node)
2535 {
2536 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2537 if (op_type == binary_op)
2538 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2539 }
2540
2541 /* In case the vectorization factor (VF) is bigger than the number
2542 of elements that we can fit in a vectype (nunits), we have to generate
2543 more than one vector stmt - i.e - we need to "unroll" the
2544 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2545 from one copy of the vector stmt to the next, in the field
2546 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2547 stages to find the correct vector defs to be used when vectorizing
2548 stmts that use the defs of the current stmt. The example below
2549 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
2550 we need to create 4 vectorized stmts):
2551
2552 before vectorization:
2553 RELATED_STMT VEC_STMT
2554 S1: x = memref - -
2555 S2: z = x + 1 - -
2556
2557 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2558 there):
2559 RELATED_STMT VEC_STMT
2560 VS1_0: vx0 = memref0 VS1_1 -
2561 VS1_1: vx1 = memref1 VS1_2 -
2562 VS1_2: vx2 = memref2 VS1_3 -
2563 VS1_3: vx3 = memref3 - -
2564 S1: x = load - VS1_0
2565 S2: z = x + 1 - -
2566
2567 step2: vectorize stmt S2 (done here):
2568 To vectorize stmt S2 we first need to find the relevant vector
2569 def for the first operand 'x'. This is, as usual, obtained from
2570 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2571 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2572 relevant vector def 'vx0'. Having found 'vx0' we can generate
2573 the vector stmt VS2_0, and as usual, record it in the
2574 STMT_VINFO_VEC_STMT of stmt S2.
2575 When creating the second copy (VS2_1), we obtain the relevant vector
2576 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2577 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2578 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2579 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2580 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2581 chain of stmts and pointers:
2582 RELATED_STMT VEC_STMT
2583 VS1_0: vx0 = memref0 VS1_1 -
2584 VS1_1: vx1 = memref1 VS1_2 -
2585 VS1_2: vx2 = memref2 VS1_3 -
2586 VS1_3: vx3 = memref3 - -
2587 S1: x = load - VS1_0
2588 VS2_0: vz0 = vx0 + v1 VS2_1 -
2589 VS2_1: vz1 = vx1 + v1 VS2_2 -
2590 VS2_2: vz2 = vx2 + v1 VS2_3 -
2591 VS2_3: vz3 = vx3 + v1 - -
2592 S2: z = x + 1 - VS2_0 */
2593
2594 prev_stmt_info = NULL;
2595 for (j = 0; j < ncopies; j++)
2596 {
2597 /* Handle uses. */
2598 if (j == 0)
2599 {
2600 if (op_type == binary_op)
2601 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2602 slp_node);
2603 else
2604 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2605 slp_node);
2606 }
2607 else
2608 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2609
2610 /* Arguments are ready. Create the new vector stmt. */
2611 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2612 {
2613 vop1 = ((op_type == binary_op)
2614 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2615 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2616 new_temp = make_ssa_name (vec_dest, new_stmt);
2617 gimple_assign_set_lhs (new_stmt, new_temp);
2618 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2619 if (slp_node)
2620 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2621 }
2622
2623 if (slp_node)
2624 continue;
2625
2626 if (j == 0)
2627 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2628 else
2629 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2630 prev_stmt_info = vinfo_for_stmt (new_stmt);
2631 }
2632
2633 VEC_free (tree, heap, vec_oprnds0);
2634 if (vec_oprnds1)
2635 VEC_free (tree, heap, vec_oprnds1);
2636
2637 return true;
2638 }
2639
2640
2641 /* Get vectorized definitions for loop-based vectorization. For the first
2642 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2643 scalar operand), and for the rest we get a copy with
2644 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2645 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2646 The vectors are collected into VEC_OPRNDS. */
2647
2648 static void
2649 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2650 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2651 {
2652 tree vec_oprnd;
2653
2654 /* Get the first vector operand. All the vector operands except the
2655 very first one (that is the scalar oprnd) are stmt copies, obtained
2656 with vect_get_vec_def_for_stmt_copy. */
2657 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2658 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2659 else
2660 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2661
2662 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2663
2664 /* Get second vector operand. */
2665 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2666 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2667
2668 *oprnd = vec_oprnd;
2669
2670 /* For conversion in multiple steps, continue to get operands
2671 recursively. */
2672 if (multi_step_cvt)
2673 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2674 }
2675
2676
2677 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2678 For multi-step conversions store the resulting vectors and call the function
2679 recursively. */
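/* For instance (illustrative only), a two-step demotion from int to char on
a target providing 128-bit vector modes could go V4SI -> V8HI -> V16QI: four
V4SI operands are first combined pairwise into two V8HI vectors, which are
then combined into the single V16QI result. */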
2680
2681 static void
2682 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2683 int multi_step_cvt, gimple stmt,
2684 VEC (tree, heap) *vec_dsts,
2685 gimple_stmt_iterator *gsi,
2686 slp_tree slp_node, enum tree_code code,
2687 stmt_vec_info *prev_stmt_info)
2688 {
2689 unsigned int i;
2690 tree vop0, vop1, new_tmp, vec_dest;
2691 gimple new_stmt;
2692 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2693
2694 vec_dest = VEC_pop (tree, vec_dsts);
2695
2696 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2697 {
2698 /* Create demotion operation. */
2699 vop0 = VEC_index (tree, *vec_oprnds, i);
2700 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2701 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2702 new_tmp = make_ssa_name (vec_dest, new_stmt);
2703 gimple_assign_set_lhs (new_stmt, new_tmp);
2704 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2705
2706 if (multi_step_cvt)
2707 /* Store the resulting vector for next recursive call. */
2708 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2709 else
2710 {
2711 /* This is the last step of the conversion sequence. Store the
2712 vectors in SLP_NODE or in vector info of the scalar statement
2713 (or in STMT_VINFO_RELATED_STMT chain). */
2714 if (slp_node)
2715 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2716 else
2717 {
2718 if (!*prev_stmt_info)
2719 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2720 else
2721 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2722
2723 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2724 }
2725 }
2726 }
2727
2728 /* For multi-step demotion operations we first generate demotion operations
2729 from the source type to the intermediate types, and then combine the
2730 results (stored in VEC_OPRNDS) with a demotion operation to the
2731 destination type. */
2732 if (multi_step_cvt)
2733 {
2734 /* At each level of recursion we have half of the operands we had at the
2735 previous level. */
2736 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2737 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2738 stmt, vec_dsts, gsi, slp_node,
2739 code, prev_stmt_info);
2740 }
2741 }
2742
2743
2744 /* Function vectorizable_type_demotion
2745
2746 Check if STMT performs a binary or unary operation that involves
2747 type demotion, and if it can be vectorized.
2748 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2749 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2750 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2751
2752 static bool
2753 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2754 gimple *vec_stmt, slp_tree slp_node)
2755 {
2756 tree vec_dest;
2757 tree scalar_dest;
2758 tree op0;
2759 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2760 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2761 enum tree_code code, code1 = ERROR_MARK;
2762 tree def;
2763 gimple def_stmt;
2764 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2765 stmt_vec_info prev_stmt_info;
2766 int nunits_in;
2767 int nunits_out;
2768 tree vectype_out;
2769 int ncopies;
2770 int j, i;
2771 tree vectype_in;
2772 int multi_step_cvt = 0;
2773 VEC (tree, heap) *vec_oprnds0 = NULL;
2774 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2775 tree last_oprnd, intermediate_type;
2776
2777 /* FORNOW: not supported by basic block SLP vectorization. */
2778 gcc_assert (loop_vinfo);
2779
2780 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2781 return false;
2782
2783 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2784 return false;
2785
2786 /* Is STMT a vectorizable type-demotion operation? */
2787 if (!is_gimple_assign (stmt))
2788 return false;
2789
2790 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2791 return false;
2792
2793 code = gimple_assign_rhs_code (stmt);
2794 if (!CONVERT_EXPR_CODE_P (code))
2795 return false;
2796
2797 scalar_dest = gimple_assign_lhs (stmt);
2798 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2799
2800 /* Check the operands of the operation. */
2801 op0 = gimple_assign_rhs1 (stmt);
2802 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2803 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2804 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2805 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2806 && CONVERT_EXPR_CODE_P (code))))
2807 return false;
2808 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2809 &def_stmt, &def, &dt[0], &vectype_in))
2810 {
2811 if (vect_print_dump_info (REPORT_DETAILS))
2812 fprintf (vect_dump, "use not simple.");
2813 return false;
2814 }
2815 /* If op0 is an external def, use a vector type with the
2816 same size as the output vector type if possible. */
2817 if (!vectype_in)
2818 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2819 if (vec_stmt)
2820 gcc_assert (vectype_in);
2821 if (!vectype_in)
2822 {
2823 if (vect_print_dump_info (REPORT_DETAILS))
2824 {
2825 fprintf (vect_dump, "no vectype for scalar type ");
2826 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2827 }
2828
2829 return false;
2830 }
2831
2832 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2833 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
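/* A demotion narrows the elements, so the output vector must hold more
elements than the input vector. */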
2834 if (nunits_in >= nunits_out)
2835 return false;
2836
2837 /* Multiple types in SLP are handled by creating the appropriate number of
2838 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2839 case of SLP. */
2840 if (slp_node)
2841 ncopies = 1;
2842 else
2843 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2844 gcc_assert (ncopies >= 1);
2845
2846 /* Supportable by target? */
2847 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2848 &code1, &multi_step_cvt, &interm_types))
2849 return false;
2850
2851 if (!vec_stmt) /* transformation not required. */
2852 {
2853 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2854 if (vect_print_dump_info (REPORT_DETAILS))
2855 fprintf (vect_dump, "=== vectorizable_demotion ===");
2856 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2857 return true;
2858 }
2859
2860 /** Transform. **/
2861 if (vect_print_dump_info (REPORT_DETAILS))
2862 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2863 ncopies);
2864
2865 /* In case of multi-step demotion, we first generate demotion operations to
2866 the intermediate types, and then from those types to the final one.
2867 We create vector destinations for the intermediate types (TYPES) received
2868 from supportable_narrowing_operation, and store them in the correct order
2869 for future use in vect_create_vectorized_demotion_stmts(). */
2870 if (multi_step_cvt)
2871 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2872 else
2873 vec_dsts = VEC_alloc (tree, heap, 1);
2874
2875 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2876 VEC_quick_push (tree, vec_dsts, vec_dest);
2877
2878 if (multi_step_cvt)
2879 {
2880 for (i = VEC_length (tree, interm_types) - 1;
2881 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2882 {
2883 vec_dest = vect_create_destination_var (scalar_dest,
2884 intermediate_type);
2885 VEC_quick_push (tree, vec_dsts, vec_dest);
2886 }
2887 }
2888
2889 /* In case the vectorization factor (VF) is bigger than the number
2890 of elements that we can fit in a vectype (nunits), we have to generate
2891 more than one vector stmt - i.e - we need to "unroll" the
2892 vector stmt by a factor VF/nunits. */
2893 last_oprnd = op0;
2894 prev_stmt_info = NULL;
2895 for (j = 0; j < ncopies; j++)
2896 {
2897 /* Handle uses. */
2898 if (slp_node)
2899 vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
2900 else
2901 {
2902 VEC_free (tree, heap, vec_oprnds0);
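/* A demotion with MULTI_STEP_CVT intermediate steps consumes
2**(MULTI_STEP_CVT + 1) vector defs of the input type per copy, so fetch
that many defs here. */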
2903 vec_oprnds0 = VEC_alloc (tree, heap,
2904 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2905 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2906 vect_pow2 (multi_step_cvt) - 1);
2907 }
2908
2909 /* Arguments are ready. Create the new vector stmts. */
2910 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2911 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2912 multi_step_cvt, stmt, tmp_vec_dsts,
2913 gsi, slp_node, code1,
2914 &prev_stmt_info);
2915 }
2916
2917 VEC_free (tree, heap, vec_oprnds0);
2918 VEC_free (tree, heap, vec_dsts);
2919 VEC_free (tree, heap, tmp_vec_dsts);
2920 VEC_free (tree, heap, interm_types);
2921
2922 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2923 return true;
2924 }
2925
2926
2927 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2928 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2929 the resulting vectors and call the function recursively. */
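/* For instance (illustrative only), promoting short to int on a target
providing 128-bit vector modes turns each V8HI operand into two V4SI results
(the low and high halves), so each recursion level doubles the number of
vectors. */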
2930
2931 static void
2932 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2933 VEC (tree, heap) **vec_oprnds1,
2934 int multi_step_cvt, gimple stmt,
2935 VEC (tree, heap) *vec_dsts,
2936 gimple_stmt_iterator *gsi,
2937 slp_tree slp_node, enum tree_code code1,
2938 enum tree_code code2, tree decl1,
2939 tree decl2, int op_type,
2940 stmt_vec_info *prev_stmt_info)
2941 {
2942 int i;
2943 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2944 gimple new_stmt1, new_stmt2;
2945 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2946 VEC (tree, heap) *vec_tmp;
2947
2948 vec_dest = VEC_pop (tree, vec_dsts);
2949 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2950
2951 FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
2952 {
2953 if (op_type == binary_op)
2954 vop1 = VEC_index (tree, *vec_oprnds1, i);
2955 else
2956 vop1 = NULL_TREE;
2957
2958 /* Generate the two halves of promotion operation. */
2959 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2960 op_type, vec_dest, gsi, stmt);
2961 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2962 op_type, vec_dest, gsi, stmt);
2963 if (is_gimple_call (new_stmt1))
2964 {
2965 new_tmp1 = gimple_call_lhs (new_stmt1);
2966 new_tmp2 = gimple_call_lhs (new_stmt2);
2967 }
2968 else
2969 {
2970 new_tmp1 = gimple_assign_lhs (new_stmt1);
2971 new_tmp2 = gimple_assign_lhs (new_stmt2);
2972 }
2973
2974 if (multi_step_cvt)
2975 {
2976 /* Store the results for the recursive call. */
2977 VEC_quick_push (tree, vec_tmp, new_tmp1);
2978 VEC_quick_push (tree, vec_tmp, new_tmp2);
2979 }
2980 else
2981 {
2982 /* Last step of the promotion sequence - store the results. */
2983 if (slp_node)
2984 {
2985 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2986 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2987 }
2988 else
2989 {
2990 if (!*prev_stmt_info)
2991 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2992 else
2993 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2994
2995 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2996 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2997 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2998 }
2999 }
3000 }
3001
3002 if (multi_step_cvt)
3003 {
3004 /* For a multi-step promotion operation we call the function
3005 recursively for every stage. We start from the input type,
3006 create promotion operations to the intermediate types, and then
3007 create promotions to the output type. */
3008 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
3009 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
3010 multi_step_cvt - 1, stmt,
3011 vec_dsts, gsi, slp_node, code1,
3012 code2, decl1, decl2, op_type,
3013 prev_stmt_info);
3014 }
3015
3016 VEC_free (tree, heap, vec_tmp);
3017 }
3018
3019
3020 /* Function vectorizable_type_promotion
3021
3022 Check if STMT performs a binary or unary operation that involves
3023 type promotion, and if it can be vectorized.
3024 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3025 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3026 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3027
3028 static bool
3029 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
3030 gimple *vec_stmt, slp_tree slp_node)
3031 {
3032 tree vec_dest;
3033 tree scalar_dest;
3034 tree op0, op1 = NULL;
3035 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
3036 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3037 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3038 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3039 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3040 int op_type;
3041 tree def;
3042 gimple def_stmt;
3043 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3044 stmt_vec_info prev_stmt_info;
3045 int nunits_in;
3046 int nunits_out;
3047 tree vectype_out;
3048 int ncopies;
3049 int j, i;
3050 tree vectype_in;
3051 tree intermediate_type = NULL_TREE;
3052 int multi_step_cvt = 0;
3053 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
3054 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
3055
3056 /* FORNOW: not supported by basic block SLP vectorization. */
3057 gcc_assert (loop_vinfo);
3058
3059 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3060 return false;
3061
3062 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3063 return false;
3064
3065 /* Is STMT a vectorizable type-promotion operation? */
3066 if (!is_gimple_assign (stmt))
3067 return false;
3068
3069 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3070 return false;
3071
3072 code = gimple_assign_rhs_code (stmt);
3073 if (!CONVERT_EXPR_CODE_P (code)
3074 && code != WIDEN_MULT_EXPR)
3075 return false;
3076
3077 scalar_dest = gimple_assign_lhs (stmt);
3078 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3079
3080 /* Check the operands of the operation. */
3081 op0 = gimple_assign_rhs1 (stmt);
3082 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
3083 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3084 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
3085 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
3086 && CONVERT_EXPR_CODE_P (code))))
3087 return false;
3088 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
3089 &def_stmt, &def, &dt[0], &vectype_in))
3090 {
3091 if (vect_print_dump_info (REPORT_DETAILS))
3092 fprintf (vect_dump, "use not simple.");
3093 return false;
3094 }
3095 /* If op0 is an external or constant def, use a vector type with
3096 the same size as the output vector type. */
3097 if (!vectype_in)
3098 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
3099 if (vec_stmt)
3100 gcc_assert (vectype_in);
3101 if (!vectype_in)
3102 {
3103 if (vect_print_dump_info (REPORT_DETAILS))
3104 {
3105 fprintf (vect_dump, "no vectype for scalar type ");
3106 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
3107 }
3108
3109 return false;
3110 }
3111
3112 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3113 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
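/* A promotion widens the elements, so the output vector must hold fewer
elements than the input vector. */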
3114 if (nunits_in <= nunits_out)
3115 return false;
3116
3117 /* Multiple types in SLP are handled by creating the appropriate number of
3118 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3119 case of SLP. */
3120 if (slp_node)
3121 ncopies = 1;
3122 else
3123 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3124
3125 gcc_assert (ncopies >= 1);
3126
3127 op_type = TREE_CODE_LENGTH (code);
3128 if (op_type == binary_op)
3129 {
3130 op1 = gimple_assign_rhs2 (stmt);
3131 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
3132 {
3133 if (vect_print_dump_info (REPORT_DETAILS))
3134 fprintf (vect_dump, "use not simple.");
3135 return false;
3136 }
3137 }
3138
3139 /* Supportable by target? */
3140 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3141 &decl1, &decl2, &code1, &code2,
3142 &multi_step_cvt, &interm_types))
3143 return false;
3144
3145 /* Binary widening operation can only be supported directly by the
3146 architecture. */
3147 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3148
3149 if (!vec_stmt) /* transformation not required. */
3150 {
3151 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3152 if (vect_print_dump_info (REPORT_DETAILS))
3153 fprintf (vect_dump, "=== vectorizable_promotion ===");
3154 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
3155 return true;
3156 }
3157
3158 /** Transform. **/
3159
3160 if (vect_print_dump_info (REPORT_DETAILS))
3161 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
3162 ncopies);
3163
3164 /* Handle def. */
3165 /* In case of multi-step promotion, we first generate promotion operations
3166 to the intermediate types, and then from those types to the final one.
3167 We store the vector destinations in VEC_DSTS in the correct order for
3168 recursive creation of promotion operations in
3169 vect_create_vectorized_promotion_stmts(). Vector destinations are created
3170 according to TYPES received from supportable_widening_operation(). */
3171 if (multi_step_cvt)
3172 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3173 else
3174 vec_dsts = VEC_alloc (tree, heap, 1);
3175
3176 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3177 VEC_quick_push (tree, vec_dsts, vec_dest);
3178
3179 if (multi_step_cvt)
3180 {
3181 for (i = VEC_length (tree, interm_types) - 1;
3182 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3183 {
3184 vec_dest = vect_create_destination_var (scalar_dest,
3185 intermediate_type);
3186 VEC_quick_push (tree, vec_dsts, vec_dest);
3187 }
3188 }
3189
3190 if (!slp_node)
3191 {
3192 vec_oprnds0 = VEC_alloc (tree, heap,
3193 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3194 if (op_type == binary_op)
3195 vec_oprnds1 = VEC_alloc (tree, heap, 1);
3196 }
3197
3198 /* In case the vectorization factor (VF) is bigger than the number
3199 of elements that we can fit in a vectype (nunits), we have to generate
3200 more than one vector stmt - i.e - we need to "unroll" the
3201 vector stmt by a factor VF/nunits. */
3202
3203 prev_stmt_info = NULL;
3204 for (j = 0; j < ncopies; j++)
3205 {
3206 /* Handle uses. */
3207 if (j == 0)
3208 {
3209 if (slp_node)
3210 vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
3211 &vec_oprnds1, -1);
3212 else
3213 {
3214 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3215 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
3216 if (op_type == binary_op)
3217 {
3218 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3219 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
3220 }
3221 }
3222 }
3223 else
3224 {
3225 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3226 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
3227 if (op_type == binary_op)
3228 {
3229 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3230 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3231 }
3232 }
3233
3234 /* Arguments are ready. Create the new vector stmts. */
3235 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3236 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
3237 multi_step_cvt, stmt,
3238 tmp_vec_dsts,
3239 gsi, slp_node, code1, code2,
3240 decl1, decl2, op_type,
3241 &prev_stmt_info);
3242 }
3243
3244 VEC_free (tree, heap, vec_dsts);
3245 VEC_free (tree, heap, tmp_vec_dsts);
3246 VEC_free (tree, heap, interm_types);
3247 VEC_free (tree, heap, vec_oprnds0);
3248 VEC_free (tree, heap, vec_oprnds1);
3249
3250 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3251 return true;
3252 }
3253
3254
3255 /* Function vectorizable_store.
3256
3257 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
3258 can be vectorized.
3259 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3260 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3261 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3262
3263 static bool
3264 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3265 slp_tree slp_node)
3266 {
3267 tree scalar_dest;
3268 tree data_ref;
3269 tree op;
3270 tree vec_oprnd = NULL_TREE;
3271 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3272 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3273 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3274 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3275 struct loop *loop = NULL;
3276 enum machine_mode vec_mode;
3277 tree dummy;
3278 enum dr_alignment_support alignment_support_scheme;
3279 tree def;
3280 gimple def_stmt;
3281 enum vect_def_type dt;
3282 stmt_vec_info prev_stmt_info = NULL;
3283 tree dataref_ptr = NULL_TREE;
3284 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3285 int ncopies;
3286 int j;
3287 gimple next_stmt, first_stmt = NULL;
3288 bool strided_store = false;
3289 unsigned int group_size, i;
3290 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3291 bool inv_p;
3292 VEC(tree,heap) *vec_oprnds = NULL;
3293 bool slp = (slp_node != NULL);
3294 unsigned int vec_num;
3295 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3296
3297 if (loop_vinfo)
3298 loop = LOOP_VINFO_LOOP (loop_vinfo);
3299
3300 /* Multiple types in SLP are handled by creating the appropriate number of
3301 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3302 case of SLP. */
3303 if (slp)
3304 ncopies = 1;
3305 else
3306 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3307
3308 gcc_assert (ncopies >= 1);
3309
3310 /* FORNOW. This restriction should be relaxed. */
3311 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3312 {
3313 if (vect_print_dump_info (REPORT_DETAILS))
3314 fprintf (vect_dump, "multiple types in nested loop.");
3315 return false;
3316 }
3317
3318 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3319 return false;
3320
3321 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3322 return false;
3323
3324 /* Is vectorizable store? */
3325
3326 if (!is_gimple_assign (stmt))
3327 return false;
3328
3329 scalar_dest = gimple_assign_lhs (stmt);
3330 if (TREE_CODE (scalar_dest) != ARRAY_REF
3331 && TREE_CODE (scalar_dest) != INDIRECT_REF
3332 && TREE_CODE (scalar_dest) != COMPONENT_REF
3333 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3334 && TREE_CODE (scalar_dest) != REALPART_EXPR
3335 && TREE_CODE (scalar_dest) != MEM_REF)
3336 return false;
3337
3338 gcc_assert (gimple_assign_single_p (stmt));
3339 op = gimple_assign_rhs1 (stmt);
3340 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3341 {
3342 if (vect_print_dump_info (REPORT_DETAILS))
3343 fprintf (vect_dump, "use not simple.");
3344 return false;
3345 }
3346
3347 /* The scalar rhs type needs to be trivially convertible to the vector
3348 component type. This should always be the case. */
3349 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
3350 {
3351 if (vect_print_dump_info (REPORT_DETAILS))
3352 fprintf (vect_dump, "??? operands of different types");
3353 return false;
3354 }
3355
3356 vec_mode = TYPE_MODE (vectype);
3357 /* FORNOW. In some cases we can vectorize even if the data type is not
3358 supported (e.g. array initialization with 0). */
3359 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3360 return false;
3361
3362 if (!STMT_VINFO_DATA_REF (stmt_info))
3363 return false;
3364
3365 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
3366 {
3367 if (vect_print_dump_info (REPORT_DETAILS))
3368 fprintf (vect_dump, "negative step for store.");
3369 return false;
3370 }
3371
3372 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3373 {
3374 strided_store = true;
3375 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3376 if (!vect_strided_store_supported (vectype)
3377 && !PURE_SLP_STMT (stmt_info) && !slp)
3378 return false;
3379
3380 if (first_stmt == stmt)
3381 {
3382 /* STMT is the leader of the group. Check the operands of all the
3383 stmts of the group. */
3384 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3385 while (next_stmt)
3386 {
3387 gcc_assert (gimple_assign_single_p (next_stmt));
3388 op = gimple_assign_rhs1 (next_stmt);
3389 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3390 &def, &dt))
3391 {
3392 if (vect_print_dump_info (REPORT_DETAILS))
3393 fprintf (vect_dump, "use not simple.");
3394 return false;
3395 }
3396 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3397 }
3398 }
3399 }
3400
3401 if (!vec_stmt) /* transformation not required. */
3402 {
3403 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3404 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3405 return true;
3406 }
3407
3408 /** Transform. **/
3409
3410 if (strided_store)
3411 {
3412 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3413 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3414
3415 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3416
3417 /* FORNOW */
3418 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3419
3420 /* We vectorize all the stmts of the interleaving group when we
3421 reach the last stmt in the group. */
3422 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3423 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3424 && !slp)
3425 {
3426 *vec_stmt = NULL;
3427 return true;
3428 }
3429
3430 if (slp)
3431 {
3432 strided_store = false;
3433 /* VEC_NUM is the number of vect stmts to be created for this
3434 group. */
3435 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3436 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3437 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3438 }
3439 else
3440 /* VEC_NUM is the number of vect stmts to be created for this
3441 group. */
3442 vec_num = group_size;
3443 }
3444 else
3445 {
3446 first_stmt = stmt;
3447 first_dr = dr;
3448 group_size = vec_num = 1;
3449 }
3450
3451 if (vect_print_dump_info (REPORT_DETAILS))
3452 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3453
3454 dr_chain = VEC_alloc (tree, heap, group_size);
3455 oprnds = VEC_alloc (tree, heap, group_size);
3456
3457 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3458 gcc_assert (alignment_support_scheme);
3459
3460 /* In case the vectorization factor (VF) is bigger than the number
3461 of elements that we can fit in a vectype (nunits), we have to generate
3462 more than one vector stmt - i.e - we need to "unroll" the
3463 vector stmt by a factor VF/nunits. For more details see documentation in
3464 vect_get_vec_def_for_copy_stmt. */
3465
3466 /* In case of interleaving (non-unit strided access):
3467
3468 S1: &base + 2 = x2
3469 S2: &base = x0
3470 S3: &base + 1 = x1
3471 S4: &base + 3 = x3
3472
3473 We create vectorized stores starting from base address (the access of the
3474 first stmt in the chain (S2 in the above example), when the last store stmt
3475 of the chain (S4) is reached:
3476
3477 VS1: &base = vx2
3478 VS2: &base + vec_size*1 = vx0
3479 VS3: &base + vec_size*2 = vx1
3480 VS4: &base + vec_size*3 = vx3
3481
3482 Then permutation statements are generated:
3483
3484 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3485 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3486 ...
3487
3488 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3489 (the order of the data-refs in the output of vect_permute_store_chain
3490 corresponds to the order of scalar stmts in the interleaving chain - see
3491 the documentation of vect_permute_store_chain()).
3492
3493 In case of both multiple types and interleaving, above vector stores and
3494 permutation stmts are created for every copy. The result vector stmts are
3495 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3496 STMT_VINFO_RELATED_STMT for the next copies.
3497 */
3498
3499 prev_stmt_info = NULL;
3500 for (j = 0; j < ncopies; j++)
3501 {
3502 gimple new_stmt;
3503 gimple ptr_incr;
3504
3505 if (j == 0)
3506 {
3507 if (slp)
3508 {
3509 /* Get vectorized arguments for SLP_NODE. */
3510 vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
3511 NULL, -1);
3512
3513 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3514 }
3515 else
3516 {
3517 /* For interleaved stores we collect vectorized defs for all the
3518 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3519 used as an input to vect_permute_store_chain(), and OPRNDS as
3520 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3521
3522 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3523 OPRNDS are of size 1. */
3524 next_stmt = first_stmt;
3525 for (i = 0; i < group_size; i++)
3526 {
3527 /* Since gaps are not supported for interleaved stores,
3528 GROUP_SIZE is the exact number of stmts in the chain.
3529                    Therefore, NEXT_STMT can't be NULL.  In case that
3530 there is no interleaving, GROUP_SIZE is 1, and only one
3531 iteration of the loop will be executed. */
3532 gcc_assert (next_stmt
3533 && gimple_assign_single_p (next_stmt));
3534 op = gimple_assign_rhs1 (next_stmt);
3535
3536 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3537 NULL);
3538 VEC_quick_push(tree, dr_chain, vec_oprnd);
3539 VEC_quick_push(tree, oprnds, vec_oprnd);
3540 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3541 }
3542 }
3543
3544 	  /* We should have caught mismatched types earlier.  */
3545 gcc_assert (useless_type_conversion_p (vectype,
3546 TREE_TYPE (vec_oprnd)));
3547 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3548 &dummy, &ptr_incr, false,
3549 &inv_p);
3550 gcc_assert (bb_vinfo || !inv_p);
3551 }
3552 else
3553 {
3554 /* For interleaved stores we created vectorized defs for all the
3555 defs stored in OPRNDS in the previous iteration (previous copy).
3556 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3557 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3558 next copy.
3559 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3560 OPRNDS are of size 1. */
3561 for (i = 0; i < group_size; i++)
3562 {
3563 op = VEC_index (tree, oprnds, i);
3564 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3565 &dt);
3566 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3567 VEC_replace(tree, dr_chain, i, vec_oprnd);
3568 VEC_replace(tree, oprnds, i, vec_oprnd);
3569 }
3570 dataref_ptr =
3571 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3572 }
3573
3574 if (strided_store)
3575 {
3576 result_chain = VEC_alloc (tree, heap, group_size);
3577 /* Permute. */
3578 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3579 &result_chain))
3580 return false;
3581 }
3582
3583 next_stmt = first_stmt;
3584 for (i = 0; i < vec_num; i++)
3585 {
3586 struct ptr_info_def *pi;
3587
3588 if (i > 0)
3589 /* Bump the vector pointer. */
3590 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3591 NULL_TREE);
3592
3593 if (slp)
3594 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3595 else if (strided_store)
3596 /* For strided stores vectorized defs are interleaved in
3597 vect_permute_store_chain(). */
3598 vec_oprnd = VEC_index (tree, result_chain, i);
3599
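	  /* Build the vector store as a MEM_REF through DATAREF_PTR that
	     keeps the alias information of the original scalar reference,
	     and record the known (mis)alignment on the pointer's SSA info
	     so later passes can expand the access correctly.  */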
3600 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
3601 build_int_cst (reference_alias_ptr_type
3602 (DR_REF (first_dr)), 0));
3603 pi = get_ptr_info (dataref_ptr);
3604 pi->align = TYPE_ALIGN_UNIT (vectype);
3605 if (aligned_access_p (first_dr))
3606 pi->misalign = 0;
3607 else if (DR_MISALIGNMENT (first_dr) == -1)
3608 {
3609 TREE_TYPE (data_ref)
3610 = build_aligned_type (TREE_TYPE (data_ref),
3611 TYPE_ALIGN (TREE_TYPE (vectype)));
3612 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
3613 pi->misalign = 0;
3614 }
3615 else
3616 {
3617 TREE_TYPE (data_ref)
3618 = build_aligned_type (TREE_TYPE (data_ref),
3619 TYPE_ALIGN (TREE_TYPE (vectype)));
3620 pi->misalign = DR_MISALIGNMENT (first_dr);
3621 }
3622
3623 /* Arguments are ready. Create the new vector stmt. */
3624 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3625 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3626 mark_symbols_for_renaming (new_stmt);
3627
3628 if (slp)
3629 continue;
3630
3631 if (j == 0)
3632 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3633 else
3634 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3635
3636 prev_stmt_info = vinfo_for_stmt (new_stmt);
3637 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3638 if (!next_stmt)
3639 break;
3640 }
3641 }
3642
3643 VEC_free (tree, heap, dr_chain);
3644 VEC_free (tree, heap, oprnds);
3645 if (result_chain)
3646 VEC_free (tree, heap, result_chain);
3647 if (vec_oprnds)
3648 VEC_free (tree, heap, vec_oprnds);
3649
3650 return true;
3651 }
3652
3653 /* Given a vector type VECTYPE, return a builtin DECL to be used
3654    for vector permutation and store into *MASK a mask that implements
3655    reversal of the vector elements.  If that is impossible,
3656    return NULL (and leave *MASK unchanged).  */
3657
3658 static tree
3659 perm_mask_for_reverse (tree vectype, tree *mask)
3660 {
3661 tree builtin_decl;
3662 tree mask_element_type, mask_type;
3663 tree mask_vec = NULL;
3664 int i;
3665 int nunits;
3666 if (!targetm.vectorize.builtin_vec_perm)
3667 return NULL;
3668
3669 builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
3670 &mask_element_type);
3671 if (!builtin_decl || !mask_element_type)
3672 return NULL;
3673
3674 mask_type = get_vectype_for_scalar_type (mask_element_type);
3675 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3676 if (!mask_type
3677 || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
3678 return NULL;
3679
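  /* Build the permutation mask.  tree_cons prepends, so pushing the
     indices 0 .. nunits-1 produces the list nunits-1, ..., 1, 0, i.e.
     a mask that selects the vector elements in reverse order.  */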
3680 for (i = 0; i < nunits; i++)
3681 mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
3682 mask_vec = build_vector (mask_type, mask_vec);
3683
3684 if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
3685 return NULL;
3686 if (mask)
3687 *mask = mask_vec;
3688 return builtin_decl;
3689 }
3690
3691 /* Given a vector variable X, that was generated for the scalar LHS of
3692 STMT, generate instructions to reverse the vector elements of X,
3693    insert them at *GSI and return the permuted vector variable.  */
3694
3695 static tree
3696 reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
3697 {
3698 tree vectype = TREE_TYPE (x);
3699 tree mask_vec, builtin_decl;
3700 tree perm_dest, data_ref;
3701 gimple perm_stmt;
3702
3703 builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);
3704
3705 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3706
3707 /* Generate the permute statement. */
3708 perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
3709 data_ref = make_ssa_name (perm_dest, perm_stmt);
3710 gimple_call_set_lhs (perm_stmt, data_ref);
3711 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3712
3713 return data_ref;
3714 }
3715
3716 /* vectorizable_load.
3717
3718    Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3719 can be vectorized.
3720 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3721 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3722 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3723
3724 static bool
3725 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3726 slp_tree slp_node, slp_instance slp_node_instance)
3727 {
3728 tree scalar_dest;
3729 tree vec_dest = NULL;
3730 tree data_ref = NULL;
3731 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3732 stmt_vec_info prev_stmt_info;
3733 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3734 struct loop *loop = NULL;
3735 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3736 bool nested_in_vect_loop = false;
3737 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3738 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3739 tree new_temp;
3740 enum machine_mode mode;
3741 gimple new_stmt = NULL;
3742 tree dummy;
3743 enum dr_alignment_support alignment_support_scheme;
3744 tree dataref_ptr = NULL_TREE;
3745 gimple ptr_incr;
3746 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3747 int ncopies;
3748 int i, j, group_size;
3749 tree msq = NULL_TREE, lsq;
3750 tree offset = NULL_TREE;
3751 tree realignment_token = NULL_TREE;
3752 gimple phi = NULL;
3753 VEC(tree,heap) *dr_chain = NULL;
3754 bool strided_load = false;
3755 gimple first_stmt;
3756 tree scalar_type;
3757 bool inv_p;
3758 bool negative;
3759 bool compute_in_loop = false;
3760 struct loop *at_loop;
3761 int vec_num;
3762 bool slp = (slp_node != NULL);
3763 bool slp_perm = false;
3764 enum tree_code code;
3765 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3766 int vf;
3767
3768 if (loop_vinfo)
3769 {
3770 loop = LOOP_VINFO_LOOP (loop_vinfo);
3771 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3772 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3773 }
3774 else
3775 vf = 1;
3776
3777 /* Multiple types in SLP are handled by creating the appropriate number of
3778 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3779 case of SLP. */
3780 if (slp)
3781 ncopies = 1;
3782 else
3783 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3784
3785 gcc_assert (ncopies >= 1);
3786
3787 /* FORNOW. This restriction should be relaxed. */
3788 if (nested_in_vect_loop && ncopies > 1)
3789 {
3790 if (vect_print_dump_info (REPORT_DETAILS))
3791 fprintf (vect_dump, "multiple types in nested loop.");
3792 return false;
3793 }
3794
3795 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3796 return false;
3797
3798 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3799 return false;
3800
3801 /* Is vectorizable load? */
3802 if (!is_gimple_assign (stmt))
3803 return false;
3804
3805 scalar_dest = gimple_assign_lhs (stmt);
3806 if (TREE_CODE (scalar_dest) != SSA_NAME)
3807 return false;
3808
3809 code = gimple_assign_rhs_code (stmt);
3810 if (code != ARRAY_REF
3811 && code != INDIRECT_REF
3812 && code != COMPONENT_REF
3813 && code != IMAGPART_EXPR
3814 && code != REALPART_EXPR
3815 && code != MEM_REF)
3816 return false;
3817
3818 if (!STMT_VINFO_DATA_REF (stmt_info))
3819 return false;
3820
3821 negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
3822 if (negative && ncopies > 1)
3823 {
3824 if (vect_print_dump_info (REPORT_DETAILS))
3825 fprintf (vect_dump, "multiple types with negative step.");
3826 return false;
3827 }
3828
3829 scalar_type = TREE_TYPE (DR_REF (dr));
3830 mode = TYPE_MODE (vectype);
3831
3832   /* FORNOW.  In some cases we can vectorize even if the data-type is not
3833      supported (e.g. data copies).  */
3834 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
3835 {
3836 if (vect_print_dump_info (REPORT_DETAILS))
3837 fprintf (vect_dump, "Aligned load, but unsupported type.");
3838 return false;
3839 }
3840
3841 /* The vector component type needs to be trivially convertible to the
3842 scalar lhs. This should always be the case. */
3843 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3844 {
3845 if (vect_print_dump_info (REPORT_DETAILS))
3846 fprintf (vect_dump, "??? operands of different types");
3847 return false;
3848 }
3849
3850   /* Check if the load is part of an interleaving chain.  */
3851 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3852 {
3853 strided_load = true;
3854 /* FORNOW */
3855 gcc_assert (! nested_in_vect_loop);
3856
3857 /* Check if interleaving is supported. */
3858 if (!vect_strided_load_supported (vectype)
3859 && !PURE_SLP_STMT (stmt_info) && !slp)
3860 return false;
3861 }
3862
3863 if (negative)
3864 {
3865 gcc_assert (!strided_load);
3866 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
3867 if (alignment_support_scheme != dr_aligned
3868 && alignment_support_scheme != dr_unaligned_supported)
3869 {
3870 if (vect_print_dump_info (REPORT_DETAILS))
3871 fprintf (vect_dump, "negative step but alignment required.");
3872 return false;
3873 }
3874 if (!perm_mask_for_reverse (vectype, NULL))
3875 {
3876 if (vect_print_dump_info (REPORT_DETAILS))
3877 fprintf (vect_dump, "negative step and reversing not supported.");
3878 return false;
3879 }
3880 }
3881
3882 if (!vec_stmt) /* transformation not required. */
3883 {
3884 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3885 vect_model_load_cost (stmt_info, ncopies, NULL);
3886 return true;
3887 }
3888
3889 if (vect_print_dump_info (REPORT_DETAILS))
3890 fprintf (vect_dump, "transform load.");
3891
3892 /** Transform. **/
3893
3894 if (strided_load)
3895 {
3896 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3897 /* Check if the chain of loads is already vectorized. */
3898 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3899 {
3900 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3901 return true;
3902 }
3903 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3904 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3905
3906 /* VEC_NUM is the number of vect stmts to be created for this group. */
3907 if (slp)
3908 {
3909 strided_load = false;
3910 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3911 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3912 slp_perm = true;
3913 }
3914 else
3915 vec_num = group_size;
3916
3917 dr_chain = VEC_alloc (tree, heap, vec_num);
3918 }
3919 else
3920 {
3921 first_stmt = stmt;
3922 first_dr = dr;
3923 group_size = vec_num = 1;
3924 }
3925
3926 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3927 gcc_assert (alignment_support_scheme);
3928
3929 /* In case the vectorization factor (VF) is bigger than the number
3930 of elements that we can fit in a vectype (nunits), we have to generate
3931 more than one vector stmt - i.e - we need to "unroll" the
3932 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3933 from one copy of the vector stmt to the next, in the field
3934 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3935 stages to find the correct vector defs to be used when vectorizing
3936 stmts that use the defs of the current stmt. The example below
3937 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
3938 need to create 4 vectorized stmts):
3939
3940 before vectorization:
3941 RELATED_STMT VEC_STMT
3942 S1: x = memref - -
3943 S2: z = x + 1 - -
3944
3945 step 1: vectorize stmt S1:
3946 We first create the vector stmt VS1_0, and, as usual, record a
3947 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3948 Next, we create the vector stmt VS1_1, and record a pointer to
3949 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3950 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3951 stmts and pointers:
3952 RELATED_STMT VEC_STMT
3953 VS1_0: vx0 = memref0 VS1_1 -
3954 VS1_1: vx1 = memref1 VS1_2 -
3955 VS1_2: vx2 = memref2 VS1_3 -
3956 VS1_3: vx3 = memref3 - -
3957 S1: x = load - VS1_0
3958 S2: z = x + 1 - -
3959
3960 See in documentation in vect_get_vec_def_for_stmt_copy for how the
3961 information we recorded in RELATED_STMT field is used to vectorize
3962 stmt S2. */
3963
3964 /* In case of interleaving (non-unit strided access):
3965
3966 S1: x2 = &base + 2
3967 S2: x0 = &base
3968 S3: x1 = &base + 1
3969 S4: x3 = &base + 3
3970
3971 Vectorized loads are created in the order of memory accesses
3972 starting from the access of the first stmt of the chain:
3973
3974 VS1: vx0 = &base
3975 VS2: vx1 = &base + vec_size*1
3976 VS3: vx3 = &base + vec_size*2
3977 VS4: vx4 = &base + vec_size*3
3978
3979 Then permutation statements are generated:
3980
3981 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3982 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3983 ...
3984
3985 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3986 (the order of the data-refs in the output of vect_permute_load_chain
3987 corresponds to the order of scalar stmts in the interleaving chain - see
3988 the documentation of vect_permute_load_chain()).
3989 The generation of permutation stmts and recording them in
3990 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3991
3992 In case of both multiple types and interleaving, the vector loads and
3993 permutation stmts above are created for every copy. The result vector
3994 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
3995 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
3996
3997 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3998 on a target that supports unaligned accesses (dr_unaligned_supported)
3999 we generate the following code:
4000 p = initial_addr;
4001 indx = 0;
4002 loop {
4003 p = p + indx * vectype_size;
4004 vec_dest = *(p);
4005 indx = indx + 1;
4006 }
4007
4008 Otherwise, the data reference is potentially unaligned on a target that
4009 does not support unaligned accesses (dr_explicit_realign_optimized) -
4010 then generate the following code, in which the data in each iteration is
4011 obtained by two vector loads, one from the previous iteration, and one
4012 from the current iteration:
4013 p1 = initial_addr;
4014 msq_init = *(floor(p1))
4015 p2 = initial_addr + VS - 1;
4016 realignment_token = call target_builtin;
4017 indx = 0;
4018 loop {
4019 p2 = p2 + indx * vectype_size
4020 lsq = *(floor(p2))
4021 vec_dest = realign_load (msq, lsq, realignment_token)
4022 indx = indx + 1;
4023 msq = lsq;
4024 } */
4025
4026 /* If the misalignment remains the same throughout the execution of the
4027 loop, we can create the init_addr and permutation mask at the loop
4028 preheader. Otherwise, it needs to be created inside the loop.
4029 This can only occur when vectorizing memory accesses in the inner-loop
4030 nested within an outer-loop that is being vectorized. */
4031
4032 if (loop && nested_in_vect_loop_p (loop, stmt)
4033 && (TREE_INT_CST_LOW (DR_STEP (dr))
4034 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
4035 {
4036 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
4037 compute_in_loop = true;
4038 }
4039
4040 if ((alignment_support_scheme == dr_explicit_realign_optimized
4041 || alignment_support_scheme == dr_explicit_realign)
4042 && !compute_in_loop)
4043 {
4044 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
4045 alignment_support_scheme, NULL_TREE,
4046 &at_loop);
4047 if (alignment_support_scheme == dr_explicit_realign_optimized)
4048 {
4049 phi = SSA_NAME_DEF_STMT (msq);
4050 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4051 }
4052 }
4053 else
4054 at_loop = loop;
4055
4056 if (negative)
4057 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
4058
4059 prev_stmt_info = NULL;
4060 for (j = 0; j < ncopies; j++)
4061 {
4062 /* 1. Create the vector pointer update chain. */
4063 if (j == 0)
4064 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
4065 at_loop, offset,
4066 &dummy, &ptr_incr, false,
4067 &inv_p);
4068 else
4069 dataref_ptr =
4070 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
4071
4072 for (i = 0; i < vec_num; i++)
4073 {
4074 if (i > 0)
4075 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
4076 NULL_TREE);
4077
4078 /* 2. Create the vector-load in the loop. */
4079 switch (alignment_support_scheme)
4080 {
4081 case dr_aligned:
4082 case dr_unaligned_supported:
4083 {
4084 struct ptr_info_def *pi;
4085 data_ref
4086 = build2 (MEM_REF, vectype, dataref_ptr,
4087 build_int_cst (reference_alias_ptr_type
4088 (DR_REF (first_dr)), 0));
4089 pi = get_ptr_info (dataref_ptr);
4090 pi->align = TYPE_ALIGN_UNIT (vectype);
4091 if (alignment_support_scheme == dr_aligned)
4092 {
4093 gcc_assert (aligned_access_p (first_dr));
4094 pi->misalign = 0;
4095 }
4096 else if (DR_MISALIGNMENT (first_dr) == -1)
4097 {
4098 TREE_TYPE (data_ref)
4099 = build_aligned_type (TREE_TYPE (data_ref),
4100 TYPE_ALIGN (TREE_TYPE (vectype)));
4101 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
4102 pi->misalign = 0;
4103 }
4104 else
4105 {
4106 TREE_TYPE (data_ref)
4107 = build_aligned_type (TREE_TYPE (data_ref),
4108 TYPE_ALIGN (TREE_TYPE (vectype)));
4109 pi->misalign = DR_MISALIGNMENT (first_dr);
4110 }
4111 break;
4112 }
4113 case dr_explicit_realign:
4114 {
4115 tree ptr, bump;
4116 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4117
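	    /* Load the two naturally aligned vectors that surround the
	       unaligned access: mask DATAREF_PTR down to a vector-aligned
	       address for MSQ, then do the same for the address VS - 1
	       elements further on for LSQ.  The REALIGN_LOAD built below
	       combines MSQ and LSQ using REALIGNMENT_TOKEN.  */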
4118 if (compute_in_loop)
4119 msq = vect_setup_realignment (first_stmt, gsi,
4120 &realignment_token,
4121 dr_explicit_realign,
4122 dataref_ptr, NULL);
4123
4124 new_stmt = gimple_build_assign_with_ops
4125 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4126 build_int_cst
4127 (TREE_TYPE (dataref_ptr),
4128 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4129 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4130 gimple_assign_set_lhs (new_stmt, ptr);
4131 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4132 data_ref
4133 = build2 (MEM_REF, vectype, ptr,
4134 build_int_cst (reference_alias_ptr_type
4135 (DR_REF (first_dr)), 0));
4136 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4137 new_stmt = gimple_build_assign (vec_dest, data_ref);
4138 new_temp = make_ssa_name (vec_dest, new_stmt);
4139 gimple_assign_set_lhs (new_stmt, new_temp);
4140 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
4141 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
4142 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4143 msq = new_temp;
4144
4145 bump = size_binop (MULT_EXPR, vs_minus_1,
4146 TYPE_SIZE_UNIT (scalar_type));
4147 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
4148 new_stmt = gimple_build_assign_with_ops
4149 (BIT_AND_EXPR, NULL_TREE, ptr,
4150 build_int_cst
4151 (TREE_TYPE (ptr),
4152 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4153 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4154 gimple_assign_set_lhs (new_stmt, ptr);
4155 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4156 data_ref
4157 = build2 (MEM_REF, vectype, ptr,
4158 build_int_cst (reference_alias_ptr_type
4159 (DR_REF (first_dr)), 0));
4160 break;
4161 }
4162 case dr_explicit_realign_optimized:
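	    /* Only the aligned load of LSQ is emitted here; MSQ was set up
	       in the loop preheader by vect_setup_realignment and is
	       carried around the loop by the PHI created there.  */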
4163 new_stmt = gimple_build_assign_with_ops
4164 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4165 build_int_cst
4166 (TREE_TYPE (dataref_ptr),
4167 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4168 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4169 gimple_assign_set_lhs (new_stmt, new_temp);
4170 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4171 data_ref
4172 = build2 (MEM_REF, vectype, new_temp,
4173 build_int_cst (reference_alias_ptr_type
4174 (DR_REF (first_dr)), 0));
4175 break;
4176 default:
4177 gcc_unreachable ();
4178 }
4179 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4180 new_stmt = gimple_build_assign (vec_dest, data_ref);
4181 new_temp = make_ssa_name (vec_dest, new_stmt);
4182 gimple_assign_set_lhs (new_stmt, new_temp);
4183 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4184 mark_symbols_for_renaming (new_stmt);
4185
4186 /* 3. Handle explicit realignment if necessary/supported. Create in
4187 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
4188 if (alignment_support_scheme == dr_explicit_realign_optimized
4189 || alignment_support_scheme == dr_explicit_realign)
4190 {
4191 tree tmp;
4192
4193 lsq = gimple_assign_lhs (new_stmt);
4194 if (!realignment_token)
4195 realignment_token = dataref_ptr;
4196 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4197 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
4198 realignment_token);
4199 new_stmt = gimple_build_assign (vec_dest, tmp);
4200 new_temp = make_ssa_name (vec_dest, new_stmt);
4201 gimple_assign_set_lhs (new_stmt, new_temp);
4202 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4203
4204 if (alignment_support_scheme == dr_explicit_realign_optimized)
4205 {
4206 gcc_assert (phi);
4207 if (i == vec_num - 1 && j == ncopies - 1)
4208 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
4209 UNKNOWN_LOCATION);
4210 msq = lsq;
4211 }
4212 }
4213
4214 /* 4. Handle invariant-load. */
4215 if (inv_p && !bb_vinfo)
4216 {
4217 gcc_assert (!strided_load);
4218 gcc_assert (nested_in_vect_loop_p (loop, stmt));
4219 if (j == 0)
4220 {
4221 int k;
4222 tree t = NULL_TREE;
4223 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
4224
4225 	      /* CHECKME: bitpos depends on endianness?  */
4226 bitpos = bitsize_zero_node;
4227 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
4228 bitsize, bitpos);
4229 vec_dest =
4230 vect_create_destination_var (scalar_dest, NULL_TREE);
4231 new_stmt = gimple_build_assign (vec_dest, vec_inv);
4232 new_temp = make_ssa_name (vec_dest, new_stmt);
4233 gimple_assign_set_lhs (new_stmt, new_temp);
4234 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4235
4236 for (k = nunits - 1; k >= 0; --k)
4237 t = tree_cons (NULL_TREE, new_temp, t);
4238 /* FIXME: use build_constructor directly. */
4239 vec_inv = build_constructor_from_list (vectype, t);
4240 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
4241 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4242 }
4243 else
4244 gcc_unreachable (); /* FORNOW. */
4245 }
4246
4247 if (negative)
4248 {
4249 new_temp = reverse_vec_elements (new_temp, stmt, gsi);
4250 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4251 }
4252
4253 /* Collect vector loads and later create their permutation in
4254 vect_transform_strided_load (). */
4255 if (strided_load || slp_perm)
4256 VEC_quick_push (tree, dr_chain, new_temp);
4257
4258 /* Store vector loads in the corresponding SLP_NODE. */
4259 if (slp && !slp_perm)
4260 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4261 }
4262
4263 if (slp && !slp_perm)
4264 continue;
4265
4266 if (slp_perm)
4267 {
4268 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
4269 slp_node_instance, false))
4270 {
4271 VEC_free (tree, heap, dr_chain);
4272 return false;
4273 }
4274 }
4275 else
4276 {
4277 if (strided_load)
4278 {
4279 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
4280 return false;
4281
4282 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4283 VEC_free (tree, heap, dr_chain);
4284 dr_chain = VEC_alloc (tree, heap, group_size);
4285 }
4286 else
4287 {
4288 if (j == 0)
4289 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4290 else
4291 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4292 prev_stmt_info = vinfo_for_stmt (new_stmt);
4293 }
4294 }
4295 }
4296
4297 if (dr_chain)
4298 VEC_free (tree, heap, dr_chain);
4299
4300 return true;
4301 }
4302
4303 /* Function vect_is_simple_cond.
4304
4305 Input:
4306 LOOP - the loop that is being vectorized.
4307 COND - Condition that is checked for simple use.
4308
4309 Returns whether a COND can be vectorized. Checks whether
4310    condition operands are supportable using vect_is_simple_use.  */
4311
4312 static bool
4313 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
4314 {
4315 tree lhs, rhs;
4316 tree def;
4317 enum vect_def_type dt;
4318
4319 if (!COMPARISON_CLASS_P (cond))
4320 return false;
4321
4322 lhs = TREE_OPERAND (cond, 0);
4323 rhs = TREE_OPERAND (cond, 1);
4324
4325 if (TREE_CODE (lhs) == SSA_NAME)
4326 {
4327 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
4328 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
4329 &dt))
4330 return false;
4331 }
4332 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
4333 && TREE_CODE (lhs) != FIXED_CST)
4334 return false;
4335
4336 if (TREE_CODE (rhs) == SSA_NAME)
4337 {
4338 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
4339 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
4340 &dt))
4341 return false;
4342 }
4343 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
4344 && TREE_CODE (rhs) != FIXED_CST)
4345 return false;
4346
4347 return true;
4348 }
4349
4350 /* vectorizable_condition.
4351
4352    Check if STMT is a conditional modify expression that can be vectorized.
4353 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4354 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4355 at GSI.
4356
4357    When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
4358    to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
4359    the else clause if it is 2).
4360
4361 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4362
4363 bool
4364 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4365 gimple *vec_stmt, tree reduc_def, int reduc_index)
4366 {
4367 tree scalar_dest = NULL_TREE;
4368 tree vec_dest = NULL_TREE;
4369 tree op = NULL_TREE;
4370 tree cond_expr, then_clause, else_clause;
4371 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4372 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4373 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
4374 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
4375 tree vec_compare, vec_cond_expr;
4376 tree new_temp;
4377 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4378 enum machine_mode vec_mode;
4379 tree def;
4380 enum vect_def_type dt, dts[4];
4381 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4382 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4383 enum tree_code code;
4384 stmt_vec_info prev_stmt_info = NULL;
4385 int j;
4386
4387 /* FORNOW: unsupported in basic block SLP. */
4388 gcc_assert (loop_vinfo);
4389
4390 gcc_assert (ncopies >= 1);
4391 if (reduc_index && ncopies > 1)
4392 return false; /* FORNOW */
4393
4394 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4395 return false;
4396
4397 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4398 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4399 && reduc_def))
4400 return false;
4401
4402 /* FORNOW: SLP not supported. */
4403 if (STMT_SLP_TYPE (stmt_info))
4404 return false;
4405
4406 /* FORNOW: not yet supported. */
4407 if (STMT_VINFO_LIVE_P (stmt_info))
4408 {
4409 if (vect_print_dump_info (REPORT_DETAILS))
4410 fprintf (vect_dump, "value used after loop.");
4411 return false;
4412 }
4413
4414 /* Is vectorizable conditional operation? */
4415 if (!is_gimple_assign (stmt))
4416 return false;
4417
4418 code = gimple_assign_rhs_code (stmt);
4419
4420 if (code != COND_EXPR)
4421 return false;
4422
4423 gcc_assert (gimple_assign_single_p (stmt));
4424 op = gimple_assign_rhs1 (stmt);
4425 cond_expr = TREE_OPERAND (op, 0);
4426 then_clause = TREE_OPERAND (op, 1);
4427 else_clause = TREE_OPERAND (op, 2);
4428
4429 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4430 return false;
4431
4432 /* We do not handle two different vector types for the condition
4433 and the values. */
4434 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4435 TREE_TYPE (vectype)))
4436 return false;
4437
4438 if (TREE_CODE (then_clause) == SSA_NAME)
4439 {
4440 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
4441 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
4442 &then_def_stmt, &def, &dt))
4443 return false;
4444 }
4445 else if (TREE_CODE (then_clause) != INTEGER_CST
4446 && TREE_CODE (then_clause) != REAL_CST
4447 && TREE_CODE (then_clause) != FIXED_CST)
4448 return false;
4449
4450 if (TREE_CODE (else_clause) == SSA_NAME)
4451 {
4452 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
4453 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
4454 &else_def_stmt, &def, &dt))
4455 return false;
4456 }
4457 else if (TREE_CODE (else_clause) != INTEGER_CST
4458 && TREE_CODE (else_clause) != REAL_CST
4459 && TREE_CODE (else_clause) != FIXED_CST)
4460 return false;
4461
4462
4463 vec_mode = TYPE_MODE (vectype);
4464
4465 if (!vec_stmt)
4466 {
4467 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
4468 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
4469 }
4470
4471 /* Transform */
4472
4473 /* Handle def. */
4474 scalar_dest = gimple_assign_lhs (stmt);
4475 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4476
4477 /* Handle cond expr. */
4478 for (j = 0; j < ncopies; j++)
4479 {
4480 gimple new_stmt;
4481 if (j == 0)
4482 {
4483 gimple gtemp;
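	  /* For the first copy, create vector defs for the two condition
	     operands and for the then/else values; REDUC_DEF is used
	     directly for the clause selected by REDUC_INDEX.  */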
4484 vec_cond_lhs =
4485 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
4486 stmt, NULL);
4487 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
4488 NULL, &gtemp, &def, &dts[0]);
4489 vec_cond_rhs =
4490 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
4491 stmt, NULL);
4492 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
4493 NULL, &gtemp, &def, &dts[1]);
4494 if (reduc_index == 1)
4495 vec_then_clause = reduc_def;
4496 else
4497 {
4498 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
4499 stmt, NULL);
4500 vect_is_simple_use (then_clause, loop_vinfo,
4501 NULL, &gtemp, &def, &dts[2]);
4502 }
4503 if (reduc_index == 2)
4504 vec_else_clause = reduc_def;
4505 else
4506 {
4507 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
4508 stmt, NULL);
4509 vect_is_simple_use (else_clause, loop_vinfo,
4510 NULL, &gtemp, &def, &dts[3]);
4511 }
4512 }
4513 else
4514 {
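	  /* For copies j > 0, derive the defs from those of the previous
	     copy via vect_get_vec_def_for_stmt_copy.  */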
4515 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
4516 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
4517 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
4518 vec_then_clause);
4519 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
4520 vec_else_clause);
4521 }
4522
4523 /* Arguments are ready. Create the new vector stmt. */
4524 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4525 vec_cond_lhs, vec_cond_rhs);
4526 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4527 vec_compare, vec_then_clause, vec_else_clause);
4528
4529 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4530 new_temp = make_ssa_name (vec_dest, new_stmt);
4531 gimple_assign_set_lhs (new_stmt, new_temp);
4532 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4533 if (j == 0)
4534 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4535 else
4536 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4537
4538 prev_stmt_info = vinfo_for_stmt (new_stmt);
4539 }
4540
4541 return true;
4542 }
4543
4544
4545 /* Make sure the statement is vectorizable. */
4546
4547 bool
4548 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4549 {
4550 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4551 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4552 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4553 bool ok;
4554 tree scalar_type, vectype;
4555
4556 if (vect_print_dump_info (REPORT_DETAILS))
4557 {
4558 fprintf (vect_dump, "==> examining statement: ");
4559 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4560 }
4561
4562 if (gimple_has_volatile_ops (stmt))
4563 {
4564 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4565 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4566
4567 return false;
4568 }
4569
4570 /* Skip stmts that do not need to be vectorized. In loops this is expected
4571 to include:
4572 - the COND_EXPR which is the loop exit condition
4573 - any LABEL_EXPRs in the loop
4574 - computations that are used only for array indexing or loop control.
4575 In basic blocks we only analyze statements that are a part of some SLP
4576 instance, therefore, all the statements are relevant. */
4577
4578 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4579 && !STMT_VINFO_LIVE_P (stmt_info))
4580 {
4581 if (vect_print_dump_info (REPORT_DETAILS))
4582 fprintf (vect_dump, "irrelevant.");
4583
4584 return true;
4585 }
4586
4587 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4588 {
4589 case vect_internal_def:
4590 break;
4591
4592 case vect_reduction_def:
4593 case vect_nested_cycle:
4594 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4595 || relevance == vect_used_in_outer_by_reduction
4596 || relevance == vect_unused_in_scope));
4597 break;
4598
4599 case vect_induction_def:
4600 case vect_constant_def:
4601 case vect_external_def:
4602 case vect_unknown_def_type:
4603 default:
4604 gcc_unreachable ();
4605 }
4606
4607 if (bb_vinfo)
4608 {
4609 gcc_assert (PURE_SLP_STMT (stmt_info));
4610
4611 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4612 if (vect_print_dump_info (REPORT_DETAILS))
4613 {
4614 fprintf (vect_dump, "get vectype for scalar type: ");
4615 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4616 }
4617
4618 vectype = get_vectype_for_scalar_type (scalar_type);
4619 if (!vectype)
4620 {
4621 if (vect_print_dump_info (REPORT_DETAILS))
4622 {
4623 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4624 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4625 }
4626 return false;
4627 }
4628
4629 if (vect_print_dump_info (REPORT_DETAILS))
4630 {
4631 fprintf (vect_dump, "vectype: ");
4632 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4633 }
4634
4635 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4636 }
4637
4638 if (STMT_VINFO_RELEVANT_P (stmt_info))
4639 {
4640 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4641 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4642 *need_to_vectorize = true;
4643 }
4644
4645 ok = true;
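  /* Analysis phase: call the vectorizable_* routines with a NULL VEC_STMT
     so they only check whether STMT can be vectorized; the disjunction
     succeeds as soon as one of them supports STMT.  */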
4646 if (!bb_vinfo
4647 && (STMT_VINFO_RELEVANT_P (stmt_info)
4648 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4649 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4650 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4651 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4652 || vectorizable_shift (stmt, NULL, NULL, NULL)
4653 || vectorizable_operation (stmt, NULL, NULL, NULL)
4654 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4655 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4656 || vectorizable_call (stmt, NULL, NULL)
4657 || vectorizable_store (stmt, NULL, NULL, NULL)
4658 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4659 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4660 else
4661 {
4662 if (bb_vinfo)
4663 ok = (vectorizable_shift (stmt, NULL, NULL, node)
4664 || vectorizable_operation (stmt, NULL, NULL, node)
4665 || vectorizable_assignment (stmt, NULL, NULL, node)
4666 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4667 || vectorizable_store (stmt, NULL, NULL, node));
4668 }
4669
4670 if (!ok)
4671 {
4672 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4673 {
4674 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4675 fprintf (vect_dump, "supported: ");
4676 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4677 }
4678
4679 return false;
4680 }
4681
4682 if (bb_vinfo)
4683 return true;
4684
4685   /* Stmts that are (also) "live" (i.e. - that are used outside the loop)
4686 need extra handling, except for vectorizable reductions. */
4687 if (STMT_VINFO_LIVE_P (stmt_info)
4688 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4689 ok = vectorizable_live_operation (stmt, NULL, NULL);
4690
4691 if (!ok)
4692 {
4693 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4694 {
4695 fprintf (vect_dump, "not vectorized: live stmt not ");
4696 fprintf (vect_dump, "supported: ");
4697 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4698 }
4699
4700 return false;
4701 }
4702
4703 if (!PURE_SLP_STMT (stmt_info))
4704 {
4705 /* Groups of strided accesses whose size is not a power of 2 are not
4706 vectorizable yet using loop-vectorization. Therefore, if this stmt
4707 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4708 loop-based vectorized), the loop cannot be vectorized. */
4709 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4710 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4711 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4712 {
4713 if (vect_print_dump_info (REPORT_DETAILS))
4714 {
4715 fprintf (vect_dump, "not vectorized: the size of group "
4716 "of strided accesses is not a power of 2");
4717 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4718 }
4719
4720 return false;
4721 }
4722 }
4723
4724 return true;
4725 }
4726
4727
4728 /* Function vect_transform_stmt.
4729
4730 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4731
4732 bool
4733 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4734 bool *strided_store, slp_tree slp_node,
4735 slp_instance slp_node_instance)
4736 {
4737 bool is_store = false;
4738 gimple vec_stmt = NULL;
4739 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4740 gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
4741 bool done;
4742
4743 switch (STMT_VINFO_TYPE (stmt_info))
4744 {
4745 case type_demotion_vec_info_type:
4746 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4747 gcc_assert (done);
4748 break;
4749
4750 case type_promotion_vec_info_type:
4751 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4752 gcc_assert (done);
4753 break;
4754
4755 case type_conversion_vec_info_type:
4756 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4757 gcc_assert (done);
4758 break;
4759
4760 case induc_vec_info_type:
4761 gcc_assert (!slp_node);
4762 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4763 gcc_assert (done);
4764 break;
4765
4766 case shift_vec_info_type:
4767 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
4768 gcc_assert (done);
4769 break;
4770
4771 case op_vec_info_type:
4772 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4773 gcc_assert (done);
4774 break;
4775
4776 case assignment_vec_info_type:
4777 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4778 gcc_assert (done);
4779 break;
4780
4781 case load_vec_info_type:
4782 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4783 slp_node_instance);
4784 gcc_assert (done);
4785 break;
4786
4787 case store_vec_info_type:
4788 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4789 gcc_assert (done);
4790 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4791 {
4792 /* In case of interleaving, the whole chain is vectorized when the
4793 last store in the chain is reached. Store stmts before the last
4794 	     one are skipped, and their stmt_vec_info shouldn't be freed
4795 meanwhile. */
4796 *strided_store = true;
4797 if (STMT_VINFO_VEC_STMT (stmt_info))
4798 is_store = true;
4799 }
4800 else
4801 is_store = true;
4802 break;
4803
4804 case condition_vec_info_type:
4805 gcc_assert (!slp_node);
4806 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4807 gcc_assert (done);
4808 break;
4809
4810 case call_vec_info_type:
4811 gcc_assert (!slp_node);
4812 done = vectorizable_call (stmt, gsi, &vec_stmt);
4813 stmt = gsi_stmt (*gsi);
4814 break;
4815
4816 case reduc_vec_info_type:
4817 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
4818 gcc_assert (done);
4819 break;
4820
4821 default:
4822 if (!STMT_VINFO_LIVE_P (stmt_info))
4823 {
4824 if (vect_print_dump_info (REPORT_DETAILS))
4825 fprintf (vect_dump, "stmt not supported.");
4826 gcc_unreachable ();
4827 }
4828 }
4829
4830 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4831 is being vectorized, but outside the immediately enclosing loop. */
4832 if (vec_stmt
4833 && STMT_VINFO_LOOP_VINFO (stmt_info)
4834 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4835 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4836 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4837 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4838 || STMT_VINFO_RELEVANT (stmt_info) ==
4839 vect_used_in_outer_by_reduction))
4840 {
4841 struct loop *innerloop = LOOP_VINFO_LOOP (
4842 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4843 imm_use_iterator imm_iter;
4844 use_operand_p use_p;
4845 tree scalar_dest;
4846 gimple exit_phi;
4847
4848 if (vect_print_dump_info (REPORT_DETAILS))
4849 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4850
4851      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4852 (to be used when vectorizing outer-loop stmts that use the DEF of
4853 STMT). */
4854 if (gimple_code (stmt) == GIMPLE_PHI)
4855 scalar_dest = PHI_RESULT (stmt);
4856 else
4857 scalar_dest = gimple_assign_lhs (stmt);
4858
4859 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4860 {
4861 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4862 {
4863 exit_phi = USE_STMT (use_p);
4864 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4865 }
4866 }
4867 }
4868
4869 /* Handle stmts whose DEF is used outside the loop-nest that is
4870 being vectorized. */
4871 if (STMT_VINFO_LIVE_P (stmt_info)
4872 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4873 {
4874 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4875 gcc_assert (done);
4876 }
4877
4878 if (vec_stmt)
4879 {
4880 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4881 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4882 if (orig_stmt_in_pattern)
4883 {
4884 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4885 /* STMT was inserted by the vectorizer to replace a computation idiom.
4886 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4887 computed this idiom. We need to record a pointer to VEC_STMT in
4888 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4889 documentation of vect_pattern_recog. */
4890 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4891 {
4892 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
4893 == orig_scalar_stmt);
4894 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4895 }
4896 }
4897 }
4898
4899 return is_store;
4900 }
4901
4902
4903 /* Remove a group of stores (for SLP or interleaving), free their
4904 stmt_vec_info. */
4905
4906 void
4907 vect_remove_stores (gimple first_stmt)
4908 {
4909 gimple next = first_stmt;
4910 gimple tmp;
4911 gimple_stmt_iterator next_si;
4912
4913 while (next)
4914 {
4915 /* Free the attached stmt_vec_info and remove the stmt. */
4916 next_si = gsi_for_stmt (next);
4917 gsi_remove (&next_si, true);
4918 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4919 free_stmt_vec_info (next);
4920 next = tmp;
4921 }
4922 }
4923
4924
4925 /* Function new_stmt_vec_info.
4926
4927 Create and initialize a new stmt_vec_info struct for STMT. */
4928
4929 stmt_vec_info
4930 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4931 bb_vec_info bb_vinfo)
4932 {
4933 stmt_vec_info res;
4934 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4935
4936 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4937 STMT_VINFO_STMT (res) = stmt;
4938 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4939 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4940 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4941 STMT_VINFO_LIVE_P (res) = false;
4942 STMT_VINFO_VECTYPE (res) = NULL;
4943 STMT_VINFO_VEC_STMT (res) = NULL;
4944 STMT_VINFO_VECTORIZABLE (res) = true;
4945 STMT_VINFO_IN_PATTERN_P (res) = false;
4946 STMT_VINFO_RELATED_STMT (res) = NULL;
4947 STMT_VINFO_DATA_REF (res) = NULL;
4948
4949 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4950 STMT_VINFO_DR_OFFSET (res) = NULL;
4951 STMT_VINFO_DR_INIT (res) = NULL;
4952 STMT_VINFO_DR_STEP (res) = NULL;
4953 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4954
4955 if (gimple_code (stmt) == GIMPLE_PHI
4956 && is_loop_header_bb_p (gimple_bb (stmt)))
4957 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4958 else
4959 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4960
4961 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4962 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4963 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4964 STMT_SLP_TYPE (res) = loop_vect;
4965 DR_GROUP_FIRST_DR (res) = NULL;
4966 DR_GROUP_NEXT_DR (res) = NULL;
4967 DR_GROUP_SIZE (res) = 0;
4968 DR_GROUP_STORE_COUNT (res) = 0;
4969 DR_GROUP_GAP (res) = 0;
4970 DR_GROUP_SAME_DR_STMT (res) = NULL;
4971 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4972
4973 return res;
4974 }
4975
4976
4977 /* Create the vector used to map stmts to their stmt_vec_info.  */
4978
4979 void
4980 init_stmt_vec_info_vec (void)
4981 {
4982 gcc_assert (!stmt_vec_info_vec);
4983 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4984 }
4985
4986
4987 /* Free the vector used to map stmts to their stmt_vec_info.  */
4988
4989 void
4990 free_stmt_vec_info_vec (void)
4991 {
4992 gcc_assert (stmt_vec_info_vec);
4993 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4994 }
4995
4996
4997 /* Free stmt vectorization related info. */
4998
4999 void
5000 free_stmt_vec_info (gimple stmt)
5001 {
5002 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5003
5004 if (!stmt_info)
5005 return;
5006
5007 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
5008 set_vinfo_for_stmt (stmt, NULL);
5009 free (stmt_info);
5010 }
5011
5012
5013 /* Function get_vectype_for_scalar_type_and_size.
5014
5015 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
5016 by the target. */
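/* For example, with SCALAR_TYPE == int (4 bytes on most targets) and
   SIZE == 16 this yields a four-element integer vector type, provided the
   target supports such a vector mode.  */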
5017
5018 static tree
5019 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
5020 {
5021 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
5022 enum machine_mode simd_mode;
5023 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
5024 int nunits;
5025 tree vectype;
5026
5027 if (nbytes == 0)
5028 return NULL_TREE;
5029
5030 /* We can't build a vector type of elements with alignment bigger than
5031 their size. */
5032 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
5033 return NULL_TREE;
5034
5035 /* If we'd build a vector type of elements whose mode precision doesn't
5036    match their type's precision we'll get mismatched types on vector
5037 extracts via BIT_FIELD_REFs. This effectively means we disable
5038 vectorization of bool and/or enum types in some languages. */
5039 if (INTEGRAL_TYPE_P (scalar_type)
5040 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
5041 return NULL_TREE;
5042
5043 if (GET_MODE_CLASS (inner_mode) != MODE_INT
5044 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
5045 return NULL_TREE;
5046
5047 /* If no size was supplied use the mode the target prefers. Otherwise
5048 lookup a vector mode of the specified size. */
5049 if (size == 0)
5050 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
5051 else
5052 simd_mode = mode_for_vector (inner_mode, size / nbytes);
5053 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
5054 if (nunits <= 1)
5055 return NULL_TREE;
5056
5057 vectype = build_vector_type (scalar_type, nunits);
5058 if (vect_print_dump_info (REPORT_DETAILS))
5059 {
5060 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
5061 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
5062 }
5063
5064 if (!vectype)
5065 return NULL_TREE;
5066
5067 if (vect_print_dump_info (REPORT_DETAILS))
5068 {
5069 fprintf (vect_dump, "vectype: ");
5070 print_generic_expr (vect_dump, vectype, TDF_SLIM);
5071 }
5072
5073 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
5074 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
5075 {
5076 if (vect_print_dump_info (REPORT_DETAILS))
5077 fprintf (vect_dump, "mode not supported by target.");
5078 return NULL_TREE;
5079 }
5080
5081 return vectype;
5082 }
5083
5084 unsigned int current_vector_size;
5085
5086 /* Function get_vectype_for_scalar_type.
5087
5088 Returns the vector type corresponding to SCALAR_TYPE as supported
5089 by the target. */
5090
5091 tree
5092 get_vectype_for_scalar_type (tree scalar_type)
5093 {
5094 tree vectype;
5095 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
5096 current_vector_size);
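  /* The first successful query fixes the vector size; all subsequent
     callers then get vector types of that same size.  */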
5097 if (vectype
5098 && current_vector_size == 0)
5099 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
5100 return vectype;
5101 }
5102
5103 /* Function get_same_sized_vectype
5104
5105    Returns a vector type corresponding to SCALAR_TYPE with the same
5106    size as VECTOR_TYPE, if supported by the target.  */
5107
5108 tree
5109 get_same_sized_vectype (tree scalar_type, tree vector_type)
5110 {
5111 return get_vectype_for_scalar_type_and_size
5112 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
5113 }
5114
5115 /* Function vect_is_simple_use.
5116
5117 Input:
5118 LOOP_VINFO - the vect info of the loop that is being vectorized.
5119 BB_VINFO - the vect info of the basic block that is being vectorized.
5120 OPERAND - operand of a stmt in the loop or bb.
5121 DEF - the defining stmt in case OPERAND is an SSA_NAME.
5122
5123 Returns whether a stmt with OPERAND can be vectorized.
5124 For loops, supportable operands are constants, loop invariants, and operands
5125 that are defined by the current iteration of the loop. Unsupportable
5126 operands are those that are defined by a previous iteration of the loop (as
5127 is the case in reduction/induction computations).
5128 For basic blocks, supportable operands are constants and bb invariants.
5129 For now, operands defined outside the basic block are not supported. */
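/* Typical use, as seen in the vectorizable_* routines above:

     if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
       {
	 if (vect_print_dump_info (REPORT_DETAILS))
	   fprintf (vect_dump, "use not simple.");
	 return false;
       }
*/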
5130
5131 bool
5132 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
5133 bb_vec_info bb_vinfo, gimple *def_stmt,
5134 tree *def, enum vect_def_type *dt)
5135 {
5136 basic_block bb;
5137 stmt_vec_info stmt_vinfo;
5138 struct loop *loop = NULL;
5139
5140 if (loop_vinfo)
5141 loop = LOOP_VINFO_LOOP (loop_vinfo);
5142
5143 *def_stmt = NULL;
5144 *def = NULL_TREE;
5145
5146 if (vect_print_dump_info (REPORT_DETAILS))
5147 {
5148 fprintf (vect_dump, "vect_is_simple_use: operand ");
5149 print_generic_expr (vect_dump, operand, TDF_SLIM);
5150 }
5151
5152 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
5153 {
5154 *dt = vect_constant_def;
5155 return true;
5156 }
5157
5158 if (is_gimple_min_invariant (operand))
5159 {
5160 *def = operand;
5161 *dt = vect_external_def;
5162 return true;
5163 }
5164
5165 if (TREE_CODE (operand) == PAREN_EXPR)
5166 {
5167 if (vect_print_dump_info (REPORT_DETAILS))
5168 fprintf (vect_dump, "non-associatable copy.");
5169 operand = TREE_OPERAND (operand, 0);
5170 }
5171
5172 if (TREE_CODE (operand) != SSA_NAME)
5173 {
5174 if (vect_print_dump_info (REPORT_DETAILS))
5175 fprintf (vect_dump, "not ssa-name.");
5176 return false;
5177 }
5178
5179 *def_stmt = SSA_NAME_DEF_STMT (operand);
5180 if (*def_stmt == NULL)
5181 {
5182 if (vect_print_dump_info (REPORT_DETAILS))
5183 fprintf (vect_dump, "no def_stmt.");
5184 return false;
5185 }
5186
5187 if (vect_print_dump_info (REPORT_DETAILS))
5188 {
5189 fprintf (vect_dump, "def_stmt: ");
5190 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
5191 }
5192
5193 /* Empty stmt is expected only in case of a function argument.
5194      (Otherwise - we expect a PHI node or a GIMPLE_ASSIGN).  */
5195 if (gimple_nop_p (*def_stmt))
5196 {
5197 *def = operand;
5198 *dt = vect_external_def;
5199 return true;
5200 }
5201
5202 bb = gimple_bb (*def_stmt);
5203
5204 if ((loop && !flow_bb_inside_loop_p (loop, bb))
5205 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
5206 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
5207 *dt = vect_external_def;
5208 else
5209 {
5210 stmt_vinfo = vinfo_for_stmt (*def_stmt);
5211 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
5212 }
5213
5214 if (*dt == vect_unknown_def_type)
5215 {
5216 if (vect_print_dump_info (REPORT_DETAILS))
5217 fprintf (vect_dump, "Unsupported pattern.");
5218 return false;
5219 }
5220
5221 if (vect_print_dump_info (REPORT_DETAILS))
5222 fprintf (vect_dump, "type of def: %d.",*dt);
5223
5224 switch (gimple_code (*def_stmt))
5225 {
5226 case GIMPLE_PHI:
5227 *def = gimple_phi_result (*def_stmt);
5228 break;
5229
5230 case GIMPLE_ASSIGN:
5231 *def = gimple_assign_lhs (*def_stmt);
5232 break;
5233
5234 case GIMPLE_CALL:
5235 *def = gimple_call_lhs (*def_stmt);
5236 if (*def != NULL)
5237 break;
5238 /* FALLTHRU */
5239 default:
5240 if (vect_print_dump_info (REPORT_DETAILS))
5241 fprintf (vect_dump, "unsupported defining stmt: ");
5242 return false;
5243 }
5244
5245 return true;
5246 }
5247
5248 /* Function vect_is_simple_use_1.
5249
5250    Same as vect_is_simple_use but also determines the vector operand
5251 type of OPERAND and stores it to *VECTYPE. If the definition of
5252 OPERAND is vect_uninitialized_def, vect_constant_def or
5253    vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
5254    is responsible for computing the best suited vector type for the
5255 scalar operand. */
5256
5257 bool
5258 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
5259 bb_vec_info bb_vinfo, gimple *def_stmt,
5260 tree *def, enum vect_def_type *dt, tree *vectype)
5261 {
5262 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
5263 return false;
5264
5265 /* Now get a vector type if the def is internal, otherwise supply
5266 NULL_TREE and leave it up to the caller to figure out a proper
5267 type for the use stmt. */
5268 if (*dt == vect_internal_def
5269 || *dt == vect_induction_def
5270 || *dt == vect_reduction_def
5271 || *dt == vect_double_reduction_def
5272 || *dt == vect_nested_cycle)
5273 {
5274 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
5275 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
5276 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
5277 *vectype = STMT_VINFO_VECTYPE (stmt_info);
5278 gcc_assert (*vectype != NULL_TREE);
5279 }
5280 else if (*dt == vect_uninitialized_def
5281 || *dt == vect_constant_def
5282 || *dt == vect_external_def)
5283 *vectype = NULL_TREE;
5284 else
5285 gcc_unreachable ();
5286
5287 return true;
5288 }
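/* For illustration, a hypothetical caller analyzing an operand OP of a
   statement would use the function roughly as follows (sketch only;
   get_vectype_for_scalar_type stands for whatever the caller uses to
   pick a vector type for constant/external defs):

     gimple def_stmt;
     tree def, vectype;
     enum vect_def_type dt;

     if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
                                &def_stmt, &def, &dt, &vectype))
       return false;
     if (vectype == NULL_TREE)
       vectype = get_vectype_for_scalar_type (TREE_TYPE (op));

   i.e. for constant and external defs the caller computes a suitable
   vector type itself, as described above.  */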
5289
5290
5291 /* Function supportable_widening_operation
5292
5293 Check whether an operation represented by the code CODE is a
5294 widening operation that is supported by the target platform in
5295 vector form (i.e., when operating on arguments of type VECTYPE_IN
5296 producing a result of type VECTYPE_OUT).
5297
5298 Widening operations we currently support are NOP (CONVERT), FLOAT
5299 and WIDEN_MULT. This function checks if these operations are supported
5300 by the target platform either directly (via vector tree-codes), or via
5301 target builtins.
5302
5303 Output:
5304 - CODE1 and CODE2 are codes of vector operations to be used when
5305 vectorizing the operation, if available.
5306 - DECL1 and DECL2 are decls of target builtin functions to be used
5307 when vectorizing the operation, if available. In this case,
5308 CODE1 and CODE2 are CALL_EXPR.
5309 - MULTI_STEP_CVT determines the number of required intermediate steps in
5310 case of multi-step conversion (like char->short->int - in that case
5311 MULTI_STEP_CVT will be 1).
5312 - INTERM_TYPES contains the intermediate type required to perform the
5313 widening operation (short in the above example). */
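/* For example (illustration only): for a widening multiplication of
   8-bit elements into 16-bit results, CODE1 and CODE2 are set to the
   VEC_WIDEN_MULT_LO_EXPR/VEC_WIDEN_MULT_HI_EXPR pair (which of the two
   ends up in CODE1 depends on BYTES_BIG_ENDIAN, see below), so a single
   scalar statement

     short_res = (short) a_char * (short) b_char;

   is vectorized into two vector statements, each producing half of the
   widened results.  */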
5314
5315 bool
5316 supportable_widening_operation (enum tree_code code, gimple stmt,
5317 tree vectype_out, tree vectype_in,
5318 tree *decl1, tree *decl2,
5319 enum tree_code *code1, enum tree_code *code2,
5320 int *multi_step_cvt,
5321 VEC (tree, heap) **interm_types)
5322 {
5323 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5324 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
5325 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
5326 bool ordered_p;
5327 enum machine_mode vec_mode;
5328 enum insn_code icode1, icode2;
5329 optab optab1, optab2;
5330 tree vectype = vectype_in;
5331 tree wide_vectype = vectype_out;
5332 enum tree_code c1, c2;
5333
5334 /* The result of a vectorized widening operation usually requires two vectors
5335 (because the widened results do not fit in one vector). The generated
5336 vector results would normally be expected to appear in the same
5337 order as in the original scalar computation, i.e. if 8 results are
5338 generated in each vector iteration, they are to be organized as follows:
5339 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
5340
5341 However, in the special case that the result of the widening operation is
5342 used in a reduction computation only, the order doesn't matter (because
5343 when vectorizing a reduction we change the order of the computation).
5344 Some targets can take advantage of this and generate more efficient code.
5345 For example, targets like Altivec that support widen_mult using a sequence
5346 of {mult_even,mult_odd} generate the following vectors:
5347 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
5348
5349 When vectorizing outer-loops, we execute the inner-loop sequentially
5350 (each vectorized inner-loop iteration contributes to VF outer-loop
5351 iterations in parallel). We therefore don't allow changing the order
5352 of the computation in the inner-loop during outer-loop vectorization. */
5353
5354 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
5355 && !nested_in_vect_loop_p (vect_loop, stmt))
5356 ordered_p = false;
5357 else
5358 ordered_p = true;
5359
5360 if (!ordered_p
5361 && code == WIDEN_MULT_EXPR
5362 && targetm.vectorize.builtin_mul_widen_even
5363 && targetm.vectorize.builtin_mul_widen_even (vectype)
5364 && targetm.vectorize.builtin_mul_widen_odd
5365 && targetm.vectorize.builtin_mul_widen_odd (vectype))
5366 {
5367 if (vect_print_dump_info (REPORT_DETAILS))
5368 fprintf (vect_dump, "Unordered widening operation detected.");
5369
5370 *code1 = *code2 = CALL_EXPR;
5371 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
5372 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
5373 return true;
5374 }
5375
5376 switch (code)
5377 {
5378 case WIDEN_MULT_EXPR:
5379 if (BYTES_BIG_ENDIAN)
5380 {
5381 c1 = VEC_WIDEN_MULT_HI_EXPR;
5382 c2 = VEC_WIDEN_MULT_LO_EXPR;
5383 }
5384 else
5385 {
5386 c2 = VEC_WIDEN_MULT_HI_EXPR;
5387 c1 = VEC_WIDEN_MULT_LO_EXPR;
5388 }
5389 break;
5390
5391 CASE_CONVERT:
5392 if (BYTES_BIG_ENDIAN)
5393 {
5394 c1 = VEC_UNPACK_HI_EXPR;
5395 c2 = VEC_UNPACK_LO_EXPR;
5396 }
5397 else
5398 {
5399 c2 = VEC_UNPACK_HI_EXPR;
5400 c1 = VEC_UNPACK_LO_EXPR;
5401 }
5402 break;
5403
5404 case FLOAT_EXPR:
5405 if (BYTES_BIG_ENDIAN)
5406 {
5407 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
5408 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
5409 }
5410 else
5411 {
5412 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
5413 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
5414 }
5415 break;
5416
5417 case FIX_TRUNC_EXPR:
5418 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
5419 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
5420 computing the operation. */
5421 return false;
5422
5423 default:
5424 gcc_unreachable ();
5425 }
5426
5427 if (code == FIX_TRUNC_EXPR)
5428 {
5429 /* The signedness is determined from the output operand. */
5430 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5431 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
5432 }
5433 else
5434 {
5435 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5436 optab2 = optab_for_tree_code (c2, vectype, optab_default);
5437 }
5438
5439 if (!optab1 || !optab2)
5440 return false;
5441
5442 vec_mode = TYPE_MODE (vectype);
5443 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
5444 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
5445 return false;
5446
5447 /* Check if it's a multi-step conversion that can be done using intermediate
5448 types. */
5449 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
5450 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
5451 {
5452 int i;
5453 tree prev_type = vectype, intermediate_type;
5454 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5455 optab optab3, optab4;
5456
5457 if (!CONVERT_EXPR_CODE_P (code))
5458 return false;
5459
5460 *code1 = c1;
5461 *code2 = c2;
5462
5463 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5464 intermediate steps in the promotion sequence. We try
5465 MAX_INTERM_CVT_STEPS steps to get to WIDE_VECTYPE, and fail if we do
5466 not. */
5467 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5468 for (i = 0; i < 3; i++)
5469 {
5470 intermediate_mode = insn_data[icode1].operand[0].mode;
5471 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5472 TYPE_UNSIGNED (prev_type));
5473 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
5474 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
5475
5476 if (!optab3 || !optab4
5477 || ((icode1 = optab_handler (optab1, prev_mode))
5478 == CODE_FOR_nothing)
5479 || insn_data[icode1].operand[0].mode != intermediate_mode
5480 || ((icode2 = optab_handler (optab2, prev_mode))
5481 == CODE_FOR_nothing)
5482 || insn_data[icode2].operand[0].mode != intermediate_mode
5483 || ((icode1 = optab_handler (optab3, intermediate_mode))
5484 == CODE_FOR_nothing)
5485 || ((icode2 = optab_handler (optab4, intermediate_mode))
5486 == CODE_FOR_nothing))
5487 return false;
5488
5489 VEC_quick_push (tree, *interm_types, intermediate_type);
5490 (*multi_step_cvt)++;
5491
5492 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
5493 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
5494 return true;
5495
5496 prev_type = intermediate_type;
5497 prev_mode = intermediate_mode;
5498 }
5499
5500 return false;
5501 }
5502
5503 *code1 = c1;
5504 *code2 = c2;
5505 return true;
5506 }
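/* Worked example (illustration only): widening a vector of chars to
   ints on a target that only provides QI->HI and HI->SI unpack
   patterns. In that case the function returns with

     *code1/*code2   = the VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR pair
     *multi_step_cvt = 1
     *interm_types   = { vector of shorts }

   i.e. the char vector is first unpacked into two short vectors, and
   each of those is in turn unpacked into two int vectors.  */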
5507
5508
5509 /* Function supportable_narrowing_operation
5510
5511 Check whether an operation represented by the code CODE is a
5512 narrowing operation that is supported by the target platform in
5513 vector form (i.e., when operating on arguments of type VECTYPE_IN
5514 and producing a result of type VECTYPE_OUT).
5515
5516 Narrowing operations we currently support are NOP (CONVERT) and
5517 FIX_TRUNC. This function checks if these operations are supported by
5518 the target platform directly via vector tree-codes.
5519
5520 Output:
5521 - CODE1 is the code of a vector operation to be used when
5522 vectorizing the operation, if available.
5523 - MULTI_STEP_CVT determines the number of required intermediate steps in
5524 case of multi-step conversion (like int->short->char - in that case
5525 MULTI_STEP_CVT will be 1).
5526 - INTERM_TYPES contains the intermediate type required to perform the
5527 narrowing operation (short in the above example). */
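/* For example (illustration only): narrowing ints to shorts maps to

     *code1 = VEC_PACK_TRUNC_EXPR;

   which packs the elements of two int vectors into one short vector.
   Narrowing ints to chars on a target without a direct int->char pack
   pattern would additionally report *multi_step_cvt = 1, with a vector
   of shorts as the single intermediate type.  */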
5528
5529 bool
5530 supportable_narrowing_operation (enum tree_code code,
5531 tree vectype_out, tree vectype_in,
5532 enum tree_code *code1, int *multi_step_cvt,
5533 VEC (tree, heap) **interm_types)
5534 {
5535 enum machine_mode vec_mode;
5536 enum insn_code icode1;
5537 optab optab1, interm_optab;
5538 tree vectype = vectype_in;
5539 tree narrow_vectype = vectype_out;
5540 enum tree_code c1;
5541 tree intermediate_type, prev_type;
5542 int i;
5543
5544 switch (code)
5545 {
5546 CASE_CONVERT:
5547 c1 = VEC_PACK_TRUNC_EXPR;
5548 break;
5549
5550 case FIX_TRUNC_EXPR:
5551 c1 = VEC_PACK_FIX_TRUNC_EXPR;
5552 break;
5553
5554 case FLOAT_EXPR:
5555 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5556 tree code and optabs used for computing the operation. */
5557 return false;
5558
5559 default:
5560 gcc_unreachable ();
5561 }
5562
5563 if (code == FIX_TRUNC_EXPR)
5564 /* The signedness is determined from the output operand. */
5565 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5566 else
5567 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5568
5569 if (!optab1)
5570 return false;
5571
5572 vec_mode = TYPE_MODE (vectype);
5573 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
5574 return false;
5575
5576 /* Check if it's a multi-step conversion that can be done using intermediate
5577 types. */
5578 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5579 {
5580 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5581
5582 *code1 = c1;
5583 prev_type = vectype;
5584 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5585 intermediate steps in the demotion sequence. We try
5586 MAX_INTERM_CVT_STEPS steps to get to NARROW_VECTYPE, and fail if we do
5587 not. */
5588 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5589 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5590 {
5591 intermediate_mode = insn_data[icode1].operand[0].mode;
5592 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5593 TYPE_UNSIGNED (prev_type));
5594 interm_optab = optab_for_tree_code (c1, intermediate_type,
5595 optab_default);
5596 if (!interm_optab
5597 || ((icode1 = optab_handler (optab1, prev_mode))
5598 == CODE_FOR_nothing)
5599 || insn_data[icode1].operand[0].mode != intermediate_mode
5600 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
5601 == CODE_FOR_nothing))
5602 return false;
5603
5604 VEC_quick_push (tree, *interm_types, intermediate_type);
5605 (*multi_step_cvt)++;
5606
5607 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5608 return true;
5609
5610 prev_type = intermediate_type;
5611 prev_mode = intermediate_mode;
5612 }
5613
5614 return false;
5615 }
5616
5617 *code1 = c1;
5618 return true;
5619 }