re PR tree-optimization/46049 (ICE: in expand_widen_pattern_expr, at optabs.c:522...
[gcc.git] / gcc / tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "tree-pretty-print.h"
32 #include "gimple-pretty-print.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "cfglayout.h"
37 #include "expr.h"
38 #include "recog.h"
39 #include "optabs.h"
40 #include "diagnostic-core.h"
41 #include "toplev.h"
42 #include "tree-vectorizer.h"
43 #include "langhooks.h"
44
45
46 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
47
48 /* Function vect_mark_relevant.
49
50 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
51
52 static void
53 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
54 enum vect_relevant relevant, bool live_p)
55 {
56 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
57 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
58 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
59
60 if (vect_print_dump_info (REPORT_DETAILS))
61 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
62
63 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
64 {
65 gimple pattern_stmt;
66
67 /* This is the last stmt in a sequence that was detected as a
68 pattern that can potentially be vectorized. Don't mark the stmt
69 as relevant/live because it's not going to be vectorized.
70 Instead mark the pattern-stmt that replaces it. */
71
72 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
73
74 if (vect_print_dump_info (REPORT_DETAILS))
75 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
76 stmt_info = vinfo_for_stmt (pattern_stmt);
77 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
78 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
79 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
80 stmt = pattern_stmt;
81 }
82
83 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
84 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
85 STMT_VINFO_RELEVANT (stmt_info) = relevant;
86
87 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
88 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
89 {
90 if (vect_print_dump_info (REPORT_DETAILS))
91 fprintf (vect_dump, "already marked relevant/live.");
92 return;
93 }
94
95 VEC_safe_push (gimple, heap, *worklist, stmt);
96 }
97
98
99 /* Function vect_stmt_relevant_p.
100
101    Return true if STMT, in the loop represented by LOOP_VINFO, is
102    "relevant for vectorization".
103
104 A stmt is considered "relevant for vectorization" if:
105 - it has uses outside the loop.
106 - it has vdefs (it alters memory).
107    - it is a control stmt in the loop (except for the exit condition).
108
109 CHECKME: what other side effects would the vectorizer allow? */
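/* Illustrative example (hypothetical loop, not from this file):

     for (i = 0; i < n; i++)
       {
         s_1 = a[i] + b[i];      <-- S1
         out[i] = s_1 * 2;       <-- S2
       }
     ... = s_1;                  <-- use after the loop (via an exit phi)

   S2 is relevant because it has a vdef (it stores to memory), and S1 is
   live because its def s_1 is used outside the loop through the loop
   exit phi.  */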
110
111 static bool
112 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
113 enum vect_relevant *relevant, bool *live_p)
114 {
115 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
116 ssa_op_iter op_iter;
117 imm_use_iterator imm_iter;
118 use_operand_p use_p;
119 def_operand_p def_p;
120
121 *relevant = vect_unused_in_scope;
122 *live_p = false;
123
124 /* cond stmt other than loop exit cond. */
125 if (is_ctrl_stmt (stmt)
126 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
127 != loop_exit_ctrl_vec_info_type)
128 *relevant = vect_used_in_scope;
129
130 /* changing memory. */
131 if (gimple_code (stmt) != GIMPLE_PHI)
132 if (gimple_vdef (stmt))
133 {
134 if (vect_print_dump_info (REPORT_DETAILS))
135 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
136 *relevant = vect_used_in_scope;
137 }
138
139 /* uses outside the loop. */
140 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
141 {
142 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
143 {
144 basic_block bb = gimple_bb (USE_STMT (use_p));
145 if (!flow_bb_inside_loop_p (loop, bb))
146 {
147 if (vect_print_dump_info (REPORT_DETAILS))
148 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
149
150 if (is_gimple_debug (USE_STMT (use_p)))
151 continue;
152
153 /* We expect all such uses to be in the loop exit phis
154 	       (because of loop-closed SSA form). */
155 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
156 gcc_assert (bb == single_exit (loop)->dest);
157
158 *live_p = true;
159 }
160 }
161 }
162
163 return (*live_p || *relevant);
164 }
165
166
167 /* Function exist_non_indexing_operands_for_use_p
168
169 USE is one of the uses attached to STMT. Check if USE is
170 used in STMT for anything other than indexing an array. */
171
172 static bool
173 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
174 {
175 tree operand;
176 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
177
178 /* USE corresponds to some operand in STMT. If there is no data
179 reference in STMT, then any operand that corresponds to USE
180 is not indexing an array. */
181 if (!STMT_VINFO_DATA_REF (stmt_info))
182 return true;
183
184   /* STMT has a data_ref. FORNOW this means that it is of one of
185 the following forms:
186 -1- ARRAY_REF = var
187 -2- var = ARRAY_REF
188 (This should have been verified in analyze_data_refs).
189
190 'var' in the second case corresponds to a def, not a use,
191 so USE cannot correspond to any operands that are not used
192 for array indexing.
193
194 Therefore, all we need to check is if STMT falls into the
195 first case, and whether var corresponds to USE. */
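  /* For instance (illustrative SSA names), in the store 'a[i_1] = x_2' the
     use x_2 is the stored value, so for USE == x_2 this function returns
     true, whereas the index i_1 is only used for addressing and for
     USE == i_1 it returns false.  */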
196
197 if (!gimple_assign_copy_p (stmt))
198 return false;
199 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
200 return false;
201 operand = gimple_assign_rhs1 (stmt);
202 if (TREE_CODE (operand) != SSA_NAME)
203 return false;
204
205 if (operand == use)
206 return true;
207
208 return false;
209 }
210
211
212 /*
213 Function process_use.
214
215 Inputs:
216 - a USE in STMT in a loop represented by LOOP_VINFO
217 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
218 that defined USE. This is done by calling mark_relevant and passing it
219 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
220
221 Outputs:
222 Generally, LIVE_P and RELEVANT are used to define the liveness and
223 relevance info of the DEF_STMT of this USE:
224 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
225 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
226 Exceptions:
227 - case 1: If USE is used only for address computations (e.g. array indexing),
228 which does not need to be directly vectorized, then the liveness/relevance
229 of the respective DEF_STMT is left unchanged.
230 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
231      skip DEF_STMT because it has already been processed.
232 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
233 be modified accordingly.
234
235 Return true if everything is as expected. Return false otherwise. */
236
237 static bool
238 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
239 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
240 {
241 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
242 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
243 stmt_vec_info dstmt_vinfo;
244 basic_block bb, def_bb;
245 tree def;
246 gimple def_stmt;
247 enum vect_def_type dt;
248
249 /* case 1: we are only interested in uses that need to be vectorized. Uses
250 that are used for address computation are not considered relevant. */
251 if (!exist_non_indexing_operands_for_use_p (use, stmt))
252 return true;
253
254 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
255 {
256 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
257 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
258 return false;
259 }
260
261 if (!def_stmt || gimple_nop_p (def_stmt))
262 return true;
263
264 def_bb = gimple_bb (def_stmt);
265 if (!flow_bb_inside_loop_p (loop, def_bb))
266 {
267 if (vect_print_dump_info (REPORT_DETAILS))
268 fprintf (vect_dump, "def_stmt is out of loop.");
269 return true;
270 }
271
272 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
273 DEF_STMT must have already been processed, because this should be the
274 only way that STMT, which is a reduction-phi, was put in the worklist,
275 as there should be no other uses for DEF_STMT in the loop. So we just
276 check that everything is as expected, and we are done. */
277 dstmt_vinfo = vinfo_for_stmt (def_stmt);
278 bb = gimple_bb (stmt);
279 if (gimple_code (stmt) == GIMPLE_PHI
280 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
281 && gimple_code (def_stmt) != GIMPLE_PHI
282 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
283 && bb->loop_father == def_bb->loop_father)
284 {
285 if (vect_print_dump_info (REPORT_DETAILS))
286 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
287 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
288 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
289 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
290 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
291 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
292 return true;
293 }
294
295 /* case 3a: outer-loop stmt defining an inner-loop stmt:
296 outer-loop-header-bb:
297 d = def_stmt
298 inner-loop:
299 stmt # use (d)
300 outer-loop-tail-bb:
301 ... */
302 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
303 {
304 if (vect_print_dump_info (REPORT_DETAILS))
305 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
306
307 switch (relevant)
308 {
309 case vect_unused_in_scope:
310 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
311 vect_used_in_scope : vect_unused_in_scope;
312 break;
313
314 case vect_used_in_outer_by_reduction:
315 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
316 relevant = vect_used_by_reduction;
317 break;
318
319 case vect_used_in_outer:
320 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
321 relevant = vect_used_in_scope;
322 break;
323
324 case vect_used_in_scope:
325 break;
326
327 default:
328 gcc_unreachable ();
329 }
330 }
331
332 /* case 3b: inner-loop stmt defining an outer-loop stmt:
333 outer-loop-header-bb:
334 ...
335 inner-loop:
336 d = def_stmt
337 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
338 stmt # use (d) */
339 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
340 {
341 if (vect_print_dump_info (REPORT_DETAILS))
342 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
343
344 switch (relevant)
345 {
346 case vect_unused_in_scope:
347 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
348 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
349 vect_used_in_outer_by_reduction : vect_unused_in_scope;
350 break;
351
352 case vect_used_by_reduction:
353 relevant = vect_used_in_outer_by_reduction;
354 break;
355
356 case vect_used_in_scope:
357 relevant = vect_used_in_outer;
358 break;
359
360 default:
361 gcc_unreachable ();
362 }
363 }
364
365 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
366 return true;
367 }
368
369
370 /* Function vect_mark_stmts_to_be_vectorized.
371
372 Not all stmts in the loop need to be vectorized. For example:
373
374 for i...
375 for j...
376 1. T0 = i + j
377 2. T1 = a[T0]
378
379 3. j = j + 1
380
381    Stmts 1 and 3 do not need to be vectorized, because loop control and
382 addressing of vectorized data-refs are handled differently.
383
384 This pass detects such stmts. */
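/* For instance, when stmt 2 above is popped from the worklist, process_use
   finds that its use T0 only feeds the array index (case 1 in process_use),
   so neither stmt 1 nor, transitively, stmt 3 is ever marked relevant.  */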
385
386 bool
387 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
388 {
389 VEC(gimple,heap) *worklist;
390 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
391 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
392 unsigned int nbbs = loop->num_nodes;
393 gimple_stmt_iterator si;
394 gimple stmt;
395 unsigned int i;
396 stmt_vec_info stmt_vinfo;
397 basic_block bb;
398 gimple phi;
399 bool live_p;
400 enum vect_relevant relevant, tmp_relevant;
401 enum vect_def_type def_type;
402
403 if (vect_print_dump_info (REPORT_DETAILS))
404 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
405
406 worklist = VEC_alloc (gimple, heap, 64);
407
408 /* 1. Init worklist. */
409 for (i = 0; i < nbbs; i++)
410 {
411 bb = bbs[i];
412 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
413 {
414 phi = gsi_stmt (si);
415 if (vect_print_dump_info (REPORT_DETAILS))
416 {
417 fprintf (vect_dump, "init: phi relevant? ");
418 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
419 }
420
421 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
422 vect_mark_relevant (&worklist, phi, relevant, live_p);
423 }
424 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
425 {
426 stmt = gsi_stmt (si);
427 if (vect_print_dump_info (REPORT_DETAILS))
428 {
429 fprintf (vect_dump, "init: stmt relevant? ");
430 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
431 }
432
433 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
434 vect_mark_relevant (&worklist, stmt, relevant, live_p);
435 }
436 }
437
438 /* 2. Process_worklist */
439 while (VEC_length (gimple, worklist) > 0)
440 {
441 use_operand_p use_p;
442 ssa_op_iter iter;
443
444 stmt = VEC_pop (gimple, worklist);
445 if (vect_print_dump_info (REPORT_DETAILS))
446 {
447 fprintf (vect_dump, "worklist: examine stmt: ");
448 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
449 }
450
451 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
452 (DEF_STMT) as relevant/irrelevant and live/dead according to the
453 liveness and relevance properties of STMT. */
454 stmt_vinfo = vinfo_for_stmt (stmt);
455 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
456 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
457
458 /* Generally, the liveness and relevance properties of STMT are
459 propagated as is to the DEF_STMTs of its USEs:
460 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
461 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
462
463 One exception is when STMT has been identified as defining a reduction
464 variable; in this case we set the liveness/relevance as follows:
465 live_p = false
466 relevant = vect_used_by_reduction
467 This is because we distinguish between two kinds of relevant stmts -
468 those that are used by a reduction computation, and those that are
469 (also) used by a regular computation. This allows us later on to
470 identify stmts that are used solely by a reduction, and therefore the
471 order of the results that they produce does not have to be kept. */
472
473 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
474 tmp_relevant = relevant;
475 switch (def_type)
476 {
477 case vect_reduction_def:
478 switch (tmp_relevant)
479 {
480 case vect_unused_in_scope:
481 relevant = vect_used_by_reduction;
482 break;
483
484 case vect_used_by_reduction:
485 if (gimple_code (stmt) == GIMPLE_PHI)
486 break;
487 /* fall through */
488
489 default:
490 if (vect_print_dump_info (REPORT_DETAILS))
491 fprintf (vect_dump, "unsupported use of reduction.");
492
493 VEC_free (gimple, heap, worklist);
494 return false;
495 }
496
497 live_p = false;
498 break;
499
500 case vect_nested_cycle:
501 if (tmp_relevant != vect_unused_in_scope
502 && tmp_relevant != vect_used_in_outer_by_reduction
503 && tmp_relevant != vect_used_in_outer)
504 {
505 if (vect_print_dump_info (REPORT_DETAILS))
506 fprintf (vect_dump, "unsupported use of nested cycle.");
507
508 VEC_free (gimple, heap, worklist);
509 return false;
510 }
511
512 live_p = false;
513 break;
514
515 case vect_double_reduction_def:
516 if (tmp_relevant != vect_unused_in_scope
517 && tmp_relevant != vect_used_by_reduction)
518 {
519 if (vect_print_dump_info (REPORT_DETAILS))
520 fprintf (vect_dump, "unsupported use of double reduction.");
521
522 VEC_free (gimple, heap, worklist);
523 return false;
524 }
525
526 live_p = false;
527 break;
528
529 default:
530 break;
531 }
532
533 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
534 {
535 tree op = USE_FROM_PTR (use_p);
536 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
537 {
538 VEC_free (gimple, heap, worklist);
539 return false;
540 }
541 }
542 } /* while worklist */
543
544 VEC_free (gimple, heap, worklist);
545 return true;
546 }
547
548
549 /* Get the cost by calling the target's builtin_vectorization_cost hook. */
550
551 static inline
552 int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
553 {
554 tree dummy_type = NULL;
555 int dummy = 0;
556
557 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
558 dummy_type, dummy);
559 }
560
561
562 /* Get cost for STMT. */
563
564 int
565 cost_for_stmt (gimple stmt)
566 {
567 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
568
569 switch (STMT_VINFO_TYPE (stmt_info))
570 {
571 case load_vec_info_type:
572 return vect_get_stmt_cost (scalar_load);
573 case store_vec_info_type:
574 return vect_get_stmt_cost (scalar_store);
575 case op_vec_info_type:
576 case condition_vec_info_type:
577 case assignment_vec_info_type:
578 case reduc_vec_info_type:
579 case induc_vec_info_type:
580 case type_promotion_vec_info_type:
581 case type_demotion_vec_info_type:
582 case type_conversion_vec_info_type:
583 case call_vec_info_type:
584 return vect_get_stmt_cost (scalar_stmt);
585 case undef_vec_info_type:
586 default:
587 gcc_unreachable ();
588 }
589 }
590
591 /* Function vect_model_simple_cost.
592
593 Models cost for simple operations, i.e. those that only emit ncopies of a
594 single op. Right now, this does not account for multiple insns that could
595 be generated for the single vector op. We will handle that shortly. */
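/* Worked example (illustrative, assuming the default cost of 1 per
   vector_stmt): for ncopies == 2 and one constant operand,
   inside_cost == 2 and outside_cost == 1, the latter covering the
   construction of the invariant vector outside the loop.  */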
596
597 void
598 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
599 enum vect_def_type *dt, slp_tree slp_node)
600 {
601 int i;
602 int inside_cost = 0, outside_cost = 0;
603
604 /* The SLP costs were already calculated during SLP tree build. */
605 if (PURE_SLP_STMT (stmt_info))
606 return;
607
608 inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);
609
610   /* FORNOW: Assuming maximum 2 args per stmt. */
611 for (i = 0; i < 2; i++)
612 {
613 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
614 outside_cost += vect_get_stmt_cost (vector_stmt);
615 }
616
617 if (vect_print_dump_info (REPORT_COST))
618 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
619 "outside_cost = %d .", inside_cost, outside_cost);
620
621 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
622 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
623 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
624 }
625
626
627 /* Function vect_cost_strided_group_size
628
629 For strided load or store, return the group_size only if it is the first
630 load or store of a group, else return 1. This ensures that group size is
631 only returned once per group. */
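/* E.g. for a group of four strided loads, the call made for the first
   load of the group returns 4 and the calls for the remaining three
   return 1, so the group overhead is charged exactly once per group.  */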
632
633 static int
634 vect_cost_strided_group_size (stmt_vec_info stmt_info)
635 {
636 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
637
638 if (first_stmt == STMT_VINFO_STMT (stmt_info))
639 return DR_GROUP_SIZE (stmt_info);
640
641 return 1;
642 }
643
644
645 /* Function vect_model_store_cost
646
647 Models cost for stores. In the case of strided accesses, one access
648 has the overhead of the strided access attributed to it. */
649
650 void
651 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
652 enum vect_def_type dt, slp_tree slp_node)
653 {
654 int group_size;
655 unsigned int inside_cost = 0, outside_cost = 0;
656 struct data_reference *first_dr;
657 gimple first_stmt;
658
659 /* The SLP costs were already calculated during SLP tree build. */
660 if (PURE_SLP_STMT (stmt_info))
661 return;
662
663 if (dt == vect_constant_def || dt == vect_external_def)
664 outside_cost = vect_get_stmt_cost (scalar_to_vec);
665
666 /* Strided access? */
667 if (DR_GROUP_FIRST_DR (stmt_info))
668 {
669 if (slp_node)
670 {
671 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
672 group_size = 1;
673 }
674 else
675 {
676 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
677 group_size = vect_cost_strided_group_size (stmt_info);
678 }
679
680 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
681 }
682 /* Not a strided access. */
683 else
684 {
685 group_size = 1;
686 first_dr = STMT_VINFO_DATA_REF (stmt_info);
687 }
688
689 /* Is this an access in a group of stores, which provide strided access?
690 If so, add in the cost of the permutes. */
691 if (group_size > 1)
692 {
693 /* Uses a high and low interleave operation for each needed permute. */
694 inside_cost = ncopies * exact_log2(group_size) * group_size
695 * vect_get_stmt_cost (vector_stmt);
696
697 if (vect_print_dump_info (REPORT_COST))
698 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
699 group_size);
700
701 }
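  /* Worked example (illustrative): for group_size == 4 and ncopies == 1,
     exact_log2 (4) == 2, so the interleaving above is charged as
     2 * 4 == 8 vector_stmt operations.  */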
702
703 /* Costs of the stores. */
704 vect_get_store_cost (first_dr, ncopies, &inside_cost);
705
706 if (vect_print_dump_info (REPORT_COST))
707 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
708 "outside_cost = %d .", inside_cost, outside_cost);
709
710 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
711 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
712 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
713 }
714
715
716 /* Calculate cost of DR's memory access. */
717 void
718 vect_get_store_cost (struct data_reference *dr, int ncopies,
719 unsigned int *inside_cost)
720 {
721 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
722
723 switch (alignment_support_scheme)
724 {
725 case dr_aligned:
726 {
727 *inside_cost += ncopies * vect_get_stmt_cost (vector_store);
728
729 if (vect_print_dump_info (REPORT_COST))
730 fprintf (vect_dump, "vect_model_store_cost: aligned.");
731
732 break;
733 }
734
735 case dr_unaligned_supported:
736 {
737 gimple stmt = DR_STMT (dr);
738 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
739 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
740
741 /* Here, we assign an additional cost for the unaligned store. */
742 *inside_cost += ncopies
743 * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
744 vectype, DR_MISALIGNMENT (dr));
745
746 if (vect_print_dump_info (REPORT_COST))
747 fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
748 "hardware.");
749
750 break;
751 }
752
753 default:
754 gcc_unreachable ();
755 }
756 }
757
758
759 /* Function vect_model_load_cost
760
761    Models cost for loads.  In the case of strided accesses, one access
762 has the overhead of the strided access attributed to it. Since unaligned
763 accesses are supported for loads, we also account for the costs of the
764 access scheme chosen. */
765
766 void
767 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
768
769 {
770 int group_size;
771 gimple first_stmt;
772 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
773 unsigned int inside_cost = 0, outside_cost = 0;
774
775 /* The SLP costs were already calculated during SLP tree build. */
776 if (PURE_SLP_STMT (stmt_info))
777 return;
778
779 /* Strided accesses? */
780 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
781 if (first_stmt && !slp_node)
782 {
783 group_size = vect_cost_strided_group_size (stmt_info);
784 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
785 }
786 /* Not a strided access. */
787 else
788 {
789 group_size = 1;
790 first_dr = dr;
791 }
792
793 /* Is this an access in a group of loads providing strided access?
794 If so, add in the cost of the permutes. */
795 if (group_size > 1)
796 {
797       /* Uses an even and an odd extract operation for each needed permute. */
798 inside_cost = ncopies * exact_log2(group_size) * group_size
799 * vect_get_stmt_cost (vector_stmt);
800
801 if (vect_print_dump_info (REPORT_COST))
802 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
803 group_size);
804 }
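  /* E.g. with group_size == 2 one even-extract/odd-extract pair splits the
     interleaved data {a0,b0,a1,b1,...} into {a0,a1,...} and {b0,b1,...};
     the formula above charges exact_log2 (2) * 2 == 2 vector_stmt
     operations per copy (illustrative).  */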
805
806 /* The loads themselves. */
807 vect_get_load_cost (first_dr, ncopies,
808 ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node),
809 &inside_cost, &outside_cost);
810
811 if (vect_print_dump_info (REPORT_COST))
812 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
813 "outside_cost = %d .", inside_cost, outside_cost);
814
815 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
816 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
817 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
818 }
819
820
821 /* Calculate cost of DR's memory access. */
822 void
823 vect_get_load_cost (struct data_reference *dr, int ncopies,
824 bool add_realign_cost, unsigned int *inside_cost,
825 unsigned int *outside_cost)
826 {
827 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
828
829 switch (alignment_support_scheme)
830 {
831 case dr_aligned:
832 {
833 *inside_cost += ncopies * vect_get_stmt_cost (vector_load);
834
835 if (vect_print_dump_info (REPORT_COST))
836 fprintf (vect_dump, "vect_model_load_cost: aligned.");
837
838 break;
839 }
840 case dr_unaligned_supported:
841 {
842 gimple stmt = DR_STMT (dr);
843 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
844 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
845
846 /* Here, we assign an additional cost for the unaligned load. */
847 *inside_cost += ncopies
848 * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
849 vectype, DR_MISALIGNMENT (dr));
850 if (vect_print_dump_info (REPORT_COST))
851 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
852 "hardware.");
853
854 break;
855 }
856 case dr_explicit_realign:
857 {
858 *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
859 + vect_get_stmt_cost (vector_stmt));
860
861 /* FIXME: If the misalignment remains fixed across the iterations of
862 the containing loop, the following cost should be added to the
863 outside costs. */
864 if (targetm.vectorize.builtin_mask_for_load)
865 *inside_cost += vect_get_stmt_cost (vector_stmt);
866
867 break;
868 }
869 case dr_explicit_realign_optimized:
870 {
871 if (vect_print_dump_info (REPORT_COST))
872 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
873 "pipelined.");
874
875 /* Unaligned software pipeline has a load of an address, an initial
876 load, and possibly a mask operation to "prime" the loop. However,
877 if this is an access in a group of loads, which provide strided
878 access, then the above cost should only be considered for one
879 access in the group. Inside the loop, there is a load op
880 and a realignment op. */
881
882 if (add_realign_cost)
883 {
884 *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
885 if (targetm.vectorize.builtin_mask_for_load)
886 *outside_cost += vect_get_stmt_cost (vector_stmt);
887 }
888
889 *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
890 + vect_get_stmt_cost (vector_stmt));
891 break;
892 }
893
894 default:
895 gcc_unreachable ();
896 }
897 }
898
899
900 /* Function vect_init_vector.
901
902 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
903    the vector elements of VECTOR_VAR.  Place the initialization at GSI if it
904 is not NULL. Otherwise, place the initialization at the loop preheader.
905 Return the DEF of INIT_STMT.
906 It will be used in the vectorization of STMT. */
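/* For example, vect_get_vec_def_for_operand below uses this routine to
   materialize '{cst,cst,...,cst}' for a constant operand; when GSI is
   NULL the init_stmt is emitted in the loop preheader (or at the start
   of the basic block for basic-block SLP).  */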
907
908 tree
909 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
910 gimple_stmt_iterator *gsi)
911 {
912 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
913 tree new_var;
914 gimple init_stmt;
915 tree vec_oprnd;
916 edge pe;
917 tree new_temp;
918 basic_block new_bb;
919
920 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
921 add_referenced_var (new_var);
922 init_stmt = gimple_build_assign (new_var, vector_var);
923 new_temp = make_ssa_name (new_var, init_stmt);
924 gimple_assign_set_lhs (init_stmt, new_temp);
925
926 if (gsi)
927 vect_finish_stmt_generation (stmt, init_stmt, gsi);
928 else
929 {
930 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
931
932 if (loop_vinfo)
933 {
934 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
935
936 if (nested_in_vect_loop_p (loop, stmt))
937 loop = loop->inner;
938
939 pe = loop_preheader_edge (loop);
940 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
941 gcc_assert (!new_bb);
942 }
943 else
944 {
945 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
946 basic_block bb;
947 gimple_stmt_iterator gsi_bb_start;
948
949 gcc_assert (bb_vinfo);
950 bb = BB_VINFO_BB (bb_vinfo);
951 gsi_bb_start = gsi_after_labels (bb);
952 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
953 }
954 }
955
956 if (vect_print_dump_info (REPORT_DETAILS))
957 {
958 fprintf (vect_dump, "created new init_stmt: ");
959 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
960 }
961
962 vec_oprnd = gimple_assign_lhs (init_stmt);
963 return vec_oprnd;
964 }
965
966
967 /* Function vect_get_vec_def_for_operand.
968
969 OP is an operand in STMT. This function returns a (vector) def that will be
970 used in the vectorized stmt for STMT.
971
972 In the case that OP is an SSA_NAME which is defined in the loop, then
973 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
974
975 In case OP is an invariant or constant, a new stmt that creates a vector def
976 needs to be introduced. */
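/* For example (illustrative), for the scalar stmt 'x_1 = y_2 + 3':
   the operand 3 is handled by case 1 below (a vector of constants is
   built), while y_2 is handled by case 3 if it is defined inside the
   loop, or by case 2 if it is loop invariant.  */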
977
978 tree
979 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
980 {
981 tree vec_oprnd;
982 gimple vec_stmt;
983 gimple def_stmt;
984 stmt_vec_info def_stmt_info = NULL;
985 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
986 unsigned int nunits;
987 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
988 tree vec_inv;
989 tree vec_cst;
990 tree t = NULL_TREE;
991 tree def;
992 int i;
993 enum vect_def_type dt;
994 bool is_simple_use;
995 tree vector_type;
996
997 if (vect_print_dump_info (REPORT_DETAILS))
998 {
999 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
1000 print_generic_expr (vect_dump, op, TDF_SLIM);
1001 }
1002
1003 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
1004 &dt);
1005 gcc_assert (is_simple_use);
1006 if (vect_print_dump_info (REPORT_DETAILS))
1007 {
1008 if (def)
1009 {
1010 fprintf (vect_dump, "def = ");
1011 print_generic_expr (vect_dump, def, TDF_SLIM);
1012 }
1013 if (def_stmt)
1014 {
1015 fprintf (vect_dump, " def_stmt = ");
1016 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1017 }
1018 }
1019
1020 switch (dt)
1021 {
1022 /* Case 1: operand is a constant. */
1023 case vect_constant_def:
1024 {
1025 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1026 gcc_assert (vector_type);
1027 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1028
1029 if (scalar_def)
1030 *scalar_def = op;
1031
1032 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1033 if (vect_print_dump_info (REPORT_DETAILS))
1034 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
1035
1036 for (i = nunits - 1; i >= 0; --i)
1037 {
1038 t = tree_cons (NULL_TREE, op, t);
1039 }
1040 vec_cst = build_vector (vector_type, t);
1041 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
1042 }
1043
1044 /* Case 2: operand is defined outside the loop - loop invariant. */
1045 case vect_external_def:
1046 {
1047 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1048 gcc_assert (vector_type);
1049 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1050
1051 if (scalar_def)
1052 *scalar_def = def;
1053
1054 /* Create 'vec_inv = {inv,inv,..,inv}' */
1055 if (vect_print_dump_info (REPORT_DETAILS))
1056 fprintf (vect_dump, "Create vector_inv.");
1057
1058 for (i = nunits - 1; i >= 0; --i)
1059 {
1060 t = tree_cons (NULL_TREE, def, t);
1061 }
1062
1063 /* FIXME: use build_constructor directly. */
1064 vec_inv = build_constructor_from_list (vector_type, t);
1065 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
1066 }
1067
1068 /* Case 3: operand is defined inside the loop. */
1069 case vect_internal_def:
1070 {
1071 if (scalar_def)
1072 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1073
1074 /* Get the def from the vectorized stmt. */
1075 def_stmt_info = vinfo_for_stmt (def_stmt);
1076 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1077 gcc_assert (vec_stmt);
1078 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1079 vec_oprnd = PHI_RESULT (vec_stmt);
1080 else if (is_gimple_call (vec_stmt))
1081 vec_oprnd = gimple_call_lhs (vec_stmt);
1082 else
1083 vec_oprnd = gimple_assign_lhs (vec_stmt);
1084 return vec_oprnd;
1085 }
1086
1087 /* Case 4: operand is defined by a loop header phi - reduction */
1088 case vect_reduction_def:
1089 case vect_double_reduction_def:
1090 case vect_nested_cycle:
1091 {
1092 struct loop *loop;
1093
1094 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1095 loop = (gimple_bb (def_stmt))->loop_father;
1096
1097 /* Get the def before the loop */
1098 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1099 return get_initial_def_for_reduction (stmt, op, scalar_def);
1100 }
1101
1102 /* Case 5: operand is defined by loop-header phi - induction. */
1103 case vect_induction_def:
1104 {
1105 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1106
1107 /* Get the def from the vectorized stmt. */
1108 def_stmt_info = vinfo_for_stmt (def_stmt);
1109 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1110 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1111 vec_oprnd = PHI_RESULT (vec_stmt);
1112 return vec_oprnd;
1113 }
1114
1115 default:
1116 gcc_unreachable ();
1117 }
1118 }
1119
1120
1121 /* Function vect_get_vec_def_for_stmt_copy
1122
1123 Return a vector-def for an operand. This function is used when the
1124 vectorized stmt to be created (by the caller to this function) is a "copy"
1125 created in case the vectorized result cannot fit in one vector, and several
1126 copies of the vector-stmt are required. In this case the vector-def is
1127 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1128 of the stmt that defines VEC_OPRND.
1129 DT is the type of the vector def VEC_OPRND.
1130
1131 Context:
1132 In case the vectorization factor (VF) is bigger than the number
1133 of elements that can fit in a vectype (nunits), we have to generate
1134 more than one vector stmt to vectorize the scalar stmt. This situation
1135 arises when there are multiple data-types operated upon in the loop; the
1136 smallest data-type determines the VF, and as a result, when vectorizing
1137 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1138 vector stmt (each computing a vector of 'nunits' results, and together
1139 computing 'VF' results in each iteration). This function is called when
1140 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1141 which VF=16 and nunits=4, so the number of copies required is 4):
1142
1143 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1144
1145 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1146 VS1.1: vx.1 = memref1 VS1.2
1147 VS1.2: vx.2 = memref2 VS1.3
1148 VS1.3: vx.3 = memref3
1149
1150 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1151 VSnew.1: vz1 = vx.1 + ... VSnew.2
1152 VSnew.2: vz2 = vx.2 + ... VSnew.3
1153 VSnew.3: vz3 = vx.3 + ...
1154
1155 The vectorization of S1 is explained in vectorizable_load.
1156 The vectorization of S2:
1157 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1158 the function 'vect_get_vec_def_for_operand' is called to
1159 get the relevant vector-def for each operand of S2. For operand x it
1160 returns the vector-def 'vx.0'.
1161
1162 To create the remaining copies of the vector-stmt (VSnew.j), this
1163 function is called to get the relevant vector-def for each operand. It is
1164 obtained from the respective VS1.j stmt, which is recorded in the
1165 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1166
1167 For example, to obtain the vector-def 'vx.1' in order to create the
1168 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1169 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1170 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1171 and return its def ('vx.1').
1172 Overall, to create the above sequence this function will be called 3 times:
1173 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1174 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1175 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1176
1177 tree
1178 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1179 {
1180 gimple vec_stmt_for_operand;
1181 stmt_vec_info def_stmt_info;
1182
1183 /* Do nothing; can reuse same def. */
1184   if (dt == vect_external_def || dt == vect_constant_def)
1185 return vec_oprnd;
1186
1187 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1188 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1189 gcc_assert (def_stmt_info);
1190 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1191 gcc_assert (vec_stmt_for_operand);
1192 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1193 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1194 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1195 else
1196 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1197 return vec_oprnd;
1198 }
1199
1200
1201 /* Get vectorized definitions for the operands to create a copy of an original
1202 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1203
1204 static void
1205 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1206 VEC(tree,heap) **vec_oprnds0,
1207 VEC(tree,heap) **vec_oprnds1)
1208 {
1209 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1210
1211 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1212 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1213
1214 if (vec_oprnds1 && *vec_oprnds1)
1215 {
1216 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1217 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1218 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1219 }
1220 }
1221
1222
1223 /* Get vectorized definitions for OP0 and OP1, taking them from SLP_NODE if it
1224    is not NULL, and otherwise creating them with vect_get_vec_def_for_operand. */
1225
1226 static void
1227 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1228 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1229 slp_tree slp_node)
1230 {
1231 if (slp_node)
1232 vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
1233 else
1234 {
1235 tree vec_oprnd;
1236
1237 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1238 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1239 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1240
1241 if (op1)
1242 {
1243 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1244 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1245 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1246 }
1247 }
1248 }
1249
1250
1251 /* Function vect_finish_stmt_generation.
1252
1253    Insert VEC_STMT before GSI and record the vectorizer info for it. */
1254
1255 void
1256 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1257 gimple_stmt_iterator *gsi)
1258 {
1259 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1260 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1261 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1262
1263 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1264
1265 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1266
1267 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1268 bb_vinfo));
1269
1270 if (vect_print_dump_info (REPORT_DETAILS))
1271 {
1272 fprintf (vect_dump, "add new stmt: ");
1273 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1274 }
1275
1276 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1277 }
1278
1279 /* Checks if CALL can be vectorized with vector types VECTYPE_OUT and VECTYPE_IN. Returns
1280 a function declaration if the target has a vectorized version
1281 of the function, or NULL_TREE if the function cannot be vectorized. */
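/* For example, a target may map a call to the sqrt builtin on double to
   its vector square-root builtin for the corresponding vector mode
   (purely illustrative; the mapping is entirely target specific).  */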
1282
1283 tree
1284 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1285 {
1286 tree fndecl = gimple_call_fndecl (call);
1287
1288 /* We only handle functions that do not read or clobber memory -- i.e.
1289 const or novops ones. */
1290 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1291 return NULL_TREE;
1292
1293 if (!fndecl
1294 || TREE_CODE (fndecl) != FUNCTION_DECL
1295 || !DECL_BUILT_IN (fndecl))
1296 return NULL_TREE;
1297
1298 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1299 vectype_in);
1300 }
1301
1302 /* Function vectorizable_call.
1303
1304 Check if STMT performs a function call that can be vectorized.
1305 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1306    stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1307 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1308
1309 static bool
1310 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1311 {
1312 tree vec_dest;
1313 tree scalar_dest;
1314 tree op, type;
1315 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1316 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1317 tree vectype_out, vectype_in;
1318 int nunits_in;
1319 int nunits_out;
1320 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1321 tree fndecl, new_temp, def, rhs_type;
1322 gimple def_stmt;
1323 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1324 gimple new_stmt = NULL;
1325 int ncopies, j;
1326 VEC(tree, heap) *vargs = NULL;
1327 enum { NARROW, NONE, WIDEN } modifier;
1328 size_t i, nargs;
1329
1330 /* FORNOW: unsupported in basic block SLP. */
1331 gcc_assert (loop_vinfo);
1332
1333 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1334 return false;
1335
1336 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1337 return false;
1338
1339 /* FORNOW: SLP not supported. */
1340 if (STMT_SLP_TYPE (stmt_info))
1341 return false;
1342
1343 /* Is STMT a vectorizable call? */
1344 if (!is_gimple_call (stmt))
1345 return false;
1346
1347 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1348 return false;
1349
1350 if (stmt_could_throw_p (stmt))
1351 return false;
1352
1353 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1354
1355 /* Process function arguments. */
1356 rhs_type = NULL_TREE;
1357 vectype_in = NULL_TREE;
1358 nargs = gimple_call_num_args (stmt);
1359
1360   /* Bail out if the function has more than three arguments; we do not have
1361      interesting builtin functions to vectorize with more than two arguments
1362      except for fma.  A call with no arguments is not vectorizable either. */
1363 if (nargs == 0 || nargs > 3)
1364 return false;
1365
1366 for (i = 0; i < nargs; i++)
1367 {
1368 tree opvectype;
1369
1370 op = gimple_call_arg (stmt, i);
1371
1372 /* We can only handle calls with arguments of the same type. */
1373 if (rhs_type
1374 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
1375 {
1376 if (vect_print_dump_info (REPORT_DETAILS))
1377 fprintf (vect_dump, "argument types differ.");
1378 return false;
1379 }
1380 if (!rhs_type)
1381 rhs_type = TREE_TYPE (op);
1382
1383 if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
1384 &def_stmt, &def, &dt[i], &opvectype))
1385 {
1386 if (vect_print_dump_info (REPORT_DETAILS))
1387 fprintf (vect_dump, "use not simple.");
1388 return false;
1389 }
1390
1391 if (!vectype_in)
1392 vectype_in = opvectype;
1393 else if (opvectype
1394 && opvectype != vectype_in)
1395 {
1396 if (vect_print_dump_info (REPORT_DETAILS))
1397 fprintf (vect_dump, "argument vector types differ.");
1398 return false;
1399 }
1400 }
1401 /* If all arguments are external or constant defs use a vector type with
1402 the same size as the output vector type. */
1403 if (!vectype_in)
1404 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1405 if (vec_stmt)
1406 gcc_assert (vectype_in);
1407 if (!vectype_in)
1408 {
1409 if (vect_print_dump_info (REPORT_DETAILS))
1410 {
1411 fprintf (vect_dump, "no vectype for scalar type ");
1412 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1413 }
1414
1415 return false;
1416 }
1417
1418 /* FORNOW */
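  /* Classification sketch: if the output vector has twice as many elements
     as the input vector, each vectorized call must consume two input
     vectors to fill one result (NARROW); if it has half as many, one input
     vector would produce two results (WIDEN, not implemented for calls
     below); equal element counts need no modifier (NONE).  */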
1419 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1420 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1421 if (nunits_in == nunits_out / 2)
1422 modifier = NARROW;
1423 else if (nunits_out == nunits_in)
1424 modifier = NONE;
1425 else if (nunits_out == nunits_in / 2)
1426 modifier = WIDEN;
1427 else
1428 return false;
1429
1430 /* For now, we only vectorize functions if a target specific builtin
1431 is available. TODO -- in some cases, it might be profitable to
1432 insert the calls for pieces of the vector, in order to be able
1433 to vectorize other operations in the loop. */
1434 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1435 if (fndecl == NULL_TREE)
1436 {
1437 if (vect_print_dump_info (REPORT_DETAILS))
1438 fprintf (vect_dump, "function is not vectorizable.");
1439
1440 return false;
1441 }
1442
1443 gcc_assert (!gimple_vuse (stmt));
1444
1445 if (modifier == NARROW)
1446 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1447 else
1448 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1449
1450 /* Sanity check: make sure that at least one copy of the vectorized stmt
1451 needs to be generated. */
1452 gcc_assert (ncopies >= 1);
1453
1454 if (!vec_stmt) /* transformation not required. */
1455 {
1456 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1457 if (vect_print_dump_info (REPORT_DETAILS))
1458 fprintf (vect_dump, "=== vectorizable_call ===");
1459 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1460 return true;
1461 }
1462
1463 /** Transform. **/
1464
1465 if (vect_print_dump_info (REPORT_DETAILS))
1466 fprintf (vect_dump, "transform operation.");
1467
1468 /* Handle def. */
1469 scalar_dest = gimple_call_lhs (stmt);
1470 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1471
1472 prev_stmt_info = NULL;
1473 switch (modifier)
1474 {
1475 case NONE:
1476 for (j = 0; j < ncopies; ++j)
1477 {
1478 /* Build argument list for the vectorized call. */
1479 if (j == 0)
1480 vargs = VEC_alloc (tree, heap, nargs);
1481 else
1482 VEC_truncate (tree, vargs, 0);
1483
1484 for (i = 0; i < nargs; i++)
1485 {
1486 op = gimple_call_arg (stmt, i);
1487 if (j == 0)
1488 vec_oprnd0
1489 = vect_get_vec_def_for_operand (op, stmt, NULL);
1490 else
1491 {
1492 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1493 vec_oprnd0
1494 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1495 }
1496
1497 VEC_quick_push (tree, vargs, vec_oprnd0);
1498 }
1499
1500 new_stmt = gimple_build_call_vec (fndecl, vargs);
1501 new_temp = make_ssa_name (vec_dest, new_stmt);
1502 gimple_call_set_lhs (new_stmt, new_temp);
1503
1504 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1505 mark_symbols_for_renaming (new_stmt);
1506
1507 if (j == 0)
1508 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1509 else
1510 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1511
1512 prev_stmt_info = vinfo_for_stmt (new_stmt);
1513 }
1514
1515 break;
1516
1517 case NARROW:
1518 for (j = 0; j < ncopies; ++j)
1519 {
1520 /* Build argument list for the vectorized call. */
1521 if (j == 0)
1522 vargs = VEC_alloc (tree, heap, nargs * 2);
1523 else
1524 VEC_truncate (tree, vargs, 0);
1525
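	  /* For a narrowing call every scalar argument expands to two vector
	     defs per vectorized call.  For the first copy they are obtained
	     from vect_get_vec_def_for_operand plus one step along the def
	     chain; for later copies the chain is continued from the arguments
	     recorded in the previous vectorized call.  */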
1526 for (i = 0; i < nargs; i++)
1527 {
1528 op = gimple_call_arg (stmt, i);
1529 if (j == 0)
1530 {
1531 vec_oprnd0
1532 = vect_get_vec_def_for_operand (op, stmt, NULL);
1533 vec_oprnd1
1534 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1535 }
1536 else
1537 {
1538 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1539 vec_oprnd0
1540 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1541 vec_oprnd1
1542 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1543 }
1544
1545 VEC_quick_push (tree, vargs, vec_oprnd0);
1546 VEC_quick_push (tree, vargs, vec_oprnd1);
1547 }
1548
1549 new_stmt = gimple_build_call_vec (fndecl, vargs);
1550 new_temp = make_ssa_name (vec_dest, new_stmt);
1551 gimple_call_set_lhs (new_stmt, new_temp);
1552
1553 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1554 mark_symbols_for_renaming (new_stmt);
1555
1556 if (j == 0)
1557 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1558 else
1559 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1560
1561 prev_stmt_info = vinfo_for_stmt (new_stmt);
1562 }
1563
1564 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1565
1566 break;
1567
1568 case WIDEN:
1569 /* No current target implements this case. */
1570 return false;
1571 }
1572
1573 VEC_free (tree, heap, vargs);
1574
1575 /* Update the exception handling table with the vector stmt if necessary. */
1576 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1577 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1578
1579   /* The call in STMT might prevent it from being removed in DCE.
1580      We however cannot remove it here, due to the way the SSA name
1581      it defines is mapped to the new definition.  So just replace the
1582      rhs of the statement with something harmless. */
1583
1584 type = TREE_TYPE (scalar_dest);
1585 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1586 fold_convert (type, integer_zero_node));
1587 set_vinfo_for_stmt (new_stmt, stmt_info);
1588 set_vinfo_for_stmt (stmt, NULL);
1589 STMT_VINFO_STMT (stmt_info) = new_stmt;
1590 gsi_replace (gsi, new_stmt, false);
1591 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1592
1593 return true;
1594 }
1595
1596
1597 /* Function vect_gen_widened_results_half
1598
1599    Create a vector stmt whose code, number of arguments, and result
1600    variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
1601    VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at GSI.
1602 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1603 needs to be created (DECL is a function-decl of a target-builtin).
1604 STMT is the original scalar stmt that we are vectorizing. */
1605
1606 static gimple
1607 vect_gen_widened_results_half (enum tree_code code,
1608 tree decl,
1609 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1610 tree vec_dest, gimple_stmt_iterator *gsi,
1611 gimple stmt)
1612 {
1613 gimple new_stmt;
1614 tree new_temp;
1615
1616 /* Generate half of the widened result: */
1617 if (code == CALL_EXPR)
1618 {
1619 /* Target specific support */
1620 if (op_type == binary_op)
1621 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1622 else
1623 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1624 new_temp = make_ssa_name (vec_dest, new_stmt);
1625 gimple_call_set_lhs (new_stmt, new_temp);
1626 }
1627 else
1628 {
1629 /* Generic support */
1630 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1631 if (op_type != binary_op)
1632 vec_oprnd1 = NULL;
1633 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1634 vec_oprnd1);
1635 new_temp = make_ssa_name (vec_dest, new_stmt);
1636 gimple_assign_set_lhs (new_stmt, new_temp);
1637 }
1638 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1639
1640 return new_stmt;
1641 }
1642
1643
1644 /* Check if STMT performs a conversion operation that can be vectorized.
1645    If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1646    stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1647 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1648
1649 static bool
1650 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1651 gimple *vec_stmt, slp_tree slp_node)
1652 {
1653 tree vec_dest;
1654 tree scalar_dest;
1655 tree op0;
1656 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1657 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1658 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1659 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1660 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1661 tree new_temp;
1662 tree def;
1663 gimple def_stmt;
1664 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1665 gimple new_stmt = NULL;
1666 stmt_vec_info prev_stmt_info;
1667 int nunits_in;
1668 int nunits_out;
1669 tree vectype_out, vectype_in;
1670 int ncopies, j;
1671 tree rhs_type;
1672 tree builtin_decl;
1673 enum { NARROW, NONE, WIDEN } modifier;
1674 int i;
1675 VEC(tree,heap) *vec_oprnds0 = NULL;
1676 tree vop0;
1677 VEC(tree,heap) *dummy = NULL;
1678 int dummy_int;
1679
1680 /* Is STMT a vectorizable conversion? */
1681
1682 /* FORNOW: unsupported in basic block SLP. */
1683 gcc_assert (loop_vinfo);
1684
1685 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1686 return false;
1687
1688 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1689 return false;
1690
1691 if (!is_gimple_assign (stmt))
1692 return false;
1693
1694 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1695 return false;
1696
1697 code = gimple_assign_rhs_code (stmt);
1698 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1699 return false;
1700
1701 /* Check types of lhs and rhs. */
1702 scalar_dest = gimple_assign_lhs (stmt);
1703 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1704
1705 op0 = gimple_assign_rhs1 (stmt);
1706 rhs_type = TREE_TYPE (op0);
1707 /* Check the operands of the operation. */
1708 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1709 &def_stmt, &def, &dt[0], &vectype_in))
1710 {
1711 if (vect_print_dump_info (REPORT_DETAILS))
1712 fprintf (vect_dump, "use not simple.");
1713 return false;
1714 }
1715 /* If op0 is an external or constant defs use a vector type of
1716 the same size as the output vector type. */
1717 if (!vectype_in)
1718 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1719 if (vec_stmt)
1720 gcc_assert (vectype_in);
1721 if (!vectype_in)
1722 {
1723 if (vect_print_dump_info (REPORT_DETAILS))
1724 {
1725 fprintf (vect_dump, "no vectype for scalar type ");
1726 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1727 }
1728
1729 return false;
1730 }
1731
1732 /* FORNOW */
1733 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1734 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1735 if (nunits_in == nunits_out / 2)
1736 modifier = NARROW;
1737 else if (nunits_out == nunits_in)
1738 modifier = NONE;
1739 else if (nunits_out == nunits_in / 2)
1740 modifier = WIDEN;
1741 else
1742 return false;
1743
1744 if (modifier == NARROW)
1745 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1746 else
1747 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1748
1749 /* Multiple types in SLP are handled by creating the appropriate number of
1750 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1751 case of SLP. */
1752 if (slp_node)
1753 ncopies = 1;
1754
1755 /* Sanity check: make sure that at least one copy of the vectorized stmt
1756 needs to be generated. */
1757 gcc_assert (ncopies >= 1);
1758
1759 /* Supportable by target? */
1760 if ((modifier == NONE
1761 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1762 || (modifier == WIDEN
1763 && !supportable_widening_operation (code, stmt,
1764 vectype_out, vectype_in,
1765 &decl1, &decl2,
1766 &code1, &code2,
1767 &dummy_int, &dummy))
1768 || (modifier == NARROW
1769 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1770 &code1, &dummy_int, &dummy)))
1771 {
1772 if (vect_print_dump_info (REPORT_DETAILS))
1773 fprintf (vect_dump, "conversion not supported by target.");
1774 return false;
1775 }
1776
1777 if (modifier != NONE)
1778 {
1779 /* FORNOW: SLP not supported. */
1780 if (STMT_SLP_TYPE (stmt_info))
1781 return false;
1782 }
1783
1784 if (!vec_stmt) /* transformation not required. */
1785 {
1786 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1787 return true;
1788 }
1789
1790 /** Transform. **/
1791 if (vect_print_dump_info (REPORT_DETAILS))
1792 fprintf (vect_dump, "transform conversion.");
1793
1794 /* Handle def. */
1795 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1796
1797 if (modifier == NONE && !slp_node)
1798 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1799
1800 prev_stmt_info = NULL;
1801 switch (modifier)
1802 {
1803 case NONE:
1804 for (j = 0; j < ncopies; j++)
1805 {
1806 if (j == 0)
1807 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1808 else
1809 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1810
1811 builtin_decl =
1812 targetm.vectorize.builtin_conversion (code,
1813 vectype_out, vectype_in);
1814 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
1815 {
1816 /* Arguments are ready. Create the new vector stmt. */
1817 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1818 new_temp = make_ssa_name (vec_dest, new_stmt);
1819 gimple_call_set_lhs (new_stmt, new_temp);
1820 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1821 if (slp_node)
1822 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1823 }
1824
1825 if (j == 0)
1826 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1827 else
1828 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1829 prev_stmt_info = vinfo_for_stmt (new_stmt);
1830 }
1831 break;
1832
1833 case WIDEN:
1834 /* In case the vectorization factor (VF) is bigger than the number
1835 of elements that we can fit in a vectype (nunits), we have to
1836 generate more than one vector stmt - i.e - we need to "unroll"
1837 the vector stmt by a factor VF/nunits. */
1838 for (j = 0; j < ncopies; j++)
1839 {
1840 if (j == 0)
1841 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1842 else
1843 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1844
1845 /* Generate first half of the widened result: */
1846 new_stmt
1847 = vect_gen_widened_results_half (code1, decl1,
1848 vec_oprnd0, vec_oprnd1,
1849 unary_op, vec_dest, gsi, stmt);
1850 if (j == 0)
1851 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1852 else
1853 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1854 prev_stmt_info = vinfo_for_stmt (new_stmt);
1855
1856 /* Generate second half of the widened result: */
1857 new_stmt
1858 = vect_gen_widened_results_half (code2, decl2,
1859 vec_oprnd0, vec_oprnd1,
1860 unary_op, vec_dest, gsi, stmt);
1861 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1862 prev_stmt_info = vinfo_for_stmt (new_stmt);
1863 }
1864 break;
1865
1866 case NARROW:
1867 /* In case the vectorization factor (VF) is bigger than the number
1868 of elements that we can fit in a vectype (nunits), we have to
1869 generate more than one vector stmt - i.e - we need to "unroll"
1870 the vector stmt by a factor VF/nunits. */
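/* Each narrowing stmt packs two input vectors into one output vector
   (e.g. two V2DF vectors into one V4SI vector), so every copy below
   consumes two vector defs of op0; this is also why NCOPIES was computed
   from nunits_out for the NARROW case above. */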
1871 for (j = 0; j < ncopies; j++)
1872 {
1873 /* Handle uses. */
1874 if (j == 0)
1875 {
1876 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1877 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1878 }
1879 else
1880 {
1881 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1882 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1883 }
1884
1885 /* Arguments are ready. Create the new vector stmt. */
1886 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1887 vec_oprnd1);
1888 new_temp = make_ssa_name (vec_dest, new_stmt);
1889 gimple_assign_set_lhs (new_stmt, new_temp);
1890 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1891
1892 if (j == 0)
1893 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1894 else
1895 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1896
1897 prev_stmt_info = vinfo_for_stmt (new_stmt);
1898 }
1899
1900 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1901 }
1902
1903 if (vec_oprnds0)
1904 VEC_free (tree, heap, vec_oprnds0);
1905
1906 return true;
1907 }
1908
1909
1910 /* Function vectorizable_assignment.
1911
1912 Check if STMT performs an assignment (copy) that can be vectorized.
1913 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1914 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1915 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1916
1917 static bool
1918 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1919 gimple *vec_stmt, slp_tree slp_node)
1920 {
1921 tree vec_dest;
1922 tree scalar_dest;
1923 tree op;
1924 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1925 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1926 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1927 tree new_temp;
1928 tree def;
1929 gimple def_stmt;
1930 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1931 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1932 int ncopies;
1933 int i, j;
1934 VEC(tree,heap) *vec_oprnds = NULL;
1935 tree vop;
1936 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1937 gimple new_stmt = NULL;
1938 stmt_vec_info prev_stmt_info = NULL;
1939 enum tree_code code;
1940 tree vectype_in;
1941
1942 /* Multiple types in SLP are handled by creating the appropriate number of
1943 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1944 case of SLP. */
1945 if (slp_node)
1946 ncopies = 1;
1947 else
1948 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1949
1950 gcc_assert (ncopies >= 1);
1951
1952 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1953 return false;
1954
1955 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1956 return false;
1957
1958 /* Is vectorizable assignment? */
1959 if (!is_gimple_assign (stmt))
1960 return false;
1961
1962 scalar_dest = gimple_assign_lhs (stmt);
1963 if (TREE_CODE (scalar_dest) != SSA_NAME)
1964 return false;
1965
1966 code = gimple_assign_rhs_code (stmt);
1967 if (gimple_assign_single_p (stmt)
1968 || code == PAREN_EXPR
1969 || CONVERT_EXPR_CODE_P (code))
1970 op = gimple_assign_rhs1 (stmt);
1971 else
1972 return false;
1973
1974 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
1975 &def_stmt, &def, &dt[0], &vectype_in))
1976 {
1977 if (vect_print_dump_info (REPORT_DETAILS))
1978 fprintf (vect_dump, "use not simple.");
1979 return false;
1980 }
1981
1982 /* We can handle NOP_EXPR conversions that do not change the number
1983 of elements or the vector size. */
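/* For example, a conversion between vectors of int and unsigned int keeps
   both the element count and the vector size, so it can be emitted as a
   VIEW_CONVERT_EXPR below. */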
1984 if (CONVERT_EXPR_CODE_P (code)
1985 && (!vectype_in
1986 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
1987 || (GET_MODE_SIZE (TYPE_MODE (vectype))
1988 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
1989 return false;
1990
1991 if (!vec_stmt) /* transformation not required. */
1992 {
1993 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1994 if (vect_print_dump_info (REPORT_DETAILS))
1995 fprintf (vect_dump, "=== vectorizable_assignment ===");
1996 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1997 return true;
1998 }
1999
2000 /** Transform. **/
2001 if (vect_print_dump_info (REPORT_DETAILS))
2002 fprintf (vect_dump, "transform assignment.");
2003
2004 /* Handle def. */
2005 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2006
2007 /* Handle use. */
2008 for (j = 0; j < ncopies; j++)
2009 {
2010 /* Handle uses. */
2011 if (j == 0)
2012 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2013 else
2014 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2015
2016 /* Arguments are ready. Create the new vector stmt. */
2017 FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
2018 {
2019 if (CONVERT_EXPR_CODE_P (code))
2020 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2021 new_stmt = gimple_build_assign (vec_dest, vop);
2022 new_temp = make_ssa_name (vec_dest, new_stmt);
2023 gimple_assign_set_lhs (new_stmt, new_temp);
2024 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2025 if (slp_node)
2026 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2027 }
2028
2029 if (slp_node)
2030 continue;
2031
2032 if (j == 0)
2033 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2034 else
2035 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2036
2037 prev_stmt_info = vinfo_for_stmt (new_stmt);
2038 }
2039
2040 VEC_free (tree, heap, vec_oprnds);
2041 return true;
2042 }
2043
2044
2045 /* Function vectorizable_shift.
2046
2047 Check if STMT performs a shift operation that can be vectorized.
2048 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2049 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2050 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2051
2052 static bool
2053 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
2054 gimple *vec_stmt, slp_tree slp_node)
2055 {
2056 tree vec_dest;
2057 tree scalar_dest;
2058 tree op0, op1 = NULL;
2059 tree vec_oprnd1 = NULL_TREE;
2060 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2061 tree vectype;
2062 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2063 enum tree_code code;
2064 enum machine_mode vec_mode;
2065 tree new_temp;
2066 optab optab;
2067 int icode;
2068 enum machine_mode optab_op2_mode;
2069 tree def;
2070 gimple def_stmt;
2071 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2072 gimple new_stmt = NULL;
2073 stmt_vec_info prev_stmt_info;
2074 int nunits_in;
2075 int nunits_out;
2076 tree vectype_out;
2077 int ncopies;
2078 int j, i;
2079 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2080 tree vop0, vop1;
2081 unsigned int k;
2082 bool scalar_shift_arg = false;
2083 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2084 int vf;
2085
2086 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2087 return false;
2088
2089 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2090 return false;
2091
2092 /* Is STMT a vectorizable binary/unary operation? */
2093 if (!is_gimple_assign (stmt))
2094 return false;
2095
2096 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2097 return false;
2098
2099 code = gimple_assign_rhs_code (stmt);
2100
2101 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2102 || code == RROTATE_EXPR))
2103 return false;
2104
2105 scalar_dest = gimple_assign_lhs (stmt);
2106 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2107
2108 op0 = gimple_assign_rhs1 (stmt);
2109 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2110 &def_stmt, &def, &dt[0], &vectype))
2111 {
2112 if (vect_print_dump_info (REPORT_DETAILS))
2113 fprintf (vect_dump, "use not simple.");
2114 return false;
2115 }
2116 /* If op0 is an external or constant def use a vector type with
2117 the same size as the output vector type. */
2118 if (!vectype)
2119 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2120 if (vec_stmt)
2121 gcc_assert (vectype);
2122 if (!vectype)
2123 {
2124 if (vect_print_dump_info (REPORT_DETAILS))
2125 {
2126 fprintf (vect_dump, "no vectype for scalar type ");
2127 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2128 }
2129
2130 return false;
2131 }
2132
2133 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2134 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2135 if (nunits_out != nunits_in)
2136 return false;
2137
2138 op1 = gimple_assign_rhs2 (stmt);
2139 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
2140 {
2141 if (vect_print_dump_info (REPORT_DETAILS))
2142 fprintf (vect_dump, "use not simple.");
2143 return false;
2144 }
2145
2146 if (loop_vinfo)
2147 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2148 else
2149 vf = 1;
2150
2151 /* Multiple types in SLP are handled by creating the appropriate number of
2152 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2153 case of SLP. */
2154 if (slp_node)
2155 ncopies = 1;
2156 else
2157 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2158
2159 gcc_assert (ncopies >= 1);
2160
2161 /* Determine whether the shift amount is a vector or a scalar. If the
2162 shift/rotate amount is a vector, use the vector/vector shift optabs. */
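/* E.g. in "a[i] = b[i] << c[i]" the shift amount is an internal (vector)
   def and the vector/vector optab is required, whereas "a[i] = b[i] << 3"
   or a shift by a loop-invariant scalar can use the vector/scalar optab
   when the target provides one. */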
2163
2164 /* Vector shifted by vector. */
2165 if (dt[1] == vect_internal_def)
2166 {
2167 optab = optab_for_tree_code (code, vectype, optab_vector);
2168 if (vect_print_dump_info (REPORT_DETAILS))
2169 fprintf (vect_dump, "vector/vector shift/rotate found.");
2170 }
2171 /* See if the machine has a vector shifted by scalar insn and if not
2172 then see if it has a vector shifted by vector insn. */
2173 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2174 {
2175 optab = optab_for_tree_code (code, vectype, optab_scalar);
2176 if (optab
2177 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2178 {
2179 scalar_shift_arg = true;
2180 if (vect_print_dump_info (REPORT_DETAILS))
2181 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2182 }
2183 else
2184 {
2185 optab = optab_for_tree_code (code, vectype, optab_vector);
2186 if (optab
2187 && (optab_handler (optab, TYPE_MODE (vectype))
2188 != CODE_FOR_nothing))
2189 {
2190 if (vect_print_dump_info (REPORT_DETAILS))
2191 fprintf (vect_dump, "vector/vector shift/rotate found.");
2192
2193 /* Unlike the other binary operators, shifts/rotates have
2194 the rhs being int, instead of the same type as the lhs,
2195 so make sure the scalar is the right type if we are
2196 dealing with vectors of short/char. */
2197 if (dt[1] == vect_constant_def)
2198 op1 = fold_convert (TREE_TYPE (vectype), op1);
2199 }
2200 }
2201 }
2202 else
2203 {
2204 if (vect_print_dump_info (REPORT_DETAILS))
2205 fprintf (vect_dump, "operand mode requires invariant argument.");
2206 return false;
2207 }
2208
2209 /* Supportable by target? */
2210 if (!optab)
2211 {
2212 if (vect_print_dump_info (REPORT_DETAILS))
2213 fprintf (vect_dump, "no optab.");
2214 return false;
2215 }
2216 vec_mode = TYPE_MODE (vectype);
2217 icode = (int) optab_handler (optab, vec_mode);
2218 if (icode == CODE_FOR_nothing)
2219 {
2220 if (vect_print_dump_info (REPORT_DETAILS))
2221 fprintf (vect_dump, "op not supported by target.");
2222 /* Check only during analysis. */
2223 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2224 || (vf < vect_min_worthwhile_factor (code)
2225 && !vec_stmt))
2226 return false;
2227 if (vect_print_dump_info (REPORT_DETAILS))
2228 fprintf (vect_dump, "proceeding using word mode.");
2229 }
2230
2231 /* Worthwhile without SIMD support? Check only during analysis. */
2232 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2233 && vf < vect_min_worthwhile_factor (code)
2234 && !vec_stmt)
2235 {
2236 if (vect_print_dump_info (REPORT_DETAILS))
2237 fprintf (vect_dump, "not worthwhile without SIMD support.");
2238 return false;
2239 }
2240
2241 if (!vec_stmt) /* transformation not required. */
2242 {
2243 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
2244 if (vect_print_dump_info (REPORT_DETAILS))
2245 fprintf (vect_dump, "=== vectorizable_shift ===");
2246 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2247 return true;
2248 }
2249
2250 /** Transform. **/
2251
2252 if (vect_print_dump_info (REPORT_DETAILS))
2253 fprintf (vect_dump, "transform binary/unary operation.");
2254
2255 /* Handle def. */
2256 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2257
2258 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2259 created in the previous stages of the recursion, so no allocation is
2260 needed, except for the case of shift with scalar shift argument. In that
2261 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2262 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2263 In case of loop-based vectorization we allocate VECs of size 1. We
2264 allocate VEC_OPRNDS1 only in case of binary operation. */
2265 if (!slp_node)
2266 {
2267 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2268 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2269 }
2270 else if (scalar_shift_arg)
2271 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2272
2273 prev_stmt_info = NULL;
2274 for (j = 0; j < ncopies; j++)
2275 {
2276 /* Handle uses. */
2277 if (j == 0)
2278 {
2279 if (scalar_shift_arg)
2280 {
2281 /* Vector shl and shr insn patterns can be defined with scalar
2282 operand 2 (shift operand). In this case, use constant or loop
2283 invariant op1 directly, without extending it to vector mode
2284 first. */
2285 optab_op2_mode = insn_data[icode].operand[2].mode;
2286 if (!VECTOR_MODE_P (optab_op2_mode))
2287 {
2288 if (vect_print_dump_info (REPORT_DETAILS))
2289 fprintf (vect_dump, "operand 1 using scalar mode.");
2290 vec_oprnd1 = op1;
2291 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2292 if (slp_node)
2293 {
2294 /* Store vec_oprnd1 for every vector stmt to be created
2295 for SLP_NODE. We check during the analysis that all
2296 the shift arguments are the same.
2297 TODO: Allow different constants for different vector
2298 stmts generated for an SLP instance. */
2299 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2300 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2301 }
2302 }
2303 }
2304
2305 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2306 (a special case for certain kinds of vector shifts); otherwise,
2307 operand 1 should be of a vector type (the usual case). */
2308 if (vec_oprnd1)
2309 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2310 slp_node);
2311 else
2312 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2313 slp_node);
2314 }
2315 else
2316 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2317
2318 /* Arguments are ready. Create the new vector stmt. */
2319 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2320 {
2321 vop1 = VEC_index (tree, vec_oprnds1, i);
2322 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2323 new_temp = make_ssa_name (vec_dest, new_stmt);
2324 gimple_assign_set_lhs (new_stmt, new_temp);
2325 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2326 if (slp_node)
2327 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2328 }
2329
2330 if (slp_node)
2331 continue;
2332
2333 if (j == 0)
2334 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2335 else
2336 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2337 prev_stmt_info = vinfo_for_stmt (new_stmt);
2338 }
2339
2340 VEC_free (tree, heap, vec_oprnds0);
2341 VEC_free (tree, heap, vec_oprnds1);
2342
2343 return true;
2344 }
2345
2346
2347 /* Function vectorizable_operation.
2348
2349 Check if STMT performs a binary or unary operation that can be vectorized.
2350 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2351 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2352 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2353
2354 static bool
2355 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2356 gimple *vec_stmt, slp_tree slp_node)
2357 {
2358 tree vec_dest;
2359 tree scalar_dest;
2360 tree op0, op1 = NULL;
2361 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2362 tree vectype;
2363 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2364 enum tree_code code;
2365 enum machine_mode vec_mode;
2366 tree new_temp;
2367 int op_type;
2368 optab optab;
2369 int icode;
2370 tree def;
2371 gimple def_stmt;
2372 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2373 gimple new_stmt = NULL;
2374 stmt_vec_info prev_stmt_info;
2375 int nunits_in;
2376 int nunits_out;
2377 tree vectype_out;
2378 int ncopies;
2379 int j, i;
2380 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2381 tree vop0, vop1;
2382 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2383 int vf;
2384
2385 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2386 return false;
2387
2388 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2389 return false;
2390
2391 /* Is STMT a vectorizable binary/unary operation? */
2392 if (!is_gimple_assign (stmt))
2393 return false;
2394
2395 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2396 return false;
2397
2398 code = gimple_assign_rhs_code (stmt);
2399
2400 /* For pointer addition, we should use the normal plus for
2401 the vector addition. */
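/* E.g. a POINTER_PLUS_EXPR such as "p_1 = p_0 + off_3" is vectorized as an
   ordinary element-wise PLUS_EXPR on the vectorized operands. */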
2402 if (code == POINTER_PLUS_EXPR)
2403 code = PLUS_EXPR;
2404
2405 /* Support only unary or binary operations. */
2406 op_type = TREE_CODE_LENGTH (code);
2407 if (op_type != unary_op && op_type != binary_op)
2408 {
2409 if (vect_print_dump_info (REPORT_DETAILS))
2410 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
2411 return false;
2412 }
2413
2414 scalar_dest = gimple_assign_lhs (stmt);
2415 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2416
2417 op0 = gimple_assign_rhs1 (stmt);
2418 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2419 &def_stmt, &def, &dt[0], &vectype))
2420 {
2421 if (vect_print_dump_info (REPORT_DETAILS))
2422 fprintf (vect_dump, "use not simple.");
2423 return false;
2424 }
2425 /* If op0 is an external or constant def use a vector type with
2426 the same size as the output vector type. */
2427 if (!vectype)
2428 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2429 if (vec_stmt)
2430 gcc_assert (vectype);
2431 if (!vectype)
2432 {
2433 if (vect_print_dump_info (REPORT_DETAILS))
2434 {
2435 fprintf (vect_dump, "no vectype for scalar type ");
2436 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2437 }
2438
2439 return false;
2440 }
2441
2442 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2443 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2444 if (nunits_out != nunits_in)
2445 return false;
2446
2447 if (op_type == binary_op)
2448 {
2449 op1 = gimple_assign_rhs2 (stmt);
2450 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2451 &dt[1]))
2452 {
2453 if (vect_print_dump_info (REPORT_DETAILS))
2454 fprintf (vect_dump, "use not simple.");
2455 return false;
2456 }
2457 }
2458
2459 if (loop_vinfo)
2460 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2461 else
2462 vf = 1;
2463
2464 /* Multiple types in SLP are handled by creating the appropriate number of
2465 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2466 case of SLP. */
2467 if (slp_node)
2468 ncopies = 1;
2469 else
2470 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2471
2472 gcc_assert (ncopies >= 1);
2473
2474 /* Shifts are handled in vectorizable_shift (). */
2475 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2476 || code == RROTATE_EXPR)
2477 return false;
2478
2479 optab = optab_for_tree_code (code, vectype, optab_default);
2480
2481 /* Supportable by target? */
2482 if (!optab)
2483 {
2484 if (vect_print_dump_info (REPORT_DETAILS))
2485 fprintf (vect_dump, "no optab.");
2486 return false;
2487 }
2488 vec_mode = TYPE_MODE (vectype);
2489 icode = (int) optab_handler (optab, vec_mode);
2490 if (icode == CODE_FOR_nothing)
2491 {
2492 if (vect_print_dump_info (REPORT_DETAILS))
2493 fprintf (vect_dump, "op not supported by target.");
2494 /* Check only during analysis. */
2495 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2496 || (vf < vect_min_worthwhile_factor (code)
2497 && !vec_stmt))
2498 return false;
2499 if (vect_print_dump_info (REPORT_DETAILS))
2500 fprintf (vect_dump, "proceeding using word mode.");
2501 }
2502
2503 /* Worthwhile without SIMD support? Check only during analysis. */
2504 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2505 && vf < vect_min_worthwhile_factor (code)
2506 && !vec_stmt)
2507 {
2508 if (vect_print_dump_info (REPORT_DETAILS))
2509 fprintf (vect_dump, "not worthwhile without SIMD support.");
2510 return false;
2511 }
2512
2513 if (!vec_stmt) /* transformation not required. */
2514 {
2515 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2516 if (vect_print_dump_info (REPORT_DETAILS))
2517 fprintf (vect_dump, "=== vectorizable_operation ===");
2518 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2519 return true;
2520 }
2521
2522 /** Transform. **/
2523
2524 if (vect_print_dump_info (REPORT_DETAILS))
2525 fprintf (vect_dump, "transform binary/unary operation.");
2526
2527 /* Handle def. */
2528 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2529
2530 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2531 created in the previous stages of the recursion, so no allocation is
2532 needed. (Shifts with a scalar shift argument, which need an extra
2533 operand vector stored in VEC_OPRNDS1, are handled in
2534 vectorizable_shift.) In case of loop-based vectorization we allocate
2535 VECs of size 1. We allocate VEC_OPRNDS1 only in case of binary
2536 operation. */
2537 if (!slp_node)
2538 {
2539 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2540 if (op_type == binary_op)
2541 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2542 }
2543
2544 /* In case the vectorization factor (VF) is bigger than the number
2545 of elements that we can fit in a vectype (nunits), we have to generate
2546 more than one vector stmt - i.e - we need to "unroll" the
2547 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2548 from one copy of the vector stmt to the next, in the field
2549 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2550 stages to find the correct vector defs to be used when vectorizing
2551 stmts that use the defs of the current stmt. The example below
2552 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
2553 we need to create 4 vectorized stmts):
2554
2555 before vectorization:
2556 RELATED_STMT VEC_STMT
2557 S1: x = memref - -
2558 S2: z = x + 1 - -
2559
2560 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2561 there):
2562 RELATED_STMT VEC_STMT
2563 VS1_0: vx0 = memref0 VS1_1 -
2564 VS1_1: vx1 = memref1 VS1_2 -
2565 VS1_2: vx2 = memref2 VS1_3 -
2566 VS1_3: vx3 = memref3 - -
2567 S1: x = load - VS1_0
2568 S2: z = x + 1 - -
2569
2570 step 2: vectorize stmt S2 (done here):
2571 To vectorize stmt S2 we first need to find the relevant vector
2572 def for the first operand 'x'. This is, as usual, obtained from
2573 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2574 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2575 relevant vector def 'vx0'. Having found 'vx0' we can generate
2576 the vector stmt VS2_0, and as usual, record it in the
2577 STMT_VINFO_VEC_STMT of stmt S2.
2578 When creating the second copy (VS2_1), we obtain the relevant vector
2579 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2580 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2581 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2582 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2583 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2584 chain of stmts and pointers:
2585 RELATED_STMT VEC_STMT
2586 VS1_0: vx0 = memref0 VS1_1 -
2587 VS1_1: vx1 = memref1 VS1_2 -
2588 VS1_2: vx2 = memref2 VS1_3 -
2589 VS1_3: vx3 = memref3 - -
2590 S1: x = load - VS1_0
2591 VS2_0: vz0 = vx0 + v1 VS2_1 -
2592 VS2_1: vz1 = vx1 + v1 VS2_2 -
2593 VS2_2: vz2 = vx2 + v1 VS2_3 -
2594 VS2_3: vz3 = vx3 + v1 - -
2595 S2: z = x + 1 - VS2_0 */
2596
2597 prev_stmt_info = NULL;
2598 for (j = 0; j < ncopies; j++)
2599 {
2600 /* Handle uses. */
2601 if (j == 0)
2602 {
2603 if (op_type == binary_op)
2604 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2605 slp_node);
2606 else
2607 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2608 slp_node);
2609 }
2610 else
2611 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2612
2613 /* Arguments are ready. Create the new vector stmt. */
2614 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2615 {
2616 vop1 = ((op_type == binary_op)
2617 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2618 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2619 new_temp = make_ssa_name (vec_dest, new_stmt);
2620 gimple_assign_set_lhs (new_stmt, new_temp);
2621 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2622 if (slp_node)
2623 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2624 }
2625
2626 if (slp_node)
2627 continue;
2628
2629 if (j == 0)
2630 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2631 else
2632 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2633 prev_stmt_info = vinfo_for_stmt (new_stmt);
2634 }
2635
2636 VEC_free (tree, heap, vec_oprnds0);
2637 if (vec_oprnds1)
2638 VEC_free (tree, heap, vec_oprnds1);
2639
2640 return true;
2641 }
2642
2643
2644 /* Get vectorized definitions for loop-based vectorization. For the first
2645 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2646 scalar operand), and for the rest we get a copy with
2647 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2648 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2649 The vectors are collected into VEC_OPRNDS. */
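/* E.g. for a single-step demotion two vector defs are collected here, which
   are later packed into one narrower vector; each additional conversion
   step doubles the number of defs collected. */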
2650
2651 static void
2652 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2653 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2654 {
2655 tree vec_oprnd;
2656
2657 /* Get first vector operand. */
2658 /* All the vector operands except the very first one (which is the scalar
2659 operand) are stmt copies. */
2660 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2661 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2662 else
2663 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2664
2665 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2666
2667 /* Get second vector operand. */
2668 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2669 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2670
2671 *oprnd = vec_oprnd;
2672
2673 /* For conversion in multiple steps, continue to get operands
2674 recursively. */
2675 if (multi_step_cvt)
2676 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2677 }
2678
2679
2680 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2681 For multi-step conversions store the resulting vectors and call the function
2682 recursively. */
2683
2684 static void
2685 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2686 int multi_step_cvt, gimple stmt,
2687 VEC (tree, heap) *vec_dsts,
2688 gimple_stmt_iterator *gsi,
2689 slp_tree slp_node, enum tree_code code,
2690 stmt_vec_info *prev_stmt_info)
2691 {
2692 unsigned int i;
2693 tree vop0, vop1, new_tmp, vec_dest;
2694 gimple new_stmt;
2695 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2696
2697 vec_dest = VEC_pop (tree, vec_dsts);
2698
2699 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2700 {
2701 /* Create demotion operation. */
2702 vop0 = VEC_index (tree, *vec_oprnds, i);
2703 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2704 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2705 new_tmp = make_ssa_name (vec_dest, new_stmt);
2706 gimple_assign_set_lhs (new_stmt, new_tmp);
2707 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2708
2709 if (multi_step_cvt)
2710 /* Store the resulting vector for next recursive call. */
2711 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2712 else
2713 {
2714 /* This is the last step of the conversion sequence. Store the
2715 vectors in SLP_NODE or in vector info of the scalar statement
2716 (or in STMT_VINFO_RELATED_STMT chain). */
2717 if (slp_node)
2718 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2719 else
2720 {
2721 if (!*prev_stmt_info)
2722 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2723 else
2724 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2725
2726 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2727 }
2728 }
2729 }
2730
2731 /* For multi-step demotion operations we first generate demotion operations
2732 from the source type to the intermediate types, and then combine the
2733 results (stored in VEC_OPRNDS) in a demotion operation to the destination
2734 type. */
2735 if (multi_step_cvt)
2736 {
2737 /* At each level of recursion we have half of the operands we had at the
2738 previous level. */
2739 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2740 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2741 stmt, vec_dsts, gsi, slp_node,
2742 code, prev_stmt_info);
2743 }
2744 }
2745
2746
2747 /* Function vectorizable_type_demotion
2748
2749 Check if STMT performs a binary or unary operation that involves
2750 type demotion, and if it can be vectorized.
2751 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2752 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2753 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2754
2755 static bool
2756 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2757 gimple *vec_stmt, slp_tree slp_node)
2758 {
2759 tree vec_dest;
2760 tree scalar_dest;
2761 tree op0;
2762 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2763 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2764 enum tree_code code, code1 = ERROR_MARK;
2765 tree def;
2766 gimple def_stmt;
2767 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2768 stmt_vec_info prev_stmt_info;
2769 int nunits_in;
2770 int nunits_out;
2771 tree vectype_out;
2772 int ncopies;
2773 int j, i;
2774 tree vectype_in;
2775 int multi_step_cvt = 0;
2776 VEC (tree, heap) *vec_oprnds0 = NULL;
2777 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2778 tree last_oprnd, intermediate_type;
2779
2780 /* FORNOW: not supported by basic block SLP vectorization. */
2781 gcc_assert (loop_vinfo);
2782
2783 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2784 return false;
2785
2786 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2787 return false;
2788
2789 /* Is STMT a vectorizable type-demotion operation? */
2790 if (!is_gimple_assign (stmt))
2791 return false;
2792
2793 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2794 return false;
2795
2796 code = gimple_assign_rhs_code (stmt);
2797 if (!CONVERT_EXPR_CODE_P (code))
2798 return false;
2799
2800 scalar_dest = gimple_assign_lhs (stmt);
2801 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2802
2803 /* Check the operands of the operation. */
2804 op0 = gimple_assign_rhs1 (stmt);
2805 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2806 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2807 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2808 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2809 && CONVERT_EXPR_CODE_P (code))))
2810 return false;
2811 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2812 &def_stmt, &def, &dt[0], &vectype_in))
2813 {
2814 if (vect_print_dump_info (REPORT_DETAILS))
2815 fprintf (vect_dump, "use not simple.");
2816 return false;
2817 }
2818 /* If op0 is an external def use a vector type with the
2819 same size as the output vector type if possible. */
2820 if (!vectype_in)
2821 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2822 if (vec_stmt)
2823 gcc_assert (vectype_in);
2824 if (!vectype_in)
2825 {
2826 if (vect_print_dump_info (REPORT_DETAILS))
2827 {
2828 fprintf (vect_dump, "no vectype for scalar type ");
2829 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2830 }
2831
2832 return false;
2833 }
2834
2835 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2836 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2837 if (nunits_in >= nunits_out)
2838 return false;
2839
2840 /* Multiple types in SLP are handled by creating the appropriate number of
2841 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2842 case of SLP. */
2843 if (slp_node)
2844 ncopies = 1;
2845 else
2846 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2847 gcc_assert (ncopies >= 1);
2848
2849 /* Supportable by target? */
2850 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2851 &code1, &multi_step_cvt, &interm_types))
2852 return false;
2853
2854 if (!vec_stmt) /* transformation not required. */
2855 {
2856 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2857 if (vect_print_dump_info (REPORT_DETAILS))
2858 fprintf (vect_dump, "=== vectorizable_demotion ===");
2859 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2860 return true;
2861 }
2862
2863 /** Transform. **/
2864 if (vect_print_dump_info (REPORT_DETAILS))
2865 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2866 ncopies);
2867
2868 /* In case of multi-step demotion, we first generate demotion operations to
2869 the intermediate types, and then from those types to the final one.
2870 We create vector destinations for the intermediate types (TYPES) received
2871 from supportable_narrowing_operation, and store them in the correct order
2872 for future use in vect_create_vectorized_demotion_stmts(). */
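/* For example, demoting int to char on a target with 128-bit vectors goes
   V4SI -> V8HI -> V16QI: supportable_narrowing_operation returns the
   intermediate V8HI type in INTERM_TYPES, and four input vectors end up
   combined into a single result vector. */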
2873 if (multi_step_cvt)
2874 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2875 else
2876 vec_dsts = VEC_alloc (tree, heap, 1);
2877
2878 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2879 VEC_quick_push (tree, vec_dsts, vec_dest);
2880
2881 if (multi_step_cvt)
2882 {
2883 for (i = VEC_length (tree, interm_types) - 1;
2884 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2885 {
2886 vec_dest = vect_create_destination_var (scalar_dest,
2887 intermediate_type);
2888 VEC_quick_push (tree, vec_dsts, vec_dest);
2889 }
2890 }
2891
2892 /* In case the vectorization factor (VF) is bigger than the number
2893 of elements that we can fit in a vectype (nunits), we have to generate
2894 more than one vector stmt - i.e - we need to "unroll" the
2895 vector stmt by a factor VF/nunits. */
2896 last_oprnd = op0;
2897 prev_stmt_info = NULL;
2898 for (j = 0; j < ncopies; j++)
2899 {
2900 /* Handle uses. */
2901 if (slp_node)
2902 vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
2903 else
2904 {
2905 VEC_free (tree, heap, vec_oprnds0);
2906 vec_oprnds0 = VEC_alloc (tree, heap,
2907 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2908 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2909 vect_pow2 (multi_step_cvt) - 1);
2910 }
2911
2912 /* Arguments are ready. Create the new vector stmts. */
2913 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2914 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2915 multi_step_cvt, stmt, tmp_vec_dsts,
2916 gsi, slp_node, code1,
2917 &prev_stmt_info);
2918 }
2919
2920 VEC_free (tree, heap, vec_oprnds0);
2921 VEC_free (tree, heap, vec_dsts);
2922 VEC_free (tree, heap, tmp_vec_dsts);
2923 VEC_free (tree, heap, interm_types);
2924
2925 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2926 return true;
2927 }
2928
2929
2930 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2931 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2932 the resulting vectors and call the function recursively. */
2933
2934 static void
2935 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2936 VEC (tree, heap) **vec_oprnds1,
2937 int multi_step_cvt, gimple stmt,
2938 VEC (tree, heap) *vec_dsts,
2939 gimple_stmt_iterator *gsi,
2940 slp_tree slp_node, enum tree_code code1,
2941 enum tree_code code2, tree decl1,
2942 tree decl2, int op_type,
2943 stmt_vec_info *prev_stmt_info)
2944 {
2945 int i;
2946 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2947 gimple new_stmt1, new_stmt2;
2948 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2949 VEC (tree, heap) *vec_tmp;
2950
2951 vec_dest = VEC_pop (tree, vec_dsts);
2952 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2953
2954 FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
2955 {
2956 if (op_type == binary_op)
2957 vop1 = VEC_index (tree, *vec_oprnds1, i);
2958 else
2959 vop1 = NULL_TREE;
2960
2961 /* Generate the two halves of the promotion operation. */
2962 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2963 op_type, vec_dest, gsi, stmt);
2964 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2965 op_type, vec_dest, gsi, stmt);
2966 if (is_gimple_call (new_stmt1))
2967 {
2968 new_tmp1 = gimple_call_lhs (new_stmt1);
2969 new_tmp2 = gimple_call_lhs (new_stmt2);
2970 }
2971 else
2972 {
2973 new_tmp1 = gimple_assign_lhs (new_stmt1);
2974 new_tmp2 = gimple_assign_lhs (new_stmt2);
2975 }
2976
2977 if (multi_step_cvt)
2978 {
2979 /* Store the results for the recursive call. */
2980 VEC_quick_push (tree, vec_tmp, new_tmp1);
2981 VEC_quick_push (tree, vec_tmp, new_tmp2);
2982 }
2983 else
2984 {
2985 /* Last step of the promotion sequence - store the results. */
2986 if (slp_node)
2987 {
2988 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2989 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2990 }
2991 else
2992 {
2993 if (!*prev_stmt_info)
2994 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2995 else
2996 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2997
2998 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2999 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
3000 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
3001 }
3002 }
3003 }
3004
3005 if (multi_step_cvt)
3006 {
3007 /* For a multi-step promotion operation we call the function recursively
3008 for every stage. We start from the input type, create promotion
3009 operations to the intermediate types, and then create promotions to
3010 the output type. */
3011 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
3012 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
3013 multi_step_cvt - 1, stmt,
3014 vec_dsts, gsi, slp_node, code1,
3015 code2, decl1, decl2, op_type,
3016 prev_stmt_info);
3017 }
3018
3019 VEC_free (tree, heap, vec_tmp);
3020 }
3021
3022
3023 /* Function vectorizable_type_promotion
3024
3025 Check if STMT performs a binary or unary operation that involves
3026 type promotion, and if it can be vectorized.
3027 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3028 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3029 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3030
3031 static bool
3032 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
3033 gimple *vec_stmt, slp_tree slp_node)
3034 {
3035 tree vec_dest;
3036 tree scalar_dest;
3037 tree op0, op1 = NULL;
3038 tree vec_oprnd0 = NULL, vec_oprnd1 = NULL;
3039 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3040 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3041 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3042 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3043 int op_type;
3044 tree def;
3045 gimple def_stmt;
3046 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3047 stmt_vec_info prev_stmt_info;
3048 int nunits_in;
3049 int nunits_out;
3050 tree vectype_out;
3051 int ncopies;
3052 int j, i;
3053 tree vectype_in;
3054 tree intermediate_type = NULL_TREE;
3055 int multi_step_cvt = 0;
3056 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
3057 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
3058
3059 /* FORNOW: not supported by basic block SLP vectorization. */
3060 gcc_assert (loop_vinfo);
3061
3062 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3063 return false;
3064
3065 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3066 return false;
3067
3068 /* Is STMT a vectorizable type-promotion operation? */
3069 if (!is_gimple_assign (stmt))
3070 return false;
3071
3072 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3073 return false;
3074
3075 code = gimple_assign_rhs_code (stmt);
3076 if (!CONVERT_EXPR_CODE_P (code)
3077 && code != WIDEN_MULT_EXPR)
3078 return false;
3079
3080 scalar_dest = gimple_assign_lhs (stmt);
3081 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3082
3083 /* Check the operands of the operation. */
3084 op0 = gimple_assign_rhs1 (stmt);
3085 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
3086 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3087 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
3088 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
3089 && CONVERT_EXPR_CODE_P (code))))
3090 return false;
3091 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
3092 &def_stmt, &def, &dt[0], &vectype_in))
3093 {
3094 if (vect_print_dump_info (REPORT_DETAILS))
3095 fprintf (vect_dump, "use not simple.");
3096 return false;
3097 }
3098 /* If op0 is an external or constant def use a vector type with
3099 the same size as the output vector type. */
3100 if (!vectype_in)
3101 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
3102 if (vec_stmt)
3103 gcc_assert (vectype_in);
3104 if (!vectype_in)
3105 {
3106 if (vect_print_dump_info (REPORT_DETAILS))
3107 {
3108 fprintf (vect_dump, "no vectype for scalar type ");
3109 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
3110 }
3111
3112 return false;
3113 }
3114
3115 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3116 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3117 if (nunits_in <= nunits_out)
3118 return false;
3119
3120 /* Multiple types in SLP are handled by creating the appropriate number of
3121 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3122 case of SLP. */
3123 if (slp_node)
3124 ncopies = 1;
3125 else
3126 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3127
3128 gcc_assert (ncopies >= 1);
3129
3130 op_type = TREE_CODE_LENGTH (code);
3131 if (op_type == binary_op)
3132 {
3133 op1 = gimple_assign_rhs2 (stmt);
3134 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
3135 {
3136 if (vect_print_dump_info (REPORT_DETAILS))
3137 fprintf (vect_dump, "use not simple.");
3138 return false;
3139 }
3140 }
3141
3142 /* Supportable by target? */
3143 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3144 &decl1, &decl2, &code1, &code2,
3145 &multi_step_cvt, &interm_types))
3146 return false;
3147
3148 /* Binary widening operation can only be supported directly by the
3149 architecture. */
3150 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3151
3152 if (!vec_stmt) /* transformation not required. */
3153 {
3154 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3155 if (vect_print_dump_info (REPORT_DETAILS))
3156 fprintf (vect_dump, "=== vectorizable_promotion ===");
3157 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
3158 return true;
3159 }
3160
3161 /** Transform. **/
3162
3163 if (vect_print_dump_info (REPORT_DETAILS))
3164 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
3165 ncopies);
3166
3167 /* Handle def. */
3168 /* In case of multi-step promotion, we first generate promotion operations
3169 to the intermediate types, and then from those types to the final one.
3170 We store the vector destinations in VEC_DSTS in the correct order for
3171 recursive creation of promotion operations in
3172 vect_create_vectorized_promotion_stmts(). Vector destinations are created
3173 according to TYPES received from supportable_widening_operation(). */
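/* For example, promoting char to int on a target with 128-bit vectors goes
   V16QI -> V8HI -> V4SI: each input vector is unpacked into two V8HI
   vectors, and each of those into two V4SI vectors, so one input vector
   produces four result vectors. */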
3174 if (multi_step_cvt)
3175 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3176 else
3177 vec_dsts = VEC_alloc (tree, heap, 1);
3178
3179 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3180 VEC_quick_push (tree, vec_dsts, vec_dest);
3181
3182 if (multi_step_cvt)
3183 {
3184 for (i = VEC_length (tree, interm_types) - 1;
3185 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3186 {
3187 vec_dest = vect_create_destination_var (scalar_dest,
3188 intermediate_type);
3189 VEC_quick_push (tree, vec_dsts, vec_dest);
3190 }
3191 }
3192
3193 if (!slp_node)
3194 {
3195 vec_oprnds0 = VEC_alloc (tree, heap,
3196 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3197 if (op_type == binary_op)
3198 vec_oprnds1 = VEC_alloc (tree, heap, 1);
3199 }
3200
3201 /* In case the vectorization factor (VF) is bigger than the number
3202 of elements that we can fit in a vectype (nunits), we have to generate
3203 more than one vector stmt - i.e - we need to "unroll" the
3204 vector stmt by a factor VF/nunits. */
3205
3206 prev_stmt_info = NULL;
3207 for (j = 0; j < ncopies; j++)
3208 {
3209 /* Handle uses. */
3210 if (j == 0)
3211 {
3212 if (slp_node)
3213 vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
3214 &vec_oprnds1, -1);
3215 else
3216 {
3217 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3218 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
3219 if (op_type == binary_op)
3220 {
3221 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3222 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
3223 }
3224 }
3225 }
3226 else
3227 {
3228 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3229 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
3230 if (op_type == binary_op)
3231 {
3232 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3233 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3234 }
3235 }
3236
3237 /* Arguments are ready. Create the new vector stmts. */
3238 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3239 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
3240 multi_step_cvt, stmt,
3241 tmp_vec_dsts,
3242 gsi, slp_node, code1, code2,
3243 decl1, decl2, op_type,
3244 &prev_stmt_info);
3245 }
3246
3247 VEC_free (tree, heap, vec_dsts);
3248 VEC_free (tree, heap, tmp_vec_dsts);
3249 VEC_free (tree, heap, interm_types);
3250 VEC_free (tree, heap, vec_oprnds0);
3251 VEC_free (tree, heap, vec_oprnds1);
3252
3253 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3254 return true;
3255 }
3256
3257
3258 /* Function vectorizable_store.
3259
3260 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
3261 can be vectorized.
3262 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3263 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3264 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3265
3266 static bool
3267 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3268 slp_tree slp_node)
3269 {
3270 tree scalar_dest;
3271 tree data_ref;
3272 tree op;
3273 tree vec_oprnd = NULL_TREE;
3274 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3275 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3276 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3277 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3278 struct loop *loop = NULL;
3279 enum machine_mode vec_mode;
3280 tree dummy;
3281 enum dr_alignment_support alignment_support_scheme;
3282 tree def;
3283 gimple def_stmt;
3284 enum vect_def_type dt;
3285 stmt_vec_info prev_stmt_info = NULL;
3286 tree dataref_ptr = NULL_TREE;
3287 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3288 int ncopies;
3289 int j;
3290 gimple next_stmt, first_stmt = NULL;
3291 bool strided_store = false;
3292 unsigned int group_size, i;
3293 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3294 bool inv_p;
3295 VEC(tree,heap) *vec_oprnds = NULL;
3296 bool slp = (slp_node != NULL);
3297 unsigned int vec_num;
3298 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3299
3300 if (loop_vinfo)
3301 loop = LOOP_VINFO_LOOP (loop_vinfo);
3302
3303 /* Multiple types in SLP are handled by creating the appropriate number of
3304 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3305 case of SLP. */
3306 if (slp)
3307 ncopies = 1;
3308 else
3309 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3310
3311 gcc_assert (ncopies >= 1);
3312
3313 /* FORNOW. This restriction should be relaxed. */
3314 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3315 {
3316 if (vect_print_dump_info (REPORT_DETAILS))
3317 fprintf (vect_dump, "multiple types in nested loop.");
3318 return false;
3319 }
3320
3321 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3322 return false;
3323
3324 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3325 return false;
3326
3327 /* Is vectorizable store? */
3328
3329 if (!is_gimple_assign (stmt))
3330 return false;
3331
3332 scalar_dest = gimple_assign_lhs (stmt);
3333 if (TREE_CODE (scalar_dest) != ARRAY_REF
3334 && TREE_CODE (scalar_dest) != INDIRECT_REF
3335 && TREE_CODE (scalar_dest) != COMPONENT_REF
3336 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3337 && TREE_CODE (scalar_dest) != REALPART_EXPR
3338 && TREE_CODE (scalar_dest) != MEM_REF)
3339 return false;
3340
3341 gcc_assert (gimple_assign_single_p (stmt));
3342 op = gimple_assign_rhs1 (stmt);
3343 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3344 {
3345 if (vect_print_dump_info (REPORT_DETAILS))
3346 fprintf (vect_dump, "use not simple.");
3347 return false;
3348 }
3349
3350 /* The scalar rhs type needs to be trivially convertible to the vector
3351 component type. This should always be the case. */
3352 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
3353 {
3354 if (vect_print_dump_info (REPORT_DETAILS))
3355 fprintf (vect_dump, "??? operands of different types");
3356 return false;
3357 }
3358
3359 vec_mode = TYPE_MODE (vectype);
3360 /* FORNOW. In some cases we can vectorize even if the data-type is not
3361 supported (e.g. array initialization with 0). */
3362 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3363 return false;
3364
3365 if (!STMT_VINFO_DATA_REF (stmt_info))
3366 return false;
3367
3368 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
3369 {
3370 if (vect_print_dump_info (REPORT_DETAILS))
3371 fprintf (vect_dump, "negative step for store.");
3372 return false;
3373 }
3374
3375 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3376 {
3377 strided_store = true;
3378 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3379 if (!vect_strided_store_supported (vectype)
3380 && !PURE_SLP_STMT (stmt_info) && !slp)
3381 return false;
3382
3383 if (first_stmt == stmt)
3384 {
3385 /* STMT is the leader of the group. Check the operands of all the
3386 stmts of the group. */
3387 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3388 while (next_stmt)
3389 {
3390 gcc_assert (gimple_assign_single_p (next_stmt));
3391 op = gimple_assign_rhs1 (next_stmt);
3392 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3393 &def, &dt))
3394 {
3395 if (vect_print_dump_info (REPORT_DETAILS))
3396 fprintf (vect_dump, "use not simple.");
3397 return false;
3398 }
3399 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3400 }
3401 }
3402 }
3403
3404 if (!vec_stmt) /* transformation not required. */
3405 {
3406 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3407 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3408 return true;
3409 }
3410
3411 /** Transform. **/
3412
3413 if (strided_store)
3414 {
3415 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3416 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3417
3418 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3419
3420 /* FORNOW */
3421 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3422
3423 /* We vectorize all the stmts of the interleaving group when we
3424 reach the last stmt in the group. */
3425 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3426 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3427 && !slp)
3428 {
3429 *vec_stmt = NULL;
3430 return true;
3431 }
3432
3433 if (slp)
3434 {
3435 strided_store = false;
3436 /* VEC_NUM is the number of vect stmts to be created for this
3437 group. */
3438 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3439 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3440 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3441 }
3442 else
3443 /* VEC_NUM is the number of vect stmts to be created for this
3444 group. */
3445 vec_num = group_size;
3446 }
3447 else
3448 {
3449 first_stmt = stmt;
3450 first_dr = dr;
3451 group_size = vec_num = 1;
3452 }
3453
3454 if (vect_print_dump_info (REPORT_DETAILS))
3455 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3456
3457 dr_chain = VEC_alloc (tree, heap, group_size);
3458 oprnds = VEC_alloc (tree, heap, group_size);
3459
3460 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3461 gcc_assert (alignment_support_scheme);
3462
3463 /* In case the vectorization factor (VF) is bigger than the number
3464 of elements that we can fit in a vectype (nunits), we have to generate
3465 more than one vector stmt - i.e - we need to "unroll" the
3466 vector stmt by a factor VF/nunits. For more details see documentation in
3467 vect_get_vec_def_for_copy_stmt. */
3468
3469 /* In case of interleaving (non-unit strided access):
3470
3471 S1: &base + 2 = x2
3472 S2: &base = x0
3473 S3: &base + 1 = x1
3474 S4: &base + 3 = x3
3475
3476 We create vectorized stores starting from the base address (the access of
3477 the first stmt in the chain, S2 in the above example), when the last store
3478 stmt of the chain (S4) is reached:
3479
3480 VS1: &base = vx2
3481 VS2: &base + vec_size*1 = vx0
3482 VS3: &base + vec_size*2 = vx1
3483 VS4: &base + vec_size*3 = vx3
3484
3485 Then permutation statements are generated:
3486
3487 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3488 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3489 ...
3490
3491 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3492 (the order of the data-refs in the output of vect_permute_store_chain
3493 corresponds to the order of scalar stmts in the interleaving chain - see
3494 the documentation of vect_permute_store_chain()).
3495
3496 In case of both multiple types and interleaving, above vector stores and
3497 permutation stmts are created for every copy. The result vector stmts are
3498 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3499 STMT_VINFO_RELATED_STMT for the next copies.
3500 */
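/* As an illustrative sketch (assumed example, not taken from a testcase),
   a scalar loop such as

     for (i = 0; i < n; i++)
       {
         out[4*i]     = a[i];
         out[4*i + 1] = b[i];
         out[4*i + 2] = c[i];
         out[4*i + 3] = d[i];
       }

   forms an interleaving chain of GROUP_SIZE 4: the vector defs of a, b, c
   and d are collected in DR_CHAIN, interleaved by vect_permute_store_chain (),
   and only then emitted as wide stores through DATAREF_PTR.  */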
3501
3502 prev_stmt_info = NULL;
3503 for (j = 0; j < ncopies; j++)
3504 {
3505 gimple new_stmt;
3506 gimple ptr_incr;
3507
3508 if (j == 0)
3509 {
3510 if (slp)
3511 {
3512 /* Get vectorized arguments for SLP_NODE. */
3513 vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
3514 NULL, -1);
3515
3516 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3517 }
3518 else
3519 {
3520 /* For interleaved stores we collect vectorized defs for all the
3521 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3522 used as an input to vect_permute_store_chain(), and OPRNDS as
3523 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3524
3525 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3526 OPRNDS are of size 1. */
3527 next_stmt = first_stmt;
3528 for (i = 0; i < group_size; i++)
3529 {
3530 /* Since gaps are not supported for interleaved stores,
3531 GROUP_SIZE is the exact number of stmts in the chain.
3532 Therefore, NEXT_STMT can't be NULL. In case that
3533 there is no interleaving, GROUP_SIZE is 1, and only one
3534 iteration of the loop will be executed. */
3535 gcc_assert (next_stmt
3536 && gimple_assign_single_p (next_stmt));
3537 op = gimple_assign_rhs1 (next_stmt);
3538
3539 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3540 NULL);
3541 VEC_quick_push(tree, dr_chain, vec_oprnd);
3542 VEC_quick_push(tree, oprnds, vec_oprnd);
3543 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3544 }
3545 }
3546
3547 /* We should have caught mismatched types earlier. */
3548 gcc_assert (useless_type_conversion_p (vectype,
3549 TREE_TYPE (vec_oprnd)));
3550 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3551 &dummy, &ptr_incr, false,
3552 &inv_p);
3553 gcc_assert (bb_vinfo || !inv_p);
3554 }
3555 else
3556 {
3557 /* For interleaved stores we created vectorized defs for all the
3558 defs stored in OPRNDS in the previous iteration (previous copy).
3559 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3560 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3561 next copy.
3562 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3563 OPRNDS are of size 1. */
3564 for (i = 0; i < group_size; i++)
3565 {
3566 op = VEC_index (tree, oprnds, i);
3567 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3568 &dt);
3569 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3570 VEC_replace(tree, dr_chain, i, vec_oprnd);
3571 VEC_replace(tree, oprnds, i, vec_oprnd);
3572 }
3573 dataref_ptr =
3574 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3575 }
3576
3577 if (strided_store)
3578 {
3579 result_chain = VEC_alloc (tree, heap, group_size);
3580 /* Permute. */
3581 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3582 &result_chain))
3583 return false;
3584 }
3585
3586 next_stmt = first_stmt;
3587 for (i = 0; i < vec_num; i++)
3588 {
3589 struct ptr_info_def *pi;
3590
3591 if (i > 0)
3592 /* Bump the vector pointer. */
3593 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3594 NULL_TREE);
3595
3596 if (slp)
3597 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3598 else if (strided_store)
3599 /* For strided stores vectorized defs are interleaved in
3600 vect_permute_store_chain(). */
3601 vec_oprnd = VEC_index (tree, result_chain, i);
3602
3603 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
3604 build_int_cst (reference_alias_ptr_type
3605 (DR_REF (first_dr)), 0));
3606 pi = get_ptr_info (dataref_ptr);
3607 pi->align = TYPE_ALIGN_UNIT (vectype);
3608 if (aligned_access_p (first_dr))
3609 pi->misalign = 0;
3610 else if (DR_MISALIGNMENT (first_dr) == -1)
3611 {
3612 TREE_TYPE (data_ref)
3613 = build_aligned_type (TREE_TYPE (data_ref),
3614 TYPE_ALIGN (TREE_TYPE (vectype)));
3615 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
3616 pi->misalign = 0;
3617 }
3618 else
3619 {
3620 TREE_TYPE (data_ref)
3621 = build_aligned_type (TREE_TYPE (data_ref),
3622 TYPE_ALIGN (TREE_TYPE (vectype)));
3623 pi->misalign = DR_MISALIGNMENT (first_dr);
3624 }
3625
3626 /* Arguments are ready. Create the new vector stmt. */
3627 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3628 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3629 mark_symbols_for_renaming (new_stmt);
3630
3631 if (slp)
3632 continue;
3633
3634 if (j == 0)
3635 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3636 else
3637 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3638
3639 prev_stmt_info = vinfo_for_stmt (new_stmt);
3640 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3641 if (!next_stmt)
3642 break;
3643 }
3644 }
3645
3646 VEC_free (tree, heap, dr_chain);
3647 VEC_free (tree, heap, oprnds);
3648 if (result_chain)
3649 VEC_free (tree, heap, result_chain);
3650 if (vec_oprnds)
3651 VEC_free (tree, heap, vec_oprnds);
3652
3653 return true;
3654 }
3655
3656 /* Given a vector type VECTYPE returns a builtin DECL to be used
3657 for vector permutation and stores a mask into *MASK that implements
3658 reversal of the vector elements. If that is impossible to do
3659 returns NULL (and *MASK is unchanged). */
3660
3661 static tree
3662 perm_mask_for_reverse (tree vectype, tree *mask)
3663 {
3664 tree builtin_decl;
3665 tree mask_element_type, mask_type;
3666 tree mask_vec = NULL;
3667 int i;
3668 int nunits;
3669 if (!targetm.vectorize.builtin_vec_perm)
3670 return NULL;
3671
3672 builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
3673 &mask_element_type);
3674 if (!builtin_decl || !mask_element_type)
3675 return NULL;
3676
3677 mask_type = get_vectype_for_scalar_type (mask_element_type);
3678 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3679 if (!mask_type
3680 || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
3681 return NULL;
3682
3683 for (i = 0; i < nunits; i++)
3684 mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
3685 mask_vec = build_vector (mask_type, mask_vec);
3686
3687 if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
3688 return NULL;
3689 if (mask)
3690 *mask = mask_vec;
3691 return builtin_decl;
3692 }
3693
3694 /* Given a vector variable X that was generated for the scalar LHS of
3695 STMT, generate instructions to reverse the vector elements of X,
3696 insert them at *GSI and return the permuted vector variable. */
3697
3698 static tree
3699 reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
3700 {
3701 tree vectype = TREE_TYPE (x);
3702 tree mask_vec, builtin_decl;
3703 tree perm_dest, data_ref;
3704 gimple perm_stmt;
3705
3706 builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);
3707
3708 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3709
3710 /* Generate the permute statement. */
3711 perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
3712 data_ref = make_ssa_name (perm_dest, perm_stmt);
3713 gimple_call_set_lhs (perm_stmt, data_ref);
3714 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3715
3716 return data_ref;
3717 }
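/* As an illustrative note (assumed example, not from a testcase), the
   reversal above is needed for loads with a negative step, e.g.

     for (i = n - 1; i >= 0; i--)
       b[n - 1 - i] = a[i];

   where the vector loaded from the decreasing addresses of 'a' is put
   back into ascending element order by reverse_vec_elements () before
   being used (see the negative-step handling in vectorizable_load).  */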
3718
3719 /* vectorizable_load.
3720
3721 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3722 can be vectorized.
3723 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3724 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3725 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3726
3727 static bool
3728 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3729 slp_tree slp_node, slp_instance slp_node_instance)
3730 {
3731 tree scalar_dest;
3732 tree vec_dest = NULL;
3733 tree data_ref = NULL;
3734 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3735 stmt_vec_info prev_stmt_info;
3736 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3737 struct loop *loop = NULL;
3738 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3739 bool nested_in_vect_loop = false;
3740 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3741 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3742 tree new_temp;
3743 enum machine_mode mode;
3744 gimple new_stmt = NULL;
3745 tree dummy;
3746 enum dr_alignment_support alignment_support_scheme;
3747 tree dataref_ptr = NULL_TREE;
3748 gimple ptr_incr;
3749 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3750 int ncopies;
3751 int i, j, group_size;
3752 tree msq = NULL_TREE, lsq;
3753 tree offset = NULL_TREE;
3754 tree realignment_token = NULL_TREE;
3755 gimple phi = NULL;
3756 VEC(tree,heap) *dr_chain = NULL;
3757 bool strided_load = false;
3758 gimple first_stmt;
3759 tree scalar_type;
3760 bool inv_p;
3761 bool negative;
3762 bool compute_in_loop = false;
3763 struct loop *at_loop;
3764 int vec_num;
3765 bool slp = (slp_node != NULL);
3766 bool slp_perm = false;
3767 enum tree_code code;
3768 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3769 int vf;
3770
3771 if (loop_vinfo)
3772 {
3773 loop = LOOP_VINFO_LOOP (loop_vinfo);
3774 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3775 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3776 }
3777 else
3778 vf = 1;
3779
3780 /* Multiple types in SLP are handled by creating the appropriate number of
3781 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3782 case of SLP. */
3783 if (slp)
3784 ncopies = 1;
3785 else
3786 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3787
3788 gcc_assert (ncopies >= 1);
3789
3790 /* FORNOW. This restriction should be relaxed. */
3791 if (nested_in_vect_loop && ncopies > 1)
3792 {
3793 if (vect_print_dump_info (REPORT_DETAILS))
3794 fprintf (vect_dump, "multiple types in nested loop.");
3795 return false;
3796 }
3797
3798 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3799 return false;
3800
3801 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3802 return false;
3803
3804 /* Is vectorizable load? */
3805 if (!is_gimple_assign (stmt))
3806 return false;
3807
3808 scalar_dest = gimple_assign_lhs (stmt);
3809 if (TREE_CODE (scalar_dest) != SSA_NAME)
3810 return false;
3811
3812 code = gimple_assign_rhs_code (stmt);
3813 if (code != ARRAY_REF
3814 && code != INDIRECT_REF
3815 && code != COMPONENT_REF
3816 && code != IMAGPART_EXPR
3817 && code != REALPART_EXPR
3818 && code != MEM_REF)
3819 return false;
3820
3821 if (!STMT_VINFO_DATA_REF (stmt_info))
3822 return false;
3823
3824 negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
3825 if (negative && ncopies > 1)
3826 {
3827 if (vect_print_dump_info (REPORT_DETAILS))
3828 fprintf (vect_dump, "multiple types with negative step.");
3829 return false;
3830 }
3831
3832 scalar_type = TREE_TYPE (DR_REF (dr));
3833 mode = TYPE_MODE (vectype);
3834
3835 /* FORNOW. In some cases can vectorize even if data-type not supported
3836 (e.g. - data copies). */
3837 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
3838 {
3839 if (vect_print_dump_info (REPORT_DETAILS))
3840 fprintf (vect_dump, "Aligned load, but unsupported type.");
3841 return false;
3842 }
3843
3844 /* The vector component type needs to be trivially convertible to the
3845 scalar lhs. This should always be the case. */
3846 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3847 {
3848 if (vect_print_dump_info (REPORT_DETAILS))
3849 fprintf (vect_dump, "??? operands of different types");
3850 return false;
3851 }
3852
3853 /* Check if the load is a part of an interleaving chain. */
3854 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3855 {
3856 strided_load = true;
3857 /* FORNOW */
3858 gcc_assert (! nested_in_vect_loop);
3859
3860 /* Check if interleaving is supported. */
3861 if (!vect_strided_load_supported (vectype)
3862 && !PURE_SLP_STMT (stmt_info) && !slp)
3863 return false;
3864 }
3865
3866 if (negative)
3867 {
3868 gcc_assert (!strided_load);
3869 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
3870 if (alignment_support_scheme != dr_aligned
3871 && alignment_support_scheme != dr_unaligned_supported)
3872 {
3873 if (vect_print_dump_info (REPORT_DETAILS))
3874 fprintf (vect_dump, "negative step but alignment required.");
3875 return false;
3876 }
3877 if (!perm_mask_for_reverse (vectype, NULL))
3878 {
3879 if (vect_print_dump_info (REPORT_DETAILS))
3880 fprintf (vect_dump, "negative step and reversing not supported.");
3881 return false;
3882 }
3883 }
3884
3885 if (!vec_stmt) /* transformation not required. */
3886 {
3887 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3888 vect_model_load_cost (stmt_info, ncopies, NULL);
3889 return true;
3890 }
3891
3892 if (vect_print_dump_info (REPORT_DETAILS))
3893 fprintf (vect_dump, "transform load.");
3894
3895 /** Transform. **/
3896
3897 if (strided_load)
3898 {
3899 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3900 /* Check if the chain of loads is already vectorized. */
3901 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3902 {
3903 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3904 return true;
3905 }
3906 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3907 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3908
3909 /* VEC_NUM is the number of vect stmts to be created for this group. */
3910 if (slp)
3911 {
3912 strided_load = false;
3913 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3914 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3915 slp_perm = true;
3916 }
3917 else
3918 vec_num = group_size;
3919
3920 dr_chain = VEC_alloc (tree, heap, vec_num);
3921 }
3922 else
3923 {
3924 first_stmt = stmt;
3925 first_dr = dr;
3926 group_size = vec_num = 1;
3927 }
3928
3929 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3930 gcc_assert (alignment_support_scheme);
3931
3932 /* In case the vectorization factor (VF) is bigger than the number
3933 of elements that we can fit in a vectype (nunits), we have to generate
3934 more than one vector stmt - i.e - we need to "unroll" the
3935 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3936 from one copy of the vector stmt to the next, in the field
3937 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3938 stages to find the correct vector defs to be used when vectorizing
3939 stmts that use the defs of the current stmt. The example below
3940 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
3941 need to create 4 vectorized stmts):
3942
3943 before vectorization:
3944 RELATED_STMT VEC_STMT
3945 S1: x = memref - -
3946 S2: z = x + 1 - -
3947
3948 step 1: vectorize stmt S1:
3949 We first create the vector stmt VS1_0, and, as usual, record a
3950 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3951 Next, we create the vector stmt VS1_1, and record a pointer to
3952 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3953 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3954 stmts and pointers:
3955 RELATED_STMT VEC_STMT
3956 VS1_0: vx0 = memref0 VS1_1 -
3957 VS1_1: vx1 = memref1 VS1_2 -
3958 VS1_2: vx2 = memref2 VS1_3 -
3959 VS1_3: vx3 = memref3 - -
3960 S1: x = load - VS1_0
3961 S2: z = x + 1 - -
3962
3963 See in documentation in vect_get_vec_def_for_stmt_copy for how the
3964 information we recorded in RELATED_STMT field is used to vectorize
3965 stmt S2. */
3966
3967 /* In case of interleaving (non-unit strided access):
3968
3969 S1: x2 = &base + 2
3970 S2: x0 = &base
3971 S3: x1 = &base + 1
3972 S4: x3 = &base + 3
3973
3974 Vectorized loads are created in the order of memory accesses
3975 starting from the access of the first stmt of the chain:
3976
3977 VS1: vx0 = &base
3978 VS2: vx1 = &base + vec_size*1
3979 VS3: vx3 = &base + vec_size*2
3980 VS4: vx4 = &base + vec_size*3
3981
3982 Then permutation statements are generated:
3983
3984 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3985 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3986 ...
3987
3988 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3989 (the order of the data-refs in the output of vect_permute_load_chain
3990 corresponds to the order of scalar stmts in the interleaving chain - see
3991 the documentation of vect_permute_load_chain()).
3992 The generation of permutation stmts and recording them in
3993 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3994
3995 In case of both multiple types and interleaving, the vector loads and
3996 permutation stmts above are created for every copy. The result vector
3997 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
3998 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
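/* As an illustrative sketch (assumed example, not from a testcase), a
   scalar loop such as

     for (i = 0; i < n; i++)
       sum += in[2*i] * in[2*i + 1];

   loads an interleaving chain of GROUP_SIZE 2; the two wide loads are
   separated back into even and odd elements by the extract-even/odd
   permutations shown above.  */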
3999
4000 /* If the data reference is aligned (dr_aligned) or potentially unaligned
4001 on a target that supports unaligned accesses (dr_unaligned_supported)
4002 we generate the following code:
4003 p = initial_addr;
4004 indx = 0;
4005 loop {
4006 p = p + indx * vectype_size;
4007 vec_dest = *(p);
4008 indx = indx + 1;
4009 }
4010
4011 Otherwise, the data reference is potentially unaligned on a target that
4012 does not support unaligned accesses (dr_explicit_realign_optimized) -
4013 then generate the following code, in which the data in each iteration is
4014 obtained by two vector loads, one from the previous iteration, and one
4015 from the current iteration:
4016 p1 = initial_addr;
4017 msq_init = *(floor(p1))
4018 p2 = initial_addr + VS - 1;
4019 realignment_token = call target_builtin;
4020 indx = 0;
4021 loop {
4022 p2 = p2 + indx * vectype_size
4023 lsq = *(floor(p2))
4024 vec_dest = realign_load (msq, lsq, realignment_token)
4025 indx = indx + 1;
4026 msq = lsq;
4027 } */
4028
4029 /* If the misalignment remains the same throughout the execution of the
4030 loop, we can create the init_addr and permutation mask at the loop
4031 preheader. Otherwise, it needs to be created inside the loop.
4032 This can only occur when vectorizing memory accesses in the inner-loop
4033 nested within an outer-loop that is being vectorized. */
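/* For instance (illustrative sketch, assumed example), when vectorizing
   the outer loop of

     for (i = 0; i < n; i++)
       for (j = 0; j < m; j++)
         ... = a[i][j];

   the misalignment of the inner accesses changes with the outer
   induction variable whenever the step of the data reference (the row
   size) is not a multiple of the vector size, which is the condition
   tested below, so the realignment data must be computed inside the
   loop.  */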
4034
4035 if (loop && nested_in_vect_loop_p (loop, stmt)
4036 && (TREE_INT_CST_LOW (DR_STEP (dr))
4037 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
4038 {
4039 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
4040 compute_in_loop = true;
4041 }
4042
4043 if ((alignment_support_scheme == dr_explicit_realign_optimized
4044 || alignment_support_scheme == dr_explicit_realign)
4045 && !compute_in_loop)
4046 {
4047 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
4048 alignment_support_scheme, NULL_TREE,
4049 &at_loop);
4050 if (alignment_support_scheme == dr_explicit_realign_optimized)
4051 {
4052 phi = SSA_NAME_DEF_STMT (msq);
4053 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4054 }
4055 }
4056 else
4057 at_loop = loop;
4058
4059 if (negative)
4060 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
4061
4062 prev_stmt_info = NULL;
4063 for (j = 0; j < ncopies; j++)
4064 {
4065 /* 1. Create the vector pointer update chain. */
4066 if (j == 0)
4067 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
4068 at_loop, offset,
4069 &dummy, &ptr_incr, false,
4070 &inv_p);
4071 else
4072 dataref_ptr =
4073 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
4074
4075 for (i = 0; i < vec_num; i++)
4076 {
4077 if (i > 0)
4078 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
4079 NULL_TREE);
4080
4081 /* 2. Create the vector-load in the loop. */
4082 switch (alignment_support_scheme)
4083 {
4084 case dr_aligned:
4085 case dr_unaligned_supported:
4086 {
4087 struct ptr_info_def *pi;
4088 data_ref
4089 = build2 (MEM_REF, vectype, dataref_ptr,
4090 build_int_cst (reference_alias_ptr_type
4091 (DR_REF (first_dr)), 0));
4092 pi = get_ptr_info (dataref_ptr);
4093 pi->align = TYPE_ALIGN_UNIT (vectype);
4094 if (alignment_support_scheme == dr_aligned)
4095 {
4096 gcc_assert (aligned_access_p (first_dr));
4097 pi->misalign = 0;
4098 }
4099 else if (DR_MISALIGNMENT (first_dr) == -1)
4100 {
4101 TREE_TYPE (data_ref)
4102 = build_aligned_type (TREE_TYPE (data_ref),
4103 TYPE_ALIGN (TREE_TYPE (vectype)));
4104 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
4105 pi->misalign = 0;
4106 }
4107 else
4108 {
4109 TREE_TYPE (data_ref)
4110 = build_aligned_type (TREE_TYPE (data_ref),
4111 TYPE_ALIGN (TREE_TYPE (vectype)));
4112 pi->misalign = DR_MISALIGNMENT (first_dr);
4113 }
4114 break;
4115 }
4116 case dr_explicit_realign:
4117 {
4118 tree ptr, bump;
4119 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4120
4121 if (compute_in_loop)
4122 msq = vect_setup_realignment (first_stmt, gsi,
4123 &realignment_token,
4124 dr_explicit_realign,
4125 dataref_ptr, NULL);
4126
4127 new_stmt = gimple_build_assign_with_ops
4128 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4129 build_int_cst
4130 (TREE_TYPE (dataref_ptr),
4131 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4132 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4133 gimple_assign_set_lhs (new_stmt, ptr);
4134 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4135 data_ref
4136 = build2 (MEM_REF, vectype, ptr,
4137 build_int_cst (reference_alias_ptr_type
4138 (DR_REF (first_dr)), 0));
4139 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4140 new_stmt = gimple_build_assign (vec_dest, data_ref);
4141 new_temp = make_ssa_name (vec_dest, new_stmt);
4142 gimple_assign_set_lhs (new_stmt, new_temp);
4143 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
4144 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
4145 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4146 msq = new_temp;
4147
4148 bump = size_binop (MULT_EXPR, vs_minus_1,
4149 TYPE_SIZE_UNIT (scalar_type));
4150 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
4151 new_stmt = gimple_build_assign_with_ops
4152 (BIT_AND_EXPR, NULL_TREE, ptr,
4153 build_int_cst
4154 (TREE_TYPE (ptr),
4155 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4156 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4157 gimple_assign_set_lhs (new_stmt, ptr);
4158 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4159 data_ref
4160 = build2 (MEM_REF, vectype, ptr,
4161 build_int_cst (reference_alias_ptr_type
4162 (DR_REF (first_dr)), 0));
4163 break;
4164 }
4165 case dr_explicit_realign_optimized:
4166 new_stmt = gimple_build_assign_with_ops
4167 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4168 build_int_cst
4169 (TREE_TYPE (dataref_ptr),
4170 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4171 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4172 gimple_assign_set_lhs (new_stmt, new_temp);
4173 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4174 data_ref
4175 = build2 (MEM_REF, vectype, new_temp,
4176 build_int_cst (reference_alias_ptr_type
4177 (DR_REF (first_dr)), 0));
4178 break;
4179 default:
4180 gcc_unreachable ();
4181 }
4182 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4183 new_stmt = gimple_build_assign (vec_dest, data_ref);
4184 new_temp = make_ssa_name (vec_dest, new_stmt);
4185 gimple_assign_set_lhs (new_stmt, new_temp);
4186 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4187 mark_symbols_for_renaming (new_stmt);
4188
4189 /* 3. Handle explicit realignment if necessary/supported. Create in
4190 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
4191 if (alignment_support_scheme == dr_explicit_realign_optimized
4192 || alignment_support_scheme == dr_explicit_realign)
4193 {
4194 tree tmp;
4195
4196 lsq = gimple_assign_lhs (new_stmt);
4197 if (!realignment_token)
4198 realignment_token = dataref_ptr;
4199 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4200 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
4201 realignment_token);
4202 new_stmt = gimple_build_assign (vec_dest, tmp);
4203 new_temp = make_ssa_name (vec_dest, new_stmt);
4204 gimple_assign_set_lhs (new_stmt, new_temp);
4205 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4206
4207 if (alignment_support_scheme == dr_explicit_realign_optimized)
4208 {
4209 gcc_assert (phi);
4210 if (i == vec_num - 1 && j == ncopies - 1)
4211 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
4212 UNKNOWN_LOCATION);
4213 msq = lsq;
4214 }
4215 }
4216
4217 /* 4. Handle invariant-load. */
4218 if (inv_p && !bb_vinfo)
4219 {
4220 gcc_assert (!strided_load);
4221 gcc_assert (nested_in_vect_loop_p (loop, stmt));
4222 if (j == 0)
4223 {
4224 int k;
4225 tree t = NULL_TREE;
4226 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
4227
4228 /* CHECKME: bitpos depends on endianness? */
4229 bitpos = bitsize_zero_node;
4230 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
4231 bitsize, bitpos);
4232 vec_dest =
4233 vect_create_destination_var (scalar_dest, NULL_TREE);
4234 new_stmt = gimple_build_assign (vec_dest, vec_inv);
4235 new_temp = make_ssa_name (vec_dest, new_stmt);
4236 gimple_assign_set_lhs (new_stmt, new_temp);
4237 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4238
4239 for (k = nunits - 1; k >= 0; --k)
4240 t = tree_cons (NULL_TREE, new_temp, t);
4241 /* FIXME: use build_constructor directly. */
4242 vec_inv = build_constructor_from_list (vectype, t);
4243 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
4244 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4245 }
4246 else
4247 gcc_unreachable (); /* FORNOW. */
4248 }
4249
4250 if (negative)
4251 {
4252 new_temp = reverse_vec_elements (new_temp, stmt, gsi);
4253 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4254 }
4255
4256 /* Collect vector loads and later create their permutation in
4257 vect_transform_strided_load (). */
4258 if (strided_load || slp_perm)
4259 VEC_quick_push (tree, dr_chain, new_temp);
4260
4261 /* Store vector loads in the corresponding SLP_NODE. */
4262 if (slp && !slp_perm)
4263 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4264 }
4265
4266 if (slp && !slp_perm)
4267 continue;
4268
4269 if (slp_perm)
4270 {
4271 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
4272 slp_node_instance, false))
4273 {
4274 VEC_free (tree, heap, dr_chain);
4275 return false;
4276 }
4277 }
4278 else
4279 {
4280 if (strided_load)
4281 {
4282 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
4283 return false;
4284
4285 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4286 VEC_free (tree, heap, dr_chain);
4287 dr_chain = VEC_alloc (tree, heap, group_size);
4288 }
4289 else
4290 {
4291 if (j == 0)
4292 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4293 else
4294 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4295 prev_stmt_info = vinfo_for_stmt (new_stmt);
4296 }
4297 }
4298 }
4299
4300 if (dr_chain)
4301 VEC_free (tree, heap, dr_chain);
4302
4303 return true;
4304 }
4305
4306 /* Function vect_is_simple_cond.
4307
4308 Input:
4309 LOOP - the loop that is being vectorized.
4310 COND - Condition that is checked for simple use.
4311
4312 Returns whether a COND can be vectorized. Checks whether
4313 condition operands are supportable using vect_is_simple_use. */
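/* For example (illustrative, assumed SSA names), a condition such as

     a_1 < 5   or   x_2 != y_3

   where a_1, x_2 and y_3 are SSA names with simple defs (as checked by
   vect_is_simple_use) and 5 is an integer constant, is accepted; a
   condition whose operand has an unsupported definition is rejected.  */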
4314
4315 static bool
4316 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
4317 {
4318 tree lhs, rhs;
4319 tree def;
4320 enum vect_def_type dt;
4321
4322 if (!COMPARISON_CLASS_P (cond))
4323 return false;
4324
4325 lhs = TREE_OPERAND (cond, 0);
4326 rhs = TREE_OPERAND (cond, 1);
4327
4328 if (TREE_CODE (lhs) == SSA_NAME)
4329 {
4330 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
4331 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
4332 &dt))
4333 return false;
4334 }
4335 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
4336 && TREE_CODE (lhs) != FIXED_CST)
4337 return false;
4338
4339 if (TREE_CODE (rhs) == SSA_NAME)
4340 {
4341 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
4342 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
4343 &dt))
4344 return false;
4345 }
4346 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
4347 && TREE_CODE (rhs) != FIXED_CST)
4348 return false;
4349
4350 return true;
4351 }
4352
4353 /* vectorizable_condition.
4354
4355 Check if STMT is a conditional modify expression that can be vectorized.
4356 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4357 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4358 at GSI.
4359
4360 When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
4361 to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
4362 else clause if it is 2).
4363
4364 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
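/* As an illustrative sketch (assumed SSA names), a scalar statement

     x_5 = a_1 < b_2 ? c_3 : d_4;

   is vectorized into

     vx_5 = VEC_COND_EXPR <va_1 < vb_2, vc_3, vd_4>;

   which is the VEC_COND_EXPR built in the transformation code below.  */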
4365
4366 bool
4367 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4368 gimple *vec_stmt, tree reduc_def, int reduc_index)
4369 {
4370 tree scalar_dest = NULL_TREE;
4371 tree vec_dest = NULL_TREE;
4372 tree op = NULL_TREE;
4373 tree cond_expr, then_clause, else_clause;
4374 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4375 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4376 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
4377 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
4378 tree vec_compare, vec_cond_expr;
4379 tree new_temp;
4380 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4381 enum machine_mode vec_mode;
4382 tree def;
4383 enum vect_def_type dt, dts[4];
4384 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4385 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4386 enum tree_code code;
4387 stmt_vec_info prev_stmt_info = NULL;
4388 int j;
4389
4390 /* FORNOW: unsupported in basic block SLP. */
4391 gcc_assert (loop_vinfo);
4392
4393 gcc_assert (ncopies >= 1);
4394 if (reduc_index && ncopies > 1)
4395 return false; /* FORNOW */
4396
4397 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4398 return false;
4399
4400 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4401 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4402 && reduc_def))
4403 return false;
4404
4405 /* FORNOW: SLP not supported. */
4406 if (STMT_SLP_TYPE (stmt_info))
4407 return false;
4408
4409 /* FORNOW: not yet supported. */
4410 if (STMT_VINFO_LIVE_P (stmt_info))
4411 {
4412 if (vect_print_dump_info (REPORT_DETAILS))
4413 fprintf (vect_dump, "value used after loop.");
4414 return false;
4415 }
4416
4417 /* Is vectorizable conditional operation? */
4418 if (!is_gimple_assign (stmt))
4419 return false;
4420
4421 code = gimple_assign_rhs_code (stmt);
4422
4423 if (code != COND_EXPR)
4424 return false;
4425
4426 gcc_assert (gimple_assign_single_p (stmt));
4427 op = gimple_assign_rhs1 (stmt);
4428 cond_expr = TREE_OPERAND (op, 0);
4429 then_clause = TREE_OPERAND (op, 1);
4430 else_clause = TREE_OPERAND (op, 2);
4431
4432 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4433 return false;
4434
4435 /* We do not handle two different vector types for the condition
4436 and the values. */
4437 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4438 TREE_TYPE (vectype)))
4439 return false;
4440
4441 if (TREE_CODE (then_clause) == SSA_NAME)
4442 {
4443 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
4444 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
4445 &then_def_stmt, &def, &dt))
4446 return false;
4447 }
4448 else if (TREE_CODE (then_clause) != INTEGER_CST
4449 && TREE_CODE (then_clause) != REAL_CST
4450 && TREE_CODE (then_clause) != FIXED_CST)
4451 return false;
4452
4453 if (TREE_CODE (else_clause) == SSA_NAME)
4454 {
4455 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
4456 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
4457 &else_def_stmt, &def, &dt))
4458 return false;
4459 }
4460 else if (TREE_CODE (else_clause) != INTEGER_CST
4461 && TREE_CODE (else_clause) != REAL_CST
4462 && TREE_CODE (else_clause) != FIXED_CST)
4463 return false;
4464
4465
4466 vec_mode = TYPE_MODE (vectype);
4467
4468 if (!vec_stmt)
4469 {
4470 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
4471 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
4472 }
4473
4474 /* Transform */
4475
4476 /* Handle def. */
4477 scalar_dest = gimple_assign_lhs (stmt);
4478 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4479
4480 /* Handle cond expr. */
4481 for (j = 0; j < ncopies; j++)
4482 {
4483 gimple new_stmt;
4484 if (j == 0)
4485 {
4486 gimple gtemp;
4487 vec_cond_lhs =
4488 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
4489 stmt, NULL);
4490 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
4491 NULL, &gtemp, &def, &dts[0]);
4492 vec_cond_rhs =
4493 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
4494 stmt, NULL);
4495 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
4496 NULL, &gtemp, &def, &dts[1]);
4497 if (reduc_index == 1)
4498 vec_then_clause = reduc_def;
4499 else
4500 {
4501 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
4502 stmt, NULL);
4503 vect_is_simple_use (then_clause, loop_vinfo,
4504 NULL, &gtemp, &def, &dts[2]);
4505 }
4506 if (reduc_index == 2)
4507 vec_else_clause = reduc_def;
4508 else
4509 {
4510 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
4511 stmt, NULL);
4512 vect_is_simple_use (else_clause, loop_vinfo,
4513 NULL, &gtemp, &def, &dts[3]);
4514 }
4515 }
4516 else
4517 {
4518 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
4519 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
4520 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
4521 vec_then_clause);
4522 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
4523 vec_else_clause);
4524 }
4525
4526 /* Arguments are ready. Create the new vector stmt. */
4527 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4528 vec_cond_lhs, vec_cond_rhs);
4529 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4530 vec_compare, vec_then_clause, vec_else_clause);
4531
4532 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4533 new_temp = make_ssa_name (vec_dest, new_stmt);
4534 gimple_assign_set_lhs (new_stmt, new_temp);
4535 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4536 if (j == 0)
4537 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4538 else
4539 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4540
4541 prev_stmt_info = vinfo_for_stmt (new_stmt);
4542 }
4543
4544 return true;
4545 }
4546
4547
4548 /* Make sure the statement is vectorizable. */
4549
4550 bool
4551 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4552 {
4553 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4554 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4555 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4556 bool ok;
4557 tree scalar_type, vectype;
4558
4559 if (vect_print_dump_info (REPORT_DETAILS))
4560 {
4561 fprintf (vect_dump, "==> examining statement: ");
4562 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4563 }
4564
4565 if (gimple_has_volatile_ops (stmt))
4566 {
4567 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4568 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4569
4570 return false;
4571 }
4572
4573 /* Skip stmts that do not need to be vectorized. In loops this is expected
4574 to include:
4575 - the COND_EXPR which is the loop exit condition
4576 - any LABEL_EXPRs in the loop
4577 - computations that are used only for array indexing or loop control.
4578 In basic blocks we only analyze statements that are a part of some SLP
4579 instance, therefore, all the statements are relevant. */
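/* For instance (illustrative sketch, assumed example), in

     for (i = 0; i < n; i++)
       a[i] = b[i] + 1;

   the increment of 'i' and the loop-exit compare are left to the scalar
   loop-control code and reported as irrelevant below, while the load,
   the add and the store are the statements that get vectorized.  */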
4580
4581 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4582 && !STMT_VINFO_LIVE_P (stmt_info))
4583 {
4584 if (vect_print_dump_info (REPORT_DETAILS))
4585 fprintf (vect_dump, "irrelevant.");
4586
4587 return true;
4588 }
4589
4590 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4591 {
4592 case vect_internal_def:
4593 break;
4594
4595 case vect_reduction_def:
4596 case vect_nested_cycle:
4597 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4598 || relevance == vect_used_in_outer_by_reduction
4599 || relevance == vect_unused_in_scope));
4600 break;
4601
4602 case vect_induction_def:
4603 case vect_constant_def:
4604 case vect_external_def:
4605 case vect_unknown_def_type:
4606 default:
4607 gcc_unreachable ();
4608 }
4609
4610 if (bb_vinfo)
4611 {
4612 gcc_assert (PURE_SLP_STMT (stmt_info));
4613
4614 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4615 if (vect_print_dump_info (REPORT_DETAILS))
4616 {
4617 fprintf (vect_dump, "get vectype for scalar type: ");
4618 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4619 }
4620
4621 vectype = get_vectype_for_scalar_type (scalar_type);
4622 if (!vectype)
4623 {
4624 if (vect_print_dump_info (REPORT_DETAILS))
4625 {
4626 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4627 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4628 }
4629 return false;
4630 }
4631
4632 if (vect_print_dump_info (REPORT_DETAILS))
4633 {
4634 fprintf (vect_dump, "vectype: ");
4635 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4636 }
4637
4638 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4639 }
4640
4641 if (STMT_VINFO_RELEVANT_P (stmt_info))
4642 {
4643 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4644 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4645 *need_to_vectorize = true;
4646 }
4647
4648 ok = true;
4649 if (!bb_vinfo
4650 && (STMT_VINFO_RELEVANT_P (stmt_info)
4651 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4652 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4653 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4654 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4655 || vectorizable_shift (stmt, NULL, NULL, NULL)
4656 || vectorizable_operation (stmt, NULL, NULL, NULL)
4657 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4658 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4659 || vectorizable_call (stmt, NULL, NULL)
4660 || vectorizable_store (stmt, NULL, NULL, NULL)
4661 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4662 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4663 else
4664 {
4665 if (bb_vinfo)
4666 ok = (vectorizable_shift (stmt, NULL, NULL, NULL)
4667 || vectorizable_operation (stmt, NULL, NULL, node)
4668 || vectorizable_assignment (stmt, NULL, NULL, node)
4669 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4670 || vectorizable_store (stmt, NULL, NULL, node));
4671 }
4672
4673 if (!ok)
4674 {
4675 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4676 {
4677 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4678 fprintf (vect_dump, "supported: ");
4679 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4680 }
4681
4682 return false;
4683 }
4684
4685 if (bb_vinfo)
4686 return true;
4687
4688 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
4689 need extra handling, except for vectorizable reductions. */
4690 if (STMT_VINFO_LIVE_P (stmt_info)
4691 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4692 ok = vectorizable_live_operation (stmt, NULL, NULL);
4693
4694 if (!ok)
4695 {
4696 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4697 {
4698 fprintf (vect_dump, "not vectorized: live stmt not ");
4699 fprintf (vect_dump, "supported: ");
4700 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4701 }
4702
4703 return false;
4704 }
4705
4706 if (!PURE_SLP_STMT (stmt_info))
4707 {
4708 /* Groups of strided accesses whose size is not a power of 2 are not
4709 vectorizable yet using loop-vectorization. Therefore, if this stmt
4710 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4711 loop-based vectorized), the loop cannot be vectorized. */
4712 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4713 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4714 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4715 {
4716 if (vect_print_dump_info (REPORT_DETAILS))
4717 {
4718 fprintf (vect_dump, "not vectorized: the size of group "
4719 "of strided accesses is not a power of 2");
4720 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4721 }
4722
4723 return false;
4724 }
4725 }
4726
4727 return true;
4728 }
4729
4730
4731 /* Function vect_transform_stmt.
4732
4733 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4734
4735 bool
4736 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4737 bool *strided_store, slp_tree slp_node,
4738 slp_instance slp_node_instance)
4739 {
4740 bool is_store = false;
4741 gimple vec_stmt = NULL;
4742 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4743 gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
4744 bool done;
4745
4746 switch (STMT_VINFO_TYPE (stmt_info))
4747 {
4748 case type_demotion_vec_info_type:
4749 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4750 gcc_assert (done);
4751 break;
4752
4753 case type_promotion_vec_info_type:
4754 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4755 gcc_assert (done);
4756 break;
4757
4758 case type_conversion_vec_info_type:
4759 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4760 gcc_assert (done);
4761 break;
4762
4763 case induc_vec_info_type:
4764 gcc_assert (!slp_node);
4765 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4766 gcc_assert (done);
4767 break;
4768
4769 case shift_vec_info_type:
4770 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
4771 gcc_assert (done);
4772 break;
4773
4774 case op_vec_info_type:
4775 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4776 gcc_assert (done);
4777 break;
4778
4779 case assignment_vec_info_type:
4780 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4781 gcc_assert (done);
4782 break;
4783
4784 case load_vec_info_type:
4785 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4786 slp_node_instance);
4787 gcc_assert (done);
4788 break;
4789
4790 case store_vec_info_type:
4791 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4792 gcc_assert (done);
4793 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4794 {
4795 /* In case of interleaving, the whole chain is vectorized when the
4796 last store in the chain is reached. Store stmts before the last
4797 one are skipped, and their vec_stmt_info shouldn't be freed
4798 meanwhile. */
4799 *strided_store = true;
4800 if (STMT_VINFO_VEC_STMT (stmt_info))
4801 is_store = true;
4802 }
4803 else
4804 is_store = true;
4805 break;
4806
4807 case condition_vec_info_type:
4808 gcc_assert (!slp_node);
4809 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4810 gcc_assert (done);
4811 break;
4812
4813 case call_vec_info_type:
4814 gcc_assert (!slp_node);
4815 done = vectorizable_call (stmt, gsi, &vec_stmt);
4816 stmt = gsi_stmt (*gsi);
4817 break;
4818
4819 case reduc_vec_info_type:
4820 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
4821 gcc_assert (done);
4822 break;
4823
4824 default:
4825 if (!STMT_VINFO_LIVE_P (stmt_info))
4826 {
4827 if (vect_print_dump_info (REPORT_DETAILS))
4828 fprintf (vect_dump, "stmt not supported.");
4829 gcc_unreachable ();
4830 }
4831 }
4832
4833 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4834 is being vectorized, but outside the immediately enclosing loop. */
4835 if (vec_stmt
4836 && STMT_VINFO_LOOP_VINFO (stmt_info)
4837 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4838 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4839 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4840 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4841 || STMT_VINFO_RELEVANT (stmt_info) ==
4842 vect_used_in_outer_by_reduction))
4843 {
4844 struct loop *innerloop = LOOP_VINFO_LOOP (
4845 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4846 imm_use_iterator imm_iter;
4847 use_operand_p use_p;
4848 tree scalar_dest;
4849 gimple exit_phi;
4850
4851 if (vect_print_dump_info (REPORT_DETAILS))
4852 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4853
4854 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4855 (to be used when vectorizing outer-loop stmts that use the DEF of
4856 STMT). */
4857 if (gimple_code (stmt) == GIMPLE_PHI)
4858 scalar_dest = PHI_RESULT (stmt);
4859 else
4860 scalar_dest = gimple_assign_lhs (stmt);
4861
4862 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4863 {
4864 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4865 {
4866 exit_phi = USE_STMT (use_p);
4867 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4868 }
4869 }
4870 }
4871
4872 /* Handle stmts whose DEF is used outside the loop-nest that is
4873 being vectorized. */
4874 if (STMT_VINFO_LIVE_P (stmt_info)
4875 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4876 {
4877 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4878 gcc_assert (done);
4879 }
4880
4881 if (vec_stmt)
4882 {
4883 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4884 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4885 if (orig_stmt_in_pattern)
4886 {
4887 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4888 /* STMT was inserted by the vectorizer to replace a computation idiom.
4889 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4890 computed this idiom. We need to record a pointer to VEC_STMT in
4891 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4892 documentation of vect_pattern_recog. */
4893 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4894 {
4895 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
4896 == orig_scalar_stmt);
4897 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4898 }
4899 }
4900 }
4901
4902 return is_store;
4903 }
4904
4905
4906 /* Remove a group of stores (for SLP or interleaving), free their
4907 stmt_vec_info. */
4908
4909 void
4910 vect_remove_stores (gimple first_stmt)
4911 {
4912 gimple next = first_stmt;
4913 gimple tmp;
4914 gimple_stmt_iterator next_si;
4915
4916 while (next)
4917 {
4918 /* Free the attached stmt_vec_info and remove the stmt. */
4919 next_si = gsi_for_stmt (next);
4920 gsi_remove (&next_si, true);
4921 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4922 free_stmt_vec_info (next);
4923 next = tmp;
4924 }
4925 }
4926
4927
4928 /* Function new_stmt_vec_info.
4929
4930 Create and initialize a new stmt_vec_info struct for STMT. */
4931
4932 stmt_vec_info
4933 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4934 bb_vec_info bb_vinfo)
4935 {
4936 stmt_vec_info res;
4937 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4938
4939 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4940 STMT_VINFO_STMT (res) = stmt;
4941 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4942 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4943 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4944 STMT_VINFO_LIVE_P (res) = false;
4945 STMT_VINFO_VECTYPE (res) = NULL;
4946 STMT_VINFO_VEC_STMT (res) = NULL;
4947 STMT_VINFO_VECTORIZABLE (res) = true;
4948 STMT_VINFO_IN_PATTERN_P (res) = false;
4949 STMT_VINFO_RELATED_STMT (res) = NULL;
4950 STMT_VINFO_DATA_REF (res) = NULL;
4951
4952 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4953 STMT_VINFO_DR_OFFSET (res) = NULL;
4954 STMT_VINFO_DR_INIT (res) = NULL;
4955 STMT_VINFO_DR_STEP (res) = NULL;
4956 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4957
4958 if (gimple_code (stmt) == GIMPLE_PHI
4959 && is_loop_header_bb_p (gimple_bb (stmt)))
4960 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4961 else
4962 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4963
4964 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4965 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4966 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4967 STMT_SLP_TYPE (res) = loop_vect;
4968 DR_GROUP_FIRST_DR (res) = NULL;
4969 DR_GROUP_NEXT_DR (res) = NULL;
4970 DR_GROUP_SIZE (res) = 0;
4971 DR_GROUP_STORE_COUNT (res) = 0;
4972 DR_GROUP_GAP (res) = 0;
4973 DR_GROUP_SAME_DR_STMT (res) = NULL;
4974 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4975
4976 return res;
4977 }
4978
4979
4980 /* Create the vector that holds stmt_vec_infos. */
4981
4982 void
4983 init_stmt_vec_info_vec (void)
4984 {
4985 gcc_assert (!stmt_vec_info_vec);
4986 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4987 }
4988
4989
4990 /* Free the vector of stmt_vec_infos. */
4991
4992 void
4993 free_stmt_vec_info_vec (void)
4994 {
4995 gcc_assert (stmt_vec_info_vec);
4996 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4997 }
4998
4999
5000 /* Free stmt vectorization related info. */
5001
5002 void
5003 free_stmt_vec_info (gimple stmt)
5004 {
5005 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5006
5007 if (!stmt_info)
5008 return;
5009
5010 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
5011 set_vinfo_for_stmt (stmt, NULL);
5012 free (stmt_info);
5013 }
5014
5015
5016 /* Function get_vectype_for_scalar_type_and_size.
5017
5018 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
5019 by the target. */
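/* For example (illustrative; the exact mode depends on the target), with
   SCALAR_TYPE 'int' (4 bytes) and SIZE 16 this yields a 4-unit vector
   type such as V4SI; with SIZE 0 the number of units follows the SIMD
   mode the target prefers for SImode.  */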
5020
5021 static tree
5022 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
5023 {
5024 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
5025 enum machine_mode simd_mode;
5026 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
5027 int nunits;
5028 tree vectype;
5029
5030 if (nbytes == 0)
5031 return NULL_TREE;
5032
5033 /* We can't build a vector type of elements with alignment bigger than
5034 their size. */
5035 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
5036 return NULL_TREE;
5037
5038 /* If we'd build a vector type of elements whose mode precision doesn't
5039 match their type's precision we'll get mismatched types on vector
5040 extracts via BIT_FIELD_REFs. This effectively means we disable
5041 vectorization of bool and/or enum types in some languages. */
5042 if (INTEGRAL_TYPE_P (scalar_type)
5043 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
5044 return NULL_TREE;
5045
5046 if (GET_MODE_CLASS (inner_mode) != MODE_INT
5047 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
5048 return NULL_TREE;
5049
5050 /* If no size was supplied use the mode the target prefers. Otherwise
5051 look up a vector mode of the specified size. */
5052 if (size == 0)
5053 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
5054 else
5055 simd_mode = mode_for_vector (inner_mode, size / nbytes);
5056 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
5057 if (nunits <= 1)
5058 return NULL_TREE;
5059
5060 vectype = build_vector_type (scalar_type, nunits);
5061 if (vect_print_dump_info (REPORT_DETAILS))
5062 {
5063 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
5064 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
5065 }
5066
5067 if (!vectype)
5068 return NULL_TREE;
5069
5070 if (vect_print_dump_info (REPORT_DETAILS))
5071 {
5072 fprintf (vect_dump, "vectype: ");
5073 print_generic_expr (vect_dump, vectype, TDF_SLIM);
5074 }
5075
5076 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
5077 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
5078 {
5079 if (vect_print_dump_info (REPORT_DETAILS))
5080 fprintf (vect_dump, "mode not supported by target.");
5081 return NULL_TREE;
5082 }
5083
5084 return vectype;
5085 }
5086
5087 unsigned int current_vector_size;
5088
5089 /* Function get_vectype_for_scalar_type.
5090
5091 Returns the vector type corresponding to SCALAR_TYPE as supported
5092 by the target. */
5093
5094 tree
5095 get_vectype_for_scalar_type (tree scalar_type)
5096 {
5097 tree vectype;
5098 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
5099 current_vector_size);
5100 if (vectype
5101 && current_vector_size == 0)
5102 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
5103 return vectype;
5104 }
5105
5106 /* Function get_same_sized_vectype
5107
5108 Returns a vector type corresponding to SCALAR_TYPE of size
5109 VECTOR_TYPE if supported by the target. */
5110
5111 tree
5112 get_same_sized_vectype (tree scalar_type, tree vector_type)
5113 {
5114 return get_vectype_for_scalar_type_and_size
5115 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
5116 }
5117
5118 /* Function vect_is_simple_use.
5119
5120 Input:
5121 LOOP_VINFO - the vect info of the loop that is being vectorized.
5122 BB_VINFO - the vect info of the basic block that is being vectorized.
5123 OPERAND - operand of a stmt in the loop or bb.
5124 DEF - the defining stmt in case OPERAND is an SSA_NAME.
5125
5126 Returns whether a stmt with OPERAND can be vectorized.
5127 For loops, supportable operands are constants, loop invariants, and operands
5128 that are defined by the current iteration of the loop. Unsupportable
5129 operands are those that are defined by a previous iteration of the loop (as
5130 is the case in reduction/induction computations).
5131 For basic blocks, supportable operands are constants and bb invariants.
5132 For now, operands defined outside the basic block are not supported. */
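/* For instance (illustrative sketch, assumed names), in

     for (i = 0; i < n; i++)
       a[i] = b[i] * 4 + x_7;

   the constant 4 is classified as vect_constant_def, the loop-invariant
   x_7 defined before the loop as vect_external_def, and the result of
   the multiplication, defined inside the loop, as vect_internal_def.  */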
5133
5134 bool
5135 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
5136 bb_vec_info bb_vinfo, gimple *def_stmt,
5137 tree *def, enum vect_def_type *dt)
5138 {
5139 basic_block bb;
5140 stmt_vec_info stmt_vinfo;
5141 struct loop *loop = NULL;
5142
5143 if (loop_vinfo)
5144 loop = LOOP_VINFO_LOOP (loop_vinfo);
5145
5146 *def_stmt = NULL;
5147 *def = NULL_TREE;
5148
5149 if (vect_print_dump_info (REPORT_DETAILS))
5150 {
5151 fprintf (vect_dump, "vect_is_simple_use: operand ");
5152 print_generic_expr (vect_dump, operand, TDF_SLIM);
5153 }
5154
5155 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
5156 {
5157 *dt = vect_constant_def;
5158 return true;
5159 }
5160
5161 if (is_gimple_min_invariant (operand))
5162 {
5163 *def = operand;
5164 *dt = vect_external_def;
5165 return true;
5166 }
5167
5168 if (TREE_CODE (operand) == PAREN_EXPR)
5169 {
5170 if (vect_print_dump_info (REPORT_DETAILS))
5171 fprintf (vect_dump, "non-associatable copy.");
5172 operand = TREE_OPERAND (operand, 0);
5173 }
5174
5175 if (TREE_CODE (operand) != SSA_NAME)
5176 {
5177 if (vect_print_dump_info (REPORT_DETAILS))
5178 fprintf (vect_dump, "not ssa-name.");
5179 return false;
5180 }
5181
5182 *def_stmt = SSA_NAME_DEF_STMT (operand);
5183 if (*def_stmt == NULL)
5184 {
5185 if (vect_print_dump_info (REPORT_DETAILS))
5186 fprintf (vect_dump, "no def_stmt.");
5187 return false;
5188 }
5189
5190 if (vect_print_dump_info (REPORT_DETAILS))
5191 {
5192 fprintf (vect_dump, "def_stmt: ");
5193 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
5194 }
5195
5196 /* Empty stmt is expected only in case of a function argument.
5197 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
5198 if (gimple_nop_p (*def_stmt))
5199 {
5200 *def = operand;
5201 *dt = vect_external_def;
5202 return true;
5203 }
5204
5205 bb = gimple_bb (*def_stmt);
5206
5207 if ((loop && !flow_bb_inside_loop_p (loop, bb))
5208 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
5209 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
5210 *dt = vect_external_def;
5211 else
5212 {
5213 stmt_vinfo = vinfo_for_stmt (*def_stmt);
5214 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
5215 }
5216
5217 if (*dt == vect_unknown_def_type)
5218 {
5219 if (vect_print_dump_info (REPORT_DETAILS))
5220 fprintf (vect_dump, "Unsupported pattern.");
5221 return false;
5222 }
5223
5224 if (vect_print_dump_info (REPORT_DETAILS))
5225 fprintf (vect_dump, "type of def: %d.",*dt);
5226
5227 switch (gimple_code (*def_stmt))
5228 {
5229 case GIMPLE_PHI:
5230 *def = gimple_phi_result (*def_stmt);
5231 break;
5232
5233 case GIMPLE_ASSIGN:
5234 *def = gimple_assign_lhs (*def_stmt);
5235 break;
5236
5237 case GIMPLE_CALL:
5238 *def = gimple_call_lhs (*def_stmt);
5239 if (*def != NULL)
5240 break;
5241 /* FALLTHRU */
5242 default:
5243 if (vect_print_dump_info (REPORT_DETAILS))
5244 fprintf (vect_dump, "unsupported defining stmt: ");
5245 return false;
5246 }
5247
5248 return true;
5249 }
5250
5251 /* Function vect_is_simple_use_1.
5252
5253 Same as vect_is_simple_use but also determines the vector operand
5254 type of OPERAND and stores it to *VECTYPE. If the definition of
5255 OPERAND is vect_uninitialized_def, vect_constant_def or
5256 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
5257 is responsible to compute the best suited vector type for the
5258 scalar operand. */
5259
5260 bool
5261 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
5262 bb_vec_info bb_vinfo, gimple *def_stmt,
5263 tree *def, enum vect_def_type *dt, tree *vectype)
5264 {
5265 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
5266 return false;
5267
5268 /* Now get a vector type if the def is internal, otherwise supply
5269 NULL_TREE and leave it up to the caller to figure out a proper
5270 type for the use stmt. */
5271 if (*dt == vect_internal_def
5272 || *dt == vect_induction_def
5273 || *dt == vect_reduction_def
5274 || *dt == vect_double_reduction_def
5275 || *dt == vect_nested_cycle)
5276 {
5277 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
5278 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
5279 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
5280 *vectype = STMT_VINFO_VECTYPE (stmt_info);
5281 gcc_assert (*vectype != NULL_TREE);
5282 }
5283 else if (*dt == vect_uninitialized_def
5284 || *dt == vect_constant_def
5285 || *dt == vect_external_def)
5286 *vectype = NULL_TREE;
5287 else
5288 gcc_unreachable ();
5289
5290 return true;
5291 }
5292
5293
5294 /* Function supportable_widening_operation
5295
5296 Check whether an operation represented by the code CODE is a
5297 widening operation that is supported by the target platform in
5298 vector form (i.e., when operating on arguments of type VECTYPE_IN
5299 producing a result of type VECTYPE_OUT).
5300
5301 Widening operations we currently support are NOP (CONVERT), FLOAT
5302 and WIDEN_MULT. This function checks if these operations are supported
5303 by the target platform either directly (via vector tree-codes), or via
5304 target builtins.
5305
5306 Output:
5307 - CODE1 and CODE2 are codes of vector operations to be used when
5308 vectorizing the operation, if available.
5309 - DECL1 and DECL2 are decls of target builtin functions to be used
5310 when vectorizing the operation, if available. In this case,
5311 CODE1 and CODE2 are CALL_EXPR.
5312 - MULTI_STEP_CVT determines the number of required intermediate steps in
5313 case of a multi-step conversion (like char->short->int; in that case
5314 MULTI_STEP_CVT will be 1).
5315 - INTERM_TYPES contains the intermediate type required to perform the
5316 widening operation (short in the above example). */
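
/* An illustrative sketch of the outputs (not a statement about any particular
   target): a single-step char->short WIDEN_MULT_EXPR sets *CODE1 and *CODE2
   to VEC_WIDEN_MULT_LO_EXPR and VEC_WIDEN_MULT_HI_EXPR (which of the two is
   *CODE1 depends on BYTES_BIG_ENDIAN) and leaves MULTI_STEP_CVT untouched;
   a char->int conversion needs one intermediate short vector type, so
   MULTI_STEP_CVT becomes 1 and INTERM_TYPES records that intermediate
   vector type.  */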
5317
5318 bool
5319 supportable_widening_operation (enum tree_code code, gimple stmt,
5320 tree vectype_out, tree vectype_in,
5321 tree *decl1, tree *decl2,
5322 enum tree_code *code1, enum tree_code *code2,
5323 int *multi_step_cvt,
5324 VEC (tree, heap) **interm_types)
5325 {
5326 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5327 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
5328 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
5329 bool ordered_p;
5330 enum machine_mode vec_mode;
5331 enum insn_code icode1, icode2;
5332 optab optab1, optab2;
5333 tree vectype = vectype_in;
5334 tree wide_vectype = vectype_out;
5335 enum tree_code c1, c2;
5336
5337 /* The result of a vectorized widening operation usually requires two vectors
5338 (because the widened results do not fit in one vector).  The vector results
5339 are normally expected to be generated in the same order as in the original
5340 scalar computation, i.e. if 8 results are generated in each vector
5341 iteration, they are to be organized as follows:
5342 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
5343
5344 However, in the special case that the result of the widening operation is
5345 used in a reduction computation only, the order doesn't matter (because
5346 when vectorizing a reduction we change the order of the computation).
5347 Some targets can take advantage of this and generate more efficient code.
5348 For example, targets like Altivec, which support widen_mult using a
5349 sequence of {mult_even,mult_odd}, generate the following vectors:
5350 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
5351
5352 When vectorizing outer-loops, we execute the inner-loop sequentially
5353 (each vectorized inner-loop iteration contributes to VF outer-loop
5354 iterations in parallel).  We therefore don't allow changing the order
5355 of the computation in the inner-loop during outer-loop vectorization.  */
5356
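  /* As a concrete illustration (target-specific details vary): a
     {mult_even,mult_odd} pair multiplies elements 0,2,4,... and 1,3,5,...
     of the input vectors respectively, so the widened products come out
     interleaved rather than in the [low half][high half] order produced by
     VEC_WIDEN_MULT_LO/HI_EXPR, which is why that path is taken below only
     when the result feeds a reduction and the order does not matter.  */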
5357 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
5358 && !nested_in_vect_loop_p (vect_loop, stmt))
5359 ordered_p = false;
5360 else
5361 ordered_p = true;
5362
5363 if (!ordered_p
5364 && code == WIDEN_MULT_EXPR
5365 && targetm.vectorize.builtin_mul_widen_even
5366 && targetm.vectorize.builtin_mul_widen_even (vectype)
5367 && targetm.vectorize.builtin_mul_widen_odd
5368 && targetm.vectorize.builtin_mul_widen_odd (vectype))
5369 {
5370 if (vect_print_dump_info (REPORT_DETAILS))
5371 fprintf (vect_dump, "Unordered widening operation detected.");
5372
5373 *code1 = *code2 = CALL_EXPR;
5374 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
5375 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
5376 return true;
5377 }
5378
5379 switch (code)
5380 {
5381 case WIDEN_MULT_EXPR:
5382 if (BYTES_BIG_ENDIAN)
5383 {
5384 c1 = VEC_WIDEN_MULT_HI_EXPR;
5385 c2 = VEC_WIDEN_MULT_LO_EXPR;
5386 }
5387 else
5388 {
5389 c2 = VEC_WIDEN_MULT_HI_EXPR;
5390 c1 = VEC_WIDEN_MULT_LO_EXPR;
5391 }
5392 break;
5393
5394 CASE_CONVERT:
5395 if (BYTES_BIG_ENDIAN)
5396 {
5397 c1 = VEC_UNPACK_HI_EXPR;
5398 c2 = VEC_UNPACK_LO_EXPR;
5399 }
5400 else
5401 {
5402 c2 = VEC_UNPACK_HI_EXPR;
5403 c1 = VEC_UNPACK_LO_EXPR;
5404 }
5405 break;
5406
5407 case FLOAT_EXPR:
5408 if (BYTES_BIG_ENDIAN)
5409 {
5410 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
5411 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
5412 }
5413 else
5414 {
5415 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
5416 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
5417 }
5418 break;
5419
5420 case FIX_TRUNC_EXPR:
5421 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
5422 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
5423 computing the operation. */
5424 return false;
5425
5426 default:
5427 gcc_unreachable ();
5428 }
5429
5430 if (code == FIX_TRUNC_EXPR)
5431 {
5432 /* The signedness is determined from the output operand.  */
5433 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5434 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
5435 }
5436 else
5437 {
5438 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5439 optab2 = optab_for_tree_code (c2, vectype, optab_default);
5440 }
5441
5442 if (!optab1 || !optab2)
5443 return false;
5444
5445 vec_mode = TYPE_MODE (vectype);
5446 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
5447 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
5448 return false;
5449
5450 /* Check if it's a multi-step conversion that can be done using intermediate
5451 types. */
5452 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
5453 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
5454 {
5455 int i;
5456 tree prev_type = vectype, intermediate_type;
5457 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5458 optab optab3, optab4;
5459
5460 if (!CONVERT_EXPR_CODE_P (code))
5461 return false;
5462
5463 *code1 = c1;
5464 *code2 = c2;
5465
5466 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5467 intermediate steps in the promotion sequence.  We try at most
5468 MAX_INTERM_CVT_STEPS steps to get to WIDE_VECTYPE, and fail if we do
5469 not.  */
5470 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
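      /* E.g. (illustrative only): for a char->int conversion this loop is
         expected to succeed after one iteration, pushing the intermediate
         short vector type onto *INTERM_TYPES and bumping *MULTI_STEP_CVT
         to 1; if WIDE_VECTYPE is not reached within MAX_INTERM_CVT_STEPS
         iterations we give up and return false.  */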
5471 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5472 {
5473 intermediate_mode = insn_data[icode1].operand[0].mode;
5474 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5475 TYPE_UNSIGNED (prev_type));
5476 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
5477 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
5478
5479 if (!optab3 || !optab4
5480 || ((icode1 = optab_handler (optab1, prev_mode))
5481 == CODE_FOR_nothing)
5482 || insn_data[icode1].operand[0].mode != intermediate_mode
5483 || ((icode2 = optab_handler (optab2, prev_mode))
5484 == CODE_FOR_nothing)
5485 || insn_data[icode2].operand[0].mode != intermediate_mode
5486 || ((icode1 = optab_handler (optab3, intermediate_mode))
5487 == CODE_FOR_nothing)
5488 || ((icode2 = optab_handler (optab4, intermediate_mode))
5489 == CODE_FOR_nothing))
5490 return false;
5491
5492 VEC_quick_push (tree, *interm_types, intermediate_type);
5493 (*multi_step_cvt)++;
5494
5495 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
5496 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
5497 return true;
5498
5499 prev_type = intermediate_type;
5500 prev_mode = intermediate_mode;
5501 }
5502
5503 return false;
5504 }
5505
5506 *code1 = c1;
5507 *code2 = c2;
5508 return true;
5509 }
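
/* A sketch of a typical call (hypothetical locals; compare the callers in
   this file such as vectorizable_type_promotion):

     enum tree_code code1, code2;
     tree decl1 = NULL_TREE, decl2 = NULL_TREE;
     int multi_step_cvt = 0;
     VEC (tree, heap) *interm_types = NULL;

     if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
                                          &decl1, &decl2, &code1, &code2,
                                          &multi_step_cvt, &interm_types))
       return false;  */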
5510
5511
5512 /* Function supportable_narrowing_operation
5513
5514 Check whether an operation represented by the code CODE is a
5515 narrowing operation that is supported by the target platform in
5516 vector form (i.e., when operating on arguments of type VECTYPE_IN
5517 and producing a result of type VECTYPE_OUT).
5518
5519 Narrowing operations we currently support are NOP (CONVERT) and
5520 FIX_TRUNC. This function checks if these operations are supported by
5521 the target platform directly via vector tree-codes.
5522
5523 Output:
5524 - CODE1 is the code of a vector operation to be used when
5525 vectorizing the operation, if available.
5526 - MULTI_STEP_CVT determines the number of required intermediate steps in
5527 case of a multi-step conversion (like int->short->char; in that case
5528 MULTI_STEP_CVT will be 1).
5529 - INTERM_TYPES contains the intermediate type required to perform the
5530 narrowing operation (short in the above example). */
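
/* For instance (illustrative only): an int->short conversion is a single-step
   VEC_PACK_TRUNC_EXPR, so only *CODE1 is set; an int->char conversion needs
   one intermediate short vector type, so MULTI_STEP_CVT becomes 1 and
   INTERM_TYPES records that intermediate vector type.  */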
5531
5532 bool
5533 supportable_narrowing_operation (enum tree_code code,
5534 tree vectype_out, tree vectype_in,
5535 enum tree_code *code1, int *multi_step_cvt,
5536 VEC (tree, heap) **interm_types)
5537 {
5538 enum machine_mode vec_mode;
5539 enum insn_code icode1;
5540 optab optab1, interm_optab;
5541 tree vectype = vectype_in;
5542 tree narrow_vectype = vectype_out;
5543 enum tree_code c1;
5544 tree intermediate_type, prev_type;
5545 int i;
5546
5547 switch (code)
5548 {
5549 CASE_CONVERT:
5550 c1 = VEC_PACK_TRUNC_EXPR;
5551 break;
5552
5553 case FIX_TRUNC_EXPR:
5554 c1 = VEC_PACK_FIX_TRUNC_EXPR;
5555 break;
5556
5557 case FLOAT_EXPR:
5558 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5559 tree code and optabs used for computing the operation. */
5560 return false;
5561
5562 default:
5563 gcc_unreachable ();
5564 }
5565
5566 if (code == FIX_TRUNC_EXPR)
5567 /* The signedness is determined from the output operand.  */
5568 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5569 else
5570 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5571
5572 if (!optab1)
5573 return false;
5574
5575 vec_mode = TYPE_MODE (vectype);
5576 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
5577 return false;
5578
5579 /* Check if it's a multi-step conversion that can be done using intermediate
5580 types. */
5581 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5582 {
5583 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5584
5585 *code1 = c1;
5586 prev_type = vectype;
5587 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5588 intermediate steps in the demotion sequence.  We try at most
5589 MAX_INTERM_CVT_STEPS steps to get to NARROW_VECTYPE, and fail if we do
5590 not.  */
5591 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5592 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5593 {
5594 intermediate_mode = insn_data[icode1].operand[0].mode;
5595 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5596 TYPE_UNSIGNED (prev_type));
5597 interm_optab = optab_for_tree_code (c1, intermediate_type,
5598 optab_default);
5599 if (!interm_optab
5600 || ((icode1 = optab_handler (optab1, prev_mode))
5601 == CODE_FOR_nothing)
5602 || insn_data[icode1].operand[0].mode != intermediate_mode
5603 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
5604 == CODE_FOR_nothing))
5605 return false;
5606
5607 VEC_quick_push (tree, *interm_types, intermediate_type);
5608 (*multi_step_cvt)++;
5609
5610 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5611 return true;
5612
5613 prev_type = intermediate_type;
5614 prev_mode = intermediate_mode;
5615 }
5616
5617 return false;
5618 }
5619
5620 *code1 = c1;
5621 return true;
5622 }