/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
                         "vect_array");
}
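/* Illustrative sketch (not part of the original source; VECTYPE is an
   assumed vector type such as V4SF):

     tree vec_array = create_vector_array (vectype, 4);

   yields a temporary of type VECTYPE[4], as used by the read/write
   helpers below.  */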
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
                   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
  mark_symbols_for_renaming (new_stmt);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
                    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
  mark_symbols_for_renaming (new_stmt);
}
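/* Illustrative usage sketch (not from the original source; VEC_DEFS is
   an assumed caller-side array of SSA names): a store-lanes sequence
   first copies each vector def into the array and then emits one array
   store, roughly

     for (i = 0; i < group_size; i++)
       write_vector_array (stmt, gsi, vec_defs[i], vec_array, i);

   while a load-lanes sequence performs the converse with
   read_vector_array after the single array load.  */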
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  struct ptr_info_def *pi;
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  pi = get_ptr_info (ptr);
  pi->align = TYPE_ALIGN_UNIT (type);
  pi->misalign = 0;
  return mem_ref;
}
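/* Illustrative sketch (assumed types, not from the original source):
   given ATYPE = VECTYPE[4] and PTR pointing at the first interleaved
   element,

     tree ref = create_array_ref (atype, ptr, first_dr);

   produces the MEM_REF '*ptr' whose alias pointer type is taken from
   FIRST_DR, ready to serve as the memory operand of a lanes load or
   store.  */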
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);

  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      gimple pattern_stmt;

      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "already marked relevant/live.");
      return;
    }

  VEC_safe_push (gimple, heap, *worklist, stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT, in a loop that is represented by LOOP_VINFO, is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form)   */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}
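/* Illustrative example (not from the original source): in

     for (i = 0; i < n; i++)
       s_3 = s_2 + a[i];    <-- S1
     ... = s_3;             <-- use after the loop

   S1 is marked live because its result escapes the loop through the
   loop-closed-form exit phi, so vect_stmt_relevant_p returns true with
   *live_p = true even though *relevant may stay vect_unused_in_scope.  */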
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
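/* Illustrative example (not from the original source): for the stmt
   'a[i_5] = x_7', the use 'i_5' only indexes the array, so this
   function returns false for it (its def need not be vectorized),
   whereas for 'x_7', the value actually being stored, it returns
   true.  */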
/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
     which does not need to be directly vectorized, then the liveness/relevance
     of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "def_stmt is out of loop.");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}
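/* Illustrative example (not from the original source): in the nest

     outer-loop:
       x_1 = ...              <-- DEF_STMT in the outer loop
       inner-loop:
         y_2 = x_1 + 1;       <-- STMT in the inner loop

   case 3a above translates the relevance STMT carries in the outer
   scope into the matching inner-scope value (e.g. vect_used_in_outer
   becomes vect_used_in_scope); in the converse situation (case 3b) an
   inner-loop def used by an outer-loop stmt is promoted, e.g. from
   vect_used_in_scope to vect_used_in_outer.  */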
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  VEC(gimple,heap) *worklist;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");

  worklist = VEC_alloc (gimple, heap, 64);

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: phi relevant? ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: stmt relevant? ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist */
  while (VEC_length (gimple, worklist) > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = VEC_pop (gimple, worklist);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "worklist: examine stmt: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines
         it (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
          live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
          relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a reduction
         variable; in this case we set the liveness/relevance as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
          case vect_reduction_def:
            switch (tmp_relevant)
              {
                case vect_unused_in_scope:
                  relevant = vect_used_by_reduction;
                  break;

                case vect_used_by_reduction:
                  if (gimple_code (stmt) == GIMPLE_PHI)
                    break;
                  /* fall through */

                default:
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "unsupported use of reduction.");

                  VEC_free (gimple, heap, worklist);
                  return false;
              }

            live_p = false;
            break;

          case vect_nested_cycle:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_in_outer_by_reduction
                && tmp_relevant != vect_used_in_outer)
              {
                if (vect_print_dump_info (REPORT_DETAILS))
                  fprintf (vect_dump, "unsupported use of nested cycle.");

                VEC_free (gimple, heap, worklist);
                return false;
              }

            live_p = false;
            break;

          case vect_double_reduction_def:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_by_reduction)
              {
                if (vect_print_dump_info (REPORT_DETAILS))
                  fprintf (vect_dump, "unsupported use of double reduction.");

                VEC_free (gimple, heap, worklist);
                return false;
              }

            live_p = false;
            break;

          default:
            break;
        }

      if (is_pattern_stmt_p (vinfo_for_stmt (stmt)))
        {
          /* Pattern statements are not inserted into the code, so
             FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
             have to scan the RHS or function arguments instead.  */
          if (is_gimple_assign (stmt))
            {
              for (i = 1; i < gimple_num_ops (stmt); i++)
                {
                  tree op = gimple_op (stmt, i);
                  if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
                                    &worklist))
                    {
                      VEC_free (gimple, heap, worklist);
                      return false;
                    }
                }
            }
          else if (is_gimple_call (stmt))
            {
              for (i = 0; i < gimple_call_num_args (stmt); i++)
                {
                  tree arg = gimple_call_arg (stmt, i);
                  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
                                    &worklist))
                    {
                      VEC_free (gimple, heap, worklist);
                      return false;
                    }
                }
            }
        }
      else
        FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
          {
            tree op = USE_FROM_PTR (use_p);
            if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
                              &worklist))
              {
                VEC_free (gimple, heap, worklist);
                return false;
              }
          }
    } /* while worklist */

  VEC_free (gimple, heap, worklist);
  return true;
}
/* Get cost by calling cost target builtin.  */

static inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  tree dummy_type = NULL;
  int dummy = 0;

  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       dummy_type, dummy);
}
/* Get cost for STMT.  */

int
cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
  {
  case load_vec_info_type:
    return vect_get_stmt_cost (scalar_load);
  case store_vec_info_type:
    return vect_get_stmt_cost (scalar_store);
  case op_vec_info_type:
  case condition_vec_info_type:
  case assignment_vec_info_type:
  case reduc_vec_info_type:
  case induc_vec_info_type:
  case type_promotion_vec_info_type:
  case type_demotion_vec_info_type:
  case type_conversion_vec_info_type:
  case call_vec_info_type:
    return vect_get_stmt_cost (scalar_stmt);
  case undef_vec_info_type:
  default:
    gcc_unreachable ();
  }
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt, slp_tree slp_node)
{
  int i;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        outside_cost += vect_get_stmt_cost (vector_stmt);
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
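/* Worked example (illustrative; assumes the target hook charges 1 per
   vector_stmt): for NCOPIES = 4 and one constant operand,
   inside_cost = 4 and outside_cost = 1, the latter paying for building
   the invariant vector once outside the loop.  */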
/* Function vect_cost_strided_group_size

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       bool store_lanes_p, enum vect_def_type dt,
                       slp_tree slp_node)
{
  int group_size;
  unsigned int inside_cost = 0, outside_cost = 0;
  struct data_reference *first_dr;
  gimple first_stmt;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    outside_cost = vect_get_stmt_cost (scalar_to_vec);

  /* Strided access?  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      if (slp_node)
        {
          first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
          group_size = 1;
        }
      else
        {
          first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
          group_size = vect_cost_strided_group_size (stmt_info);
        }

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a strided
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
        * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
                 group_size);
    }

  /* Costs of the stores.  */
  vect_get_store_cost (first_dr, ncopies, &inside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
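/* Worked example (illustrative; unit vector_stmt cost assumed): for an
   interleaved store group with GROUP_SIZE = 4, NCOPIES = 1 and no
   store-lanes support, the permute term above is
   1 * exact_log2 (4) * 4 = 8, after which vect_get_store_cost adds the
   cost of the store itself (plus any unaligned-store penalty).  */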
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
                     unsigned int *inside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_store);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: aligned.");

        break;
      }

    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
                                                 vectype, DR_MISALIGNMENT (dr));

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
                   "hardware.");

        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
                      slp_tree slp_node)
{
  int group_size;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a strided
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1)
    {
      /* Uses an even and odd extract operation for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
        * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
                 group_size);
    }

  /* The loads themselves.  */
  vect_get_load_cost (first_dr, ncopies,
         ((!STMT_VINFO_STRIDED_ACCESS (stmt_info)) || group_size > 1
          || slp_node),
         &inside_cost, &outside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *outside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_load);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: aligned.");

        break;
      }
    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
                                                 vectype, DR_MISALIGNMENT (dr));
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                   "hardware.");

        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           outside costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += vect_get_stmt_cost (vector_stmt);

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
                   "pipelined.");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide strided
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost)
          {
            *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
            if (targetm.vectorize.builtin_mask_for_load)
              *outside_cost += vect_get_stmt_cost (vector_stmt);
          }

        *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));
        break;
      }

    default:
      gcc_unreachable ();
    }
}
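/* Worked example (illustrative; unit costs assumed): for
   dr_explicit_realign_optimized with ADD_REALIGN_COST set and a
   builtin_mask_for_load hook, the one-time cost is
   *outside_cost = 2 + 1 = 3, while each copy adds one vector_load and
   one realignment vector_stmt inside the loop.  */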
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at BSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
                  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  edge pe;
  tree new_temp;
  basic_block new_bb;

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
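/* Illustrative usage (mirrors vect_get_vec_def_for_operand below): to
   materialize the splat {cst,cst,...,cst} of a constant operand OP,

     vec_cst = build_vector_from_val (vector_type, op);
     new_def = vect_init_vector (stmt, vec_cst, vector_type, NULL);

   the init stmt lands on the loop preheader edge because GSI is NULL.  */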
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree vec_inv;
  tree vec_cst;
  tree t = NULL_TREE;
  tree def;
  int i;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
      print_generic_expr (vect_dump, op, TDF_SLIM);
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
                                      &dt);
  gcc_assert (is_simple_use);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      if (def)
        {
          fprintf (vect_dump, "def =  ");
          print_generic_expr (vect_dump, def, TDF_SLIM);
        }
      if (def_stmt)
        {
          fprintf (vect_dump, "  def_stmt =  ");
          print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
        }
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = op;

        /* Create 'vect_cst_ = {cst,cst,...,cst}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);

        vec_cst = build_vector_from_val (vector_type, op);
        return vect_init_vector (stmt, vec_cst, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = def;

        /* Create 'vec_inv = {inv,inv,..,inv}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_inv.");

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, def, t);
          }

        /* FIXME: use build_constructor directly.  */
        vec_inv = build_constructor_from_list (vector_type, t);
        return vect_init_vector (stmt, vec_inv, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
        if (scalar_def)
          *scalar_def = NULL/* FIXME tuples: def_stmt*/;

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
        struct loop *loop;

        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
        loop = (gimple_bb (def_stmt))->loop_father;

        /* Get the def before the loop  */
        op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
        return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else
          vec_oprnd = gimple_get_lhs (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND = 'vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 VEC(tree,heap) **vec_oprnds0,
                                 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
   NULL.  */

static void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
        {
          *vec_oprnds1 = VEC_alloc (tree, heap, 1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
          VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
        }
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
                                                   bb_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
                                                        vectype_in);
}
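/* Illustrative example (target-dependent, not from the original
   source): on a target whose builtin_vectorized_function hook maps
   BUILT_IN_SQRTF to a V4SF square-root builtin, a call 'y = sqrtf (x)'
   inside the loop returns a non-NULL fndecl here and is then emitted
   as one vector call per copy by vectorizable_call below.  */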
/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  if (stmt_can_throw_internal (stmt))
    return false;

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument types differ.");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
                                 &def_stmt, &def, &dt[i], &opvectype))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument vector types differ.");
          return false;
        }
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");

      return false;
    }

  gcc_assert (!gimple_vuse (stmt));

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform call.");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt, NULL);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs * 2);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt, NULL);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
              VEC_quick_push (tree, vargs, vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* Update the exception handling table with the vector stmt if necessary.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
    gimple_purge_dead_eh_edges (gimple_bb (stmt));

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);
  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple
vect_gen_widened_results_half (enum tree_code code,
                               tree decl,
                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
                               tree vec_dest, gimple_stmt_iterator *gsi,
                               gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
        vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
                                               vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
                         gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, j;
  tree rhs_type;
  tree builtin_decl;
  enum { NARROW, NONE, WIDEN } modifier;
  int i;
  VEC(tree,heap) *vec_oprnds0 = NULL;
  tree vop0;
  VEC(tree,heap) *dummy = NULL;
  int dummy_int;

  /* Is STMT a vectorizable conversion?   */

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
    return false;

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);
  /* Check the operands of the operation.  */
  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }
  /* If op0 is an external or constant def use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  if ((modifier == NONE
       && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
      || (modifier == WIDEN
          && !supportable_widening_operation (code, stmt,
                                              vectype_out, vectype_in,
                                              &decl1, &decl2,
                                              &code1, &code2,
                                              &dummy_int, &dummy))
      || (modifier == NARROW
          && !supportable_narrowing_operation (code, vectype_out, vectype_in,
                                               &code1, &dummy_int, &dummy)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "conversion not supported by target.");
      return false;
    }

  if (modifier != NONE)
    {
      /* FORNOW: SLP not supported.  */
      if (STMT_SLP_TYPE (stmt_info))
        return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
      return true;
    }

  /** Transform.  **/
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform conversion.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  if (modifier == NONE && !slp_node)
    vec_oprnds0 = VEC_alloc (tree, heap, 1);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
          else
            vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

          builtin_decl =
            targetm.vectorize.builtin_conversion (code,
                                                  vectype_out, vectype_in);
          FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
            {
              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_call (builtin_decl, 1, vop0);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              if (slp_node)
                VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
            }

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
          else
            vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);

          /* Generate first half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code1, decl1,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);

          /* Generate second half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code2, decl2,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (j == 0)
            {
              vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }

          /* Arguments are ready.  Create the new vector stmt.  */
          new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
                                                   vec_oprnd1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }

  if (vec_oprnds0)
    VEC_free (tree, heap, vec_oprnds0);

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
                         gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  VEC(tree,heap) *vec_oprnds = NULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
          || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
          || (GET_MODE_SIZE (TYPE_MODE (vectype))
              != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_assignment ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform assignment.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
        {
          if (CONVERT_EXPR_CODE_P (code)
              || code == VIEW_CONVERT_EXPR)
            vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
          new_stmt = gimple_build_assign (vec_dest, vop);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  VEC_free (tree, heap, vec_oprnds);
  return true;
}
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
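
/* For example, both of

       for (i = 0; i < N; i++)
         a[i] = b[i] << c[i];    (shift amount varies per element)

       for (i = 0; i < N; i++)
         a[i] = b[i] << 3;       (shift amount constant/invariant)

   reach this function; each group of NUNITS scalar shifts becomes one
   vector shift stmt, provided the target implements the corresponding
   optab (see the scalar/vector shift-argument analysis below).  */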
static bool
vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
                    gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  enum machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  enum machine_mode optab_op2_mode;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
        || code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
        }
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */
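
  /* E.g., "vb >> vc" needs the vector/vector form (vashr_optab for
     RSHIFT_EXPR), whereas "vb >> 3" can use the vector/scalar form
     (ashr_optab queried with a vector mode) when the target's insn
     pattern accepts a scalar operand 2; the flag computed below also
     controls whether OP1 is later duplicated into a vector.  */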
  if (dt[1] == vect_internal_def && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
           || dt[1] == vect_external_def
           || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
         in loops if it is a constant or invariant, it is always
         a scalar shift.  */
      if (slp_node)
        {
          VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
          gimple slpstmt;

          FOR_EACH_VEC_ELT (gimple, stmts, k, slpstmt)
            if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
              scalar_shift_arg = false;
        }
    }
  else
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "operand mode requires invariant argument.");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "vector/vector shift/rotate found.");
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
          && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "vector/scalar shift/rotate found.");
        }
      else
        {
          optab = optab_for_tree_code (code, vectype, optab_vector);
          if (optab
              && (optab_handler (optab, TYPE_MODE (vectype))
                  != CODE_FOR_nothing))
            {
              scalar_shift_arg = false;

              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vector/vector shift/rotate found.");

              /* Unlike the other binary operators, shifts/rotates have
                 the rhs being int, instead of the same type as the lhs,
                 so make sure the scalar is the right type if we are
                 dealing with vectors of short/char.  */
              if (dt[1] == vect_constant_def)
                op1 = fold_convert (TREE_TYPE (vectype), op1);
            }
        }
    }

  /* Supportable by target?  */
  if (!optab)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no optab.");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "op not supported by target.");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (vf < vect_min_worthwhile_factor (code)
              && !vec_stmt))
        return false;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "proceeding using word mode.");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not worthwhile without SIMD support.");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_shift ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform binary/unary operation.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Allocate VECs for vector operands.  In case of SLP, vector operands are
     created in the previous stages of the recursion, so no allocation is
     needed, except for the case of shift with scalar shift argument.  In that
     case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
     be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
     In case of loop-based vectorization we allocate VECs of size 1.  We
     allocate VEC_OPRNDS1 only in case of binary operation.  */
  if (!slp_node)
    {
      vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnds1 = VEC_alloc (tree, heap, 1);
    }
  else if (scalar_shift_arg)
    vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (scalar_shift_arg)
            {
              /* Vector shl and shr insn patterns can be defined with scalar
                 operand 2 (shift operand).  In this case, use constant or loop
                 invariant op1 directly, without extending it to vector mode
                 first.  */
              optab_op2_mode = insn_data[icode].operand[2].mode;
              if (!VECTOR_MODE_P (optab_op2_mode))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "operand 1 using scalar mode.");
                  vec_oprnd1 = op1;
                  VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
                  if (slp_node)
                    {
                      /* Store vec_oprnd1 for every vector stmt to be created
                         for SLP_NODE.  We check during the analysis that all
                         the shift arguments are the same.
                         TODO: Allow different constants for different vector
                         stmts generated for an SLP instance.  */
                      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                        VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
                    }
                }
            }

          /* vec_oprnd1 is available if operand 1 should be of a scalar-type
             (a special case for certain kind of vector shifts); otherwise,
             operand 1 should be of a vector type (the usual case).  */
          if (vec_oprnd1)
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node);
          else
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node);
        }
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
        {
          vop1 = VEC_index (tree, vec_oprnds1, i);
          new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  VEC_free (tree, heap, vec_oprnds0);
  VEC_free (tree, heap, vec_oprnds1);

  return true;
}
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
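
/* For example, "z = x + 1" inside a loop becomes "vz = vx + {1,1,1,1}"
   with V4SI vectors.  Unary codes (e.g. negation) and ternary codes
   (e.g. FMA_EXPR) follow the same path, with one or three vectorized
   operands collected per copy instead of two.  */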
static bool
vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  enum machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  int icode;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vec_oprnds2 = NULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    return false;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "num. args = %d (not unary/binary/ternary op).",
                 op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
        }
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
                               &dt[1]))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, loop_vinfo, bb_vinfo, &def_stmt, &def,
                               &dt[2]))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_default);

  /* Supportable by target?  */
  if (!optab)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no optab.");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "op not supported by target.");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (vf < vect_min_worthwhile_factor (code)
              && !vec_stmt))
        return false;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "proceeding using word mode.");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not worthwhile without SIMD support.");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_operation ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform binary/unary operation.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Allocate VECs for vector operands.  In case of SLP, vector operands are
     created in the previous stages of the recursion, so no allocation is
     needed, except for the case of shift with scalar shift argument.  In that
     case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
     be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
     In case of loop-based vectorization we allocate VECs of size 1.  We
     allocate VEC_OPRNDS1 only in case of binary operation.  */
  if (!slp_node)
    {
      vec_oprnds0 = VEC_alloc (tree, heap, 1);
      if (op_type == binary_op || op_type == ternary_op)
        vec_oprnds1 = VEC_alloc (tree, heap, 1);
      if (op_type == ternary_op)
        vec_oprnds2 = VEC_alloc (tree, heap, 1);
    }
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                    RELATED_STMT    VEC_STMT
        S1:     x = memref          -               -
        S2:     z = x + 1           -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                    RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0       VS1_1           -
        VS1_1:  vx1 = memref1       VS1_2           -
        VS1_2:  vx2 = memref2       VS1_3           -
        VS1_3:  vx3 = memref3       -               -
        S1:     x = load            -               VS1_0
        S2:     z = x + 1           -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                    RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0       VS1_1           -
        VS1_1:  vx1 = memref1       VS1_2           -
        VS1_2:  vx2 = memref2       VS1_3           -
        VS1_3:  vx3 = memref3       -               -
        S1:     x = load            -               VS1_0
        VS2_0:  vz0 = vx0 + v1      VS2_1           -
        VS2_1:  vz1 = vx1 + v1      VS2_2           -
        VS2_2:  vz2 = vx2 + v1      VS2_3           -
        VS2_3:  vz3 = vx3 + v1      -               -
        S2:     z = x + 1           -               VS2_0  */
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (op_type == binary_op || op_type == ternary_op)
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node);
          else
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node);
          if (op_type == ternary_op)
            {
              vec_oprnds2 = VEC_alloc (tree, heap, 1);
              VEC_quick_push (tree, vec_oprnds2,
                              vect_get_vec_def_for_operand (op2, stmt, NULL));
            }
        }
      else
        {
          vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
          if (op_type == ternary_op)
            {
              tree vec_oprnd = VEC_pop (tree, vec_oprnds2);
              VEC_quick_push (tree, vec_oprnds2,
                              vect_get_vec_def_for_stmt_copy (dt[2],
                                                              vec_oprnd));
            }
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
        {
          vop1 = ((op_type == binary_op || op_type == ternary_op)
                  ? VEC_index (tree, vec_oprnds1, i) : NULL_TREE);
          vop2 = ((op_type == ternary_op)
                  ? VEC_index (tree, vec_oprnds2, i) : NULL_TREE);
          new_stmt = gimple_build_assign_with_ops3 (code, vec_dest,
                                                    vop0, vop1, vop2);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  VEC_free (tree, heap, vec_oprnds0);
  if (vec_oprnds1)
    VEC_free (tree, heap, vec_oprnds1);
  if (vec_oprnds2)
    VEC_free (tree, heap, vec_oprnds2);

  return true;
}
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */
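
/* E.g., with MULTI_STEP_CVT == 1 this collects four vector defs into
   VEC_OPRNDS (two per recursion level); the caller then combines them
   pairwise, one narrowing step at a time.  */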
static void
vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
                          VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  VEC_quick_push (tree, *vec_oprnds, vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds, vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */
static void
vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
                                       int multi_step_cvt, gimple stmt,
                                       VEC (tree, heap) *vec_dsts,
                                       gimple_stmt_iterator *gsi,
                                       slp_tree slp_node, enum tree_code code,
                                       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = VEC_pop (tree, vec_dsts);

  for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = VEC_index (tree, *vec_oprnds, i);
      vop1 = VEC_index (tree, *vec_oprnds, i + 1);
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
        /* Store the resulting vector for next recursive call.  */
        VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
      else
        {
          /* This is the last step of the conversion sequence.  Store the
             vectors in SLP_NODE or in vector info of the scalar statement
             (or in STMT_VINFO_RELATED_STMT chain).  */
          if (slp_node)
            VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
          else
            {
              if (!*prev_stmt_info)
                STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

              *prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
         previous level.  */
      VEC_truncate (tree, *vec_oprnds, (i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
                                             stmt, vec_dsts, gsi, slp_node,
                                             code, prev_stmt_info);
    }
}
/* Function vectorizable_type_demotion

   Check if STMT performs a binary or unary operation that involves
   type demotion, and if it can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
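
/* For example,

       int   x[N];
       short s[N];
       for (i = 0; i < N; i++)
         s[i] = (short) x[i];

   demotes int to short: with 128-bit vectors, each vectorized stmt
   packs a pair of V4SI operand vectors into one V8HI result vector,
   which is why the defs below are always consumed two at a time.  */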
static bool
vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
                            gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree vectype_in;
  int ncopies;
  int j, i;
  int multi_step_cvt = 0;
  VEC (tree, heap) *vec_oprnds0 = NULL;
  VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
  tree last_oprnd, intermediate_type;

  /* FORNOW: not supported by basic block SLP vectorization.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable type-demotion operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Check the operands of the operation.  */
  op0 = gimple_assign_rhs1 (stmt);
  if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
          && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
         || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
             && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
             && CONVERT_EXPR_CODE_P (code))))
    return false;

  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  /* If op0 is an external def use a vector type with the
     same size as the output vector type if possible.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
        }
      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in >= nunits_out)
    return false;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
                                        &code1, &multi_step_cvt, &interm_types))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_demotion ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
             ncopies);
  /* In case of multi-step demotion, we first generate demotion operations to
     the intermediate types, and then from that types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_narrowing_operation, and store them in the correct order
     for future use in vect_create_vectorized_demotion_stmts().  */
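
  /* E.g., demoting int to char with 128-bit vectors takes two steps,
     V4SI -> V8HI -> V16QI; the V8HI destination is pushed last and is
     therefore popped first by the recursion, the final V16QI
     destination last.  */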
  if (multi_step_cvt)
    vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
  else
    vec_dsts = VEC_alloc (tree, heap, 1);

  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
  VEC_quick_push (tree, vec_dsts, vec_dest);

  if (multi_step_cvt)
    {
      for (i = VEC_length (tree, interm_types) - 1;
           VEC_iterate (tree, interm_types, i, intermediate_type); i--)
        {
          vec_dest = vect_create_destination_var (scalar_dest,
                                                  intermediate_type);
          VEC_quick_push (tree, vec_dsts, vec_dest);
        }
    }

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.   */
  last_oprnd = op0;
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (slp_node)
        vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
      else
        {
          VEC_free (tree, heap, vec_oprnds0);
          vec_oprnds0 = VEC_alloc (tree, heap,
                        (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
          vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
                                    vect_pow2 (multi_step_cvt) - 1);
        }

      /* Arguments are ready.  Create the new vector stmts.  */
      tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
      vect_create_vectorized_demotion_stmts (&vec_oprnds0,
                                             multi_step_cvt, stmt, tmp_vec_dsts,
                                             gsi, slp_node, code1,
                                             &prev_stmt_info);
    }

  VEC_free (tree, heap, vec_oprnds0);
  VEC_free (tree, heap, vec_dsts);
  VEC_free (tree, heap, tmp_vec_dsts);
  VEC_free (tree, heap, interm_types);

  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
  return true;
}
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */
static void
vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
                                        VEC (tree, heap) **vec_oprnds1,
                                        int multi_step_cvt, gimple stmt,
                                        VEC (tree, heap) *vec_dsts,
                                        gimple_stmt_iterator *gsi,
                                        slp_tree slp_node, enum tree_code code1,
                                        enum tree_code code2, tree decl1,
                                        tree decl2, int op_type,
                                        stmt_vec_info *prev_stmt_info)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
  gimple new_stmt1, new_stmt2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  VEC (tree, heap) *vec_tmp;

  vec_dest = VEC_pop (tree, vec_dsts);
  vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);

  FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
        vop1 = VEC_index (tree, *vec_oprnds1, i);
      else
        vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
        {
          new_tmp1 = gimple_call_lhs (new_stmt1);
          new_tmp2 = gimple_call_lhs (new_stmt2);
        }
      else
        {
          new_tmp1 = gimple_assign_lhs (new_stmt1);
          new_tmp2 = gimple_assign_lhs (new_stmt2);
        }

      if (multi_step_cvt)
        {
          /* Store the results for the recursive call.  */
          VEC_quick_push (tree, vec_tmp, new_tmp1);
          VEC_quick_push (tree, vec_tmp, new_tmp2);
        }
      else
        {
          /* Last step of promotion sequence - store the results.  */
          if (slp_node)
            {
              VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
              VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
            }
          else
            {
              if (!*prev_stmt_info)
                STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
              else
                STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;

              *prev_stmt_info = vinfo_for_stmt (new_stmt1);
              STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
              *prev_stmt_info = vinfo_for_stmt (new_stmt2);
            }
        }
    }

  if (multi_step_cvt)
    {
      /* For a multi-step promotion operation we call the function
         recursively for every stage.  We start from the input type,
         create promotion operations to the intermediate types, and then
         create promotions to the output type.  */
      *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
      vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
                                              multi_step_cvt - 1, stmt,
                                              vec_dsts, gsi, slp_node, code1,
                                              code2, decl2, decl2, op_type,
                                              prev_stmt_info);
    }

  VEC_free (tree, heap, vec_tmp);
}
/* Function vectorizable_type_promotion

   Check if STMT performs a binary or unary operation that involves
   type promotion, and if it can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
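
/* For example,

       short s[N];
       int   x[N];
       for (i = 0; i < N; i++)
         x[i] = (int) s[i];

   promotes short to int: each V8HI operand vector produces two V4SI
   result vectors (a "low" and a "high" half), built by
   vect_gen_widened_results_half.  WIDEN_MULT_EXPR is the binary
   variant of the same scheme.  */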
static bool
vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
                             gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  int op_type;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree vectype_in;
  int ncopies;
  int j, i;
  tree intermediate_type = NULL_TREE;
  int multi_step_cvt = 0;
  VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
  VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;

  /* FORNOW: not supported by basic block SLP vectorization.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable type-promotion operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != WIDEN_MULT_EXPR)
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Check the operands of the operation.  */
  op0 = gimple_assign_rhs1 (stmt);
  if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
          && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
         || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
             && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
             && CONVERT_EXPR_CODE_P (code))))
    return false;

  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  op_type = TREE_CODE_LENGTH (code);
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      if (code == WIDEN_MULT_EXPR)
        {
          /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
             OP1.  */
          if (CONSTANT_CLASS_P (op0))
            ok = vect_is_simple_use_1 (op1, loop_vinfo, NULL,
                                       &def_stmt, &def, &dt[1], &vectype_in);
          else
            ok = vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def,
                                     &dt[1]);

          if (!ok)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "use not simple.");
              return false;
            }
        }
    }

  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
        }
      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in <= nunits_out)
    return false;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
                                       &decl1, &decl2, &code1, &code2,
                                       &multi_step_cvt, &interm_types))
    return false;

  /* Binary widening operation can only be supported directly by the
     architecture.  */
  gcc_assert (!(multi_step_cvt && op_type == binary_op));

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_promotion ===");
      vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
             ncopies);

  if (code == WIDEN_MULT_EXPR)
    {
      if (CONSTANT_CLASS_P (op0))
        op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
        op1 = fold_convert (TREE_TYPE (op0), op1);
    }
  /* Handle def.  */
  /* In case of multi-step promotion, we first generate promotion operations
     to the intermediate types, and then from that types to the final one.
     We store vector destination in VEC_DSTS in the correct order for
     recursive creation of promotion operations in
     vect_create_vectorized_promotion_stmts().  Vector destinations are created
     according to TYPES received from supportable_widening_operation().  */
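
  /* E.g., promoting char to int with 128-bit vectors takes two steps,
     V16QI -> V8HI -> V4SI; VEC_DSTS is filled so that the recursion
     pops the V8HI destination for the first stage and the V4SI
     destination for the final one.  */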
  if (multi_step_cvt)
    vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
  else
    vec_dsts = VEC_alloc (tree, heap, 1);

  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
  VEC_quick_push (tree, vec_dsts, vec_dest);

  if (multi_step_cvt)
    {
      for (i = VEC_length (tree, interm_types) - 1;
           VEC_iterate (tree, interm_types, i, intermediate_type); i--)
        {
          vec_dest = vect_create_destination_var (scalar_dest,
                                                  intermediate_type);
          VEC_quick_push (tree, vec_dsts, vec_dest);
        }
    }

  if (!slp_node)
    {
      vec_oprnds0 = VEC_alloc (tree, heap,
                               (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
      if (op_type == binary_op)
        vec_oprnds1 = VEC_alloc (tree, heap, 1);
    }

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.   */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (slp_node)
            vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
                               &vec_oprnds1, -1);
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
              VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
              if (op_type == binary_op)
                {
                  vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
                  VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
                }
            }
        }
      else
        {
          vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
          VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
          if (op_type == binary_op)
            {
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
              VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
            }
        }

      /* Arguments are ready.  Create the new vector stmts.  */
      tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
      vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
                                              multi_step_cvt, stmt,
                                              tmp_vec_dsts,
                                              gsi, slp_node, code1, code2,
                                              decl1, decl2, op_type,
                                              &prev_stmt_info);
    }

  VEC_free (tree, heap, vec_dsts);
  VEC_free (tree, heap, tmp_vec_dsts);
  VEC_free (tree, heap, interm_types);
  VEC_free (tree, heap, vec_oprnds0);
  VEC_free (tree, heap, vec_oprnds1);

  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
  return true;
}
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
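
/* For example, the store in

       for (i = 0; i < N; i++)
         a[i] = x;

   becomes one vector store ("MEM[...] = vect_x") per copy.  Interleaved
   (strided) store groups additionally pass through
   vect_permute_store_chain (), or are emitted as a single store-lanes
   internal call on targets that support it.  */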
static bool
vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  enum machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int j;
  gimple next_stmt, first_stmt = NULL;
  bool strided_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
  bool inv_p;
  VEC(tree,heap) *vec_oprnds = NULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  tree aggr_type;
  gimple ptr_incr = NULL;
  gimple new_stmt = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable store?  */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));
  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  /* The scalar rhs type needs to be trivially convertible to the vector
     component type.  This should always be the case.  */
  elem_type = TREE_TYPE (vectype);
  if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "???  operands of different types");
      return false;
    }

  vec_mode = TYPE_MODE (vectype);
  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "negative step for store.");
      return false;
    }

  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      strided_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      if (!slp && !PURE_SLP_STMT (stmt_info))
        {
          group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
          if (vect_store_lanes_supported (vectype, group_size))
            store_lanes_p = true;
          else if (!vect_strided_store_supported (vectype, group_size))
            return false;
        }

      if (first_stmt == stmt)
        {
          /* STMT is the leader of the group.  Check the operands of all the
             stmts of the group.  */
          next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
          while (next_stmt)
            {
              gcc_assert (gimple_assign_single_p (next_stmt));
              op = gimple_assign_rhs1 (next_stmt);
              if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
                                       &def, &dt))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "use not simple.");
                  return false;
                }
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
            }
        }
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (strided_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
         reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
          < GROUP_SIZE (vinfo_for_stmt (first_stmt))
          && !slp)
        {
          *vec_stmt = NULL;
          return true;
        }

      if (slp)
        {
          strided_store = false;
          /* VEC_NUM is the number of vect stmts to be created for this
             group.  */
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
        }
      else
        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform store. ncopies = %d", ncopies);

  dr_chain = VEC_alloc (tree, heap, group_size);
  oprnds = VEC_alloc (tree, heap, group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit strided access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
        VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      if (j == 0)
        {
          if (slp)
            {
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
                                 NULL, -1);

              vec_oprnd = VEC_index (tree, vec_oprnds, 0);
            }
          else
            {
              /* For interleaved stores we collect vectorized defs for all the
                 stores in the group in DR_CHAIN and OPRNDS.  DR_CHAIN is then
                 used as an input to vect_permute_store_chain(), and OPRNDS as
                 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

                 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
                 OPRNDS are of size 1.  */
              next_stmt = first_stmt;
              for (i = 0; i < group_size; i++)
                {
                  /* Since gaps are not supported for interleaved stores,
                     GROUP_SIZE is the exact number of stmts in the chain.
                     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
                     there is no interleaving, GROUP_SIZE is 1, and only one
                     iteration of the loop will be executed.  */
                  gcc_assert (next_stmt
                              && gimple_assign_single_p (next_stmt));
                  op = gimple_assign_rhs1 (next_stmt);

                  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
                                                            NULL);
                  VEC_quick_push(tree, dr_chain, vec_oprnd);
                  VEC_quick_push(tree, oprnds, vec_oprnd);
                  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
                }
            }

          /* We should have caught mismatched types earlier.  */
          gcc_assert (useless_type_conversion_p (vectype,
                                                 TREE_TYPE (vec_oprnd)));
          dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, NULL,
                                                  NULL_TREE, &dummy, gsi,
                                                  &ptr_incr, false, &inv_p);
          gcc_assert (bb_vinfo || !inv_p);
        }
      else
        {
          /* For interleaved stores we created vectorized defs for all the
             defs stored in OPRNDS in the previous iteration (previous copy).
             DR_CHAIN is then used as an input to vect_permute_store_chain(),
             and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
             next copy.
             If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
          for (i = 0; i < group_size; i++)
            {
              op = VEC_index (tree, oprnds, i);
              vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
                                  &dt);
              vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
              VEC_replace(tree, dr_chain, i, vec_oprnd);
              VEC_replace(tree, oprnds, i, vec_oprnd);
            }
          dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                         TYPE_SIZE_UNIT (aggr_type));
        }

      if (store_lanes_p)
        {
          tree vec_array;

          /* Combine all the vectors into an array.  */
          vec_array = create_vector_array (vectype, vec_num);
          for (i = 0; i < vec_num; i++)
            {
              vec_oprnd = VEC_index (tree, dr_chain, i);
              write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
            }

          /* Emit:
               MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
          gimple_call_set_lhs (new_stmt, data_ref);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);
        }
      else
        {
          new_stmt = NULL;
          if (strided_store)
            {
              result_chain = VEC_alloc (tree, heap, group_size);
              /* Permute.  */
              vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
                                        &result_chain);
            }

          next_stmt = first_stmt;
          for (i = 0; i < vec_num; i++)
            {
              struct ptr_info_def *pi;

              if (i > 0)
                /* Bump the vector pointer.  */
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              if (slp)
                vec_oprnd = VEC_index (tree, vec_oprnds, i);
              else if (strided_store)
                /* For strided stores vectorized defs are interleaved in
                   vect_permute_store_chain().  */
                vec_oprnd = VEC_index (tree, result_chain, i);

              data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
                                 build_int_cst (reference_alias_ptr_type
                                                (DR_REF (first_dr)), 0));
              pi = get_ptr_info (dataref_ptr);
              pi->align = TYPE_ALIGN_UNIT (vectype);
              if (aligned_access_p (first_dr))
                pi->misalign = 0;
              else if (DR_MISALIGNMENT (first_dr) == -1)
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  pi->align = TYPE_ALIGN_UNIT (elem_type);
                  pi->misalign = 0;
                }
              else
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  pi->misalign = DR_MISALIGNMENT (first_dr);
                }

              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_assign (data_ref, vec_oprnd);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              mark_symbols_for_renaming (new_stmt);

              if (slp)
                continue;

              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              if (!next_stmt)
                break;
            }
        }

      if (!slp)
        {
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }

  VEC_free (tree, heap, dr_chain);
  VEC_free (tree, heap, oprnds);
  if (result_chain)
    VEC_free (tree, heap, result_chain);
  if (vec_oprnds)
    VEC_free (tree, heap, vec_oprnds);

  return true;
}
/* Given a vector type VECTYPE returns a builtin DECL to be used
   for vector permutation and stores a mask into *MASK that implements
   reversal of the vector elements.  If that is impossible to do
   returns NULL (and *MASK is unchanged).  */
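
/* E.g., for V4SI the mask built below is {3, 2, 1, 0}: the index list
   is assembled front to back with tree_cons, which prepends, so the
   element order ends up reversed.  */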
static tree
perm_mask_for_reverse (tree vectype, tree *mask)
{
  tree builtin_decl;
  tree mask_element_type, mask_type;
  tree mask_vec = NULL;
  int i;
  int nunits;

  if (!targetm.vectorize.builtin_vec_perm)
    return NULL_TREE;

  builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
                                                     &mask_element_type);
  if (!builtin_decl || !mask_element_type)
    return NULL_TREE;

  mask_type = get_vectype_for_scalar_type (mask_element_type);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (!mask_type
      || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
    return NULL_TREE;

  for (i = 0; i < nunits; i++)
    mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
  mask_vec = build_vector (mask_type, mask_vec);

  if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
    return NULL_TREE;
  if (mask)
    *mask = mask_vec;
  return builtin_decl;
}
/* Given a vector variable X, that was generated for the scalar LHS of
   STMT, generate instructions to reverse the vector elements of X,
   insert them at *GSI and return the permuted vector variable.  */

static tree
reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree mask_vec, builtin_decl;
  tree perm_dest, data_ref;
  gimple perm_stmt;

  builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);

  perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
  if (!useless_type_conversion_p (vectype,
                                  TREE_TYPE (TREE_TYPE (builtin_decl))))
    {
      tree tem = create_tmp_reg (TREE_TYPE (TREE_TYPE (builtin_decl)), NULL);
      tem = make_ssa_name (tem, perm_stmt);
      gimple_call_set_lhs (perm_stmt, tem);
      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
      perm_stmt = gimple_build_assign (NULL_TREE,
                                       build1 (VIEW_CONVERT_EXPR,
                                               vectype, tem));
    }
  data_ref = make_ssa_name (perm_dest, perm_stmt);
  gimple_set_lhs (perm_stmt, data_ref);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* vectorizable_load.

   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
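
/* For example, the load in

       for (i = 0; i < N; i++)
         ... = b[i];

   becomes one vector load ("vect_b = MEM[...]") per copy.  Interleaved
   load groups go through vect_permute_load_chain () or a load-lanes
   internal call, and negative-step accesses are loaded whole and then
   reversed via reverse_vec_elements ().  */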
3986 vectorizable_load (gimple stmt
, gimple_stmt_iterator
*gsi
, gimple
*vec_stmt
,
3987 slp_tree slp_node
, slp_instance slp_node_instance
)
3990 tree vec_dest
= NULL
;
3991 tree data_ref
= NULL
;
3992 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3993 stmt_vec_info prev_stmt_info
;
3994 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3995 struct loop
*loop
= NULL
;
3996 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
3997 bool nested_in_vect_loop
= false;
3998 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
;
3999 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
4002 enum machine_mode mode
;
4003 gimple new_stmt
= NULL
;
4005 enum dr_alignment_support alignment_support_scheme
;
4006 tree dataref_ptr
= NULL_TREE
;
4008 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
4010 int i
, j
, group_size
;
4011 tree msq
= NULL_TREE
, lsq
;
4012 tree offset
= NULL_TREE
;
4013 tree realignment_token
= NULL_TREE
;
4015 VEC(tree
,heap
) *dr_chain
= NULL
;
4016 bool strided_load
= false;
4017 bool load_lanes_p
= false;
4022 bool compute_in_loop
= false;
4023 struct loop
*at_loop
;
4025 bool slp
= (slp_node
!= NULL
);
4026 bool slp_perm
= false;
4027 enum tree_code code
;
4028 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4034 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
4035 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
4036 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in nested loop.");
      return false;
    }
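/* Example of the arithmetic above: with a vectorization factor of 8 and a
   four-element vectype (nunits == 4), ncopies == 8 / 4 == 2, i.e. two
   vector load stmts are generated for each scalar load.  */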
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable load? */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != ARRAY_REF
      && code != INDIRECT_REF
      && code != COMPONENT_REF
      && code != IMAGPART_EXPR
      && code != REALPART_EXPR
      && code != MEM_REF)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
  if (negative && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types with negative step.");
      return false;
    }

  scalar_type = TREE_TYPE (DR_REF (dr));
  mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Aligned load, but unsupported type.");
      return false;
    }

  /* The vector component type needs to be trivially convertible to the
     scalar lhs.  This should always be the case.  */
  elem_type = TREE_TYPE (vectype);
  if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "??? operands of different types");
      return false;
    }
  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      strided_load = true;
      /* FORNOW */
      gcc_assert (! nested_in_vect_loop);

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      if (!slp && !PURE_SLP_STMT (stmt_info))
        {
          group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
          if (vect_load_lanes_supported (vectype, group_size))
            load_lanes_p = true;
          else if (!vect_strided_load_supported (vectype, group_size))
            return false;
        }
    }

  if (negative)
    {
      gcc_assert (!strided_load);
      alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
      if (alignment_support_scheme != dr_aligned
          && alignment_support_scheme != dr_unaligned_supported)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "negative step but alignment required.");
          return false;
        }

      if (!perm_mask_for_reverse (vectype, NULL))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "negative step and reversing not supported.");
          return false;
        }
    }
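/* A loop that typically reaches the negative-step path above is,
   schematically:

     for (i = N - 1; i >= 0; i--)
       s += a[i];

   DR_STEP of the data-ref is negative, so the vector loads are done at
   adjusted (negative) offsets and each loaded vector is then reversed
   with the permutation prepared by perm_mask_for_reverse.  */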
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform load. ncopies = %d", ncopies);
  if (strided_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          strided_load = false;
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
            slp_perm = true;
        }
      else
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */
  /* In case of interleaving (non-unit strided access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
     VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = initial_addr;
       indx = 0;
       loop {
         p = p + indx * vectype_size;
         vec_dest = *(p);
         indx = indx + 1;
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

       msq_init = *(floor(p1))
       p2 = initial_addr + VS - 1;
       realignment_token = call target_builtin;
       indx = 0;
       loop {
         p2 = p2 + indx * vectype_size
         lsq = *(floor(p2))
         vec_dest = realign_load (msq, lsq, realignment_token)
         indx = indx + 1;
         msq = lsq;
       }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
  if (loop && nested_in_vect_loop_p (loop, stmt)
      && (TREE_INT_CST_LOW (DR_STEP (dr))
          % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = SSA_NAME_DEF_STMT (msq);
          offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
        }
    }
  else
    at_loop = loop;

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;
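/* When LOAD_LANES_P the aggregate type chosen above describes the whole
   group, e.g. for a group of two four-element vectors an eight-element
   array, and the loop below emits one

     vect_array = LOAD_LANES (MEM_REF[... all elements ...]);

   internal call, mapping to load-multiple instructions on targets that
   provide them (such as ARM NEON's vld2/vld3/vld4), instead of separate
   loads followed by extract-even/extract-odd permutations.  */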
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
                                                offset, &dummy, gsi,
                                                &ptr_incr, false, &inv_p);
      else
        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                       TYPE_SIZE_UNIT (aggr_type));

      if (strided_load || slp_perm)
        dr_chain = VEC_alloc (tree, heap, vec_num);

      if (load_lanes_p)
        {
          tree vec_array;

          vec_array = create_vector_array (vectype, vec_num);

          /* Emit:
               VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
          gimple_call_set_lhs (new_stmt, vec_array);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt, gsi, scalar_dest,
                                            vec_array, i);
              VEC_quick_push (tree, dr_chain, new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_strided_load_vectors (stmt, dr_chain);
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              /* 2. Create the vector-load in the loop.  */
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    struct ptr_info_def *pi;
                    data_ref
                      = build2 (MEM_REF, vectype, dataref_ptr,
                                build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                    pi = get_ptr_info (dataref_ptr);
                    pi->align = TYPE_ALIGN_UNIT (vectype);
                    if (alignment_support_scheme == dr_aligned)
                      {
                        gcc_assert (aligned_access_p (first_dr));
                        pi->misalign = 0;
                      }
                    else if (DR_MISALIGNMENT (first_dr) == -1)
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        pi->align = TYPE_ALIGN_UNIT (elem_type);
                        pi->misalign = 0;
                      }
                    else
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        pi->misalign = DR_MISALIGNMENT (first_dr);
                      }
                    break;
                  }
                case dr_explicit_realign:
                  {
                    tree ptr, bump;
                    tree vs_minus_1;

                    vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);

                    if (compute_in_loop)
                      msq = vect_setup_realignment (first_stmt, gsi,
                                                    &realignment_token,
                                                    dr_explicit_realign,
                                                    dataref_ptr, NULL);

                    new_stmt = gimple_build_assign_with_ops
                                 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
                                  build_int_cst
                                  (TREE_TYPE (dataref_ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                    vec_dest = vect_create_destination_var (scalar_dest,
                                                            vectype);
                    new_stmt = gimple_build_assign (vec_dest, data_ref);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_assign_set_lhs (new_stmt, new_temp);
                    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    msq = new_temp;

                    bump = size_binop (MULT_EXPR, vs_minus_1,
                                       TYPE_SIZE_UNIT (scalar_type));
                    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                    new_stmt = gimple_build_assign_with_ops
                                 (BIT_AND_EXPR, NULL_TREE, ptr,
                                  build_int_cst
                                  (TREE_TYPE (ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                    break;
                  }
                case dr_explicit_realign_optimized:
                  new_stmt = gimple_build_assign_with_ops
                               (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
                                build_int_cst
                                (TREE_TYPE (dataref_ptr),
                                 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                  new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr),
                                            new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  data_ref
                    = build2 (MEM_REF, vectype, new_temp,
                              build_int_cst (reference_alias_ptr_type
                                             (DR_REF (first_dr)), 0));
                  break;
                default:
                  gcc_unreachable ();
                }
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              mark_symbols_for_renaming (new_stmt);
              /* 3. Handle explicit realignment if necessary/supported.
                 Create in loop:
                   vec_dest = realign_load (msq, lsq, realignment_token)  */
              if (alignment_support_scheme == dr_explicit_realign_optimized
                  || alignment_support_scheme == dr_explicit_realign)
                {
                  lsq = gimple_assign_lhs (new_stmt);
                  if (!realignment_token)
                    realignment_token = dataref_ptr;
                  vec_dest = vect_create_destination_var (scalar_dest, vectype);
                  new_stmt
                    = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR,
                                                     vec_dest, msq, lsq,
                                                     realignment_token);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  if (alignment_support_scheme == dr_explicit_realign_optimized)
                    {
                      gcc_assert (phi);
                      if (i == vec_num - 1 && j == ncopies - 1)
                        add_phi_arg (phi, lsq,
                                     loop_latch_edge (containing_loop),
                                     UNKNOWN_LOCATION);
                      msq = lsq;
                    }
                }
              /* 4. Handle invariant-load.  */
              if (inv_p && !bb_vinfo)
                {
                  gcc_assert (!strided_load);
                  gcc_assert (nested_in_vect_loop_p (loop, stmt));
                  if (j == 0)
                    {
                      int k;
                      tree t = NULL_TREE;
                      tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);

                      /* CHECKME: bitpos depends on endianess?  */
                      bitpos = bitsize_zero_node;
                      vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
                                        bitsize, bitpos);
                      vec_dest = vect_create_destination_var (scalar_dest,
                                                              NULL_TREE);
                      new_stmt = gimple_build_assign (vec_dest, vec_inv);
                      new_temp = make_ssa_name (vec_dest, new_stmt);
                      gimple_assign_set_lhs (new_stmt, new_temp);
                      vect_finish_stmt_generation (stmt, new_stmt, gsi);

                      for (k = nunits - 1; k >= 0; --k)
                        t = tree_cons (NULL_TREE, new_temp, t);
                      /* FIXME: use build_constructor directly.  */
                      vec_inv = build_constructor_from_list (vectype, t);
                      new_temp = vect_init_vector (stmt, vec_inv,
                                                   vectype, gsi);
                      new_stmt = SSA_NAME_DEF_STMT (new_temp);
                    }
                  else
                    gcc_unreachable (); /* FORNOW. */
                }

              if (negative)
                {
                  new_temp = reverse_vec_elements (new_temp, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }
              /* Collect vector loads and later create their permutation in
                 vect_transform_strided_load ().  */
              if (strided_load || slp_perm)
                VEC_quick_push (tree, dr_chain, new_temp);

              /* Store vector loads in the corresponding SLP_NODE.  */
              if (slp && !slp_perm)
                VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
                                new_stmt);
            }
        }

      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
                                             slp_node_instance, false))
            {
              VEC_free (tree, heap, dr_chain);
              return false;
            }
        }
      else
        {
          if (strided_load)
            {
              if (!load_lanes_p)
                vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      if (dr_chain)
        VEC_free (tree, heap, dr_chain);
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vec_is_simple_use.  */

static bool
vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
{
  tree lhs, rhs;
  tree def;
  enum vect_def_type dt;

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
                               &dt))
        return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
           && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
                               &dt))
        return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
           && TREE_CODE (rhs) != FIXED_CST)
    return false;

  return true;
}
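/* For example, a condition like a_1 < b_2, where a_1 and b_2 are SSA_NAMEs
   defined inside the loop (or constants/invariants), is "simple" in the
   above sense; operands whose definition cannot be classified by
   vect_is_simple_use make the enclosing COND_EXPR non-vectorizable.  */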
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, tree reduc_def, int reduc_index)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree op = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum machine_mode vec_mode;
  tree def;
  enum vect_def_type dt, dts[4];
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int j;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */
  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
           && reduc_def))
    return false;

  /* FORNOW: not yet supported.  */
  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "value used after loop.");
      return false;
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));
  op = gimple_assign_rhs1 (stmt);
  cond_expr = TREE_OPERAND (op, 0);
  then_clause = TREE_OPERAND (op, 1);
  else_clause = TREE_OPERAND (op, 2);

  if (!vect_is_simple_cond (cond_expr, loop_vinfo))
    return false;

  /* We do not handle two different vector types for the condition
     and the values.  */
  if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
                           TREE_TYPE (vectype)))
    return false;

  if (TREE_CODE (then_clause) == SSA_NAME)
    {
      gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
      if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
                               &then_def_stmt, &def, &dt))
        return false;
    }
  else if (TREE_CODE (then_clause) != INTEGER_CST
           && TREE_CODE (then_clause) != REAL_CST
           && TREE_CODE (then_clause) != FIXED_CST)
    return false;

  if (TREE_CODE (else_clause) == SSA_NAME)
    {
      gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
      if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
                               &else_def_stmt, &def, &dt))
        return false;
    }
  else if (TREE_CODE (else_clause) != INTEGER_CST
           && TREE_CODE (else_clause) != REAL_CST
           && TREE_CODE (else_clause) != FIXED_CST)
    return false;

  vec_mode = TYPE_MODE (vectype);

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
    }
  /* Transform.  */

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gimple new_stmt = NULL;
      if (j == 0)
        {
          gimple gtemp;
          vec_cond_lhs =
              vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
                                            stmt, NULL);
          vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
                              NULL, &gtemp, &def, &dts[0]);
          vec_cond_rhs =
              vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
                                            stmt, NULL);
          vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
                              NULL, &gtemp, &def, &dts[1]);
          if (reduc_index == 1)
            vec_then_clause = reduc_def;
          else
            {
              vec_then_clause = vect_get_vec_def_for_operand (then_clause,
                                                              stmt, NULL);
              vect_is_simple_use (then_clause, loop_vinfo,
                                  NULL, &gtemp, &def, &dts[2]);
            }
          if (reduc_index == 2)
            vec_else_clause = reduc_def;
          else
            {
              vec_else_clause = vect_get_vec_def_for_operand (else_clause,
                                                              stmt, NULL);
              vect_is_simple_use (else_clause, loop_vinfo,
                                  NULL, &gtemp, &def, &dts[3]);
            }
        }
      else
        {
          vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
          vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
          vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
                                                            vec_then_clause);
          vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
                                                            vec_else_clause);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      vec_compare = build2 (TREE_CODE (cond_expr), vectype,
                            vec_cond_lhs, vec_cond_rhs);
      vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
                              vec_compare, vec_then_clause, vec_else_clause);

      new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);
      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  return true;
}
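/* Schematically (an illustration with made-up SSA names), a scalar stmt

     x_5 = a_1 < b_2 ? c_3 : d_4;

   is replaced above by a vector stmt of the form

     vx_5 = VEC_COND_EXPR <va_1 < vb_2, vc_3, vd_4>;

   with one such stmt per copy when ncopies > 1, chained through
   STMT_VINFO_RELATED_STMT as for the other vectorizable operations.  */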
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "==> examining statement: ");
      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: stmt has volatile operands");

      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.  */

  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      gimple pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
        {
          /* Analyze PATTERN_STMT instead of the original stmt.  */
          stmt = pattern_stmt;
          stmt_info = vinfo_for_stmt (pattern_stmt);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining pattern statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }
        }
      else
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "irrelevant.");

          return true;
        }
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
      case vect_internal_def:
        break;

      case vect_reduction_def:
      case vect_nested_cycle:
         gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
                     || relevance == vect_used_in_outer_by_reduction
                     || relevance == vect_unused_in_scope));
         break;

      case vect_induction_def:
      case vect_constant_def:
      case vect_external_def:
      case vect_unknown_def_type:
      default:
        gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));

      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "get vectype for scalar type: ");
          print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
        }

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "not SLPed: unsupported data-type ");
              print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
            }
          return false;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "vectype: ");
          print_generic_expr (vect_dump, vectype, TDF_SLIM);
        }

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
      *need_to_vectorize = true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
          || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
          || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
          || vectorizable_conversion (stmt, NULL, NULL, NULL)
          || vectorizable_shift (stmt, NULL, NULL, NULL)
          || vectorizable_operation (stmt, NULL, NULL, NULL)
          || vectorizable_assignment (stmt, NULL, NULL, NULL)
          || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
          || vectorizable_call (stmt, NULL, NULL)
          || vectorizable_store (stmt, NULL, NULL, NULL)
          || vectorizable_reduction (stmt, NULL, NULL, NULL)
          || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
  else
    {
      if (bb_vinfo)
        ok = (vectorizable_shift (stmt, NULL, NULL, node)
              || vectorizable_operation (stmt, NULL, NULL, node)
              || vectorizable_assignment (stmt, NULL, NULL, node)
              || vectorizable_load (stmt, NULL, NULL, node, NULL)
              || vectorizable_store (stmt, NULL, NULL, node));
    }

  if (!ok)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        {
          fprintf (vect_dump, "not vectorized: relevant stmt not ");
          fprintf (vect_dump, "supported: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        {
          fprintf (vect_dump, "not vectorized: live stmt not ");
          fprintf (vect_dump, "supported: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
                     bool *strided_store, slp_tree slp_node,
                     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
  bool done;

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
      done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case type_promotion_vec_info_type:
      done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
                                slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
        {
          /* In case of interleaving, the whole chain is vectorized when the
             last store in the chain is reached.  Store stmts before the last
             one are skipped, and there vec_stmt_info shouldn't be freed
             meanwhile.  */
          *strided_store = true;
          if (STMT_VINFO_VEC_STMT (stmt_info))
            is_store = true;
        }
      else
        is_store = true;
      break;

    case condition_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_call (stmt, gsi, &vec_stmt);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "stmt not supported.");
          gcc_unreachable ();
        }
    }

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
                                  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
          || STMT_VINFO_RELEVANT (stmt_info) ==
                                            vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
                                 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple exit_phi;

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
         (to be used when vectorizing outer-loop stmts that use the DEF of
         STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
        scalar_dest = PHI_RESULT (stmt);
      else
        scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
            }
        }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    {
      STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
      orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
      if (orig_stmt_in_pattern)
        {
          stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
          /* STMT was inserted by the vectorizer to replace a computation
             idiom.  ORIG_STMT_IN_PATTERN is a stmt in the original sequence
             that computed this idiom.  We need to record a pointer to
             VEC_STMT in the stmt_info of ORIG_STMT_IN_PATTERN.  See more
             details in the documentation of vect_pattern_recog.  */
          if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
            {
              gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
                          == orig_scalar_stmt);
              STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
            }
        }
    }

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple first_stmt)
{
  gimple next = first_stmt;
  gimple tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      gsi_remove (&next_si, true);
      tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
                   bb_vec_info bb_vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
  STMT_VINFO_BB_VINFO (res) = bb_vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
  STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
  STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;
  GROUP_READ_WRITE_DEPENDENCE (res) = false;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec);
  stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
}


/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  gcc_assert (stmt_vec_info_vec);
  VEC_free (vec_void_p, heap, stmt_vec_info_vec);
}


/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  enum machine_mode inner_mode = TYPE_MODE (scalar_type);
  enum machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    return NULL_TREE;

  /* If we'd build a vector type of elements whose mode precision doesn't
     match their types precision we'll get mismatched types on vector
     extracts via BIT_FIELD_REFs.  This effectively means we disable
     vectorization of bool and/or enum types in some languages.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "get vectype with %d units of type ", nunits);
      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
    }

  if (!vectype)
    return NULL_TREE;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vectype: ");
      print_generic_expr (vect_dump, vectype, TDF_SLIM);
    }

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "mode not supported by target.");
      return NULL_TREE;
    }

  return vectype;
}

unsigned int current_vector_size;
/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
                                                  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  return get_vectype_for_scalar_type_and_size
           (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   Input:
   LOOP_VINFO - the vect info of the loop that is being vectorized.
   BB_VINFO - the vect info of the basic block that is being vectorized.
   OPERAND - operand of a stmt in the loop or bb.
   DEF - the defining stmt in case OPERAND is an SSA_NAME.

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
                    bb_vec_info bb_vinfo, gimple *def_stmt,
                    tree *def, enum vect_def_type *dt)
{
  basic_block bb;
  stmt_vec_info stmt_vinfo;
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  *def_stmt = NULL;
  *def = NULL_TREE;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_is_simple_use: operand ");
      print_generic_expr (vect_dump, operand, TDF_SLIM);
    }

  if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) == PAREN_EXPR)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "non-associatable copy.");
      operand = TREE_OPERAND (operand, 0);
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not ssa-name.");
      return false;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (*def_stmt == NULL)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no def_stmt.");
      return false;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "def_stmt: ");
      print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
    }

  /* Empty stmt is expected only in case of a function argument.
     (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN).  */
  if (gimple_nop_p (*def_stmt))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }

  bb = gimple_bb (*def_stmt);

  if ((loop && !flow_bb_inside_loop_p (loop, bb))
      || (!loop && bb != BB_VINFO_BB (bb_vinfo))
      || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
    *dt = vect_external_def;
  else
    {
      stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (*dt == vect_unknown_def_type)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unsupported pattern.");
      return false;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "type of def: %d.",*dt);

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
      *def = gimple_phi_result (*def_stmt);
      break;

    case GIMPLE_ASSIGN:
      *def = gimple_assign_lhs (*def_stmt);
      break;

    case GIMPLE_CALL:
      *def = gimple_call_lhs (*def_stmt);
      if (*def != NULL)
        break;
      /* FALLTHRU */
    default:
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unsupported defining stmt: ");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use_1.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
                      bb_vec_info bb_vinfo, gimple *def_stmt,
                      tree *def, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
      if (STMT_VINFO_IN_PATTERN_P (stmt_info))
        stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
           || *dt == vect_constant_def
           || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - DECL1 and DECL2 are decls of target builtin functions to be used
   when vectorizing the operation, if available.  In this case,
   CODE1 and CODE2 are CALL_EXPR.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, gimple stmt,
                                tree vectype_out, tree vectype_in,
                                tree *decl1, tree *decl2,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                VEC (tree, heap) **interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  bool ordered_p;
  enum machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;

  /* The result of a vectorized widening operation usually requires two vectors
     (because the widened results do not fit int one vector). The generated
     vector results would normally be expected to be generated in the same
     order as in the original scalar computation, i.e. if 8 results are
     generated in each vector iteration, they are to be organized as follows:
        vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].

     However, in the special case that the result of the widening operation is
     used in a reduction computation only, the order doesn't matter (because
     when vectorizing a reduction we change the order of the computation).
     Some targets can take advantage of this and generate more efficient code.
     For example, targets like Altivec, that support widen_mult using a sequence
     of {mult_even,mult_odd} generate the following vectors:
        vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].

     When vectorizing outer-loops, we execute the inner-loop sequentially
     (each vectorized inner-loop iteration contributes to VF outer-loop
     iterations in parallel).  We therefore don't allow to change the order
     of the computation in the inner-loop during outer-loop vectorization.  */

  if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
      && !nested_in_vect_loop_p (vect_loop, stmt))
    ordered_p = false;
  else
    ordered_p = true;

  if (!ordered_p
      && code == WIDEN_MULT_EXPR
      && targetm.vectorize.builtin_mul_widen_even
      && targetm.vectorize.builtin_mul_widen_even (vectype)
      && targetm.vectorize.builtin_mul_widen_odd
      && targetm.vectorize.builtin_mul_widen_odd (vectype))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unordered widening operation detected.");

      *code1 = *code2 = CALL_EXPR;
      *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
      *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
      return true;
    }

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_WIDEN_MULT_HI_EXPR;
          c2 = VEC_WIDEN_MULT_LO_EXPR;
        }
      else
        {
          c2 = VEC_WIDEN_MULT_HI_EXPR;
          c1 = VEC_WIDEN_MULT_LO_EXPR;
        }
      break;

    CASE_CONVERT:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_UNPACK_HI_EXPR;
          c2 = VEC_UNPACK_LO_EXPR;
        }
      else
        {
          c2 = VEC_UNPACK_HI_EXPR;
          c1 = VEC_UNPACK_LO_EXPR;
        }
      break;

    case FLOAT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_UNPACK_FLOAT_HI_EXPR;
          c2 = VEC_UNPACK_FLOAT_LO_EXPR;
        }
      else
        {
          c2 = VEC_UNPACK_FLOAT_HI_EXPR;
          c1 = VEC_UNPACK_FLOAT_LO_EXPR;
        }
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
      || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
    {
      int i;
      tree prev_type = vectype, intermediate_type;
      enum machine_mode intermediate_mode, prev_mode = vec_mode;
      optab optab3, optab4;

      if (!CONVERT_EXPR_CODE_P (code))
        return false;

      *code1 = c1;
      *code2 = c2;

      /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
         intermediate steps in promotion sequence.  We try
         MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
         not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < 3; i++)
        {
          intermediate_mode = insn_data[icode1].operand[0].mode;
          intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
                                                    TYPE_UNSIGNED (prev_type));
          optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
          optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

          if (!optab3 || !optab4
              || ((icode1 = optab_handler (optab1, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode1].operand[0].mode != intermediate_mode
              || ((icode2 = optab_handler (optab2, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode2].operand[0].mode != intermediate_mode
              || ((icode1 = optab_handler (optab3, intermediate_mode))
                  == CODE_FOR_nothing)
              || ((icode2 = optab_handler (optab4, intermediate_mode))
                  == CODE_FOR_nothing))
            return false;

          VEC_quick_push (tree, *interm_types, intermediate_type);
          (*multi_step_cvt)++;

          if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
              && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
            return true;

          prev_type = intermediate_type;
          prev_mode = intermediate_mode;
        }

      return false;
    }

  *code1 = c1;
  *code2 = c2;
  return true;
}
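/* Example of a multi-step widening handled above (a sketch): widening a
   16-element char vector to int vectors cannot be done in one VEC_UNPACK
   step, so the loop tries the intermediate short vector type, records it
   in *INTERM_TYPES, sets *MULTI_STEP_CVT to 1, and succeeds if the target
   provides unpack optabs for both the char->short and the short->int
   steps.  */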
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 VEC (tree, heap) **interm_types)
{
  enum machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  int i;

  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
    {
      enum machine_mode intermediate_mode, prev_mode = vec_mode;

      *code1 = c1;
      prev_type = vectype;
      /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
         intermediate steps in promotion sequence.  We try
         MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
         not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < 3; i++)
        {
          intermediate_mode = insn_data[icode1].operand[0].mode;
          intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
                                                    TYPE_UNSIGNED (prev_type));
          interm_optab = optab_for_tree_code (c1, intermediate_type,
                                              optab_default);
          if (!interm_optab
              || ((icode1 = optab_handler (optab1, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode1].operand[0].mode != intermediate_mode
              || ((icode1 = optab_handler (interm_optab, intermediate_mode))
                  == CODE_FOR_nothing))
            return false;

          VEC_quick_push (tree, *interm_types, intermediate_type);
          (*multi_step_cvt)++;

          if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
            return true;

          prev_type = intermediate_type;
          prev_mode = intermediate_mode;