/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"
#include "internal-fn.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}
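
/* Usage sketch (illustrative only, not from the original sources):
   during analysis, candidate costs are accumulated into a cost vector
   and only committed to the target model once a vectorization scheme
   is chosen, e.g.:

       stmt_vector_for_cost body_costs;
       body_costs.create (0);
       unsigned estimate
	 = record_stmt_cost (&body_costs, 1, vector_stmt, stmt_info,
			     0, vect_body);

   whereas passing a NULL cost vector forwards the cost directly to
   add_stmt_cost on the target's cost data.  */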
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p,
		    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
        {
          imm_use_iterator imm_iter;
          use_operand_p use_p;
          gimple *use_stmt;
          tree lhs;
          loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (is_gimple_assign (stmt))
            lhs = gimple_assign_lhs (stmt);
          else
            lhs = gimple_call_lhs (stmt);

          /* This use is out of pattern use, if LHS has other uses that are
             pattern uses, we should mark the stmt itself, and not the pattern
             stmt.  */
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	      {
		if (is_gimple_debug (USE_STMT (use_p)))
		  continue;
		use_stmt = USE_STMT (use_p);

		if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
		  continue;

		if (vinfo_for_stmt (use_stmt)
		    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
		  {
		    found = true;
		    break;
		  }
	      }
        }

      if (!found)
        {
	  /* This is the last stmt in a sequence that was detected as a
	     pattern that can potentially be vectorized.  Don't mark the stmt
	     as relevant/live because it's not going to be vectorized.
	     Instead mark the pattern-stmt that replaces it.  */

	  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "last stmt in pattern. don't mark"
			     " relevant/live.\n");
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
	  save_relevant = STMT_VINFO_RELEVANT (stmt_info);
	  save_live_p = STMT_VINFO_LIVE_P (stmt_info);
	  stmt = pattern_stmt;
        }
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
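
/* Illustrative example (not from the original sources): a widening
   multiply detected by pattern recognition,

       S1: prod = (int) a_5 * (int) b_6;          <-- original stmt
       P1: patt = WIDEN_MULT_EXPR <a_5, b_6>;     <-- pattern stmt

   When S1 is marked relevant, it is usually P1 - the stmt that will
   actually be vectorized - that receives the relevant/live flags,
   following the redirection logic above.  */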
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}
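
/* Illustrative example (not from the original sources): in

       for (i = 0; i < n; i++)
	 {
	   a[i] = b[i] + 1;   <-- relevant: alters memory (vdef)
	   s = b[i] * 2;      <-- live if s is used after the loop
	 }
       last = s;

   the store is relevant for vectorization, and the definition of s is
   live because it is used by the loop-closed exit phi.  */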
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that its of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
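
/* Illustrative example (not from the original sources): for the stmt

       a[i_3] = x_7;

   i_3 is used only to index the array, so
   exist_non_indexing_operands_for_use_p (i_3, stmt) returns false,
   while for the stored value x_7 it returns true.  */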
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT cause it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
		      is_pattern_stmt_p (stmt_vinfo));
  return true;
}
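
/* Illustrative example (not from the original sources): in the nest

       for (i = ...)              <-- outer loop
	 {
	   d = ...;               <-- def_stmt in the outer loop
	   for (j = ...)          <-- inner loop being vectorized
	     ... = ... d ...;     <-- use in the inner loop
	 }

   case 3a above adjusts "relevant" for d's definition because the def
   and the use sit in different loops of the nest.  */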
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p, false);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	  live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	  relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the liveness/relevance as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
	{
	case vect_reduction_def:
	  switch (tmp_relevant)
	    {
	    case vect_unused_in_scope:
	      relevant = vect_used_by_reduction;
	      break;

	    case vect_used_by_reduction:
	      if (gimple_code (stmt) == GIMPLE_PHI)
		break;
	      /* fall through */

	    default:
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }

	  live_p = false;
	  break;

	case vect_nested_cycle:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_in_outer_by_reduction
	      && tmp_relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	case vect_double_reduction_def:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_by_reduction)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    live_p, relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       live_p, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
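
/* Worked example (illustrative, not from the original sources): for a
   vector add z = x + inv with ncopies == 2, where inv is loop
   invariant (vect_external_def), the loop above records one prologue
   entry for materializing the invariant and the body gets a single
   vector_stmt entry with count 2; with a unit vector_stmt cost the
   dump reports inside_cost = 2, prologue_cost = 1.  */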
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
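
/* Worked example (illustrative, not from the original sources): for a
   two-step promotion (PWR == 1) the loop above adds
   vect_pow2 (1) + vect_pow2 (2) == 2 + 4 == 6 vec_promote_demote
   stmts; the corresponding two-step demotion adds
   vect_pow2 (0) + vect_pow2 (1) == 1 + 2 == 3.  */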
/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple *first_stmt;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
	{
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  group_size = 1;
	}
      else
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  group_size = vect_cost_group_size (stmt_info);
	}

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses a high and low interleave or shuffle operations for each
	 needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar stores plus extracting the elements.  */
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
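
/* Worked example (illustrative, not from the original sources): a
   permute-and-store of a group with group_size == 4 and ncopies == 1
   records ncopies * ceil_log2 (4) * 4 == 8 vec_perm operations in the
   loop body, in addition to the vector stores themselves.  */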
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple *first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses an even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
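
/* Illustrative example (not from the original sources): called with
   VAL == 5 and TYPE == vector(4) int, vect_init_vector builds the
   vector constant and emits

       cst_1 = { 5, 5, 5, 5 };

   in the loop preheader (or before *GSI when GSI is non-NULL), and
   returns the SSA name cst_1 for use in the vectorized stmt.  */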
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  gimple *def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def_stmt)
	{
	  if (loc_printed)
	    dump_printf (MSG_NOTE, "  def_stmt =  ");
	  else
	    dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
	}
    }

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      {
	if (vectype)
	  vector_type = vectype;
	else if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
		 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	  vector_type = build_same_sized_truth_vector_type (stmt_vectype);
	else
	  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

	gcc_assert (vector_type);
	return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      /* Code should use get_initial_def_for_reduction.  */
      gcc_unreachable ();

    /* operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
				tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
	{
	  tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
	  tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
	  if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1)))
	    return ifn;
	}
    }
  return IFN_LAST;
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);
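
/* Illustrative example (not from the original sources): for a call
   res = sqrt (x) with double operands, CFN_SQRT is an internal
   function and IFN_SQRT is directly vectorizable, so if the target
   provides a V2DF sqrt optab the function above returns IFN_SQRT for
   VECTYPE_OUT == VECTYPE_IN == vector(2) double.  */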
1678 /* Function vectorizable_mask_load_store.
1680 Check if STMT performs a conditional load or store that can be vectorized.
1681 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1682 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1683 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1686 vectorizable_mask_load_store (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
1687 gimple
**vec_stmt
, slp_tree slp_node
)
1689 tree vec_dest
= NULL
;
1690 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1691 stmt_vec_info prev_stmt_info
;
1692 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1693 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1694 bool nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
1695 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
1696 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1697 tree rhs_vectype
= NULL_TREE
;
1702 tree dataref_ptr
= NULL_TREE
;
1704 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1708 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
1709 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
1710 int gather_scale
= 1;
1711 enum vect_def_type gather_dt
= vect_unknown_def_type
;
1715 enum vect_def_type dt
;
1717 if (slp_node
!= NULL
)
1720 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
1721 gcc_assert (ncopies
>= 1);
1723 is_store
= gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
;
1724 mask
= gimple_call_arg (stmt
, 2);
1726 if (TREE_CODE (TREE_TYPE (mask
)) != BOOLEAN_TYPE
)
1729 /* FORNOW. This restriction should be relaxed. */
1730 if (nested_in_vect_loop
&& ncopies
> 1)
1732 if (dump_enabled_p ())
1733 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1734 "multiple types in nested loop.");
1738 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
1741 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
1744 if (!STMT_VINFO_DATA_REF (stmt_info
))
1747 elem_type
= TREE_TYPE (vectype
);
1749 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1752 if (STMT_VINFO_STRIDED_P (stmt_info
))
1755 if (TREE_CODE (mask
) != SSA_NAME
)
1758 if (!vect_is_simple_use (mask
, loop_vinfo
, &def_stmt
, &dt
, &mask_vectype
))
1762 mask_vectype
= get_mask_type_for_scalar_type (TREE_TYPE (vectype
));
1769 tree rhs
= gimple_call_arg (stmt
, 3);
1770 if (!vect_is_simple_use (rhs
, loop_vinfo
, &def_stmt
, &dt
, &rhs_vectype
))
1774 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1777 gather_decl
= vect_check_gather_scatter (stmt
, loop_vinfo
, &gather_base
,
1778 &gather_off
, &gather_scale
);
1779 gcc_assert (gather_decl
);
1780 if (!vect_is_simple_use (gather_off
, loop_vinfo
, &def_stmt
, &gather_dt
,
1781 &gather_off_vectype
))
1783 if (dump_enabled_p ())
1784 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1785 "gather index use not simple.");
1789 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1791 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
1792 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
1794 if (dump_enabled_p ())
1795 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1796 "masked gather with integer mask not supported.");
1800 else if (tree_int_cst_compare (nested_in_vect_loop
1801 ? STMT_VINFO_DR_STEP (stmt_info
)
1802 : DR_STEP (dr
), size_zero_node
) <= 0)
1804 else if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
1805 || !can_vec_mask_load_store_p (TYPE_MODE (vectype
),
1806 TYPE_MODE (mask_vectype
),
1809 && !useless_type_conversion_p (vectype
, rhs_vectype
)))
1812 if (!vec_stmt
) /* transformation not required. */
1814 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
1816 vect_model_store_cost (stmt_info
, ncopies
, false, dt
,
1819 vect_model_load_cost (stmt_info
, ncopies
, false, NULL
, NULL
, NULL
);
1825 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1827 tree vec_oprnd0
= NULL_TREE
, op
;
1828 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1829 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
1830 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
1831 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
1832 tree mask_perm_mask
= NULL_TREE
;
1833 edge pe
= loop_preheader_edge (loop
);
1836 enum { NARROW
, NONE
, WIDEN
} modifier
;
1837 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
1839 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
1840 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1841 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1842 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1843 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1844 scaletype
= TREE_VALUE (arglist
);
1845 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
1846 && types_compatible_p (srctype
, masktype
));
1848 if (nunits
== gather_off_nunits
)
1850 else if (nunits
== gather_off_nunits
/ 2)
1852 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
1855 for (i
= 0; i
< gather_off_nunits
; ++i
)
1856 sel
[i
] = i
| nunits
;
1858 perm_mask
= vect_gen_perm_mask_checked (gather_off_vectype
, sel
);
1860 else if (nunits
== gather_off_nunits
* 2)
1862 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
1865 for (i
= 0; i
< nunits
; ++i
)
1866 sel
[i
] = i
< gather_off_nunits
1867 ? i
: i
+ nunits
- gather_off_nunits
;
1869 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
1871 for (i
= 0; i
< nunits
; ++i
)
1872 sel
[i
] = i
| gather_off_nunits
;
1873 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, sel
);
1878 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
1880 ptr
= fold_convert (ptrtype
, gather_base
);
1881 if (!is_gimple_min_invariant (ptr
))
1883 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
1884 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
1885 gcc_assert (!new_bb
);
1888 scale
= build_int_cst (scaletype
, gather_scale
);
1890 prev_stmt_info
= NULL
;
1891 for (j
= 0; j
< ncopies
; ++j
)
1893 if (modifier
== WIDEN
&& (j
& 1))
1894 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
1895 perm_mask
, stmt
, gsi
);
1898 = vect_get_vec_def_for_operand (gather_off
, stmt
);
1901 = vect_get_vec_def_for_stmt_copy (gather_dt
, vec_oprnd0
);
1903 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
1905 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
1906 == TYPE_VECTOR_SUBPARTS (idxtype
));
1907 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
1908 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
1910 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1911 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1915 if (mask_perm_mask
&& (j
& 1))
1916 mask_op
= permute_vec_elements (mask_op
, mask_op
,
1917 mask_perm_mask
, stmt
, gsi
);
1921 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
1924 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
1925 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
1929 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
1931 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
))
1932 == TYPE_VECTOR_SUBPARTS (masktype
));
1933 var
= vect_get_new_ssa_name (masktype
, vect_simple_var
);
1934 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
1936 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, mask_op
);
1937 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1943 = gimple_build_call (gather_decl
, 5, mask_op
, ptr
, op
, mask_op
,
1946 if (!useless_type_conversion_p (vectype
, rettype
))
1948 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
1949 == TYPE_VECTOR_SUBPARTS (rettype
));
1950 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
1951 gimple_call_set_lhs (new_stmt
, op
);
1952 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1953 var
= make_ssa_name (vec_dest
);
1954 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
1955 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1959 var
= make_ssa_name (vec_dest
, new_stmt
);
1960 gimple_call_set_lhs (new_stmt
, var
);
1963 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1965 if (modifier
== NARROW
)
1972 var
= permute_vec_elements (prev_res
, var
,
1973 perm_mask
, stmt
, gsi
);
1974 new_stmt
= SSA_NAME_DEF_STMT (var
);
1977 if (prev_stmt_info
== NULL
)
1978 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
1980 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
1981 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
1984 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
1986 if (STMT_VINFO_RELATED_STMT (stmt_info
))
1988 stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
1989 stmt_info
= vinfo_for_stmt (stmt
);
1991 tree lhs
= gimple_call_lhs (stmt
);
1992 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (TREE_TYPE (lhs
)));
1993 set_vinfo_for_stmt (new_stmt
, stmt_info
);
1994 set_vinfo_for_stmt (stmt
, NULL
);
1995 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
1996 gsi_replace (gsi
, new_stmt
, true);
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      /* We should have caught mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask, vec_rhs);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask);
	  gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
	{
	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	  stmt_info = vinfo_for_stmt (stmt);
	}
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
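
/* As an illustration (not a statement about any particular target), with
   a vectorization factor of 4 a scalar statement such as

       x = sqrtf (y);

   is replaced by a single statement operating on whole vectors, using
   either an internal function or a target builtin located below:

       vect_x = IFN_SQRT (vect_y);  */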
static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is GS a vectorizable call?  */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;
  /* We only handle functions that do not read or clobber memory.  */
  if (gimple_vuse (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "function reads from or writes to memory.\n");
      return false;
    }

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = NULL_TREE;
  internal_fn ifn = IFN_LAST;
  combined_fn cfn = gimple_call_combined_fn (stmt);
  tree callee = gimple_call_fndecl (stmt);

  /* First try using an internal function.  */
  if (cfn != CFN_LAST)
    ifn = vectorizable_internal_function (cfn, callee, vectype_out,
					  vectype_in);

  /* If that fails, try asking for a target-specific built-in function.  */
  if (ifn == IFN_LAST)
    {
      if (cfn != CFN_LAST)
	fndecl = targetm.vectorize.builtin_vectorized_function
	  (cfn, vectype_out, vectype_in);
      else
	fndecl = targetm.vectorize.builtin_md_vectorized_function
	  (callee, vectype_out, vectype_in);
    }

  if (ifn == IFN_LAST && !fndecl)
    {
      if (cfn == CFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
			 "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  if (ifn != IFN_LAST)
		    new_stmt = gimple_build_call_internal_vec (ifn, vargs);
		  else
		    new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree *v = XALLOCAVEC (tree, nunits_out);
	      int k;
	      for (k = 0; k < nunits_out; ++k)
		v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
	      tree cst = build_vector (vectype_out, v);
	      tree new_var
		= vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
	      gimple *init_stmt = gimple_build_assign (new_var, cst);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, new_var);
	    }
	  else
	    {
	      if (ifn != IFN_LAST)
		new_stmt = gimple_build_call_internal_vec (ifn, vargs);
	      else
		new_stmt = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, new_temp);
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  if (ifn != IFN_LAST)
		    new_stmt = gimple_build_call_internal_vec (ifn, vargs);
		  else
		    new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  if (ifn != IFN_LAST)
	    new_stmt = gimple_build_call_internal_vec (ifn, vargs);
	  else
	    new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
	 with vf - 1 rather than 0, that is the last iteration of the
	 vectorized loop.  */
      imm_use_iterator iter;
      use_operand_p use_p;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	{
	  basic_block use_bb = gimple_bb (use_stmt);
	  if (use_bb
	      && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
					       ncopies * nunits_out - 1));
	      update_stmt (use_stmt);
	    }
	}
    }

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
  bool simd_lane_linear;
};

/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   *ARGINFO.  */
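
/* For instance (an illustrative IL sketch), such an operand typically
   looks like

       _5 = GOMP_SIMD_LANE (simduid.0_4(D));
       _6 = _5 * 8;
       p_7 = &array + _6;

   in which case P_7 is linear within the simd lane with step 8.  */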
static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (is_gimple_call (def_stmt)
	       && gimple_call_internal_p (def_stmt)
	       && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
    }
}
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
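
/* For example (illustrative only), given

       #pragma omp declare simd notinbranch
       float foo (float x);

   the compiler emits simd clones such as (on x86) _ZGVbN4v_foo, taking
   a V4SF argument and returning a V4SF result; a loop vectorized with
   VF 4 can then replace four scalar calls with one clone call.  */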
static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp;
  gimple *def_stmt;
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<simd_call_arg_info> arginfo = vNULL;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    return false;
  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.create (nargs);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
			       &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  arginfo.release ();
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }
  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen
	    > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
	  this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }

  if (bestn == NULL)
    {
      arginfo.release ();
      return false;
    }
  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								      i)));
	if (arginfo[i].vectype == NULL
	    || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  {
	    arginfo.release ();
	    return false;
	  }
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    {
      arginfo.release ();
      return false;
    }

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if (bestn->simdclone->args[i].arg_type
	    == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      arginfo.release ();
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
3027 vargs
.create (nargs
);
3031 for (i
= 0; i
< nargs
; i
++)
3033 unsigned int k
, l
, m
, o
;
3035 op
= gimple_call_arg (stmt
, i
);
3036 switch (bestn
->simdclone
->args
[i
].arg_type
)
3038 case SIMD_CLONE_ARG_TYPE_VECTOR
:
3039 atype
= bestn
->simdclone
->args
[i
].vector_type
;
3040 o
= nunits
/ TYPE_VECTOR_SUBPARTS (atype
);
3041 for (m
= j
* o
; m
< (j
+ 1) * o
; m
++)
3043 if (TYPE_VECTOR_SUBPARTS (atype
)
3044 < TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
))
3046 unsigned int prec
= GET_MODE_BITSIZE (TYPE_MODE (atype
));
3047 k
= (TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
)
3048 / TYPE_VECTOR_SUBPARTS (atype
));
3049 gcc_assert ((k
& (k
- 1)) == 0);
3052 = vect_get_vec_def_for_operand (op
, stmt
);
3055 vec_oprnd0
= arginfo
[i
].op
;
3056 if ((m
& (k
- 1)) == 0)
3058 = vect_get_vec_def_for_stmt_copy (arginfo
[i
].dt
,
3061 arginfo
[i
].op
= vec_oprnd0
;
3063 = build3 (BIT_FIELD_REF
, atype
, vec_oprnd0
,
3065 bitsize_int ((m
& (k
- 1)) * prec
));
3067 = gimple_build_assign (make_ssa_name (atype
),
3069 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3070 vargs
.safe_push (gimple_assign_lhs (new_stmt
));
3074 k
= (TYPE_VECTOR_SUBPARTS (atype
)
3075 / TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
));
3076 gcc_assert ((k
& (k
- 1)) == 0);
3077 vec
<constructor_elt
, va_gc
> *ctor_elts
;
3079 vec_alloc (ctor_elts
, k
);
3082 for (l
= 0; l
< k
; l
++)
3084 if (m
== 0 && l
== 0)
3086 = vect_get_vec_def_for_operand (op
, stmt
);
3089 = vect_get_vec_def_for_stmt_copy (arginfo
[i
].dt
,
3091 arginfo
[i
].op
= vec_oprnd0
;
3094 CONSTRUCTOR_APPEND_ELT (ctor_elts
, NULL_TREE
,
3098 vargs
.safe_push (vec_oprnd0
);
3101 vec_oprnd0
= build_constructor (atype
, ctor_elts
);
3103 = gimple_build_assign (make_ssa_name (atype
),
3105 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3106 vargs
.safe_push (gimple_assign_lhs (new_stmt
));
3111 case SIMD_CLONE_ARG_TYPE_UNIFORM
:
3112 vargs
.safe_push (op
);
3114 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
:
3119 = force_gimple_operand (arginfo
[i
].op
, &stmts
, true,
3124 edge pe
= loop_preheader_edge (loop
);
3125 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, stmts
);
3126 gcc_assert (!new_bb
);
3128 if (arginfo
[i
].simd_lane_linear
)
3130 vargs
.safe_push (arginfo
[i
].op
);
3133 tree phi_res
= copy_ssa_name (op
);
3134 gphi
*new_phi
= create_phi_node (phi_res
, loop
->header
);
3135 set_vinfo_for_stmt (new_phi
,
3136 new_stmt_vec_info (new_phi
, loop_vinfo
));
3137 add_phi_arg (new_phi
, arginfo
[i
].op
,
3138 loop_preheader_edge (loop
), UNKNOWN_LOCATION
);
3140 = POINTER_TYPE_P (TREE_TYPE (op
))
3141 ? POINTER_PLUS_EXPR
: PLUS_EXPR
;
3142 tree type
= POINTER_TYPE_P (TREE_TYPE (op
))
3143 ? sizetype
: TREE_TYPE (op
);
3145 = wi::mul (bestn
->simdclone
->args
[i
].linear_step
,
3147 tree tcst
= wide_int_to_tree (type
, cst
);
3148 tree phi_arg
= copy_ssa_name (op
);
3150 = gimple_build_assign (phi_arg
, code
, phi_res
, tcst
);
3151 gimple_stmt_iterator si
= gsi_after_labels (loop
->header
);
3152 gsi_insert_after (&si
, new_stmt
, GSI_NEW_STMT
);
3153 set_vinfo_for_stmt (new_stmt
,
3154 new_stmt_vec_info (new_stmt
, loop_vinfo
));
3155 add_phi_arg (new_phi
, phi_arg
, loop_latch_edge (loop
),
3157 arginfo
[i
].op
= phi_res
;
3158 vargs
.safe_push (phi_res
);
3163 = POINTER_TYPE_P (TREE_TYPE (op
))
3164 ? POINTER_PLUS_EXPR
: PLUS_EXPR
;
3165 tree type
= POINTER_TYPE_P (TREE_TYPE (op
))
3166 ? sizetype
: TREE_TYPE (op
);
3168 = wi::mul (bestn
->simdclone
->args
[i
].linear_step
,
3170 tree tcst
= wide_int_to_tree (type
, cst
);
3171 new_temp
= make_ssa_name (TREE_TYPE (op
));
3172 new_stmt
= gimple_build_assign (new_temp
, code
,
3173 arginfo
[i
].op
, tcst
);
3174 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3175 vargs
.safe_push (new_temp
);
3178 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
:
3179 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP
:
3180 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP
:
3181 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP
:
      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (TYPE_VECTOR_SUBPARTS (vectype)
		   == TYPE_VECTOR_SUBPARTS (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
	    {
	      unsigned int k, l;
	      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t),
						 l * prec / BITS_PER_UNIT));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				size_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
	    {
	      unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
				/ TYPE_VECTOR_SUBPARTS (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber),
					   gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();
  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  arginfo.release ();
  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */
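
/* E.g. a widening multiply of two V8HI operands is emitted as two stmts,

       vres_lo = VEC_WIDEN_MULT_LO_EXPR <va, vb>;
       vres_hi = VEC_WIDEN_MULT_HI_EXPR <va, vb>;

   each producing a V4SI vector holding half of the widened elements;
   this function generates one such half.  */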
static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */
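
/* E.g. with MULTI_STEP_CVT == 1 this collects four vector defs in total:
   two from this invocation and two more from the recursive one, which a
   two-step narrowing then combines pairwise.  */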
static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */
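
/* E.g. narrowing V4SI to V16QI takes two steps: four V4SI vectors are
   packed pairwise into two V8HI vectors with VEC_PACK_TRUNC_EXPR, and
   those are packed again into one V16QI.  */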
static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple *stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */
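
/* E.g. promoting a V16QI operand to V4SI takes two steps: the operand is
   first unpacked into two V8HI halves with VEC_UNPACK_LO_EXPR and
   VEC_UNPACK_HI_EXPR, and each half is then unpacked again into two
   V4SI vectors.  */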
static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple *stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
/* Check if STMT performs a conversion operation, that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
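
/* E.g. a conversion (float) of a short operand is vectorized as a
   widening step (V8HI unpacked into two V4SI) followed by a V4SI ->
   V4SF FLOAT_EXPR on each half; the multi-step machinery below arranges
   such intermediate types.  */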
static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> vec_dsts = vNULL;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  machine_mode rhs_mode;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?  */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && ((INTEGRAL_TYPE_P (lhs_type)
	   && (TYPE_PRECISION (lhs_type)
	       != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
	  || (INTEGRAL_TYPE_P (rhs_type)
	      && (TYPE_PRECISION (rhs_type)
		  != GET_MODE_PRECISION (TYPE_MODE (rhs_type))))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision unsupported."
			 "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	 OP1.  */
      if (CONSTANT_CLASS_P (op0))
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
      else
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);

      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "can't convert between boolean and non "
			   "boolean vectors");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in < nunits_out)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else
    modifier = WIDEN;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
					  &code1, &code2, &multi_step_cvt,
					  &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
      for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
	   rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
	   rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
	{
	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt, vectype_out,
						    cvt_type, &codecvt1,
						    &codecvt2, &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    break;
	}

      if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }

  if (!vec_stmt)		/* transformation not required.  */
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_conversion ===\n");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
	  vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
	}
      else if (modifier == NARROW)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      else
	{
	  STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      interm_types.release ();
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform conversion. ncopies = %d.\n", ncopies);
  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
	op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
	op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from that types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
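
/* E.g. for a two-step conversion the destinations are pushed as
   { final type, intermediate type }, so each recursion level of
   vect_create_vectorized_demotion_stmts pops the destination for its
   own step, innermost step first.  */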
  vec_dsts.create (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
					  (cvt_type && modifier == WIDEN)
					  ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);

  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
	   interm_types.iterate (i, &intermediate_type); i--)
	{
	  vec_dest = vect_create_destination_var (scalar_dest,
						  intermediate_type);
	  vec_dsts.quick_push (vec_dest);
	}
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
					    modifier == WIDEN
					    ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
	{
	  vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
	  if (op_type == binary_op)
	    vec_oprnds1.create (1);
	}
      else if (modifier == NARROW)
	vec_oprnds0.create (
		   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
			       -1);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  new_stmt = gimple_build_assign (vec_dest, code1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		}

	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}
      break;

    case WIDEN:
3961 of elements that we can fit in a vectype (nunits), we have to
3962 generate more than one vector stmt - i.e - we need to "unroll"
3963 the vector stmt by a factor VF/nunits. */
3964 for (j
= 0; j
< ncopies
; j
++)
3971 if (code
== WIDEN_LSHIFT_EXPR
)
3976 /* Store vec_oprnd1 for every vector stmt to be created
3977 for SLP_NODE. We check during the analysis that all
3978 the shift arguments are the same. */
3979 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
3980 vec_oprnds1
.quick_push (vec_oprnd1
);
3982 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
3986 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
,
3987 &vec_oprnds1
, slp_node
, -1);
3991 vec_oprnd0
= vect_get_vec_def_for_operand (op0
, stmt
);
3992 vec_oprnds0
.quick_push (vec_oprnd0
);
3993 if (op_type
== binary_op
)
3995 if (code
== WIDEN_LSHIFT_EXPR
)
3998 vec_oprnd1
= vect_get_vec_def_for_operand (op1
, stmt
);
3999 vec_oprnds1
.quick_push (vec_oprnd1
);
4005 vec_oprnd0
= vect_get_vec_def_for_stmt_copy (dt
[0], vec_oprnd0
);
4006 vec_oprnds0
.truncate (0);
4007 vec_oprnds0
.quick_push (vec_oprnd0
);
4008 if (op_type
== binary_op
)
4010 if (code
== WIDEN_LSHIFT_EXPR
)
4013 vec_oprnd1
= vect_get_vec_def_for_stmt_copy (dt
[1],
4015 vec_oprnds1
.truncate (0);
4016 vec_oprnds1
.quick_push (vec_oprnd1
);
4020 /* Arguments are ready. Create the new vector stmts. */
4021 for (i
= multi_step_cvt
; i
>= 0; i
--)
4023 tree this_dest
= vec_dsts
[i
];
4024 enum tree_code c1
= code1
, c2
= code2
;
4025 if (i
== 0 && codecvt2
!= ERROR_MARK
)
4030 vect_create_vectorized_promotion_stmts (&vec_oprnds0
,
4032 stmt
, this_dest
, gsi
,
4033 c1
, c2
, decl1
, decl2
,
4037 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4041 if (codecvt1
== CALL_EXPR
)
4043 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4044 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4045 gimple_call_set_lhs (new_stmt
, new_temp
);
4049 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
4050 new_temp
= make_ssa_name (vec_dest
);
4051 new_stmt
= gimple_build_assign (new_temp
, codecvt1
,
4055 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4058 new_stmt
= SSA_NAME_DEF_STMT (vop0
);
4061 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4064 if (!prev_stmt_info
)
4065 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
4067 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4068 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4073 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
4077 /* In case the vectorization factor (VF) is bigger than the number
4078 of elements that we can fit in a vectype (nunits), we have to
4079 generate more than one vector stmt - i.e - we need to "unroll"
4080 the vector stmt by a factor VF/nunits. */
4081 for (j
= 0; j
< ncopies
; j
++)
4085 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4089 vec_oprnds0
.truncate (0);
4090 vect_get_loop_based_defs (&last_oprnd
, stmt
, dt
[0], &vec_oprnds0
,
4091 vect_pow2 (multi_step_cvt
) - 1);
4094 /* Arguments are ready. Create the new vector stmts. */
4096 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4098 if (codecvt1
== CALL_EXPR
)
4100 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4101 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4102 gimple_call_set_lhs (new_stmt
, new_temp
);
4106 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
4107 new_temp
= make_ssa_name (vec_dest
);
4108 new_stmt
= gimple_build_assign (new_temp
, codecvt1
,
4112 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4113 vec_oprnds0
[i
] = new_temp
;
4116 vect_create_vectorized_demotion_stmts (&vec_oprnds0
, multi_step_cvt
,
4117 stmt
, vec_dsts
, gsi
,
4122 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
4126 vec_oprnds0
.release ();
4127 vec_oprnds1
.release ();
4128 vec_dsts
.release ();
4129 interm_types
.release ();
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
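
/* This also covers no-op conversions such as

       int_var = (int) unsigned_var;

   which become a VIEW_CONVERT_EXPR of the whole vector, i.e. a plain
   vector copy with a different type.  */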
static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
	  || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
	  || (GET_MODE_SIZE (TYPE_MODE (vectype))
	      != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	   != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
	  || ((TYPE_PRECISION (TREE_TYPE (op))
	       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	    > TYPE_PRECISION (TREE_TYPE (op)))
	   && TYPE_UNSIGNED (TREE_TYPE (op))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision "
			 "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  if (CONVERT_EXPR_CODE_P (code)
	      || code == VIEW_CONVERT_EXPR)
	    vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
	  new_stmt = gimple_build_assign (vec_dest, vop);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */
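/* E.g. for a short shifted by the constant 3 this first asks for the
   target's vector-by-scalar shift pattern (optab_scalar) and only falls
   back to the vector-by-vector pattern (optab_vector); which of the two
   queries succeeds is entirely target-specific.  */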
bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
		    gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
	|| code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
      != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);
  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */
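  /* For instance, "a[i] << 3" and "a[i] << n" with loop-invariant n keep a
     scalar shift amount, whereas "a[i] << b[i]" needs the vector/vector
     optab because each element may be shifted by a different count.  */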
  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
	   || dt[1] == vect_external_def
	   || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
	 in loops if it is a constant or invariant, it is always
	 a scalar shift.  */
      if (slp_node)
	{
	  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
	  gimple *slpstmt;

	  FOR_EACH_VEC_ELT (stmts, k, slpstmt)
	    if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
	      scalar_shift_arg = false;
	}
    }
  else
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
	op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
	  || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unusable type for last operand in"
			     " vector/vector shift/rotate.\n");
	  return false;
	}
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
	  && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "vector/scalar shift/rotate found.\n");
	}
      else
	{
	  optab = optab_for_tree_code (code, vectype, optab_vector);
	  if (optab
	      && (optab_handler (optab, TYPE_MODE (vectype))
		  != CODE_FOR_nothing))
	    {
	      scalar_shift_arg = false;

	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vector/vector shift/rotate found.\n");

	      /* Unlike the other binary operators, shifts/rotates have
		 the rhs being int, instead of the same type as the lhs,
		 so make sure the scalar is the right type if we are
		 dealing with vectors of long long/long/short/char.  */
	      if (dt[1] == vect_constant_def)
		op1 = fold_convert (TREE_TYPE (vectype), op1);
	      else if (!useless_type_conversion_p (TREE_TYPE (vectype),
						   TREE_TYPE (op1)))
		{
		  if (slp_node
		      && TYPE_MODE (TREE_TYPE (vectype))
			 != TYPE_MODE (TREE_TYPE (op1)))
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					 vect_location,
					 "unusable type for last operand in"
					 " vector/vector shift/rotate.\n");
		      return false;
		    }
		  if (vec_stmt && !slp_node)
		    {
		      op1 = fold_convert (TREE_TYPE (vectype), op1);
		      op1 = vect_init_vector (stmt, op1,
					      TREE_TYPE (vectype), NULL);
		    }
		}
	    }
	}
    }

  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
	  || (vf < vect_min_worthwhile_factor (code)
	      && !vec_stmt))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (scalar_shift_arg)
	    {
	      /* Vector shl and shr insn patterns can be defined with scalar
		 operand 2 (shift operand).  In this case, use constant or loop
		 invariant op1 directly, without extending it to vector mode
		 first.  */
	      optab_op2_mode = insn_data[icode].operand[2].mode;
	      if (!VECTOR_MODE_P (optab_op2_mode))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "operand 1 using scalar mode.\n");
		  vec_oprnd1 = op1;
		  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
		  vec_oprnds1.quick_push (vec_oprnd1);
		  if (slp_node)
		    {
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.
			 TODO: Allow different constants for different vector
			 stmts generated for an SLP instance.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }

	  /* vec_oprnd1 is available if operand 1 should be of a scalar-type
	     (a special case for certain kind of vector shifts); otherwise,
	     operand 1 should be of a vector type (the usual case).  */
	  if (vec_oprnd1)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node, -1);
	}
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = vec_oprnds1[i];
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
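/* E.g. "z = x + y" (binary_op), "z = -x" (unary_op), or a ternary code
   such as FMA_EXPR for "z = x * y + w" are all handled here; each scalar
   stmt simply becomes one vector stmt of the same code per copy.  */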
static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  bool target_support_p;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;

  /* Support only unary, binary or ternary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "num. args = %d (not unary/binary/ternary op).\n",
			 op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && (TYPE_PRECISION (TREE_TYPE (scalar_dest))
	  != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    {
      /* For boolean type we cannot determine vectype by
	 invariant value (don't know whether it is a vector
	 of booleans or vector of integers).  We use output
	 vectype because operations on boolean don't change
	 type.  */
      if (TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE)
	{
	  if (TREE_CODE (TREE_TYPE (scalar_dest)) != BOOLEAN_TYPE)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not supported operation on bool value.\n");
	      return false;
	    }
	  vectype = vectype_out;
	}
      else
	vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
    }
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			     TREE_TYPE (op0));
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "no optab.\n");
	  return false;
	}
      target_support_p = (optab_handler (optab, vec_mode)
			  != CODE_FOR_nothing);
    }

  if (!target_support_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
	  || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && vf < vect_min_worthwhile_factor (code))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load.  See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (op_type == binary_op || op_type == ternary_op)
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node, -1);
	  else
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  if (op_type == ternary_op)
	    {
	      vec_oprnds2.create (1);
	      vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
								    stmt));
	    }
	}
      else
	{
	  vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
	  if (op_type == ternary_op)
	    {
	      tree vec_oprnd = vec_oprnds2.pop ();
	      vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
								      vec_oprnd));
	    }
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = ((op_type == binary_op || op_type == ternary_op)
		  ? vec_oprnds1[i] : NULL_TREE);
	  vop2 = ((op_type == ternary_op)
		  ? vec_oprnds2[i] : NULL_TREE);
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      if (decl_in_symtab_p (base_decl))
	symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
      else
	{
	  DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
	  DECL_USER_ALIGN (base_decl) = 1;
	}
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL.  */
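/* E.g. for a four-element vector the mask built below is {3, 2, 1, 0},
   so VEC_PERM_EXPR <x, x, {3, 2, 1, 0}> yields the elements of x in
   reverse order.  */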
static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
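/* The simplest store handled here (for illustration) is

     for (i = 0; i < n; i++)
       a[i] = x;

   which becomes one vector store "MEM[(int *)vectp_a] = vect_x" per
   copy; grouped, strided and scatter stores take the more involved
   paths below.  */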
static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  gimple *def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int j;
  gimple *next_stmt, *first_stmt = NULL;
  bool grouped_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  vec<tree> dr_chain = vNULL;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  bool negative = false;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree aggr_type;
  tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
  tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
  int scatter_scale = 1;
  enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
  enum vect_def_type scatter_src_dt = vect_unknown_def_type;
  gimple *new_stmt;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable store?  */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.\n");
      return false;
    }

  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW.  In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  if (!STMT_VINFO_STRIDED_P (stmt_info))
    {
      negative =
	tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
			      ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
			      size_zero_node) < 0;
      if (negative && ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types with negative step.\n");
	  return false;
	}
      if (negative)
	{
	  gcc_assert (!grouped_store);
	  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
	  if (alignment_support_scheme != dr_aligned
	      && alignment_support_scheme != dr_unaligned_supported)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step but alignment required.\n");
	      return false;
	    }
	  if (dt != vect_constant_def
	      && dt != vect_external_def
	      && !perm_mask_for_reverse (vectype))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step and reversing not supported.\n");
	      return false;
	    }
	}
    }

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      if (!slp
	  && !PURE_SLP_STMT (stmt_info)
	  && !STMT_VINFO_STRIDED_P (stmt_info))
	{
	  if (vect_store_lanes_supported (vectype, group_size))
	    store_lanes_p = true;
	  else if (!vect_grouped_store_supported (vectype, group_size))
	    return false;
	}

      if (STMT_VINFO_STRIDED_P (stmt_info)
	  && (slp || PURE_SLP_STMT (stmt_info))
	  && (group_size > nunits
	      || nunits % group_size != 0))
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "unhandled strided group store\n");
	  return false;
	}

      if (first_stmt == stmt)
	{
	  /* STMT is the leader of the group.  Check the operands of all the
	     stmts of the group.  */
	  next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
	  while (next_stmt)
	    {
	      gcc_assert (gimple_assign_single_p (next_stmt));
	      op = gimple_assign_rhs1 (next_stmt);
	      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "use not simple.\n");
		  return false;
		}
	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	    }
	}
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
						&scatter_off, &scatter_scale);
      gcc_assert (scatter_decl);
      if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
			       &scatter_off_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "scatter index use not simple.");
	  return false;
	}
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
	vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
			       NULL, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  ensure_base_align (stmt_info, dr);

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);

      if (nunits == (unsigned int) scatter_off_nunits)
	modifier = NONE;
      else if (nunits == (unsigned int) scatter_off_nunits / 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
	  modifier = WIDEN;

	  for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
	    sel[i] = i | nunits;

	  perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
	  gcc_assert (perm_mask != NULL_TREE);
	}
      else if (nunits == (unsigned int) scatter_off_nunits * 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
	  modifier = NARROW;

	  for (i = 0; i < (unsigned int) nunits; ++i)
	    sel[i] = i | scatter_off_nunits;

	  perm_mask = vect_gen_perm_mask_checked (vectype, sel);
	  gcc_assert (perm_mask != NULL_TREE);
	  ncopies *= 2;
	}
      else
	gcc_unreachable ();

      rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);

      gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
			   && TREE_CODE (rettype) == VOID_TYPE);

      ptr = fold_convert (ptrtype, scatter_base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      /* Currently we support only unconditional scatter stores,
	 so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
      mask = vect_init_vector (stmt, mask, masktype, NULL);

      scale = build_int_cst (scaletype, scatter_scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (j == 0)
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
	      op = vec_oprnd0
		= vect_get_vec_def_for_operand (scatter_off, stmt);
	    }
	  else if (modifier != NONE && (j & 1))
	    {
	      if (modifier == WIDEN)
		{
		  src = vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
		  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
					     stmt, gsi);
		}
	      else if (modifier == NARROW)
		{
		  src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
					      stmt, gsi);
		  op = vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
		}
	      else
		gcc_unreachable ();
	    }
	  else
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
	      op = vec_oprnd0
		= vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
	    }

	  if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
			  == TYPE_VECTOR_SUBPARTS (srctype));
	      var = vect_get_new_ssa_name (srctype, vect_simple_var);
	      src = build1 (VIEW_CONVERT_EXPR, srctype, src);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      src = var;
	    }

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
			  == TYPE_VECTOR_SUBPARTS (idxtype));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  new_stmt
	    = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      return true;
    }

  if (grouped_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
	 reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
	  < GROUP_SIZE (vinfo_for_stmt (first_stmt))
	  && !slp)
	{
	  *vec_stmt = NULL;
	  return true;
	}

      if (slp)
	{
	  grouped_store = false;
	  /* VEC_NUM is the number of vect stmts to be created for this
	     group.  */
	  vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
	  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
	  op = gimple_assign_rhs1 (first_stmt);
	}
      else
	/* VEC_NUM is the number of vect stmts to be created for this
	   group.  */
	vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform store. ncopies = %d\n", ncopies);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;
      tree vec_oprnd;
      unsigned int g;

      gcc_assert (!nested_in_vect_loop_p (loop, stmt));

      stride_base
	= fold_build_pointer_plus
	    (unshare_expr (DR_BASE_ADDRESS (first_dr)),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
			 convert_to_ptrofftype (DR_INIT(first_dr))));
      stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));

      /* For a store with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     array[i] = ...;

	 we generate a new induction variable and new stores from
	 the components of the (vectorized) rhs:

	   for (j = 0; ; j += VF*stride)
	     vectemp = ...;
	     tmp1 = vectemp[0];
	     array[j] = tmp1;
	     tmp2 = vectemp[1];
	     array[j + stride] = tmp2;
	     ...
	 */

      unsigned nstores = nunits;
      tree ltype = elem_type;
      if (slp)
	{
	  nstores = nunits / group_size;
	  if (group_size < nunits)
	    ltype = build_vector_type (elem_type, group_size);
	  else
	    ltype = vectype;
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
	  ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  group_size = 1;
	}

      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
			    build_int_cst (TREE_TYPE (ivstep),
					   ncopies * nstores));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (stride_base, ivstep, NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
      next_stmt = first_stmt;
      for (g = 0; g < group_size; g++)
	{
	  running_off = offvar;
	  if (g)
	    {
	      tree size = TYPE_SIZE_UNIT (ltype);
	      tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
				      size);
	      tree newoff = copy_ssa_name (running_off, NULL);
	      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					  running_off, pos);
	      vect_finish_stmt_generation (stmt, incr, gsi);
	      running_off = newoff;
	    }
	  for (j = 0; j < ncopies; j++)
	    {
	      /* We've set op and dt above, from gimple_assign_rhs1(stmt),
		 and first_stmt == stmt.  */
	      if (j == 0)
		{
		  if (slp)
		    {
		      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
					 slp_node, -1);
		      vec_oprnd = vec_oprnds[0];
		    }
		  else
		    {
		      gcc_assert (gimple_assign_single_p (next_stmt));
		      op = gimple_assign_rhs1 (next_stmt);
		      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		    }
		}
	      else
		{
		  if (slp)
		    vec_oprnd = vec_oprnds[j];
		  else
		    {
		      vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
		      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
		    }
		}

	      for (i = 0; i < nstores; i++)
		{
		  tree newref, newoff;
		  gimple *incr, *assign;
		  tree size = TYPE_SIZE (ltype);
		  /* Extract the i'th component.  */
		  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
					  bitsize_int (i), size);
		  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
					   size, pos);

		  elem = force_gimple_operand_gsi (gsi, elem, true,
						   NULL_TREE, true,
						   GSI_SAME_STMT);

		  newref = build2 (MEM_REF, ltype,
				   running_off, alias_off);

		  /* And store it to *running_off.  */
		  assign = gimple_build_assign (newref, elem);
		  vect_finish_stmt_generation (stmt, assign, gsi);

		  newoff = copy_ssa_name (running_off, NULL);
		  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					      running_off, stride_step);
		  vect_finish_stmt_generation (stmt, incr, gsi);

		  running_off = newoff;
		  if (g == group_size - 1
		      && !slp)
		    {
		      if (j == 0 && i == 0)
			STMT_VINFO_VEC_STMT (stmt_info)
			  = *vec_stmt = assign;
		      else
			STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
		      prev_stmt_info = vinfo_for_stmt (assign);
		    }
		}
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	}
      return true;
    }

  dr_chain.create (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
	VS2: &base + vec_size*1 = vx0
	VS3: &base + vec_size*2 = vx1
	VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

	VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
	VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
	...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {

      if (j == 0)
	{
	  if (slp)
	    {
	      /* Get vectorized arguments for SLP_NODE.  */
	      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
				 NULL, slp_node, -1);

	      vec_oprnd = vec_oprnds[0];
	    }
	  else
	    {
	      /* For interleaved stores we collect vectorized defs for all the
		 stores in the group in DR_CHAIN and OPRNDS.  DR_CHAIN is then
		 used as an input to vect_permute_store_chain(), and OPRNDS as
		 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

		 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
		 OPRNDS are of size 1.  */
	      next_stmt = first_stmt;
	      for (i = 0; i < group_size; i++)
		{
		  /* Since gaps are not supported for interleaved stores,
		     GROUP_SIZE is the exact number of stmts in the chain.
		     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
		     there is no interleaving, GROUP_SIZE is 1, and only one
		     iteration of the loop will be executed.  */
		  gcc_assert (next_stmt
			      && gimple_assign_single_p (next_stmt));
		  op = gimple_assign_rhs1 (next_stmt);

		  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		  dr_chain.quick_push (vec_oprnd);
		  oprnds.quick_push (vec_oprnd);
		  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
		}
	    }

	  /* We should have catched mismatched types earlier.  */
	  gcc_assert (useless_type_conversion_p (vectype,
						 TREE_TYPE (vec_oprnd)));
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr))
	      && integer_zerop (DR_INIT (first_dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (DR_REF (first_dr))))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
	      dataref_offset = build_int_cst (reference_alias_ptr_type
					      (DR_REF (first_dr)), 0);
	      inv_p = false;
	    }
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt, aggr_type,
					  simd_lane_access_p ? loop : NULL,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, &inv_p);
	  gcc_assert (bb_vinfo || !inv_p);
	}
      else
	{
	  /* For interleaved stores we created vectorized defs for all the
	     defs stored in OPRNDS in the previous iteration (previous copy).
	     DR_CHAIN is then used as an input to vect_permute_store_chain(),
	     and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
	     next copy.
	     If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
	     OPRNDS are of size 1.  */
	  for (i = 0; i < group_size; i++)
	    {
	      op = oprnds[i];
	      vect_is_simple_use (op, vinfo, &def_stmt, &dt);
	      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
	      dr_chain[i] = vec_oprnd;
	      oprnds[i] = vec_oprnd;
	    }
	  if (dataref_offset)
	    dataref_offset
	      = int_const_binop (PLUS_EXPR, dataref_offset,
				 TYPE_SIZE_UNIT (aggr_type));
	  else
	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					   TYPE_SIZE_UNIT (aggr_type));
	}

      if (store_lanes_p)
	{
	  tree vec_array;

	  /* Combine all the vectors into an array.  */
	  vec_array = create_vector_array (vectype, vec_num);
	  for (i = 0; i < vec_num; i++)
	    {
	      vec_oprnd = dr_chain[i];
	      write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
	    }

	  /* Emit:
	       MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
	  data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
	  new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
	  gimple_call_set_lhs (new_stmt, data_ref);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	}
      else
	{
	  new_stmt = NULL;
	  if (grouped_store)
	    {
	      if (j == 0)
		result_chain.create (group_size);
	      /* Permute.  */
	      vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
					&result_chain);
	    }

	  next_stmt = first_stmt;
	  for (i = 0; i < vec_num; i++)
	    {
	      unsigned align, misalign;

	      if (i > 0)
		/* Bump the vector pointer.  */
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt, NULL_TREE);

	      if (slp)
		vec_oprnd = vec_oprnds[i];
	      else if (grouped_store)
		/* For grouped stores vectorized defs are interleaved in
		   vect_permute_store_chain().  */
		vec_oprnd = result_chain[i];

	      data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
				      dataref_ptr,
				      dataref_offset
				      ? dataref_offset
				      : build_int_cst (reference_alias_ptr_type
						       (DR_REF (first_dr)), 0));
	      align = TYPE_ALIGN_UNIT (vectype);
	      if (aligned_access_p (first_dr))
		misalign = 0;
	      else if (DR_MISALIGNMENT (first_dr) == -1)
		{
		  if (DR_VECT_AUX (first_dr)->base_element_aligned)
		    align = TYPE_ALIGN_UNIT (elem_type);
		  else
		    align = get_object_alignment (DR_REF (first_dr))
			    / BITS_PER_UNIT;
		  misalign = 0;
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  align * BITS_PER_UNIT);
		}
	      else
		{
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  TYPE_ALIGN (elem_type));
		  misalign = DR_MISALIGNMENT (first_dr);
		}
	      if (dataref_offset == NULL_TREE
		  && TREE_CODE (dataref_ptr) == SSA_NAME)
		set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
					misalign);

	      if (negative
		  && dt != vect_constant_def
		  && dt != vect_external_def)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  tree perm_dest
		    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
						   vectype);
		  tree new_temp = make_ssa_name (perm_dest);

		  /* Generate the permute statement.  */
		  gimple *perm_stmt
		    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
					   vec_oprnd, perm_mask);
		  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

		  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
		  vec_oprnd = new_temp;
		}

	      /* Arguments are ready.  Create the new vector stmt.  */
	      new_stmt = gimple_build_assign (data_ref, vec_oprnd);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if (slp)
		continue;

	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	      if (!next_stmt)
		break;
	    }
	}
      if (!slp)
	{
	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  dr_chain.release ();
  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */
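/* E.g. for a V4SI VECTYPE and sel = {1, 0, 3, 2} this builds the
   VECTOR_CST {1, 0, 3, 2} over a same-width integer element type,
   suitable as the third operand of a VEC_PERM_EXPR.  */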
tree
vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  mask_elt_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}

/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
/* Given a vector variable X and Y, that was generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
		      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loop's preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */
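/* E.g. if STMT is an invariant load "x_1 = *p_2" inside LOOP and p_2 is
   defined in LOOP by "p_2 = &a + 4", that definition can be moved to the
   preheader, after which STMT itself becomes movable.  */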
static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  /* Make sure we don't need to recurse.  While we could do
	     so in simple cases when there are more complex use webs
	     we don't have an easy way to preserve stmt order to fulfil
	     dependencies within them.  */
	  tree op2;
	  ssa_op_iter i2;
	  if (gimple_code (def_stmt) == GIMPLE_PHI)
	    return false;
	  FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
	      if (!gimple_nop_p (def_stmt2)
		  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
		return false;
	    }
	  any = true;
	}
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
	  gsi_remove (&gsi, false);
	  gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
	}
    }

  return true;
}
/* vectorizable_load.

   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
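/* The simplest load handled here (for illustration) is

     for (i = 0; i < n; i++)
       x = b[i];

   which becomes one vector load "vect_b = MEM[(int *)vectp_b]" per copy;
   grouped, strided and gather loads take the more involved paths
   below.  */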
6100 vectorizable_load (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
6101 slp_tree slp_node
, slp_instance slp_node_instance
)
6104 tree vec_dest
= NULL
;
6105 tree data_ref
= NULL
;
6106 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
6107 stmt_vec_info prev_stmt_info
;
6108 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
6109 struct loop
*loop
= NULL
;
6110 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
6111 bool nested_in_vect_loop
= false;
6112 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
6116 gimple
*new_stmt
= NULL
;
6118 enum dr_alignment_support alignment_support_scheme
;
6119 tree dataref_ptr
= NULL_TREE
;
6120 tree dataref_offset
= NULL_TREE
;
6121 gimple
*ptr_incr
= NULL
;
6123 int i
, j
, group_size
= -1, group_gap_adj
;
6124 tree msq
= NULL_TREE
, lsq
;
6125 tree offset
= NULL_TREE
;
6126 tree byte_offset
= NULL_TREE
;
6127 tree realignment_token
= NULL_TREE
;
6129 vec
<tree
> dr_chain
= vNULL
;
6130 bool grouped_load
= false;
6131 bool load_lanes_p
= false;
6134 bool negative
= false;
6135 bool compute_in_loop
= false;
6136 struct loop
*at_loop
;
6138 bool slp
= (slp_node
!= NULL
);
6139 bool slp_perm
= false;
6140 enum tree_code code
;
6141 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
6144 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
6145 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
6146 int gather_scale
= 1;
6147 enum vect_def_type gather_dt
= vect_unknown_def_type
;
6148 vec_info
*vinfo
= stmt_info
->vinfo
;
6150 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
6153 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
6156 /* Is vectorizable load? */
6157 if (!is_gimple_assign (stmt
))
6160 scalar_dest
= gimple_assign_lhs (stmt
);
6161 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
6164 code
= gimple_assign_rhs_code (stmt
);
6165 if (code
!= ARRAY_REF
6166 && code
!= BIT_FIELD_REF
6167 && code
!= INDIRECT_REF
6168 && code
!= COMPONENT_REF
6169 && code
!= IMAGPART_EXPR
6170 && code
!= REALPART_EXPR
6172 && TREE_CODE_CLASS (code
) != tcc_declaration
)
6175 if (!STMT_VINFO_DATA_REF (stmt_info
))
6178 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
6179 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
6183 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
6184 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
6185 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
6190 /* Multiple types in SLP are handled by creating the appropriate number of
6191 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6193 if (slp
|| PURE_SLP_STMT (stmt_info
))
6196 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
6198 gcc_assert (ncopies
>= 1);
  /* FORNOW.  This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  /* Invalidate assumptions made by dependence analysis when vectorization
     on the unrolled body effectively re-orders stmts.  */
  if (ncopies > 1
      && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
      && ((unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
          > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot perform implicit CSE when unrolling "
                         "with negative dependence distance\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  mode = TYPE_MODE (vectype);

  /* FORNOW.  In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Aligned load, but unsupported type.\n");
      return false;
    }

  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_load = true;
      /* FORNOW */
      gcc_assert (!nested_in_vect_loop
                  && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

      /* If this is single-element interleaving with an element distance
         that leaves unused vector loads around punt - we at least create
         very sub-optimal code in that case (and blow up memory,
         see PR65518).  */
      bool force_peeling = false;
      if (first_stmt == stmt
          && !GROUP_NEXT_ELEMENT (stmt_info))
        {
          if (GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "single-element interleaving not supported "
                                 "for not adjacent vector loads\n");
              return false;
            }

          /* Single-element interleaving requires peeling for gaps.  */
          force_peeling = true;
        }

      /* If there is a gap in the end of the group or the group size cannot
         be made a multiple of the vector element count then we access excess
         elements in the last iteration and thus need to peel that off.  */
      if (loop_vinfo
          && ! STMT_VINFO_STRIDED_P (stmt_info)
          && (force_peeling
              || GROUP_GAP (vinfo_for_stmt (first_stmt)) != 0
              || (!slp && vf % GROUP_SIZE (vinfo_for_stmt (first_stmt)) != 0)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Data access with gaps requires scalar "
                             "epilogue loop\n");
          if (loop->inner)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Peeling for outer loop is not supported\n");
              return false;
            }

          LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
        }

      if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        slp_perm = true;

      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      if (!slp
          && !PURE_SLP_STMT (stmt_info)
          && !STMT_VINFO_STRIDED_P (stmt_info))
        {
          if (vect_load_lanes_supported (vectype, group_size))
            load_lanes_p = true;
          else if (!vect_grouped_load_supported (vectype, group_size))
            return false;
        }

      /* Invalidate assumptions made by dependence analysis when vectorization
         on the unrolled body effectively re-orders stmts.  */
      if (!PURE_SLP_STMT (stmt_info)
          && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
          && ((unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
              > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "cannot perform implicit CSE when performing "
                             "group loads with negative dependence distance\n");
          return false;
        }

      /* Similarly when the stmt is a load that is both part of a SLP
         instance and a loop vectorized stmt via the same-dr mechanism
         we have to give up.  */
      if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
          && (STMT_SLP_TYPE (stmt_info)
              != STMT_SLP_TYPE (vinfo_for_stmt
                                 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "conflicting SLP types for CSEd load\n");
          return false;
        }
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
                                               &gather_off, &gather_scale);
      gcc_assert (gather_decl);
      if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
                               &gather_off_vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "gather index use not simple.\n");
          return false;
        }
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      if ((grouped_load
           && (slp || PURE_SLP_STMT (stmt_info)))
          && (group_size > nunits
              || nunits % group_size != 0))
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "unhandled strided group load\n");
          return false;
        }
    }
  else
    {
      negative = tree_int_cst_compare (nested_in_vect_loop
                                       ? STMT_VINFO_DR_STEP (stmt_info)
                                       : DR_STEP (dr),
                                       size_zero_node) < 0;
      if (negative && ncopies > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types with negative step.\n");
          return false;
        }

      if (negative)
        {
          if (grouped_load)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step for group load not supported"
                                 "\n");
              return false;
            }
          alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
          if (alignment_support_scheme != dr_aligned
              && alignment_support_scheme != dr_unaligned_supported)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step but alignment required.\n");
              return false;
            }
          if (!perm_mask_for_reverse (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step and reversing not supported."
                                 "\n");
              return false;
            }
        }
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
                              NULL, NULL, NULL);
      return true;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform load. ncopies = %d\n", ncopies);

  /* Transform.  */

  ensure_base_align (stmt_info, dr);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE;
      tree prev_res = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);

      if (nunits == gather_off_nunits)
        modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < gather_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
        }
      else if (nunits == gather_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < nunits; ++i)
            sel[i] = i < gather_off_nunits
                     ? i : i + nunits - gather_off_nunits;

          perm_mask = vect_gen_perm_mask_checked (vectype, sel);
          ncopies *= 2;
        }
      else
        gcc_unreachable ();
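      /* Illustrative sketch only (hypothetical vector sizes): for a WIDEN
         gather with nunits == 4 and gather_off_nunits == 8, the selector
         built above is sel[i] = i | 4, i.e. { 4, 5, 6, 7, 4, 5, 6, 7 },
         which moves the upper half of the offset vector into place for
         the odd copies.  For a NARROW gather with nunits == 8 and
         gather_off_nunits == 4, sel is { 0, 1, 2, 3, 8, 9, 10, 11 },
         concatenating the low halves of two gather results.  */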
      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));

      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      /* Currently we support only unconditional gather loads,
         so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
        mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
        {
          mask = build_int_cst (TREE_TYPE (masktype), -1);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = -1;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
          mask = build_real (TREE_TYPE (masktype), r);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else
        gcc_unreachable ();

      scale = build_int_cst (scaletype, gather_scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
        merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = 0;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
          merge = build_real (TREE_TYPE (rettype), r);
        }
      else
        gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (modifier == WIDEN && (j & 1))
            op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
                                       perm_mask, stmt, gsi);
          else if (j == 0)
            op = vec_oprnd0
              = vect_get_vec_def_for_operand (gather_off, stmt);
          else
            op = vec_oprnd0
              = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          new_stmt
            = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);

          if (!useless_type_conversion_p (vectype, rettype))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
                          == TYPE_VECTOR_SUBPARTS (rettype));
              op = vect_get_new_ssa_name (rettype, vect_simple_var);
              gimple_call_set_lhs (new_stmt, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              var = make_ssa_name (vec_dest);
              op = build1 (VIEW_CONVERT_EXPR, vectype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
            }
          else
            {
              var = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, var);
            }

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (modifier == NARROW)
            {
              if ((j & 1) == 0)
                {
                  prev_res = var;
                  continue;
                }
              var = permute_vec_elements (prev_res, var,
                                          perm_mask, stmt, gsi);
              new_stmt = SSA_NAME_DEF_STMT (var);
            }

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }
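  /* Note, as a non-normative aside: the five operands passed to
     GATHER_DECL above follow the shape of the target gather built-ins
     this code expects (for instance the x86 ones), which take
     (merge, base pointer, index vector, mask, scale) and return the
     loaded vector.  The all-ones MASK built earlier makes the gather
     unconditional, so the MERGE pass-through values are never used.  */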
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;

      gcc_assert (!nested_in_vect_loop);

      if (slp && grouped_load)
        first_dr = STMT_VINFO_DATA_REF
            (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
      else
        first_dr = dr;

      stride_base
        = fold_build_pointer_plus
            (DR_BASE_ADDRESS (first_dr),
             size_binop (PLUS_EXPR,
                         convert_to_ptrofftype (DR_OFFSET (first_dr)),
                         convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr));

      /* For a load with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             ... = array[i];

         we generate a new induction variable and new accesses to
         form a new vector (or vectors, depending on ncopies):

           for (j = 0; ; j += VF*stride)
             tmp1 = array[j];
             tmp2 = array[j + stride];
             ...
             vectemp = {tmp1, tmp2, ...}
             ...  */
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
                            build_int_cst (TREE_TYPE (stride_step), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
                 loop, &incr_gsi, insert_after,
                 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (unshare_expr (stride_step),
                                          &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
      int nloads = nunits;
      tree ltype = TREE_TYPE (vectype);
      auto_vec<tree> dr_chain;
      if (slp)
        {
          nloads = nunits / group_size;
          if (group_size < nunits)
            ltype = build_vector_type (TREE_TYPE (vectype), group_size);
          else
            ltype = vectype;
          ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
          ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          if (slp_perm)
            dr_chain.create (ncopies);
        }
      for (j = 0; j < ncopies; j++)
        {
          tree vec_inv;

          if (nloads > 1)
            {
              vec_alloc (v, nloads);
              for (i = 0; i < nloads; i++)
                {
                  tree newref, newoff;
                  gimple *incr;
                  newref = build2 (MEM_REF, ltype, running_off, alias_off);

                  newref = force_gimple_operand_gsi (gsi, newref, true,
                                                     NULL_TREE, true,
                                                     GSI_SAME_STMT);
                  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
                  newoff = copy_ssa_name (running_off);
                  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                              running_off, stride_step);
                  vect_finish_stmt_generation (stmt, incr, gsi);

                  running_off = newoff;
                }

              vec_inv = build_constructor (vectype, v);
              new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
              new_stmt = SSA_NAME_DEF_STMT (new_temp);
            }
          else
            {
              new_stmt = gimple_build_assign (make_ssa_name (ltype),
                                              build2 (MEM_REF, ltype,
                                                      running_off, alias_off));
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              tree newoff = copy_ssa_name (running_off);
              gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                                  running_off, stride_step);
              vect_finish_stmt_generation (stmt, incr, gsi);

              running_off = newoff;
            }

          if (slp)
            {
              SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
              if (slp_perm)
                dr_chain.quick_push (gimple_assign_lhs (new_stmt));
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      if (slp_perm)
        vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                      slp_node_instance, false);
      return true;
    }
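  /* Illustrative sketch only (hypothetical sizes): for an SLP group with
     group_size == 2 loaded with a four-element vectype (nunits == 4), the
     code above uses nloads == 2 loads of a two-element sub-vector type
     LTYPE per vector stmt, advancing RUNNING_OFF by STRIDE_STEP between
     the loads before assembling the pieces into a full vector.  */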
  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      /* For BB vectorization we directly vectorize a subchain
         without permutation.  */
      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
          /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
             ???  But we can only do so if there is exactly one
             as we have no way to get at the rest.  Leave the CSE
             opportunity alone.
             ???  With the group load eventually participating
             in multiple different permutations (having multiple
             slp nodes which refer to the same group) the CSE
             is even wrong code.  See PR56270.  */
          && !slp)
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      group_gap_adj = 0;

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          grouped_load = false;
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            vec_num = (group_size * vf + nunits - 1) / nunits;
          else
            vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          group_gap_adj = vf * group_size - nunits * vec_num;
        }
      else
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
    }
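  /* Illustrative sketch only (hypothetical numbers): with vf == 4,
     nunits == 4 and an interleaved group of group_size == 3, a permuted
     SLP load needs vec_num == (3*4 + 3) / 4 == 3 vector stmts and
     group_gap_adj == 4*3 - 4*3 == 0.  If the SLP node instead consumes
     only vec_num == 2 of those vectors, group_gap_adj == 12 - 8 == 4
     excess elements are skipped by the pointer bump further below.  */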
  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
     VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

         p = initial_addr;
         indx = 0;
         loop {
           p = p + indx * vectype_size;
           vec_dest = *(p);
           indx = indx + 1;
         }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
           p2 = p2 + indx * vectype_size
           lsq = *(floor(p2))
           vec_dest = realign_load (msq, lsq, realignment_token)
           indx = indx + 1;
           msq = lsq;
         }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */

  if (nested_in_vect_loop
      && (TREE_INT_CST_LOW (DR_STEP (dr))
          % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
          byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
                                    size_one_node);
        }
    }
  else
    at_loop = loop;

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;
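  /* Illustrative sketch only (hypothetical sizes): for a load-lanes access
     with vec_num == 4 and a four-element int vectype, AGGR_TYPE is the
     array type int[16], so a single IFN_LOAD_LANES call below reads all
     16 elements of the interleaved group at once.  */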
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        {
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (DR_REF (first_dr)))
              && (alignment_support_scheme == dr_aligned
                  || alignment_support_scheme == dr_unaligned_supported))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (reference_alias_ptr_type
                                              (DR_REF (first_dr)), 0);
              inv_p = false;
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p,
                                          byte_offset);
        }
      else if (dataref_offset)
        dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
                                          TYPE_SIZE_UNIT (aggr_type));
      else
        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
        dr_chain.create (vec_num);

      if (load_lanes_p)
        {
          tree vec_array;

          vec_array = create_vector_array (vectype, vec_num);

          /* Emit:
               VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
          gimple_call_set_lhs (new_stmt, vec_array);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt, gsi, scalar_dest,
                                            vec_array, i);
              dr_chain.quick_push (new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_grouped_load_vectors (stmt, dr_chain);
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              /* 2. Create the vector-load in the loop.  */
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    unsigned int align, misalign;

                    data_ref
                      = fold_build2 (MEM_REF, vectype, dataref_ptr,
                                     dataref_offset
                                     ? dataref_offset
                                     : build_int_cst (reference_alias_ptr_type
                                                      (DR_REF (first_dr)), 0));
                    align = TYPE_ALIGN_UNIT (vectype);
                    if (alignment_support_scheme == dr_aligned)
                      {
                        gcc_assert (aligned_access_p (first_dr));
                        misalign = 0;
                      }
                    else if (DR_MISALIGNMENT (first_dr) == -1)
                      {
                        if (DR_VECT_AUX (first_dr)->base_element_aligned)
                          align = TYPE_ALIGN_UNIT (elem_type);
                        else
                          align = (get_object_alignment (DR_REF (first_dr))
                                   / BITS_PER_UNIT);
                        misalign = 0;
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                align * BITS_PER_UNIT);
                      }
                    else
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        misalign = DR_MISALIGNMENT (first_dr);
                      }
                    if (dataref_offset == NULL_TREE
                        && TREE_CODE (dataref_ptr) == SSA_NAME)
                      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
                                              align, misalign);
                    break;
                  }
                case dr_explicit_realign:
                  {
                    tree ptr, bump;

                    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

                    if (compute_in_loop)
                      msq = vect_setup_realignment (first_stmt, gsi,
                                                    &realignment_token,
                                                    dr_explicit_realign,
                                                    dataref_ptr, NULL);

                    if (TREE_CODE (dataref_ptr) == SSA_NAME)
                      ptr = copy_ssa_name (dataref_ptr);
                    else
                      ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
                    new_stmt = gimple_build_assign
                                 (ptr, BIT_AND_EXPR, dataref_ptr,
                                  build_int_cst
                                  (TREE_TYPE (dataref_ptr),
                                   -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    vec_dest = vect_create_destination_var (scalar_dest,
                                                            vectype);
                    new_stmt = gimple_build_assign (vec_dest, data_ref);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_assign_set_lhs (new_stmt, new_temp);
                    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    msq = new_temp;

                    bump = size_binop (MULT_EXPR, vs,
                                       TYPE_SIZE_UNIT (elem_type));
                    bump = size_binop (MINUS_EXPR, bump, size_one_node);
                    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                    new_stmt = gimple_build_assign
                                 (NULL_TREE, BIT_AND_EXPR, ptr,
                                  build_int_cst
                                  (TREE_TYPE (ptr),
                                   -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
                    ptr = copy_ssa_name (ptr, new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    break;
                  }
                case dr_explicit_realign_optimized:
                  if (TREE_CODE (dataref_ptr) == SSA_NAME)
                    new_temp = copy_ssa_name (dataref_ptr);
                  else
                    new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
                  new_stmt = gimple_build_assign
                               (new_temp, BIT_AND_EXPR, dataref_ptr,
                                build_int_cst
                                (TREE_TYPE (dataref_ptr),
                                 -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  data_ref
                    = build2 (MEM_REF, vectype, new_temp,
                              build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                  break;
                default:
                  gcc_unreachable ();
                }
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              /* 3. Handle explicit realignment if necessary/supported.
                 Create in loop:
                   vec_dest = realign_load (msq, lsq, realignment_token)  */
              if (alignment_support_scheme == dr_explicit_realign_optimized
                  || alignment_support_scheme == dr_explicit_realign)
                {
                  lsq = gimple_assign_lhs (new_stmt);
                  if (!realignment_token)
                    realignment_token = dataref_ptr;
                  vec_dest = vect_create_destination_var (scalar_dest, vectype);
                  new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
                                                  msq, lsq, realignment_token);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  if (alignment_support_scheme == dr_explicit_realign_optimized)
                    {
                      gcc_assert (phi);
                      if (i == vec_num - 1 && j == ncopies - 1)
                        add_phi_arg (phi, lsq,
                                     loop_latch_edge (containing_loop),
                                     UNKNOWN_LOCATION);
                      msq = lsq;
                    }
                }

              /* 4. Handle invariant-load.  */
              if (inv_p && !bb_vinfo)
                {
                  gcc_assert (!grouped_load);
                  /* If we have versioned for aliasing or the loop doesn't
                     have any data dependencies that would preclude this,
                     then we are sure this is a loop invariant load and
                     thus we can insert it on the preheader edge.  */
                  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
                      && !nested_in_vect_loop
                      && hoist_defs_of_uses (stmt, loop))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "hoisting out of the vectorized "
                                           "loop: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                        }
                      tree tem = copy_ssa_name (scalar_dest);
                      gsi_insert_on_edge_immediate
                        (loop_preheader_edge (loop),
                         gimple_build_assign (tem,
                                              unshare_expr
                                                (gimple_assign_rhs1 (stmt))));
                      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
                    }
                  else
                    {
                      gimple_stmt_iterator gsi2 = *gsi;
                      gsi_next (&gsi2);
                      new_temp = vect_init_vector (stmt, scalar_dest,
                                                   vectype, &gsi2);
                    }
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                  set_vinfo_for_stmt (new_stmt,
                                      new_stmt_vec_info (new_stmt, vinfo));
                }

              if (negative)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  new_temp = permute_vec_elements (new_temp, new_temp,
                                                   perm_mask, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }

              /* Collect vector loads and later create their permutation in
                 vect_transform_grouped_load ().  */
              if (grouped_load || slp_perm)
                dr_chain.quick_push (new_temp);

              /* Store vector loads in the corresponding SLP_NODE.  */
              if (slp && !slp_perm)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
            }
          /* Bump the vector pointer to account for a gap or for excess
             elements loaded for a permuted SLP load.  */
          if (group_gap_adj != 0)
            {
              bool ovf;
              tree bump
                = wide_int_to_tree (sizetype,
                                    wi::smul (TYPE_SIZE_UNIT (elem_type),
                                              group_gap_adj, &ovf));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, bump);
            }
        }

      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                             slp_node_instance, false))
            {
              dr_chain.release ();
              return false;
            }
        }
      else
        {
          if (grouped_load)
            {
              if (!load_lanes_p)
                vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      dr_chain.release ();
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vec_is_simple_use.  */

static bool
vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
{
  tree lhs, rhs;
  enum vect_def_type dt;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  /* Mask case.  */
  if (TREE_CODE (cond) == SSA_NAME
      && TREE_CODE (TREE_TYPE (cond)) == BOOLEAN_TYPE)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
      if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
                               &dt, comp_vectype)
          || !*comp_vectype
          || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
        return false;
      return true;
    }

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
        return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
           && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
        return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
           && TREE_CODE (rhs) != FIXED_CST)
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
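/* Illustrative sketch only (hypothetical GIMPLE names): a COND such as
   x_1 < y_2 is "simple" here when each operand is either an invariant
   constant or an SSA name with a vectorizable def; *COMP_VECTYPE is taken
   from whichever operand supplies a vector type, e.g. V4SI.  A bare mask
   operand b_3 of BOOLEAN_TYPE is accepted only when its def already
   produces a vector boolean type.  */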
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, tree reduc_def, int reduc_index,
                        slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dt, dts[4];
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;
  bool masked = false;

  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
    {
      if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
        return false;

      if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
          && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
               && reduc_def))
        return false;

      /* FORNOW: not yet supported.  */
      if (STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "value used after loop.\n");
          return false;
        }
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
      || !comp_vectype)
    return false;

  gimple *def_stmt;
  if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt))
    return false;
  if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt))
    return false;

  if (VECTOR_BOOLEAN_TYPE_P (comp_vectype))
    {
      vec_cmp_type = comp_vectype;
      masked = true;
    }
  else
    vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);

  if (vec_cmp_type == NULL_TREE)
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (vectype, comp_vectype);
    }

  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 4> ops;
              auto_vec<vec<tree>, 4> vec_defs;

              if (masked)
                ops.safe_push (cond_expr);
              else
                {
                  ops.safe_push (TREE_OPERAND (cond_expr, 0));
                  ops.safe_push (TREE_OPERAND (cond_expr, 1));
                }
              ops.safe_push (then_clause);
              ops.safe_push (else_clause);
              vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
              vec_oprnds3 = vec_defs.pop ();
              vec_oprnds2 = vec_defs.pop ();
              if (!masked)
                vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();

              ops.release ();
              vec_defs.release ();
            }
          else
            {
              gimple *gtemp;
              if (masked)
                {
                  vec_cond_lhs
                    = vect_get_vec_def_for_operand (cond_expr, stmt,
                                                    comp_vectype);
                  vect_is_simple_use (cond_expr, stmt_info->vinfo,
                                      &gtemp, &dts[0]);
                }
              else
                {
                  vec_cond_lhs =
                    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
                                                  stmt, comp_vectype);
                  vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
                                      loop_vinfo, &gtemp, &dts[0]);

                  vec_cond_rhs =
                    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
                                                  stmt, comp_vectype);
                  vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
                                      loop_vinfo, &gtemp, &dts[1]);
                }
              if (reduc_index == 1)
                vec_then_clause = reduc_def;
              else
                {
                  vec_then_clause = vect_get_vec_def_for_operand (then_clause,
                                                                  stmt);
                  vect_is_simple_use (then_clause, loop_vinfo,
                                      &gtemp, &dts[2]);
                }
              if (reduc_index == 2)
                vec_else_clause = reduc_def;
              else
                {
                  vec_else_clause = vect_get_vec_def_for_operand (else_clause,
                                                                  stmt);
                  vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
                }
            }
        }
      else
        {
          vec_cond_lhs
            = vect_get_vec_def_for_stmt_copy (dts[0],
                                              vec_oprnds0.pop ());
          if (!masked)
            vec_cond_rhs
              = vect_get_vec_def_for_stmt_copy (dts[1],
                                                vec_oprnds1.pop ());

          vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
                                                            vec_oprnds2.pop ());
          vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
                                                            vec_oprnds3.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_cond_lhs);
          if (!masked)
            vec_oprnds1.quick_push (vec_cond_rhs);
          vec_oprnds2.quick_push (vec_then_clause);
          vec_oprnds3.quick_push (vec_else_clause);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
        {
          vec_then_clause = vec_oprnds2[i];
          vec_else_clause = vec_oprnds3[i];

          if (masked)
            vec_compare = vec_cond_lhs;
          else
            {
              vec_cond_rhs = vec_oprnds1[i];
              vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
                                    vec_cond_lhs, vec_cond_rhs);
            }
          vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
                                  vec_compare, vec_then_clause,
                                  vec_else_clause);

          new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
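/* Illustrative sketch only (hypothetical GIMPLE names, V4SI types): a
   scalar statement  x_5 = a_1 < b_2 ? c_3 : d_4;  is replaced by roughly

     vect_x_5 = VEC_COND_EXPR <vect_a_1 < vect_b_2, vect_c_3, vect_d_4>;

   where the embedded comparison is built with VEC_CMP_TYPE, the vector
   boolean type derived above.  In the masked case the first operand is
   an already-computed mask and is used directly.  */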
/* vectorizable_comparison.

   Check if STMT is comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, tree reduc_def,
                         slp_tree slp_node)
{
  tree lhs, rhs1, rhs2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  unsigned nunits;
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  gimple *def_stmt;
  tree mask_type;
  tree mask;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype))
    return false;

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
           && reduc_def))
    return false;

  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "value used after loop.\n");
      return false;
    }

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return false;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
                           &dts[0], &vectype1))
    return false;

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
                           &dts[1], &vectype2))
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
  if (!vectype)
    {
      vectype = build_vector_type (TREE_TYPE (rhs1), nunits);
      if (tree_to_shwi (TYPE_SIZE_UNIT (vectype)) != current_vector_size)
        return false;
    }
  else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies, dts, NULL, NULL);
      return expand_vec_cmp_expr_p (vectype, mask_type);
    }

  /* Transform.  */
  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
    }

  /* Handle def.  */
  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);

  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 2> ops;
              auto_vec<vec<tree>, 2> vec_defs;

              ops.safe_push (rhs1);
              ops.safe_push (rhs2);
              vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
              vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();
            }
          else
            {
              vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
              vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
            }
        }
      else
        {
          vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
                                                     vec_oprnds0.pop ());
          vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
                                                     vec_oprnds1.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_rhs1);
          vec_oprnds1.quick_push (vec_rhs2);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
        {
          vec_rhs2 = vec_oprnds1[i];

          new_temp = make_ssa_name (mask);
          new_stmt = gimple_build_assign (new_temp, code, vec_rhs1, vec_rhs2);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
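/* Illustrative sketch only (hypothetical GIMPLE names, V4SI operands): a
   scalar comparison  b_3 = x_1 < y_2;  becomes a vector statement

     mask_4 = vect_x_1 < vect_y_2;

   whose LHS has the vector boolean type MASK_TYPE, suitable as the mask
   operand of a later VEC_COND_EXPR or masked operation.  */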
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: stmt has volatile operands\n");

      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */

  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && pattern_stmt
          && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
        {
          /* Analyze PATTERN_STMT instead of the original stmt.  */
          stmt = pattern_stmt;
          stmt_info = vinfo_for_stmt (pattern_stmt);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining pattern statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

          return true;
        }
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
           && node == NULL
           && pattern_stmt
           && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
               || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "==> examining pattern statement: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
        return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *pattern_def_stmt = gsi_stmt (si);
          if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
            {
              /* Analyze def stmt of STMT if it's a pattern stmt.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "==> examining pattern def statement: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
                }

              if (!vect_analyze_stmt (pattern_def_stmt,
                                      need_to_vectorize, node))
                return false;
            }
        }
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
                  && (relevance == vect_used_in_outer
                      || relevance == vect_used_in_outer_by_reduction
                      || relevance == vect_used_by_reduction
                      || relevance == vect_unused_in_scope));
      break;

    case vect_induction_def:
    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));

      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "get vectype for scalar type:  ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
          dump_printf (MSG_NOTE, "\n");
        }

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "not SLPed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return false;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "vectype:  ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
          dump_printf (MSG_NOTE, "\n");
        }

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
                  || (is_gimple_call (stmt)
                      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "handled only by SLP analysis\n");
      return true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
          || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
          || vectorizable_conversion (stmt, NULL, NULL, node)
          || vectorizable_shift (stmt, NULL, NULL, node)
          || vectorizable_operation (stmt, NULL, NULL, node)
          || vectorizable_assignment (stmt, NULL, NULL, node)
          || vectorizable_load (stmt, NULL, NULL, node, NULL)
          || vectorizable_call (stmt, NULL, NULL, node)
          || vectorizable_store (stmt, NULL, NULL, node)
          || vectorizable_reduction (stmt, NULL, NULL, node)
          || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
          || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
  else
    {
      if (bb_vinfo)
        ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
              || vectorizable_conversion (stmt, NULL, NULL, node)
              || vectorizable_shift (stmt, NULL, NULL, node)
              || vectorizable_operation (stmt, NULL, NULL, node)
              || vectorizable_assignment (stmt, NULL, NULL, node)
              || vectorizable_load (stmt, NULL, NULL, node, NULL)
              || vectorizable_call (stmt, NULL, NULL, node)
              || vectorizable_store (stmt, NULL, NULL, node)
              || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
              || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: relevant stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
        }

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: live stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
        }

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
                     bool *grouped_store, slp_tree slp_node,
                     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
                                slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
        {
          /* In case of interleaving, the whole chain is vectorized when the
             last store in the chain is reached.  Store stmts before the last
             one are skipped, and there vec_stmt_info shouldn't be freed
             meanwhile.  */
          *grouped_store = true;
          if (STMT_VINFO_VEC_STMT (stmt_info))
            is_store = true;
        }
      else
        is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt)
          && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
        is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "stmt not supported.\n");
          gcc_unreachable ();
        }
    }

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
                && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
                                  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
          || STMT_VINFO_RELEVANT (stmt_info) ==
                                           vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
                                 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
         (to be used when vectorizing outer-loop stmts that use the DEF of
         STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
        scalar_dest = PHI_RESULT (stmt);
      else
        scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
            }
        }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
        next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
        = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
        {
          gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
          gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
          gimple_set_bb (patt_stmt, NULL);
          tree lhs = gimple_get_lhs (patt_stmt);
          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            release_ssa_name (lhs);
          if (seq)
            {
              gimple_stmt_iterator si;
              for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
                {
                  gimple *seq_stmt = gsi_stmt (si);
                  gimple_set_bb (seq_stmt, NULL);
                  lhs = gimple_get_lhs (seq_stmt);
                  if (lhs && TREE_CODE (lhs) == SSA_NAME)
                    release_ssa_name (lhs);
                  free_stmt_vec_info (seq_stmt);
                }
            }
          free_stmt_vec_info (patt_stmt);
        }
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}

/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as
   supported by the target.  */

tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  machine_mode inner_mode = TYPE_MODE (scalar_type);
  machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}
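
/* Worked example (a sketch; the concrete modes are target-dependent
   assumptions): for SCALAR_TYPE == int with 4-byte SImode elements and
   SIZE == 16, nbytes is 4, mode_for_vector (SImode, 4) yields V4SImode
   where supported, nunits == 16 / 4 == 4, and the function returns a
   vector type of four ints.  With SIZE == 0 the element count instead
   follows from targetm.vectorize.preferred_simd_mode.  */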

unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
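
/* Note the latching behavior above: the first successful call while
   current_vector_size == 0 fixes current_vector_size to the size of the
   target's preferred SIMD mode, and subsequent calls reuse it, so all
   vector types chosen for one region share a single vector size.
   Illustrative sketch, not part of GCC:

     tree v1 = get_vectype_for_scalar_type (integer_type_node);
     tree v2 = get_vectype_for_scalar_type (float_type_node);
     if (v1 && v2)
       gcc_assert (GET_MODE_SIZE (TYPE_MODE (v1))
		   == GET_MODE_SIZE (TYPE_MODE (v2)));  */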

/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
				  current_vector_size);
}
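
/* Illustrative example (target-dependent, a sketch): comparing two
   four-element int vectors yields a mask type with
   TYPE_VECTOR_SUBPARTS == 4; depending on the target,
   build_truth_vector_type may produce a vector of boolean-like
   elements or an integer-mode bit-mask (as on AVX-512 style
   targets).  */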

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE with the same
   size as VECTOR_TYPE, if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
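
/* Illustrative sketch, not part of GCC (v4si_vectype is a hypothetical
   variable holding the wider vector type): statements mixing element
   widths use this to keep all vectors the same size in bytes, e.g.

     tree half_vectype
       = get_same_sized_vectype (short_integer_type_node, v4si_vectype);

   which requests a 16-byte vector of shorts (V8HI where supported) to
   match a 16-byte V4SI.  */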

/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and
   operands that are defined by the current iteration of the loop.
   Unsupportable operands are those that are defined by a previous
   iteration of the loop (as is the case in reduction/induction
   computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      if (is_a <bb_vec_info> (vinfo) && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
	*dt = vect_external_def;
      else
	*dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
	{
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	  break;
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	  break;
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	  break;
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	  break;
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	  break;
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	  break;
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	  break;
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	  break;
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");
	  break;
	}
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported defining stmt:\n");
      return false;
    }

  return true;
}

/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && !STMT_VINFO_RELEVANT (stmt_info)
	  && !STMT_VINFO_LIVE_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
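
/* Illustrative caller sketch, not part of GCC: the overload above is
   used when the operand's vector type is needed as well.  A caller
   handling the NULL_TREE case might do (OP is again one operand):

     gimple *def_stmt;
     enum vect_def_type dt;
     tree op_vectype;
     if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &op_vectype))
       return false;
     if (!op_vectype)
       op_vectype = get_vectype_for_scalar_type (TREE_TYPE (op));

   the final fallback being one way to meet the caller's obligation to
   pick a suitable type for constant/external definitions.  */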

/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow changing the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt)
	  && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt, vectype_out, vectype_in,
					     code1, code2, multi_step_cvt,
					     interm_types))
	{
	  /* Elements in a vector with vect_used_by_reduction property cannot
	     be reordered if the use chain with this property does not have
	     the same operation.  One such example is s += a * b, where
	     elements in a and b cannot be reordered.  Here we check if the
	     vector defined by STMT is only directly used in the reduction
	     statement.  */
	  tree lhs = gimple_assign_lhs (stmt);
	  use_operand_p dummy;
	  gimple *use_stmt;
	  stmt_vec_info use_stmt_info = NULL;
	  if (single_imm_use (lhs, &dummy, &use_stmt)
	      && (use_stmt_info = vinfo_for_stmt (use_stmt))
	      && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
	    return true;
	}
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
	= lang_hooks.types.type_for_mode (intermediate_mode,
					  TYPE_UNSIGNED (prev_type));
      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
	  || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode2].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (optab3, intermediate_mode))
	      == CODE_FOR_nothing)
	  || ((icode2 = optab_handler (optab4, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	  && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	return true;

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
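
/* Worked example (a sketch; actual support is target-dependent): for a
   char -> int conversion with 16-byte vectors, VECTYPE_IN is V16QI and
   VECTYPE_OUT is V4SI.  A single VEC_UNPACK_LO/HI_EXPR step only
   reaches V8HI, so the loop above records the short vector type in
   INTERM_TYPES and sets *MULTI_STEP_CVT to 1; the next unpack step
   reaches V4SI and the function returns true, matching the
   char->short->int case from the function comment.  */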

/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
	 tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
	= lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
	= optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
	  && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
	  && insn_data[icode1].operand[0].mode
	     == insn_data[icode2].operand[0].mode)
	{
	  uns = false;
	  optab1 = interm_optab;
	  icode1 = icode2;
	}
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the narrowing sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
	= lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
	= optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
			       optab_default);
      if (!interm_optab
	  || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (interm_optab, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	return true;

      prev_mode = intermediate_mode;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}
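
/* Worked example (a sketch; actual support is target-dependent): for an
   int -> char narrowing with 16-byte vectors (V4SI to V16QI), a single
   VEC_PACK_TRUNC_EXPR step only reaches V8HI, so the loop above records
   the short vector type in INTERM_TYPES and sets *MULTI_STEP_CVT to 1;
   the next pack step reaches V16QI and the function returns true,
   matching the int->short->char case from the function comment.  */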