/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "internal-fn.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};
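
/* Illustrative example (not from the original sources): in a loop body like

     x = a[i];      <-- a load (VLS_LOAD)
     b[i] = x;      <-- a store of a vectorized result (VLS_STORE)
     c[i] = 42;     <-- a store of an invariant value (VLS_STORE_INVARIANT)

   the distinction only matters for stores, where an invariant source can be
   broadcast once instead of being recomputed for every iteration.  */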
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
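
/* Illustrative example (not from the original sources): when the loop being
   vectorized is the outer loop of

     for (i = 0; i < n; i++)        <-- loop being vectorized
       for (j = 0; j < m; j++)
         a[i][j] = a[i][j] + 1;     <-- statement in the inner loop

   stmt_in_inner_loop_p returns true for the inner-loop statement and false
   for statements that belong to the outer loop itself.  */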
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
                  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
                  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
                                stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
                                misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
        (builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
                          count, kind, stmt_info, misalign, where);
}
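
/* Typical use (sketch, not from the original sources): callers accumulate
   costs while analyzing a statement, e.g.

     inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
                                      stmt_info, 0, vect_body);

   and only later hand the whole vector to the target cost model; passing a
   NULL BODY_COST_VEC instead costs the statement immediately through
   add_stmt_cost.  */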
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
                         "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
                   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
                    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
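
/* Illustrative example (not from the original sources): for a load-lanes
   group of two vectors these helpers cooperate to produce GIMPLE roughly of
   the form

     vect_array = LOAD_LANES (*ptr);
     vect_x.1 = vect_array[0];
     vect_x.2 = vect_array[1];

   where the memory reference comes from create_array_ref and the element
   reads from read_vector_array; the temporary names are hypothetical.  */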
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
                    enum vect_relevant relevant, bool live_p)

  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "mark relevant %d, live %d: ", relevant, live_p);
    dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))

    /* This is the last stmt in a sequence that was detected as a
       pattern that can potentially be vectorized.  Don't mark the stmt
       as relevant/live because it's not going to be vectorized.
       Instead mark the pattern-stmt that replaces it.  */

    pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

    if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location,
                       "last stmt in pattern. don't mark"
                       " relevant/live.\n");
    stmt_info = vinfo_for_stmt (pattern_stmt);
    gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
    save_relevant = STMT_VINFO_RELEVANT (stmt_info);
    save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)

    if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location,
                       "already marked relevant/live.\n");

  worklist->safe_push (stmt);
/* Function is_simple_and_all_uses_invariant

   Return true if STMT is simple and all uses of it are invariant.  */

is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)

  if (!is_gimple_assign (stmt))

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)

      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "use not simple.\n");

      if (dt != vect_external_def && dt != vect_constant_def)
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)

  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  imm_use_iterator imm_iter;

  *relevant = vect_unused_in_scope;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
        && !gimple_clobber_p (stmt))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vec_stmt_relevant_p: stmt has vdefs.\n");
      *relevant = vect_used_in_scope;

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
      basic_block bb = gimple_bb (USE_STMT (use_p));
      if (!flow_bb_inside_loop_p (loop, bb))
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vec_stmt_relevant_p: used out of loop.\n");

        if (is_gimple_debug (USE_STMT (use_p)))

        /* We expect all such uses to be in the loop exit phis
           (because of loop closed form)   */
        gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
        gcc_assert (bb == single_exit (loop)->dest);

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
    if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vec_stmt_relevant_p: stmt live but not relevant.\n");
    *relevant = vect_used_only_live;

  return (*live_p || *relevant);
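
/* Illustrative examples (not from the original sources):

     a[i] = b[i] + 1;   <-- alters memory, so *relevant = vect_used_in_scope
     sum = sum + a[i];  <-- 'sum' is used in the loop-exit phi, so *live_p
                            is set
     t = i * 4;         <-- neither; it only becomes relevant later if some
                            relevant statement uses it (via the worklist).  */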
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)

  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))

  if (is_gimple_call (stmt)
      && gimple_call_internal_p (stmt))
    switch (gimple_call_internal_fn (stmt))

        operand = gimple_call_arg (stmt, 3);

        operand = gimple_call_arg (stmt, 2);

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)

  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
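
/* For example (illustrative): in a scalar store "a[i_7] = x_3" the use of
   i_7 only feeds the address computation, so by itself it does not make the
   definition of i_7 relevant, whereas the use of x_3 does.  */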
/* Function process_use.

   - a USE in STMT in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant

   - case 1: If USE is used only for address computations (e.g. array indexing),
     which does not need to be directly vectorized, then the liveness/relevance
     of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
             enum vect_relevant relevant, vec<gimple *> *worklist,

  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    if (dump_enabled_p ())
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                       "not vectorized: unsupported use in stmt.\n");

  if (!def_stmt || gimple_nop_p (def_stmt))

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)

        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                     vect_used_in_scope : vect_unused_in_scope;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;

        case vect_used_in_scope:

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)

        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
                      || STMT_VINFO_DEF_TYPE (stmt_vinfo)
                         == vect_double_reduction_def) ?
                     vect_used_in_outer_by_reduction : vect_unused_in_scope;

        case vect_used_by_reduction:
        case vect_used_only_live:
          relevant = vect_used_in_outer_by_reduction;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;

  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt) == GIMPLE_PHI
           && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
           && ! STMT_VINFO_LIVE_P (stmt_vinfo)
           && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "induction value on backedge.\n");

  vect_mark_relevant (worklist, def_stmt, relevant, false);
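
/* Illustrative summary (not from the original sources) of the nesting cases
   above, for an outer loop that is being vectorized:

     outer:   x = ...;      <-- def in the outer loop
     inner:     ... = x;    <-- use in the inner loop: case 3a, the def's
                                relevance is adjusted for the inner nest
     inner:   y = ...;      <-- def in the inner loop
     outer:   ... = y;      <-- use in the outer loop: case 3b, the def is
                                promoted to one of the vect_used_in_outer*
                                values.  */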
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)

  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  stmt_vec_info stmt_vinfo;
  enum vect_relevant relevant;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
            dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))

          stmt = gsi_stmt (si);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
            dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);

  /* 2. Process_worklist */
  while (worklist.length () > 0)

      stmt = worklist.pop ();
      if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
         (DEF_STMT) as relevant/irrelevant according to the relevance property
         of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
         propagated as is to the DEF_STMTs of its USEs.

         One exception is when STMT has been identified as defining a reduction
         variable; in this case we set the relevance to vect_used_by_reduction.
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))

        case vect_reduction_def:
          gcc_assert (relevant != vect_unused_in_scope);
          if (relevant != vect_unused_in_scope
              && relevant != vect_used_in_scope
              && relevant != vect_used_by_reduction
              && relevant != vect_used_only_live)
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of reduction.\n");

        case vect_nested_cycle:
          if (relevant != vect_unused_in_scope
              && relevant != vect_used_in_outer_by_reduction
              && relevant != vect_used_in_outer)
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of nested cycle.\n");

        case vect_double_reduction_def:
          if (relevant != vect_unused_in_scope
              && relevant != vect_used_by_reduction
              && relevant != vect_used_only_live)
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of double reduction.\n");

      if (is_pattern_stmt_p (stmt_vinfo))

          /* Pattern statements are not inserted into the code, so
             FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
             have to scan the RHS or function arguments instead.  */
          if (is_gimple_assign (stmt))

              enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
              tree op = gimple_assign_rhs1 (stmt);

              if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
                  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
                                    relevant, &worklist, false)
                      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
                                       relevant, &worklist, false))

              for (; i < gimple_num_ops (stmt); i++)
                  op = gimple_op (stmt, i);
                  if (TREE_CODE (op) == SSA_NAME
                      && !process_use (stmt, op, loop_vinfo, relevant,

          else if (is_gimple_call (stmt))
              for (i = 0; i < gimple_call_num_args (stmt); i++)
                  tree arg = gimple_call_arg (stmt, i);
                  if (!process_use (stmt, arg, loop_vinfo, relevant,

      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
          tree op = USE_FROM_PTR (use_p);
          if (!process_use (stmt, op, loop_vinfo, relevant,

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
          gather_scatter_info gs_info;
          if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
          if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,

    } /* while worklist */
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt,
                        stmt_vector_for_cost *prologue_cost_vec,
                        stmt_vector_for_cost *body_cost_vec)

  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))

  /* Cost the "broadcast" of a scalar operand in to a vector operand.
     Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
     cost model.  */
  for (i = 0; i < ndts; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
                                         stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
                                  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_simple_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
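
/* Worked example (illustrative): "x_1 = y_2 + z_3" vectorized with
   ncopies == 2 and both operands defined inside the loop costs
   2 * vector_stmt in the body and nothing in the prologue; if z_3 were
   loop-invariant, one extra scalar_to_vec broadcast would be charged to
   the prologue.  */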
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
                                    enum vect_def_type *dt, int pwr)

  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))

  target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

  target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)

      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?

      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
                                    vec_promote_demote, stmt_info, 0,

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
                                      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_promotion_demotion_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
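
/* Worked example (illustrative): a two-step demotion (PWR == 1) iterates
   the loop above twice and, because every extra step doubles the number of
   instructions, charges 1 + 2 = 3 vec_promote_demote operations to the
   loop body.  */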
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       vect_memory_access_type memory_access_type,
                       enum vect_def_type dt, slp_tree slp_node,
                       stmt_vector_for_cost *prologue_cost_vec,
                       stmt_vector_for_cost *body_cost_vec)

  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
                                       stmt_info, 0, vect_prologue);

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)

      /* Uses a high and low interleave or shuffle operations for each
         needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
                                      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_store_cost: strided group_size = %d .\n",
                         group_size);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    /* N scalar stores plus extracting the elements.  */
    inside_cost += record_stmt_cost (body_cost_vec,
                                     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
                                     scalar_store, stmt_info, 0, vect_body);

    vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec,
                                     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
                                     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_store_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
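
/* Worked example (illustrative): a grouped store of GROUP_SIZE == 4 vectors
   implemented by permute-and-store (VMAT_CONTIGUOUS_PERMUTE) with
   ncopies == 1 charges ceil_log2 (4) * 4 = 8 vec_perm operations on top of
   the stores themselves, while a VMAT_ELEMENTWISE store of a 4-element
   vector charges 4 scalar_store operations plus 4 vec_to_scalar
   extracts.  */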
/* Calculate cost of DR's memory access.  */

vect_get_store_cost (struct data_reference *dr, int ncopies,
                     unsigned int *inside_cost,
                     stmt_vector_for_cost *body_cost_vec)

  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)

        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vector_store, stmt_info, 0,
                                          vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: aligned.\n");

    case dr_unaligned_supported:

      /* Here, we assign an additional cost for the unaligned store.  */
      *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                        unaligned_store, stmt_info,
                                        DR_MISALIGNMENT (dr), vect_body);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_store_cost: unaligned supported by "
                         "hardware.\n");

    case dr_unaligned_unsupported:

      *inside_cost = VECT_MAX_COST;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "vect_model_store_cost: unsupported access.\n");
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
                      vect_memory_access_type memory_access_type,
                      stmt_vector_for_cost *prologue_cost_vec,
                      stmt_vector_for_cost *body_cost_vec)

  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)

      /* Uses an even and odd extract operations or shuffle operations
         for each needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
                                      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: strided group_size = %d .\n",
                         group_size);

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)

      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
                                       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
                                       scalar_load, stmt_info, 0, vect_body);

    vect_get_load_cost (dr, ncopies, first_stmt_p,
                        &inside_cost, &prologue_cost,
                        prologue_cost_vec, body_cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
                                     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_load_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
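
/* Worked example (illustrative): the mirror image of the store case above;
   a VMAT_CONTIGUOUS_PERMUTE load group of 4 vectors charges
   ceil_log2 (4) * 4 = 8 vec_perm operations plus the loads, while a
   VMAT_ELEMENTWISE load of a 4-element vector charges 4 scalar_load
   operations plus one vec_construct per copy to assemble the vector.  */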
/* Calculate cost of DR's memory access.  */

vect_get_load_cost (struct data_reference *dr, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *prologue_cost,
                    stmt_vector_for_cost *prologue_cost_vec,
                    stmt_vector_for_cost *body_cost_vec,
                    bool record_prologue_costs)

  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)

        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: aligned.\n");

    case dr_unaligned_supported:

      /* Here, we assign an additional cost for the unaligned load.  */
      *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                        unaligned_load, stmt_info,
                                        DR_MISALIGNMENT (dr), vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: unaligned supported by "
                         "hardware.\n");

    case dr_explicit_realign:

      *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
                                        vector_load, stmt_info, 0, vect_body);
      *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                        vec_perm, stmt_info, 0, vect_body);

      /* FIXME: If the misalignment remains fixed across the iterations of
         the containing loop, the following cost should be added to the
         prologue costs.  */
      if (targetm.vectorize.builtin_mask_for_load)
        *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
                                          stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: explicit realign\n");

    case dr_explicit_realign_optimized:

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: unaligned software "
                         "pipelined.\n");

      /* Unaligned software pipeline has a load of an address, an initial
         load, and possibly a mask operation to "prime" the loop.  However,
         if this is an access in a group of loads, which provide grouped
         access, then the above cost should only be considered for one
         access in the group.  Inside the loop, there is a load op
         and a realignment op.  */

      if (add_realign_cost && record_prologue_costs)

          *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
                                              vector_stmt, stmt_info,

          if (targetm.vectorize.builtin_mask_for_load)
            *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
                                                vector_stmt, stmt_info,

      *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                        stmt_info, 0, vect_body);
      *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
                                        stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: explicit realign optimized"

    case dr_unaligned_unsupported:

      *inside_cost = VECT_MAX_COST;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "vect_model_load_cost: unsupported access.\n");
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)

    vect_finish_stmt_generation (stmt, new_stmt, gsi);

      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (nested_in_vect_loop_p (loop, stmt))

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);

          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);

  if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location,
                       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)

  /* We abuse this function to push sth to a SSA name with initial 'val'.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))

      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))

          /* Scalar boolean value should be transformed into
             all zeros or all ones value before building a vector.  */
          if (VECTOR_BOOLEAN_TYPE_P (type))

              tree true_val = build_all_ones_cst (TREE_TYPE (type));
              tree false_val = build_zero_cst (TREE_TYPE (type));

              if (CONSTANT_CLASS_P (val))
                val = integer_zerop (val) ? false_val : true_val;

                  new_temp = make_ssa_name (TREE_TYPE (type));
                  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
                                                   val, true_val, false_val);
                  vect_init_vector_1 (stmt, init_stmt, gsi);

          else if (CONSTANT_CLASS_P (val))
            val = fold_convert (TREE_TYPE (type), val);

              new_temp = make_ssa_name (TREE_TYPE (type));
              if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
                init_stmt = gimple_build_assign (new_temp,
                                                 fold_build1 (VIEW_CONVERT_EXPR,

                init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
              vect_init_vector_1 (stmt, init_stmt, gsi);

      val = build_vector_from_val (type, val);

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
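
/* Illustrative example (not from the original sources): called with the
   scalar constant 5 and a 4-element integer vector type, vect_init_vector
   emits something like

     cst_1 = { 5, 5, 5, 5 };

   in the loop preheader and returns cst_1 for use as an invariant vector
   operand.  */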
/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
   DT that will be used in the vectorized stmt.  */

vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)

  stmt_vec_info def_stmt_info = NULL;

    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */

    /* operand is defined inside the loop.  */
    case vect_internal_def:

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);

        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        /* Get vectorized pattern statement.  */
            && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
            && !STMT_VINFO_RELEVANT (def_stmt_info))
          vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
                       STMT_VINFO_RELATED_STMT (def_stmt_info)));
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
          vec_oprnd = gimple_assign_lhs (vec_stmt);

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:

        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);

          vec_oprnd = gimple_get_lhs (vec_stmt);
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)

  enum vect_def_type dt;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (def_stmt && dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);

  if (dt == vect_constant_def || dt == vect_external_def)

      tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);

        vector_type = vectype;
      else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
               && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
        vector_type = build_same_sized_truth_vector_type (stmt_vectype);

        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vector_type);
      return vect_init_vector (stmt, op, vector_type, NULL);

  return vect_get_vec_def_for_operand_1 (def_stmt, dt);
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:          STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)

  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);

    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 vec<tree> *vec_oprnds0,
                                 vec<tree> *vec_oprnds1)

  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())

      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
/* Get vectorized definitions for OP0 and OP1.  */

vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
                   vec<tree> *vec_oprnds0,
                   vec<tree> *vec_oprnds1,

      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
        ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs);

      *vec_oprnds0 = vec_defs[0];
        *vec_oprnds1 = vec_defs[1];

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

          vec_oprnds1->create (1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
          vec_oprnds1->quick_push (vec_oprnd);
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
                             gimple_stmt_iterator *gsi)

  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))

      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)

          tree vdef = gimple_vdef (at_stmt);
          gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
          /* If we have an SSA vuse and insert a store, update virtual
             SSA form to avoid triggering the renamer.  Do so only
             if we can easily see all uses - which is what almost always
             happens with the way vectorized stmts are inserted.  */
          if ((vdef && TREE_CODE (vdef) == SSA_NAME)
              && ((is_gimple_assign (vec_stmt)
                   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
                  || (is_gimple_call (vec_stmt)
                      && !(gimple_call_flags (vec_stmt)
                           & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))

              tree new_vdef = copy_ssa_name (vuse, vec_stmt);
              gimple_set_vdef (vec_stmt, new_vdef);
              SET_USE (gimple_vuse_op (at_stmt), new_vdef);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

vectorizable_internal_function (combined_fn cfn, tree fndecl,
                                tree vectype_out, tree vectype_in)

  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);

    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))

      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)

          tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
          tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
          if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
                                              OPTIMIZE_FOR_SPEED))

static tree permute_vec_elements (tree, tree, tree, gimple *,
                                  gimple_stmt_iterator *);
/* STMT is a non-strided load or store, meaning that it accesses
   elements with a known constant step.  Return -1 if that step
   is negative, 0 if it is zero, and 1 if it is greater than zero.  */

compare_step_with_zero (gimple *stmt)

  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  return tree_int_cst_compare (vect_dr_behavior (dr)->step,
                               size_zero_node);
/* If the target supports a permute mask that reverses the elements in
   a vector of type VECTYPE, return that mask, otherwise return null.  */

perm_mask_for_reverse (tree vectype)

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))

  return vect_gen_perm_mask_checked (vectype, sel);
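
/* Example (illustrative): for a 4-element vector type the selector built
   above is { 3, 2, 1, 0 }, i.e. the permutation that reverses the element
   order; null is returned when the target cannot perform such a permute.  */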
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is part of a grouped load
   or store.

   For stores, the statements in the group are all consecutive
   and there is no gap at the end.  For loads, the statements in the
   group might not be consecutive; there can be gaps between statements
   as well as at the end.  */

get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
                           vec_load_store_type vls_type,
                           vect_memory_access_type *memory_access_type)

  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
  bool single_element_p = (stmt == first_stmt
                           && !GROUP_NEXT_ELEMENT (stmt_info));
  unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
  unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* True if the vectorized statements would access beyond the last
     statement in the group.  */
  bool overrun_p = false;

  /* True if we can cope with such overrun by peeling for gaps, so that
     there is at least one final scalar iteration after the vector loop.  */
  bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner);

  /* There can only be a gap at the end of the group if the stride is
     known at compile time.  */
  gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);

  /* Stores can't yet have gaps.  */
  gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);

      if (STMT_VINFO_STRIDED_P (stmt_info))

          /* Try to use consecutive accesses of GROUP_SIZE elements,
             separated by the stride, until we have a complete vector.
             Fall back to scalar accesses if that isn't possible.  */
          if (nunits % group_size == 0)
            *memory_access_type = VMAT_STRIDED_SLP;

            *memory_access_type = VMAT_ELEMENTWISE;

          overrun_p = loop_vinfo && gap != 0;
          if (overrun_p && vls_type != VLS_LOAD)

              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Grouped store with gaps requires"
                               " non-consecutive accesses\n");

          /* If the access is aligned an overrun is fine.  */
               (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt))))

          if (overrun_p && !can_overrun_p)

              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Peeling for outer loop is not supported\n");

          *memory_access_type = VMAT_CONTIGUOUS;

      /* We can always handle this case using elementwise accesses,
         but see if something more efficient is available.  */
      *memory_access_type = VMAT_ELEMENTWISE;

      /* If there is a gap at the end of the group then these optimizations
         would access excess elements in the last iteration.  */
      bool would_overrun_p = (gap != 0);
      /* If the access is aligned an overrun is fine, but only if the
         overrun is not inside an unused vector (if the gap is as large
         or larger than a vector).  */
           (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt))))
        would_overrun_p = false;

      if (!STMT_VINFO_STRIDED_P (stmt_info)
          && (can_overrun_p || !would_overrun_p)
          && compare_step_with_zero (stmt) > 0)

          /* First try using LOAD/STORE_LANES.  */
          if (vls_type == VLS_LOAD
              ? vect_load_lanes_supported (vectype, group_size)
              : vect_store_lanes_supported (vectype, group_size))

              *memory_access_type = VMAT_LOAD_STORE_LANES;
              overrun_p = would_overrun_p;

          /* If that fails, try using permuting loads.  */
          if (*memory_access_type == VMAT_ELEMENTWISE
              && (vls_type == VLS_LOAD
                  ? vect_grouped_load_supported (vectype, single_element_p,
                                                 group_size)
                  : vect_grouped_store_supported (vectype, group_size)))

              *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
              overrun_p = would_overrun_p;

  if (vls_type != VLS_LOAD && first_stmt == stmt)

      /* STMT is the leader of the group.  Check the operands of all the
         stmts of the group.  */
      gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);

          gcc_assert (gimple_assign_single_p (next_stmt));
          tree op = gimple_assign_rhs1 (next_stmt);
          enum vect_def_type dt;
          if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))

              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "use not simple.\n");

          next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));

      gcc_assert (can_overrun_p);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Data access with gaps requires scalar "
                         "epilogue loop\n");
      LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
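
/* Illustrative example (not from the original sources): a load group such as

     ... = a[4*i];
     ... = a[4*i + 1];
     ... = a[4*i + 2];    <-- GROUP_SIZE == 4 with a gap of one unused
                              element (a[4*i + 3]) at the end

   can still be handled as a contiguous access, but the trailing gap makes
   the vector loads overrun the group, which is only acceptable when the
   access is suitably aligned or peeling for gaps is possible.  */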
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is a load or store that
   accesses consecutive elements with a negative step.  */

static vect_memory_access_type
get_negative_load_store_type (gimple *stmt, tree vectype,
                              vec_load_store_type vls_type,
                              unsigned int ncopies)

  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  dr_alignment_support alignment_support_scheme;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types with negative step.\n");
      return VMAT_ELEMENTWISE;

  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  if (alignment_support_scheme != dr_aligned
      && alignment_support_scheme != dr_unaligned_supported)

      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "negative step but alignment required.\n");
      return VMAT_ELEMENTWISE;

  if (vls_type == VLS_STORE_INVARIANT)

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "negative step with invariant source;"
                         " no permute needed.\n");
      return VMAT_CONTIGUOUS_DOWN;

  if (!perm_mask_for_reverse (vectype))

      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "negative step and reversing not supported.\n");
      return VMAT_ELEMENTWISE;

  return VMAT_CONTIGUOUS_REVERSE;
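
/* Illustrative example (not from the original sources): in

     for (i = n - 1; i >= 0; i--)
       a[i] = b[i];

   both accesses have a negative step, so the vectorized accesses stay
   contiguous but each loaded vector must be reversed
   (VMAT_CONTIGUOUS_REVERSE); an invariant source avoids the permute
   (VMAT_CONTIGUOUS_DOWN), and if reversal is unsupported we fall back to
   VMAT_ELEMENTWISE.  */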
1928 /* Analyze load or store statement STMT of type VLS_TYPE. Return true
1929 if there is a memory access type that the vectorized form can use,
1930 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
1931 or scatters, fill in GS_INFO accordingly.
1933 SLP says whether we're performing SLP rather than loop vectorization.
1934 VECTYPE is the vector type that the vectorized statements will use.
1935 NCOPIES is the number of vector statements that will be needed. */
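/* Rough illustration of the classification done here (a hedged sketch
   with hypothetical arrays, simply restating the checks below):

     a[i] = x;            contiguous access     -> VMAT_CONTIGUOUS
     x = b[idx[i]];       gather/scatter        -> VMAT_GATHER_SCATTER
     x = c[i * stride];   variable stride       -> VMAT_ELEMENTWISE
     x = *p;              invariant load        -> VMAT_INVARIANT
     x = d[n - 1 - i];    negative step         -> get_negative_load_store_type
     interleaved accesses                       -> get_group_load_store_type  */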
1938 get_load_store_type (gimple
*stmt
, tree vectype
, bool slp
,
1939 vec_load_store_type vls_type
, unsigned int ncopies
,
1940 vect_memory_access_type
*memory_access_type
,
1941 gather_scatter_info
*gs_info
)
1943 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1944 vec_info
*vinfo
= stmt_info
->vinfo
;
1945 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1946 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1948 *memory_access_type
= VMAT_GATHER_SCATTER
;
1950 if (!vect_check_gather_scatter (stmt
, loop_vinfo
, gs_info
))
1952 else if (!vect_is_simple_use (gs_info
->offset
, vinfo
, &def_stmt
,
1953 &gs_info
->offset_dt
,
1954 &gs_info
->offset_vectype
))
1956 if (dump_enabled_p ())
1957 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1958 "%s index use not simple.\n",
1959 vls_type
== VLS_LOAD
? "gather" : "scatter");
1963 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1965 if (!get_group_load_store_type (stmt
, vectype
, slp
, vls_type
,
1966 memory_access_type
))
1969 else if (STMT_VINFO_STRIDED_P (stmt_info
))
1972 *memory_access_type
= VMAT_ELEMENTWISE
;
1976 int cmp
= compare_step_with_zero (stmt
);
1978 *memory_access_type
= get_negative_load_store_type
1979 (stmt
, vectype
, vls_type
, ncopies
);
1982 gcc_assert (vls_type
== VLS_LOAD
);
1983 *memory_access_type
= VMAT_INVARIANT
;
1986 *memory_access_type
= VMAT_CONTIGUOUS
;
1989 /* FIXME: At the moment the cost model seems to underestimate the
1990 cost of using elementwise accesses. This check preserves the
1991 traditional behavior until that can be fixed. */
1992 if (*memory_access_type
== VMAT_ELEMENTWISE
1993 && !STMT_VINFO_STRIDED_P (stmt_info
))
1995 if (dump_enabled_p ())
1996 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1997 "not falling back to elementwise accesses\n");
2003 /* Function vectorizable_mask_load_store.
2005 Check if STMT performs a conditional load or store that can be vectorized.
2006 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2007 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2008 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
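/* Illustrative sketch (hypothetical source, not a testcase).  After
   if-conversion a conditional access such as

     for (int i = 0; i < n; i++)
       if (c[i])
	 a[i] = b[i];

   is expressed with MASK_LOAD/MASK_STORE internal calls; this function
   turns those into vector IFN_MASK_LOAD/IFN_MASK_STORE statements, or
   into masked gathers/scatters when the access is non-contiguous.  */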
2011 vectorizable_mask_load_store (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
2012 gimple
**vec_stmt
, slp_tree slp_node
)
2014 tree vec_dest
= NULL
;
2015 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2016 stmt_vec_info prev_stmt_info
;
2017 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2018 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
2019 bool nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
2020 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
2021 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
2022 tree rhs_vectype
= NULL_TREE
;
2027 tree dataref_ptr
= NULL_TREE
;
2029 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
2033 gather_scatter_info gs_info
;
2034 vec_load_store_type vls_type
;
2037 enum vect_def_type dt
;
2039 if (slp_node
!= NULL
)
2042 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
2043 gcc_assert (ncopies
>= 1);
2045 mask
= gimple_call_arg (stmt
, 2);
2047 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask
)))
2050 /* FORNOW. This restriction should be relaxed. */
2051 if (nested_in_vect_loop
&& ncopies
> 1)
2053 if (dump_enabled_p ())
2054 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2055 "multiple types in nested loop.");
2059 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
2062 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
2066 if (!STMT_VINFO_DATA_REF (stmt_info
))
2069 elem_type
= TREE_TYPE (vectype
);
2071 if (TREE_CODE (mask
) != SSA_NAME
)
2074 if (!vect_is_simple_use (mask
, loop_vinfo
, &def_stmt
, &dt
, &mask_vectype
))
2078 mask_vectype
= get_mask_type_for_scalar_type (TREE_TYPE (vectype
));
2080 if (!mask_vectype
|| !VECTOR_BOOLEAN_TYPE_P (mask_vectype
)
2081 || TYPE_VECTOR_SUBPARTS (mask_vectype
) != TYPE_VECTOR_SUBPARTS (vectype
))
2084 if (gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
)
2086 tree rhs
= gimple_call_arg (stmt
, 3);
2087 if (!vect_is_simple_use (rhs
, loop_vinfo
, &def_stmt
, &dt
, &rhs_vectype
))
2089 if (dt
== vect_constant_def
|| dt
== vect_external_def
)
2090 vls_type
= VLS_STORE_INVARIANT
;
2092 vls_type
= VLS_STORE
;
2095 vls_type
= VLS_LOAD
;
2097 vect_memory_access_type memory_access_type
;
2098 if (!get_load_store_type (stmt
, vectype
, false, vls_type
, ncopies
,
2099 &memory_access_type
, &gs_info
))
2102 if (memory_access_type
== VMAT_GATHER_SCATTER
)
2104 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
.decl
));
2106 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
2107 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
2109 if (dump_enabled_p ())
2110 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2111 "masked gather with integer mask not supported.");
2115 else if (memory_access_type
!= VMAT_CONTIGUOUS
)
2117 if (dump_enabled_p ())
2118 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2119 "unsupported access type for masked %s.\n",
2120 vls_type
== VLS_LOAD
? "load" : "store");
2123 else if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
2124 || !can_vec_mask_load_store_p (TYPE_MODE (vectype
),
2125 TYPE_MODE (mask_vectype
),
2126 vls_type
== VLS_LOAD
)
2128 && !useless_type_conversion_p (vectype
, rhs_vectype
)))
2131 if (!vec_stmt
) /* transformation not required. */
2133 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
) = memory_access_type
;
2134 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
2135 if (vls_type
== VLS_LOAD
)
2136 vect_model_load_cost (stmt_info
, ncopies
, memory_access_type
,
2139 vect_model_store_cost (stmt_info
, ncopies
, memory_access_type
,
2140 dt
, NULL
, NULL
, NULL
);
2143 gcc_assert (memory_access_type
== STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
));
2147 if (memory_access_type
== VMAT_GATHER_SCATTER
)
2149 tree vec_oprnd0
= NULL_TREE
, op
;
2150 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
.decl
));
2151 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
2152 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
2153 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
2154 tree mask_perm_mask
= NULL_TREE
;
2155 edge pe
= loop_preheader_edge (loop
);
2158 enum { NARROW
, NONE
, WIDEN
} modifier
;
2159 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gs_info
.offset_vectype
);
2161 rettype
= TREE_TYPE (TREE_TYPE (gs_info
.decl
));
2162 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2163 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2164 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2165 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2166 scaletype
= TREE_VALUE (arglist
);
2167 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
2168 && types_compatible_p (srctype
, masktype
));
2170 if (nunits
== gather_off_nunits
)
2172 else if (nunits
== gather_off_nunits
/ 2)
2174 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
2177 for (i
= 0; i
< gather_off_nunits
; ++i
)
2178 sel
[i
] = i
| nunits
;
2180 perm_mask
= vect_gen_perm_mask_checked (gs_info
.offset_vectype
, sel
);
2182 else if (nunits
== gather_off_nunits
* 2)
2184 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
2187 for (i
= 0; i
< nunits
; ++i
)
2188 sel
[i
] = i
< gather_off_nunits
2189 ? i
: i
+ nunits
- gather_off_nunits
;
2191 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
2193 for (i
= 0; i
< nunits
; ++i
)
2194 sel
[i
] = i
| gather_off_nunits
;
2195 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, sel
);
2200 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
2202 ptr
= fold_convert (ptrtype
, gs_info
.base
);
2203 if (!is_gimple_min_invariant (ptr
))
2205 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
2206 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
2207 gcc_assert (!new_bb
);
2210 scale
= build_int_cst (scaletype
, gs_info
.scale
);
2212 prev_stmt_info
= NULL
;
2213 for (j
= 0; j
< ncopies
; ++j
)
2215 if (modifier
== WIDEN
&& (j
& 1))
2216 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
2217 perm_mask
, stmt
, gsi
);
2220 = vect_get_vec_def_for_operand (gs_info
.offset
, stmt
);
2223 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
, vec_oprnd0
);
2225 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
2227 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
2228 == TYPE_VECTOR_SUBPARTS (idxtype
));
2229 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
2230 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
2232 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
2233 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2237 if (mask_perm_mask
&& (j
& 1))
2238 mask_op
= permute_vec_elements (mask_op
, mask_op
,
2239 mask_perm_mask
, stmt
, gsi
);
2243 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
2246 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
2247 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
2251 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
2253 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
))
2254 == TYPE_VECTOR_SUBPARTS (masktype
));
2255 var
= vect_get_new_ssa_name (masktype
, vect_simple_var
);
2256 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
2258 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, mask_op
);
2259 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2265 = gimple_build_call (gs_info
.decl
, 5, mask_op
, ptr
, op
, mask_op
,
2268 if (!useless_type_conversion_p (vectype
, rettype
))
2270 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
2271 == TYPE_VECTOR_SUBPARTS (rettype
));
2272 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
2273 gimple_call_set_lhs (new_stmt
, op
);
2274 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2275 var
= make_ssa_name (vec_dest
);
2276 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
2277 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
2281 var
= make_ssa_name (vec_dest
, new_stmt
);
2282 gimple_call_set_lhs (new_stmt
, var
);
2285 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2287 if (modifier
== NARROW
)
2294 var
= permute_vec_elements (prev_res
, var
,
2295 perm_mask
, stmt
, gsi
);
2296 new_stmt
= SSA_NAME_DEF_STMT (var
);
2299 if (prev_stmt_info
== NULL
)
2300 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2302 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2303 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2306 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2307 from the IL.  */
2308 if (STMT_VINFO_RELATED_STMT (stmt_info
))
2310 stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
2311 stmt_info
= vinfo_for_stmt (stmt
);
2313 tree lhs
= gimple_call_lhs (stmt
);
2314 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (TREE_TYPE (lhs
)));
2315 set_vinfo_for_stmt (new_stmt
, stmt_info
);
2316 set_vinfo_for_stmt (stmt
, NULL
);
2317 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
2318 gsi_replace (gsi
, new_stmt
, true);
2321 else if (vls_type
!= VLS_LOAD
)
2323 tree vec_rhs
= NULL_TREE
, vec_mask
= NULL_TREE
;
2324 prev_stmt_info
= NULL
;
2325 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo
) = true;
2326 for (i
= 0; i
< ncopies
; i
++)
2328 unsigned align
, misalign
;
2332 tree rhs
= gimple_call_arg (stmt
, 3);
2333 vec_rhs
= vect_get_vec_def_for_operand (rhs
, stmt
);
2334 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
2335 /* We should have caught mismatched types earlier.  */
2336 gcc_assert (useless_type_conversion_p (vectype
,
2337 TREE_TYPE (vec_rhs
)));
2338 dataref_ptr
= vect_create_data_ref_ptr (stmt
, vectype
, NULL
,
2339 NULL_TREE
, &dummy
, gsi
,
2340 &ptr_incr
, false, &inv_p
);
2341 gcc_assert (!inv_p
);
2345 vect_is_simple_use (vec_rhs
, loop_vinfo
, &def_stmt
, &dt
);
2346 vec_rhs
= vect_get_vec_def_for_stmt_copy (dt
, vec_rhs
);
2347 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
2348 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
2349 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
2350 TYPE_SIZE_UNIT (vectype
));
2353 align
= TYPE_ALIGN_UNIT (vectype
);
2354 if (aligned_access_p (dr
))
2356 else if (DR_MISALIGNMENT (dr
) == -1)
2358 align
= TYPE_ALIGN_UNIT (elem_type
);
2362 misalign
= DR_MISALIGNMENT (dr
);
2363 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
2365 tree ptr
= build_int_cst (TREE_TYPE (gimple_call_arg (stmt
, 1)),
2366 misalign
? least_bit_hwi (misalign
) : align
);
2368 = gimple_build_call_internal (IFN_MASK_STORE
, 4, dataref_ptr
,
2369 ptr
, vec_mask
, vec_rhs
);
2370 gimple_call_set_nothrow (call
, true);
2372 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2374 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2376 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2377 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2382 tree vec_mask
= NULL_TREE
;
2383 prev_stmt_info
= NULL
;
2384 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
2385 for (i
= 0; i
< ncopies
; i
++)
2387 unsigned align
, misalign
;
2391 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
2392 dataref_ptr
= vect_create_data_ref_ptr (stmt
, vectype
, NULL
,
2393 NULL_TREE
, &dummy
, gsi
,
2394 &ptr_incr
, false, &inv_p
);
2395 gcc_assert (!inv_p
);
2399 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
2400 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
2401 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
2402 TYPE_SIZE_UNIT (vectype
));
2405 align
= TYPE_ALIGN_UNIT (vectype
);
2406 if (aligned_access_p (dr
))
2408 else if (DR_MISALIGNMENT (dr
) == -1)
2410 align
= TYPE_ALIGN_UNIT (elem_type
);
2414 misalign
= DR_MISALIGNMENT (dr
);
2415 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
2417 tree ptr
= build_int_cst (TREE_TYPE (gimple_call_arg (stmt
, 1)),
2418 misalign
? least_bit_hwi (misalign
) : align
);
2420 = gimple_build_call_internal (IFN_MASK_LOAD
, 3, dataref_ptr
,
2422 gimple_call_set_lhs (call
, make_ssa_name (vec_dest
));
2423 gimple_call_set_nothrow (call
, true);
2424 vect_finish_stmt_generation (stmt
, call
, gsi
);
2426 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= call
;
2428 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = call
;
2429 prev_stmt_info
= vinfo_for_stmt (call
);
2433 if (vls_type
== VLS_LOAD
)
2435 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2436 from the IL.  */
2437 if (STMT_VINFO_RELATED_STMT (stmt_info
))
2439 stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
2440 stmt_info
= vinfo_for_stmt (stmt
);
2442 tree lhs
= gimple_call_lhs (stmt
);
2443 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (TREE_TYPE (lhs
)));
2444 set_vinfo_for_stmt (new_stmt
, stmt_info
);
2445 set_vinfo_for_stmt (stmt
, NULL
);
2446 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
2447 gsi_replace (gsi
, new_stmt
, true);
2453 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
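/* Illustrative sketch (hypothetical arrays).  A loop such as

     for (int i = 0; i < n; i++)
       out[i] = __builtin_bswap32 (in[i]);

   is handled here by viewing each vector of 32-bit elements as a vector
   of chars and applying a VEC_PERM_EXPR that reverses the bytes within
   each 4-byte word.  */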
2456 vectorizable_bswap (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
2457 gimple
**vec_stmt
, slp_tree slp_node
,
2458 tree vectype_in
, enum vect_def_type
*dt
)
2461 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2462 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2463 unsigned ncopies
, nunits
;
2465 op
= gimple_call_arg (stmt
, 0);
2466 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
2467 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
2469 /* Multiple types in SLP are handled by creating the appropriate number of
2470 vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
2471 case of SLP.  */
2475 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
2477 gcc_assert (ncopies
>= 1);
2479 tree char_vectype
= get_same_sized_vectype (char_type_node
, vectype_in
);
2484 = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (char_vectype
));
2485 unsigned char *elt
= elts
;
2486 unsigned word_bytes
= TYPE_VECTOR_SUBPARTS (char_vectype
) / nunits
;
2487 for (unsigned i
= 0; i
< nunits
; ++i
)
2488 for (unsigned j
= 0; j
< word_bytes
; ++j
)
2489 *elt
++ = (i
+ 1) * word_bytes
- j
- 1;
2491 if (! can_vec_perm_p (TYPE_MODE (char_vectype
), false, elts
))
2496 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
2497 if (dump_enabled_p ())
2498 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vectorizable_bswap ==="
2500 if (! PURE_SLP_STMT (stmt_info
))
2502 add_stmt_cost (stmt_info
->vinfo
->target_cost_data
,
2503 1, vector_stmt
, stmt_info
, 0, vect_prologue
);
2504 add_stmt_cost (stmt_info
->vinfo
->target_cost_data
,
2505 ncopies
, vec_perm
, stmt_info
, 0, vect_body
);
2510 tree
*telts
= XALLOCAVEC (tree
, TYPE_VECTOR_SUBPARTS (char_vectype
));
2511 for (unsigned i
= 0; i
< TYPE_VECTOR_SUBPARTS (char_vectype
); ++i
)
2512 telts
[i
] = build_int_cst (char_type_node
, elts
[i
]);
2513 tree bswap_vconst
= build_vector (char_vectype
, telts
);
2516 vec
<tree
> vec_oprnds
= vNULL
;
2517 gimple
*new_stmt
= NULL
;
2518 stmt_vec_info prev_stmt_info
= NULL
;
2519 for (unsigned j
= 0; j
< ncopies
; j
++)
2523 vect_get_vec_defs (op
, NULL
, stmt
, &vec_oprnds
, NULL
, slp_node
);
2525 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds
, NULL
);
2527 /* Arguments are ready.  Create the new vector stmt.  */
2530 FOR_EACH_VEC_ELT (vec_oprnds
, i
, vop
)
2532 tree tem
= make_ssa_name (char_vectype
);
2533 new_stmt
= gimple_build_assign (tem
, build1 (VIEW_CONVERT_EXPR
,
2534 char_vectype
, vop
));
2535 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2536 tree tem2
= make_ssa_name (char_vectype
);
2537 new_stmt
= gimple_build_assign (tem2
, VEC_PERM_EXPR
,
2538 tem
, tem
, bswap_vconst
);
2539 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2540 tem
= make_ssa_name (vectype
);
2541 new_stmt
= gimple_build_assign (tem
, build1 (VIEW_CONVERT_EXPR
,
2543 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2545 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
2552 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2554 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2556 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2559 vec_oprnds
.release ();
2563 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
2564 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
2565 in a single step.  On success, store the binary pack code in
2566 *CONVERT_CODE.  */
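/* Illustrative example (a sketch, not generated code): narrowing a vector
   of ints to a vector of shorts in a single step would typically report
   VEC_PACK_TRUNC_EXPR, which combines two int vectors into one short
   vector.  */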
2569 simple_integer_narrowing (tree vectype_out
, tree vectype_in
,
2570 tree_code
*convert_code
)
2572 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out
))
2573 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in
)))
2577 int multi_step_cvt
= 0;
2578 auto_vec
<tree
, 8> interm_types
;
2579 if (!supportable_narrowing_operation (NOP_EXPR
, vectype_out
, vectype_in
,
2580 &code
, &multi_step_cvt
,
2585 *convert_code
= code
;
2589 /* Function vectorizable_call.
2591 Check if GS performs a function call that can be vectorized.
2592 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2593 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2594 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
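/* Illustrative sketch (hypothetical arrays).  A call such as

     for (int i = 0; i < n; i++)
       y[i] = sqrtf (x[i]);

   is vectorized either via an internal function (e.g. IFN_SQRT) or via a
   target-specific built-in that returns a whole vector, as selected
   below.  */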
2597 vectorizable_call (gimple
*gs
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
2604 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
;
2605 stmt_vec_info stmt_info
= vinfo_for_stmt (gs
), prev_stmt_info
;
2606 tree vectype_out
, vectype_in
;
2609 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2610 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
2611 vec_info
*vinfo
= stmt_info
->vinfo
;
2612 tree fndecl
, new_temp
, rhs_type
;
2614 enum vect_def_type dt
[3]
2615 = {vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
};
2617 gimple
*new_stmt
= NULL
;
2619 vec
<tree
> vargs
= vNULL
;
2620 enum { NARROW
, NONE
, WIDEN
} modifier
;
2624 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
2627 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
2631 /* Is GS a vectorizable call? */
2632 stmt
= dyn_cast
<gcall
*> (gs
);
2636 if (gimple_call_internal_p (stmt
)
2637 && (gimple_call_internal_fn (stmt
) == IFN_MASK_LOAD
2638 || gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
))
2639 return vectorizable_mask_load_store (stmt
, gsi
, vec_stmt
,
2642 if (gimple_call_lhs (stmt
) == NULL_TREE
2643 || TREE_CODE (gimple_call_lhs (stmt
)) != SSA_NAME
)
2646 gcc_checking_assert (!stmt_can_throw_internal (stmt
));
2648 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
2650 /* Process function arguments. */
2651 rhs_type
= NULL_TREE
;
2652 vectype_in
= NULL_TREE
;
2653 nargs
= gimple_call_num_args (stmt
);
2655 /* Bail out if the function has more than three arguments; we do not have
2656 interesting builtin functions to vectorize with more than two arguments
2657 except for fma.  No arguments is also not good.  */
2658 if (nargs
== 0 || nargs
> 3)
2661 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
2662 if (gimple_call_internal_p (stmt
)
2663 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
)
2666 rhs_type
= unsigned_type_node
;
2669 for (i
= 0; i
< nargs
; i
++)
2673 op
= gimple_call_arg (stmt
, i
);
2675 /* We can only handle calls with arguments of the same type. */
2677 && !types_compatible_p (rhs_type
, TREE_TYPE (op
)))
2679 if (dump_enabled_p ())
2680 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2681 "argument types differ.\n");
2685 rhs_type
= TREE_TYPE (op
);
2687 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
[i
], &opvectype
))
2689 if (dump_enabled_p ())
2690 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2691 "use not simple.\n");
2696 vectype_in
= opvectype
;
2698 && opvectype
!= vectype_in
)
2700 if (dump_enabled_p ())
2701 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2702 "argument vector types differ.\n");
2706 /* If all arguments are external or constant defs, use a vector type with
2707 the same size as the output vector type. */
2709 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
2711 gcc_assert (vectype_in
);
2714 if (dump_enabled_p ())
2716 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2717 "no vectype for scalar type ");
2718 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
2719 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
2726 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
2727 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
2728 if (nunits_in
== nunits_out
/ 2)
2730 else if (nunits_out
== nunits_in
)
2732 else if (nunits_out
== nunits_in
/ 2)
2737 /* We only handle functions that do not read or clobber memory. */
2738 if (gimple_vuse (stmt
))
2740 if (dump_enabled_p ())
2741 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2742 "function reads from or writes to memory.\n");
2746 /* For now, we only vectorize functions if a target specific builtin
2747 is available. TODO -- in some cases, it might be profitable to
2748 insert the calls for pieces of the vector, in order to be able
2749 to vectorize other operations in the loop. */
2751 internal_fn ifn
= IFN_LAST
;
2752 combined_fn cfn
= gimple_call_combined_fn (stmt
);
2753 tree callee
= gimple_call_fndecl (stmt
);
2755 /* First try using an internal function. */
2756 tree_code convert_code
= ERROR_MARK
;
2758 && (modifier
== NONE
2759 || (modifier
== NARROW
2760 && simple_integer_narrowing (vectype_out
, vectype_in
,
2762 ifn
= vectorizable_internal_function (cfn
, callee
, vectype_out
,
2765 /* If that fails, try asking for a target-specific built-in function. */
2766 if (ifn
== IFN_LAST
)
2768 if (cfn
!= CFN_LAST
)
2769 fndecl
= targetm
.vectorize
.builtin_vectorized_function
2770 (cfn
, vectype_out
, vectype_in
);
2772 fndecl
= targetm
.vectorize
.builtin_md_vectorized_function
2773 (callee
, vectype_out
, vectype_in
);
2776 if (ifn
== IFN_LAST
&& !fndecl
)
2778 if (cfn
== CFN_GOMP_SIMD_LANE
2781 && LOOP_VINFO_LOOP (loop_vinfo
)->simduid
2782 && TREE_CODE (gimple_call_arg (stmt
, 0)) == SSA_NAME
2783 && LOOP_VINFO_LOOP (loop_vinfo
)->simduid
2784 == SSA_NAME_VAR (gimple_call_arg (stmt
, 0)))
2786 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2787 { 0, 1, 2, ... vf - 1 } vector. */
2788 gcc_assert (nargs
== 0);
2790 else if (modifier
== NONE
2791 && (gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP16
)
2792 || gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP32
)
2793 || gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP64
)))
2794 return vectorizable_bswap (stmt
, gsi
, vec_stmt
, slp_node
,
2798 if (dump_enabled_p ())
2799 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2800 "function is not vectorizable.\n");
2807 else if (modifier
== NARROW
&& ifn
== IFN_LAST
)
2808 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_out
;
2810 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
2812 /* Sanity check: make sure that at least one copy of the vectorized stmt
2813 needs to be generated. */
2814 gcc_assert (ncopies
>= 1);
2816 if (!vec_stmt
) /* transformation not required. */
2818 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
2819 if (dump_enabled_p ())
2820 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vectorizable_call ==="
2822 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
2823 if (ifn
!= IFN_LAST
&& modifier
== NARROW
&& !slp_node
)
2824 add_stmt_cost (stmt_info
->vinfo
->target_cost_data
, ncopies
/ 2,
2825 vec_promote_demote
, stmt_info
, 0, vect_body
);
2832 if (dump_enabled_p ())
2833 dump_printf_loc (MSG_NOTE
, vect_location
, "transform call.\n");
2836 scalar_dest
= gimple_call_lhs (stmt
);
2837 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
2839 prev_stmt_info
= NULL
;
2840 if (modifier
== NONE
|| ifn
!= IFN_LAST
)
2842 tree prev_res
= NULL_TREE
;
2843 for (j
= 0; j
< ncopies
; ++j
)
2845 /* Build argument list for the vectorized call. */
2847 vargs
.create (nargs
);
2853 auto_vec
<vec
<tree
> > vec_defs (nargs
);
2854 vec
<tree
> vec_oprnds0
;
2856 for (i
= 0; i
< nargs
; i
++)
2857 vargs
.quick_push (gimple_call_arg (stmt
, i
));
2858 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
);
2859 vec_oprnds0
= vec_defs
[0];
2861 /* Arguments are ready. Create the new vector stmt. */
2862 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_oprnd0
)
2865 for (k
= 0; k
< nargs
; k
++)
2867 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
2868 vargs
[k
] = vec_oprndsk
[i
];
2870 if (modifier
== NARROW
)
2872 tree half_res
= make_ssa_name (vectype_in
);
2874 = gimple_build_call_internal_vec (ifn
, vargs
);
2875 gimple_call_set_lhs (call
, half_res
);
2876 gimple_call_set_nothrow (call
, true);
2878 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2881 prev_res
= half_res
;
2884 new_temp
= make_ssa_name (vec_dest
);
2885 new_stmt
= gimple_build_assign (new_temp
, convert_code
,
2886 prev_res
, half_res
);
2891 if (ifn
!= IFN_LAST
)
2892 call
= gimple_build_call_internal_vec (ifn
, vargs
);
2894 call
= gimple_build_call_vec (fndecl
, vargs
);
2895 new_temp
= make_ssa_name (vec_dest
, call
);
2896 gimple_call_set_lhs (call
, new_temp
);
2897 gimple_call_set_nothrow (call
, true);
2900 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2901 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
2904 for (i
= 0; i
< nargs
; i
++)
2906 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
2907 vec_oprndsi
.release ();
2912 for (i
= 0; i
< nargs
; i
++)
2914 op
= gimple_call_arg (stmt
, i
);
2917 = vect_get_vec_def_for_operand (op
, stmt
);
2920 vec_oprnd0
= gimple_call_arg (new_stmt
, i
);
2922 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
2925 vargs
.quick_push (vec_oprnd0
);
2928 if (gimple_call_internal_p (stmt
)
2929 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
)
2931 tree
*v
= XALLOCAVEC (tree
, nunits_out
);
2933 for (k
= 0; k
< nunits_out
; ++k
)
2934 v
[k
] = build_int_cst (unsigned_type_node
, j
* nunits_out
+ k
);
2935 tree cst
= build_vector (vectype_out
, v
);
2937 = vect_get_new_ssa_name (vectype_out
, vect_simple_var
, "cst_");
2938 gimple
*init_stmt
= gimple_build_assign (new_var
, cst
);
2939 vect_init_vector_1 (stmt
, init_stmt
, NULL
);
2940 new_temp
= make_ssa_name (vec_dest
);
2941 new_stmt
= gimple_build_assign (new_temp
, new_var
);
2943 else if (modifier
== NARROW
)
2945 tree half_res
= make_ssa_name (vectype_in
);
2946 gcall
*call
= gimple_build_call_internal_vec (ifn
, vargs
);
2947 gimple_call_set_lhs (call
, half_res
);
2948 gimple_call_set_nothrow (call
, true);
2950 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2953 prev_res
= half_res
;
2956 new_temp
= make_ssa_name (vec_dest
);
2957 new_stmt
= gimple_build_assign (new_temp
, convert_code
,
2958 prev_res
, half_res
);
2963 if (ifn
!= IFN_LAST
)
2964 call
= gimple_build_call_internal_vec (ifn
, vargs
);
2966 call
= gimple_build_call_vec (fndecl
, vargs
);
2967 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
2968 gimple_call_set_lhs (call
, new_temp
);
2969 gimple_call_set_nothrow (call
, true);
2972 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2974 if (j
== (modifier
== NARROW
? 1 : 0))
2975 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2977 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2979 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2982 else if (modifier
== NARROW
)
2984 for (j
= 0; j
< ncopies
; ++j
)
2986 /* Build argument list for the vectorized call. */
2988 vargs
.create (nargs
* 2);
2994 auto_vec
<vec
<tree
> > vec_defs (nargs
);
2995 vec
<tree
> vec_oprnds0
;
2997 for (i
= 0; i
< nargs
; i
++)
2998 vargs
.quick_push (gimple_call_arg (stmt
, i
));
2999 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
);
3000 vec_oprnds0
= vec_defs
[0];
3002 /* Arguments are ready. Create the new vector stmt. */
3003 for (i
= 0; vec_oprnds0
.iterate (i
, &vec_oprnd0
); i
+= 2)
3007 for (k
= 0; k
< nargs
; k
++)
3009 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
3010 vargs
.quick_push (vec_oprndsk
[i
]);
3011 vargs
.quick_push (vec_oprndsk
[i
+ 1]);
3014 if (ifn
!= IFN_LAST
)
3015 call
= gimple_build_call_internal_vec (ifn
, vargs
);
3017 call
= gimple_build_call_vec (fndecl
, vargs
);
3018 new_temp
= make_ssa_name (vec_dest
, call
);
3019 gimple_call_set_lhs (call
, new_temp
);
3020 gimple_call_set_nothrow (call
, true);
3022 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3023 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
3026 for (i
= 0; i
< nargs
; i
++)
3028 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
3029 vec_oprndsi
.release ();
3034 for (i
= 0; i
< nargs
; i
++)
3036 op
= gimple_call_arg (stmt
, i
);
3040 = vect_get_vec_def_for_operand (op
, stmt
);
3042 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
3046 vec_oprnd1
= gimple_call_arg (new_stmt
, 2*i
+ 1);
3048 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd1
);
3050 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
3053 vargs
.quick_push (vec_oprnd0
);
3054 vargs
.quick_push (vec_oprnd1
);
3057 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
3058 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3059 gimple_call_set_lhs (new_stmt
, new_temp
);
3060 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3063 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
3065 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3067 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3070 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
3073 /* No current target implements this case. */
3078 /* The call in STMT might prevent it from being removed in dce.
3079 We however cannot remove it here, due to the way the ssa name
3080 it defines is mapped to the new definition. So just replace
3081 rhs of the statement with something harmless. */
3086 type
= TREE_TYPE (scalar_dest
);
3087 if (is_pattern_stmt_p (stmt_info
))
3088 lhs
= gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info
));
3090 lhs
= gimple_call_lhs (stmt
);
3092 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (type
));
3093 set_vinfo_for_stmt (new_stmt
, stmt_info
);
3094 set_vinfo_for_stmt (stmt
, NULL
);
3095 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
3096 gsi_replace (gsi
, new_stmt
, false);
3102 struct simd_call_arg_info
3106 HOST_WIDE_INT linear_step
;
3107 enum vect_def_type dt
;
3109 bool simd_lane_linear
;
3112 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3113 is linear within simd lane (but not within whole loop), note it in
3114 *ARGINFO.  */
3117 vect_simd_lane_linear (tree op
, struct loop
*loop
,
3118 struct simd_call_arg_info
*arginfo
)
3120 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
3122 if (!is_gimple_assign (def_stmt
)
3123 || gimple_assign_rhs_code (def_stmt
) != POINTER_PLUS_EXPR
3124 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt
)))
3127 tree base
= gimple_assign_rhs1 (def_stmt
);
3128 HOST_WIDE_INT linear_step
= 0;
3129 tree v
= gimple_assign_rhs2 (def_stmt
);
3130 while (TREE_CODE (v
) == SSA_NAME
)
3133 def_stmt
= SSA_NAME_DEF_STMT (v
);
3134 if (is_gimple_assign (def_stmt
))
3135 switch (gimple_assign_rhs_code (def_stmt
))
3138 t
= gimple_assign_rhs2 (def_stmt
);
3139 if (linear_step
|| TREE_CODE (t
) != INTEGER_CST
)
3141 base
= fold_build2 (POINTER_PLUS_EXPR
, TREE_TYPE (base
), base
, t
);
3142 v
= gimple_assign_rhs1 (def_stmt
);
3145 t
= gimple_assign_rhs2 (def_stmt
);
3146 if (linear_step
|| !tree_fits_shwi_p (t
) || integer_zerop (t
))
3148 linear_step
= tree_to_shwi (t
);
3149 v
= gimple_assign_rhs1 (def_stmt
);
3152 t
= gimple_assign_rhs1 (def_stmt
);
3153 if (TREE_CODE (TREE_TYPE (t
)) != INTEGER_TYPE
3154 || (TYPE_PRECISION (TREE_TYPE (v
))
3155 < TYPE_PRECISION (TREE_TYPE (t
))))
3164 else if (gimple_call_internal_p (def_stmt
, IFN_GOMP_SIMD_LANE
)
3166 && TREE_CODE (gimple_call_arg (def_stmt
, 0)) == SSA_NAME
3167 && (SSA_NAME_VAR (gimple_call_arg (def_stmt
, 0))
3172 arginfo
->linear_step
= linear_step
;
3174 arginfo
->simd_lane_linear
= true;
3180 /* Function vectorizable_simd_clone_call.
3182 Check if STMT performs a function call that can be vectorized
3183 by calling a simd clone of the function.
3184 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3185 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3186 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
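/* Illustrative sketch (hypothetical function).  Given

     #pragma omp declare simd
     int f (int x);

     for (int i = 0; i < n; i++)
       a[i] = f (b[i]);

   the call can be vectorized by invoking one of f's simd clones on a
   vector of arguments; this function picks the best-matching clone and
   emits the vector call.  */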
3189 vectorizable_simd_clone_call (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
3190 gimple
**vec_stmt
, slp_tree slp_node
)
3195 tree vec_oprnd0
= NULL_TREE
;
3196 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
), prev_stmt_info
;
3198 unsigned int nunits
;
3199 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3200 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3201 vec_info
*vinfo
= stmt_info
->vinfo
;
3202 struct loop
*loop
= loop_vinfo
? LOOP_VINFO_LOOP (loop_vinfo
) : NULL
;
3203 tree fndecl
, new_temp
;
3205 gimple
*new_stmt
= NULL
;
3207 auto_vec
<simd_call_arg_info
> arginfo
;
3208 vec
<tree
> vargs
= vNULL
;
3210 tree lhs
, rtype
, ratype
;
3211 vec
<constructor_elt
, va_gc
> *ret_ctor_elts
;
3213 /* Is STMT a vectorizable call? */
3214 if (!is_gimple_call (stmt
))
3217 fndecl
= gimple_call_fndecl (stmt
);
3218 if (fndecl
== NULL_TREE
)
3221 struct cgraph_node
*node
= cgraph_node::get (fndecl
);
3222 if (node
== NULL
|| node
->simd_clones
== NULL
)
3225 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3228 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
3232 if (gimple_call_lhs (stmt
)
3233 && TREE_CODE (gimple_call_lhs (stmt
)) != SSA_NAME
)
3236 gcc_checking_assert (!stmt_can_throw_internal (stmt
));
3238 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
3240 if (loop_vinfo
&& nested_in_vect_loop_p (loop
, stmt
))
3247 /* Process function arguments. */
3248 nargs
= gimple_call_num_args (stmt
);
3250 /* Bail out if the function has zero arguments. */
3254 arginfo
.reserve (nargs
, true);
3256 for (i
= 0; i
< nargs
; i
++)
3258 simd_call_arg_info thisarginfo
;
3261 thisarginfo
.linear_step
= 0;
3262 thisarginfo
.align
= 0;
3263 thisarginfo
.op
= NULL_TREE
;
3264 thisarginfo
.simd_lane_linear
= false;
3266 op
= gimple_call_arg (stmt
, i
);
3267 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &thisarginfo
.dt
,
3268 &thisarginfo
.vectype
)
3269 || thisarginfo
.dt
== vect_uninitialized_def
)
3271 if (dump_enabled_p ())
3272 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3273 "use not simple.\n");
3277 if (thisarginfo
.dt
== vect_constant_def
3278 || thisarginfo
.dt
== vect_external_def
)
3279 gcc_assert (thisarginfo
.vectype
== NULL_TREE
);
3281 gcc_assert (thisarginfo
.vectype
!= NULL_TREE
);
3283 /* For linear arguments, the analyze phase should have saved
3284 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3285 if (i
* 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).length ()
3286 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2])
3288 gcc_assert (vec_stmt
);
3289 thisarginfo
.linear_step
3290 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2]);
3292 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 1];
3293 thisarginfo
.simd_lane_linear
3294 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 3]
3295 == boolean_true_node
);
3296 /* If loop has been peeled for alignment, we need to adjust it. */
3297 tree n1
= LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo
);
3298 tree n2
= LOOP_VINFO_NITERS (loop_vinfo
);
3299 if (n1
!= n2
&& !thisarginfo
.simd_lane_linear
)
3301 tree bias
= fold_build2 (MINUS_EXPR
, TREE_TYPE (n1
), n1
, n2
);
3302 tree step
= STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2];
3303 tree opt
= TREE_TYPE (thisarginfo
.op
);
3304 bias
= fold_convert (TREE_TYPE (step
), bias
);
3305 bias
= fold_build2 (MULT_EXPR
, TREE_TYPE (step
), bias
, step
);
3307 = fold_build2 (POINTER_TYPE_P (opt
)
3308 ? POINTER_PLUS_EXPR
: PLUS_EXPR
, opt
,
3309 thisarginfo
.op
, bias
);
3313 && thisarginfo
.dt
!= vect_constant_def
3314 && thisarginfo
.dt
!= vect_external_def
3316 && TREE_CODE (op
) == SSA_NAME
3317 && simple_iv (loop
, loop_containing_stmt (stmt
), op
,
3319 && tree_fits_shwi_p (iv
.step
))
3321 thisarginfo
.linear_step
= tree_to_shwi (iv
.step
);
3322 thisarginfo
.op
= iv
.base
;
3324 else if ((thisarginfo
.dt
== vect_constant_def
3325 || thisarginfo
.dt
== vect_external_def
)
3326 && POINTER_TYPE_P (TREE_TYPE (op
)))
3327 thisarginfo
.align
= get_pointer_alignment (op
) / BITS_PER_UNIT
;
3328 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3329 linear too.  */
3330 if (POINTER_TYPE_P (TREE_TYPE (op
))
3331 && !thisarginfo
.linear_step
3333 && thisarginfo
.dt
!= vect_constant_def
3334 && thisarginfo
.dt
!= vect_external_def
3337 && TREE_CODE (op
) == SSA_NAME
)
3338 vect_simd_lane_linear (op
, loop
, &thisarginfo
);
3340 arginfo
.quick_push (thisarginfo
);
3343 unsigned int badness
= 0;
3344 struct cgraph_node
*bestn
= NULL
;
3345 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).exists ())
3346 bestn
= cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[0]);
3348 for (struct cgraph_node
*n
= node
->simd_clones
; n
!= NULL
;
3349 n
= n
->simdclone
->next_clone
)
3351 unsigned int this_badness
= 0;
3352 if (n
->simdclone
->simdlen
3353 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
3354 || n
->simdclone
->nargs
!= nargs
)
3356 if (n
->simdclone
->simdlen
3357 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo
))
3358 this_badness
+= (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo
))
3359 - exact_log2 (n
->simdclone
->simdlen
)) * 1024;
3360 if (n
->simdclone
->inbranch
)
3361 this_badness
+= 2048;
3362 int target_badness
= targetm
.simd_clone
.usable (n
);
3363 if (target_badness
< 0)
3365 this_badness
+= target_badness
* 512;
3366 /* FORNOW: Have to add code to add the mask argument. */
3367 if (n
->simdclone
->inbranch
)
3369 for (i
= 0; i
< nargs
; i
++)
3371 switch (n
->simdclone
->args
[i
].arg_type
)
3373 case SIMD_CLONE_ARG_TYPE_VECTOR
:
3374 if (!useless_type_conversion_p
3375 (n
->simdclone
->args
[i
].orig_type
,
3376 TREE_TYPE (gimple_call_arg (stmt
, i
))))
3378 else if (arginfo
[i
].dt
== vect_constant_def
3379 || arginfo
[i
].dt
== vect_external_def
3380 || arginfo
[i
].linear_step
)
3383 case SIMD_CLONE_ARG_TYPE_UNIFORM
:
3384 if (arginfo
[i
].dt
!= vect_constant_def
3385 && arginfo
[i
].dt
!= vect_external_def
)
3388 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
:
3389 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP
:
3390 if (arginfo
[i
].dt
== vect_constant_def
3391 || arginfo
[i
].dt
== vect_external_def
3392 || (arginfo
[i
].linear_step
3393 != n
->simdclone
->args
[i
].linear_step
))
3396 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
:
3397 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP
:
3398 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
:
3399 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP
:
3400 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP
:
3401 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP
:
3405 case SIMD_CLONE_ARG_TYPE_MASK
:
3408 if (i
== (size_t) -1)
3410 if (n
->simdclone
->args
[i
].alignment
> arginfo
[i
].align
)
3415 if (arginfo
[i
].align
)
3416 this_badness
+= (exact_log2 (arginfo
[i
].align
)
3417 - exact_log2 (n
->simdclone
->args
[i
].alignment
));
3419 if (i
== (size_t) -1)
3421 if (bestn
== NULL
|| this_badness
< badness
)
3424 badness
= this_badness
;
3431 for (i
= 0; i
< nargs
; i
++)
3432 if ((arginfo
[i
].dt
== vect_constant_def
3433 || arginfo
[i
].dt
== vect_external_def
)
3434 && bestn
->simdclone
->args
[i
].arg_type
== SIMD_CLONE_ARG_TYPE_VECTOR
)
3437 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt
,
3439 if (arginfo
[i
].vectype
== NULL
3440 || (TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
)
3441 > bestn
->simdclone
->simdlen
))
3445 fndecl
= bestn
->decl
;
3446 nunits
= bestn
->simdclone
->simdlen
;
3447 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
3449 /* If the function isn't const, only allow it in simd loops where user
3450 has asserted that at least nunits consecutive iterations can be
3451 performed using SIMD instructions. */
3452 if ((loop
== NULL
|| (unsigned) loop
->safelen
< nunits
)
3453 && gimple_vuse (stmt
))
3456 /* Sanity check: make sure that at least one copy of the vectorized stmt
3457 needs to be generated. */
3458 gcc_assert (ncopies
>= 1);
3460 if (!vec_stmt
) /* transformation not required. */
3462 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (bestn
->decl
);
3463 for (i
= 0; i
< nargs
; i
++)
3464 if ((bestn
->simdclone
->args
[i
].arg_type
3465 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
)
3466 || (bestn
->simdclone
->args
[i
].arg_type
3467 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP
))
3469 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_grow_cleared (i
* 3
3471 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (arginfo
[i
].op
);
3472 tree lst
= POINTER_TYPE_P (TREE_TYPE (arginfo
[i
].op
))
3473 ? size_type_node
: TREE_TYPE (arginfo
[i
].op
);
3474 tree ls
= build_int_cst (lst
, arginfo
[i
].linear_step
);
3475 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (ls
);
3476 tree sll
= arginfo
[i
].simd_lane_linear
3477 ? boolean_true_node
: boolean_false_node
;
3478 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (sll
);
3480 STMT_VINFO_TYPE (stmt_info
) = call_simd_clone_vec_info_type
;
3481 if (dump_enabled_p ())
3482 dump_printf_loc (MSG_NOTE
, vect_location
,
3483 "=== vectorizable_simd_clone_call ===\n");
3484 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3490 if (dump_enabled_p ())
3491 dump_printf_loc (MSG_NOTE
, vect_location
, "transform call.\n");
3494 scalar_dest
= gimple_call_lhs (stmt
);
3495 vec_dest
= NULL_TREE
;
3500 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
3501 rtype
= TREE_TYPE (TREE_TYPE (fndecl
));
3502 if (TREE_CODE (rtype
) == ARRAY_TYPE
)
3505 rtype
= TREE_TYPE (ratype
);
3509 prev_stmt_info
= NULL
;
3510 for (j
= 0; j
< ncopies
; ++j
)
3512 /* Build argument list for the vectorized call. */
3514 vargs
.create (nargs
);
3518 for (i
= 0; i
< nargs
; i
++)
3520 unsigned int k
, l
, m
, o
;
3522 op
= gimple_call_arg (stmt
, i
);
3523 switch (bestn
->simdclone
->args
[i
].arg_type
)
3525 case SIMD_CLONE_ARG_TYPE_VECTOR
:
3526 atype
= bestn
->simdclone
->args
[i
].vector_type
;
3527 o
= nunits
/ TYPE_VECTOR_SUBPARTS (atype
);
3528 for (m
= j
* o
; m
< (j
+ 1) * o
; m
++)
3530 if (TYPE_VECTOR_SUBPARTS (atype
)
3531 < TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
))
3533 unsigned int prec
= GET_MODE_BITSIZE (TYPE_MODE (atype
));
3534 k
= (TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
)
3535 / TYPE_VECTOR_SUBPARTS (atype
));
3536 gcc_assert ((k
& (k
- 1)) == 0);
3539 = vect_get_vec_def_for_operand (op
, stmt
);
3542 vec_oprnd0
= arginfo
[i
].op
;
3543 if ((m
& (k
- 1)) == 0)
3545 = vect_get_vec_def_for_stmt_copy (arginfo
[i
].dt
,
3548 arginfo
[i
].op
= vec_oprnd0
;
3550 = build3 (BIT_FIELD_REF
, atype
, vec_oprnd0
,
3552 bitsize_int ((m
& (k
- 1)) * prec
));
3554 = gimple_build_assign (make_ssa_name (atype
),
3556 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3557 vargs
.safe_push (gimple_assign_lhs (new_stmt
));
3561 k
= (TYPE_VECTOR_SUBPARTS (atype
)
3562 / TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
));
3563 gcc_assert ((k
& (k
- 1)) == 0);
3564 vec
<constructor_elt
, va_gc
> *ctor_elts
;
3566 vec_alloc (ctor_elts
, k
);
3569 for (l
= 0; l
< k
; l
++)
3571 if (m
== 0 && l
== 0)
3573 = vect_get_vec_def_for_operand (op
, stmt
);
3576 = vect_get_vec_def_for_stmt_copy (arginfo
[i
].dt
,
3578 arginfo
[i
].op
= vec_oprnd0
;
3581 CONSTRUCTOR_APPEND_ELT (ctor_elts
, NULL_TREE
,
3585 vargs
.safe_push (vec_oprnd0
);
3588 vec_oprnd0
= build_constructor (atype
, ctor_elts
);
3590 = gimple_build_assign (make_ssa_name (atype
),
3592 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3593 vargs
.safe_push (gimple_assign_lhs (new_stmt
));
3598 case SIMD_CLONE_ARG_TYPE_UNIFORM
:
3599 vargs
.safe_push (op
);
3601 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
:
3602 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP
:
3607 = force_gimple_operand (arginfo
[i
].op
, &stmts
, true,
3612 edge pe
= loop_preheader_edge (loop
);
3613 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, stmts
);
3614 gcc_assert (!new_bb
);
3616 if (arginfo
[i
].simd_lane_linear
)
3618 vargs
.safe_push (arginfo
[i
].op
);
3621 tree phi_res
= copy_ssa_name (op
);
3622 gphi
*new_phi
= create_phi_node (phi_res
, loop
->header
);
3623 set_vinfo_for_stmt (new_phi
,
3624 new_stmt_vec_info (new_phi
, loop_vinfo
));
3625 add_phi_arg (new_phi
, arginfo
[i
].op
,
3626 loop_preheader_edge (loop
), UNKNOWN_LOCATION
);
3628 = POINTER_TYPE_P (TREE_TYPE (op
))
3629 ? POINTER_PLUS_EXPR
: PLUS_EXPR
;
3630 tree type
= POINTER_TYPE_P (TREE_TYPE (op
))
3631 ? sizetype
: TREE_TYPE (op
);
3633 = wi::mul (bestn
->simdclone
->args
[i
].linear_step
,
3635 tree tcst
= wide_int_to_tree (type
, cst
);
3636 tree phi_arg
= copy_ssa_name (op
);
3638 = gimple_build_assign (phi_arg
, code
, phi_res
, tcst
);
3639 gimple_stmt_iterator si
= gsi_after_labels (loop
->header
);
3640 gsi_insert_after (&si
, new_stmt
, GSI_NEW_STMT
);
3641 set_vinfo_for_stmt (new_stmt
,
3642 new_stmt_vec_info (new_stmt
, loop_vinfo
));
3643 add_phi_arg (new_phi
, phi_arg
, loop_latch_edge (loop
),
3645 arginfo
[i
].op
= phi_res
;
3646 vargs
.safe_push (phi_res
);
3651 = POINTER_TYPE_P (TREE_TYPE (op
))
3652 ? POINTER_PLUS_EXPR
: PLUS_EXPR
;
3653 tree type
= POINTER_TYPE_P (TREE_TYPE (op
))
3654 ? sizetype
: TREE_TYPE (op
);
3656 = wi::mul (bestn
->simdclone
->args
[i
].linear_step
,
3658 tree tcst
= wide_int_to_tree (type
, cst
);
3659 new_temp
= make_ssa_name (TREE_TYPE (op
));
3660 new_stmt
= gimple_build_assign (new_temp
, code
,
3661 arginfo
[i
].op
, tcst
);
3662 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3663 vargs
.safe_push (new_temp
);
3666 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP
:
3667 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
:
3668 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
:
3669 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP
:
3670 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP
:
3671 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP
:
3677 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
3680 gcc_assert (ratype
|| TYPE_VECTOR_SUBPARTS (rtype
) == nunits
);
3682 new_temp
= create_tmp_var (ratype
);
3683 else if (TYPE_VECTOR_SUBPARTS (vectype
)
3684 == TYPE_VECTOR_SUBPARTS (rtype
))
3685 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3687 new_temp
= make_ssa_name (rtype
, new_stmt
);
3688 gimple_call_set_lhs (new_stmt
, new_temp
);
3690 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3694 if (TYPE_VECTOR_SUBPARTS (vectype
) < nunits
)
3697 unsigned int prec
= GET_MODE_BITSIZE (TYPE_MODE (vectype
));
3698 k
= nunits
/ TYPE_VECTOR_SUBPARTS (vectype
);
3699 gcc_assert ((k
& (k
- 1)) == 0);
3700 for (l
= 0; l
< k
; l
++)
3705 t
= build_fold_addr_expr (new_temp
);
3706 t
= build2 (MEM_REF
, vectype
, t
,
3707 build_int_cst (TREE_TYPE (t
),
3708 l
* prec
/ BITS_PER_UNIT
));
3711 t
= build3 (BIT_FIELD_REF
, vectype
, new_temp
,
3712 bitsize_int (prec
), bitsize_int (l
* prec
));
3714 = gimple_build_assign (make_ssa_name (vectype
), t
);
3715 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3716 if (j
== 0 && l
== 0)
3717 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3719 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3721 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3726 tree clobber
= build_constructor (ratype
, NULL
);
3727 TREE_THIS_VOLATILE (clobber
) = 1;
3728 new_stmt
= gimple_build_assign (new_temp
, clobber
);
3729 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3733 else if (TYPE_VECTOR_SUBPARTS (vectype
) > nunits
)
3735 unsigned int k
= (TYPE_VECTOR_SUBPARTS (vectype
)
3736 / TYPE_VECTOR_SUBPARTS (rtype
));
3737 gcc_assert ((k
& (k
- 1)) == 0);
3738 if ((j
& (k
- 1)) == 0)
3739 vec_alloc (ret_ctor_elts
, k
);
3742 unsigned int m
, o
= nunits
/ TYPE_VECTOR_SUBPARTS (rtype
);
3743 for (m
= 0; m
< o
; m
++)
3745 tree tem
= build4 (ARRAY_REF
, rtype
, new_temp
,
3746 size_int (m
), NULL_TREE
, NULL_TREE
);
3748 = gimple_build_assign (make_ssa_name (rtype
), tem
);
3749 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3750 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts
, NULL_TREE
,
3751 gimple_assign_lhs (new_stmt
));
3753 tree clobber
= build_constructor (ratype
, NULL
);
3754 TREE_THIS_VOLATILE (clobber
) = 1;
3755 new_stmt
= gimple_build_assign (new_temp
, clobber
);
3756 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3759 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts
, NULL_TREE
, new_temp
);
3760 if ((j
& (k
- 1)) != k
- 1)
3762 vec_oprnd0
= build_constructor (vectype
, ret_ctor_elts
);
3764 = gimple_build_assign (make_ssa_name (vec_dest
), vec_oprnd0
);
3765 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3767 if ((unsigned) j
== k
- 1)
3768 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3770 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3772 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3777 tree t
= build_fold_addr_expr (new_temp
);
3778 t
= build2 (MEM_REF
, vectype
, t
,
3779 build_int_cst (TREE_TYPE (t
), 0));
3781 = gimple_build_assign (make_ssa_name (vec_dest
), t
);
3782 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3783 tree clobber
= build_constructor (ratype
, NULL
);
3784 TREE_THIS_VOLATILE (clobber
) = 1;
3785 vect_finish_stmt_generation (stmt
,
3786 gimple_build_assign (new_temp
,
3792 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3794 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3796 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3801 /* The call in STMT might prevent it from being removed in dce.
3802 We however cannot remove it here, due to the way the ssa name
3803 it defines is mapped to the new definition. So just replace
3804 rhs of the statement with something harmless. */
3811 type
= TREE_TYPE (scalar_dest
);
3812 if (is_pattern_stmt_p (stmt_info
))
3813 lhs
= gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info
));
3815 lhs
= gimple_call_lhs (stmt
);
3816 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (type
));
3819 new_stmt
= gimple_build_nop ();
3820 set_vinfo_for_stmt (new_stmt
, stmt_info
);
3821 set_vinfo_for_stmt (stmt
, NULL
);
3822 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
3823 gsi_replace (gsi
, new_stmt
, true);
3824 unlink_stmt_vdef (stmt
);
3830 /* Function vect_gen_widened_results_half
3832 Create a vector stmt whose code, type, number of arguments, and result
3833 variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
3834 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
3835 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3836 needs to be created (DECL is a function-decl of a target-builtin).
3837 STMT is the original scalar stmt that we are vectorizing. */
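/* Illustrative example (a sketch): widening a vector of shorts to ints
   produces two int vectors, typically via VEC_UNPACK_LO_EXPR and
   VEC_UNPACK_HI_EXPR; each call to this function generates one of the
   two halves.  */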
3840 vect_gen_widened_results_half (enum tree_code code
,
3842 tree vec_oprnd0
, tree vec_oprnd1
, int op_type
,
3843 tree vec_dest
, gimple_stmt_iterator
*gsi
,
3849 /* Generate half of the widened result: */
3850 if (code
== CALL_EXPR
)
3852 /* Target specific support */
3853 if (op_type
== binary_op
)
3854 new_stmt
= gimple_build_call (decl
, 2, vec_oprnd0
, vec_oprnd1
);
3856 new_stmt
= gimple_build_call (decl
, 1, vec_oprnd0
);
3857 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3858 gimple_call_set_lhs (new_stmt
, new_temp
);
3862 /* Generic support */
3863 gcc_assert (op_type
== TREE_CODE_LENGTH (code
));
3864 if (op_type
!= binary_op
)
3866 new_stmt
= gimple_build_assign (vec_dest
, code
, vec_oprnd0
, vec_oprnd1
);
3867 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3868 gimple_assign_set_lhs (new_stmt
, new_temp
);
3870 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3876 /* Get vectorized definitions for loop-based vectorization. For the first
3877 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3878 scalar operand), and for the rest we get a copy with
3879 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3880 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3881 The vectors are collected into VEC_OPRNDS. */
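/* Illustrative example (a sketch): for two copies of the scalar
   statement the chain built here is roughly

     vec_def0 = vect_get_vec_def_for_operand (*oprnd, stmt);
     vec_def1 = vect_get_vec_def_for_stmt_copy (dt, vec_def0);

   with both definitions pushed onto VEC_OPRNDS.  */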
3884 vect_get_loop_based_defs (tree
*oprnd
, gimple
*stmt
, enum vect_def_type dt
,
3885 vec
<tree
> *vec_oprnds
, int multi_step_cvt
)
3889 /* Get first vector operand. */
3890 /* All the vector operands except the very first one (that is scalar oprnd)
3891 are stmt copies.  */
3892 if (TREE_CODE (TREE_TYPE (*oprnd
)) != VECTOR_TYPE
)
3893 vec_oprnd
= vect_get_vec_def_for_operand (*oprnd
, stmt
);
3895 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, *oprnd
);
3897 vec_oprnds
->quick_push (vec_oprnd
);
3899 /* Get second vector operand. */
3900 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, vec_oprnd
);
3901 vec_oprnds
->quick_push (vec_oprnd
);
3905 /* For conversion in multiple steps, continue to get operands
3906 recursively.  */
3907 if (multi_step_cvt)
3908 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3912 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3913 For multi-step conversions store the resulting vectors and call the function
3914 recursively.  */
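/* Illustrative example (a sketch): demoting int vectors to chars goes
   through an intermediate short type; pairs of int vectors are first
   packed into short vectors and the recursive call then packs pairs of
   those into char vectors.  */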
3917 vect_create_vectorized_demotion_stmts (vec
<tree
> *vec_oprnds
,
3918 int multi_step_cvt
, gimple
*stmt
,
3920 gimple_stmt_iterator
*gsi
,
3921 slp_tree slp_node
, enum tree_code code
,
3922 stmt_vec_info
*prev_stmt_info
)
3925 tree vop0
, vop1
, new_tmp
, vec_dest
;
3927 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3929 vec_dest
= vec_dsts
.pop ();
3931 for (i
= 0; i
< vec_oprnds
->length (); i
+= 2)
3933 /* Create demotion operation. */
3934 vop0
= (*vec_oprnds
)[i
];
3935 vop1
= (*vec_oprnds
)[i
+ 1];
3936 new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
);
3937 new_tmp
= make_ssa_name (vec_dest
, new_stmt
);
3938 gimple_assign_set_lhs (new_stmt
, new_tmp
);
3939 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3942 /* Store the resulting vector for next recursive call. */
3943 (*vec_oprnds
)[i
/2] = new_tmp
;
3946 /* This is the last step of the conversion sequence. Store the
3947 vectors in SLP_NODE or in vector info of the scalar statement
3948 (or in STMT_VINFO_RELATED_STMT chain). */
3950 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
3953 if (!*prev_stmt_info
)
3954 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
3956 STMT_VINFO_RELATED_STMT (*prev_stmt_info
) = new_stmt
;
3958 *prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3963 /* For multi-step demotion operations we first generate demotion operations
3964 from the source type to the intermediate types, and then combine the
3965 results (stored in VEC_OPRNDS) in demotion operation to the destination
3966 type.  */
3969 /* At each level of recursion we have half of the operands we had at the
3970 previous level.  */
3971 vec_oprnds->truncate ((i+1)/2);
3972 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3973 stmt, vec_dsts, gsi, slp_node,
3974 VEC_PACK_TRUNC_EXPR,
3978 vec_dsts.quick_push (vec_dest);

/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
                                        vec<tree> *vec_oprnds1,
                                        gimple *stmt, tree vec_dest,
                                        gimple_stmt_iterator *gsi,
                                        enum tree_code code1,
                                        enum tree_code code2, tree decl1,
                                        tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
        vop1 = (*vec_oprnds1)[i];
      else
        vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
        {
          new_tmp1 = gimple_call_lhs (new_stmt1);
          new_tmp2 = gimple_call_lhs (new_stmt2);
        }
      else
        {
          new_tmp1 = gimple_assign_lhs (new_stmt1);
          new_tmp2 = gimple_assign_lhs (new_stmt2);
        }

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
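
/* An illustrative example (assumed codes, not taken from this file): widening
   one V8HI operand to two V4SI results.  CODE1/CODE2 would be the lo/hi pair
   returned by supportable_widening_operation, e.g.

     vlo = VEC_UNPACK_LO_EXPR <vh>;
     vhi = VEC_UNPACK_HI_EXPR <vh>;

   Both halves are pushed into VEC_TMP, so the caller sees twice as many
   defs after each promotion step.  */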

/* Function vectorizable_conversion.

   Check if STMT performs a conversion operation, that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, slp_tree slp_node)
{
4045 tree op0
, op1
= NULL_TREE
;
4046 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
;
4047 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4048 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4049 enum tree_code code
, code1
= ERROR_MARK
, code2
= ERROR_MARK
;
4050 enum tree_code codecvt1
= ERROR_MARK
, codecvt2
= ERROR_MARK
;
4051 tree decl1
= NULL_TREE
, decl2
= NULL_TREE
;
4054 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
4056 gimple
*new_stmt
= NULL
;
4057 stmt_vec_info prev_stmt_info
;
4060 tree vectype_out
, vectype_in
;
4062 tree lhs_type
, rhs_type
;
4063 enum { NARROW
, NONE
, WIDEN
} modifier
;
4064 vec
<tree
> vec_oprnds0
= vNULL
;
4065 vec
<tree
> vec_oprnds1
= vNULL
;
4067 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4068 vec_info
*vinfo
= stmt_info
->vinfo
;
4069 int multi_step_cvt
= 0;
4070 vec
<tree
> interm_types
= vNULL
;
4071 tree last_oprnd
, intermediate_type
, cvt_type
= NULL_TREE
;
4073 unsigned short fltsz
;
4075 /* Is STMT a vectorizable conversion? */
4077 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4080 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
4084 if (!is_gimple_assign (stmt
))
4087 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
4090 code
= gimple_assign_rhs_code (stmt
);
4091 if (!CONVERT_EXPR_CODE_P (code
)
4092 && code
!= FIX_TRUNC_EXPR
4093 && code
!= FLOAT_EXPR
4094 && code
!= WIDEN_MULT_EXPR
4095 && code
!= WIDEN_LSHIFT_EXPR
)
4098 op_type
= TREE_CODE_LENGTH (code
);
4100 /* Check types of lhs and rhs. */
4101 scalar_dest
= gimple_assign_lhs (stmt
);
4102 lhs_type
= TREE_TYPE (scalar_dest
);
4103 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
4105 op0
= gimple_assign_rhs1 (stmt
);
4106 rhs_type
= TREE_TYPE (op0
);
4108 if ((code
!= FIX_TRUNC_EXPR
&& code
!= FLOAT_EXPR
)
4109 && !((INTEGRAL_TYPE_P (lhs_type
)
4110 && INTEGRAL_TYPE_P (rhs_type
))
4111 || (SCALAR_FLOAT_TYPE_P (lhs_type
)
4112 && SCALAR_FLOAT_TYPE_P (rhs_type
))))
4115 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out
)
4116 && ((INTEGRAL_TYPE_P (lhs_type
)
4117 && !type_has_mode_precision_p (lhs_type
))
4118 || (INTEGRAL_TYPE_P (rhs_type
)
4119 && !type_has_mode_precision_p (rhs_type
))))
4121 if (dump_enabled_p ())
4122 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4123 "type conversion to/from bit-precision unsupported."
4128 /* Check the operands of the operation. */
4129 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype_in
))
4131 if (dump_enabled_p ())
4132 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4133 "use not simple.\n");
4136 if (op_type
== binary_op
)
4140 op1
= gimple_assign_rhs2 (stmt
);
4141 gcc_assert (code
== WIDEN_MULT_EXPR
|| code
== WIDEN_LSHIFT_EXPR
);
4142 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4144 if (CONSTANT_CLASS_P (op0
))
4145 ok
= vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1], &vectype_in
);
4147 ok
= vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1]);
4151 if (dump_enabled_p ())
4152 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4153 "use not simple.\n");
4158 /* If op0 is an external or constant defs use a vector type of
4159 the same size as the output vector type. */
4161 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
4163 gcc_assert (vectype_in
);
4166 if (dump_enabled_p ())
4168 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4169 "no vectype for scalar type ");
4170 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
4171 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
4177 if (VECTOR_BOOLEAN_TYPE_P (vectype_out
)
4178 && !VECTOR_BOOLEAN_TYPE_P (vectype_in
))
4180 if (dump_enabled_p ())
4182 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4183 "can't convert between boolean and non "
4185 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
4186 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
4192 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
4193 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
4194 if (nunits_in
< nunits_out
)
4196 else if (nunits_out
== nunits_in
)
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
4206 else if (modifier
== NARROW
)
4207 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_out
;
4209 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
4211 /* Sanity check: make sure that at least one copy of the vectorized stmt
4212 needs to be generated. */
4213 gcc_assert (ncopies
>= 1);
4215 machine_mode lhs_mode
= SCALAR_TYPE_MODE (lhs_type
);
4216 machine_mode rhs_mode
= SCALAR_TYPE_MODE (rhs_type
);
4218 /* Supportable by target? */
4222 if (code
!= FIX_TRUNC_EXPR
&& code
!= FLOAT_EXPR
)
4224 if (supportable_convert_operation (code
, vectype_out
, vectype_in
,
4229 if (dump_enabled_p ())
4230 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4231 "conversion not supported by target.\n");
4235 if (supportable_widening_operation (code
, stmt
, vectype_out
, vectype_in
,
4236 &code1
, &code2
, &multi_step_cvt
,
4239 /* Binary widening operation can only be supported directly by the
4241 gcc_assert (!(multi_step_cvt
&& op_type
== binary_op
));
4245 if (code
!= FLOAT_EXPR
4246 || GET_MODE_SIZE (lhs_mode
) <= GET_MODE_SIZE (rhs_mode
))
4249 fltsz
= GET_MODE_SIZE (lhs_mode
);
4250 FOR_EACH_2XWIDER_MODE (rhs_mode
, rhs_mode
)
4252 if (GET_MODE_SIZE (rhs_mode
) > fltsz
)
4256 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode
), 0);
4257 cvt_type
= get_same_sized_vectype (cvt_type
, vectype_in
);
4258 if (cvt_type
== NULL_TREE
)
4261 if (GET_MODE_SIZE (rhs_mode
) == fltsz
)
4263 if (!supportable_convert_operation (code
, vectype_out
,
4264 cvt_type
, &decl1
, &codecvt1
))
4267 else if (!supportable_widening_operation (code
, stmt
, vectype_out
,
4268 cvt_type
, &codecvt1
,
4269 &codecvt2
, &multi_step_cvt
,
4273 gcc_assert (multi_step_cvt
== 0);
4275 if (supportable_widening_operation (NOP_EXPR
, stmt
, cvt_type
,
4276 vectype_in
, &code1
, &code2
,
4277 &multi_step_cvt
, &interm_types
))
4281 if (rhs_mode
== VOIDmode
|| GET_MODE_SIZE (rhs_mode
) > fltsz
)
4284 if (GET_MODE_SIZE (rhs_mode
) == fltsz
)
4285 codecvt2
= ERROR_MARK
;
4289 interm_types
.safe_push (cvt_type
);
4290 cvt_type
= NULL_TREE
;
4295 gcc_assert (op_type
== unary_op
);
4296 if (supportable_narrowing_operation (code
, vectype_out
, vectype_in
,
4297 &code1
, &multi_step_cvt
,
4301 if (code
!= FIX_TRUNC_EXPR
4302 || GET_MODE_SIZE (lhs_mode
) >= GET_MODE_SIZE (rhs_mode
))
4306 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode
), 0);
4307 cvt_type
= get_same_sized_vectype (cvt_type
, vectype_in
);
4308 if (cvt_type
== NULL_TREE
)
4310 if (!supportable_convert_operation (code
, cvt_type
, vectype_in
,
4313 if (supportable_narrowing_operation (NOP_EXPR
, vectype_out
, cvt_type
,
4314 &code1
, &multi_step_cvt
,
4323 if (!vec_stmt
) /* transformation not required. */
4325 if (dump_enabled_p ())
4326 dump_printf_loc (MSG_NOTE
, vect_location
,
4327 "=== vectorizable_conversion ===\n");
4328 if (code
== FIX_TRUNC_EXPR
|| code
== FLOAT_EXPR
)
4330 STMT_VINFO_TYPE (stmt_info
) = type_conversion_vec_info_type
;
4331 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
4333 else if (modifier
== NARROW
)
4335 STMT_VINFO_TYPE (stmt_info
) = type_demotion_vec_info_type
;
4336 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
4340 STMT_VINFO_TYPE (stmt_info
) = type_promotion_vec_info_type
;
4341 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
4343 interm_types
.release ();
4348 if (dump_enabled_p ())
4349 dump_printf_loc (MSG_NOTE
, vect_location
,
4350 "transform conversion. ncopies = %d.\n", ncopies
);
4352 if (op_type
== binary_op
)
4354 if (CONSTANT_CLASS_P (op0
))
4355 op0
= fold_convert (TREE_TYPE (op1
), op0
);
4356 else if (CONSTANT_CLASS_P (op1
))
4357 op1
= fold_convert (TREE_TYPE (op0
), op1
);
  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
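  /* Purely as an illustration: a narrowing int -> char conversion that goes
     through an intermediate short type needs one vector destination per step
     (short vectors for the intermediate step, char vectors for the last).
     Collecting the destinations here, final type first, lets
     vect_create_vectorized_demotion_stmts () pop the destination that matches
     the step emitted at each recursion level.  */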
4365 auto_vec
<tree
> vec_dsts (multi_step_cvt
+ 1);
4366 vec_dest
= vect_create_destination_var (scalar_dest
,
4367 (cvt_type
&& modifier
== WIDEN
)
4368 ? cvt_type
: vectype_out
);
4369 vec_dsts
.quick_push (vec_dest
);
4373 for (i
= interm_types
.length () - 1;
4374 interm_types
.iterate (i
, &intermediate_type
); i
--)
4376 vec_dest
= vect_create_destination_var (scalar_dest
,
4378 vec_dsts
.quick_push (vec_dest
);
4383 vec_dest
= vect_create_destination_var (scalar_dest
,
4385 ? vectype_out
: cvt_type
);
4389 if (modifier
== WIDEN
)
4391 vec_oprnds0
.create (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1);
4392 if (op_type
== binary_op
)
4393 vec_oprnds1
.create (1);
4395 else if (modifier
== NARROW
)
4396 vec_oprnds0
.create (
4397 2 * (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1));
4399 else if (code
== WIDEN_LSHIFT_EXPR
)
4400 vec_oprnds1
.create (slp_node
->vec_stmts_size
);
4403 prev_stmt_info
= NULL
;
4407 for (j
= 0; j
< ncopies
; j
++)
4410 vect_get_vec_defs (op0
, NULL
, stmt
, &vec_oprnds0
, NULL
, slp_node
);
4412 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, NULL
);
4414 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4416 /* Arguments are ready, create the new vector stmt. */
4417 if (code1
== CALL_EXPR
)
4419 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4420 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4421 gimple_call_set_lhs (new_stmt
, new_temp
);
4425 gcc_assert (TREE_CODE_LENGTH (code1
) == unary_op
);
4426 new_stmt
= gimple_build_assign (vec_dest
, code1
, vop0
);
4427 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4428 gimple_assign_set_lhs (new_stmt
, new_temp
);
4431 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4433 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4436 if (!prev_stmt_info
)
4437 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4439 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4440 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
4451 for (j
= 0; j
< ncopies
; j
++)
4458 if (code
== WIDEN_LSHIFT_EXPR
)
4463 /* Store vec_oprnd1 for every vector stmt to be created
4464 for SLP_NODE. We check during the analysis that all
4465 the shift arguments are the same. */
4466 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
4467 vec_oprnds1
.quick_push (vec_oprnd1
);
4469 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4473 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
,
4474 &vec_oprnds1
, slp_node
);
4478 vec_oprnd0
= vect_get_vec_def_for_operand (op0
, stmt
);
4479 vec_oprnds0
.quick_push (vec_oprnd0
);
4480 if (op_type
== binary_op
)
4482 if (code
== WIDEN_LSHIFT_EXPR
)
4485 vec_oprnd1
= vect_get_vec_def_for_operand (op1
, stmt
);
4486 vec_oprnds1
.quick_push (vec_oprnd1
);
4492 vec_oprnd0
= vect_get_vec_def_for_stmt_copy (dt
[0], vec_oprnd0
);
4493 vec_oprnds0
.truncate (0);
4494 vec_oprnds0
.quick_push (vec_oprnd0
);
4495 if (op_type
== binary_op
)
4497 if (code
== WIDEN_LSHIFT_EXPR
)
4500 vec_oprnd1
= vect_get_vec_def_for_stmt_copy (dt
[1],
4502 vec_oprnds1
.truncate (0);
4503 vec_oprnds1
.quick_push (vec_oprnd1
);
4507 /* Arguments are ready. Create the new vector stmts. */
4508 for (i
= multi_step_cvt
; i
>= 0; i
--)
4510 tree this_dest
= vec_dsts
[i
];
4511 enum tree_code c1
= code1
, c2
= code2
;
4512 if (i
== 0 && codecvt2
!= ERROR_MARK
)
4517 vect_create_vectorized_promotion_stmts (&vec_oprnds0
,
4519 stmt
, this_dest
, gsi
,
4520 c1
, c2
, decl1
, decl2
,
4524 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4528 if (codecvt1
== CALL_EXPR
)
4530 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4531 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4532 gimple_call_set_lhs (new_stmt
, new_temp
);
4536 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
4537 new_temp
= make_ssa_name (vec_dest
);
4538 new_stmt
= gimple_build_assign (new_temp
, codecvt1
,
4542 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4545 new_stmt
= SSA_NAME_DEF_STMT (vop0
);
4548 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4551 if (!prev_stmt_info
)
4552 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
4554 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4555 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4560 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
4568 for (j
= 0; j
< ncopies
; j
++)
4572 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4576 vec_oprnds0
.truncate (0);
4577 vect_get_loop_based_defs (&last_oprnd
, stmt
, dt
[0], &vec_oprnds0
,
4578 vect_pow2 (multi_step_cvt
) - 1);
4581 /* Arguments are ready. Create the new vector stmts. */
4583 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4585 if (codecvt1
== CALL_EXPR
)
4587 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4588 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4589 gimple_call_set_lhs (new_stmt
, new_temp
);
4593 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
4594 new_temp
= make_ssa_name (vec_dest
);
4595 new_stmt
= gimple_build_assign (new_temp
, codecvt1
,
4599 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4600 vec_oprnds0
[i
] = new_temp
;
4603 vect_create_vectorized_demotion_stmts (&vec_oprnds0
, multi_step_cvt
,
4604 stmt
, vec_dsts
, gsi
,
4609 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
4613 vec_oprnds0
.release ();
4614 vec_oprnds1
.release ();
4615 interm_types
.release ();

/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, slp_tree slp_node)
{
4635 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4636 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4639 enum vect_def_type dt
[1] = {vect_unknown_def_type
};
4643 vec
<tree
> vec_oprnds
= vNULL
;
4645 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4646 vec_info
*vinfo
= stmt_info
->vinfo
;
4647 gimple
*new_stmt
= NULL
;
4648 stmt_vec_info prev_stmt_info
= NULL
;
4649 enum tree_code code
;
4652 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4655 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
4659 /* Is vectorizable assignment? */
4660 if (!is_gimple_assign (stmt
))
4663 scalar_dest
= gimple_assign_lhs (stmt
);
4664 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
4667 code
= gimple_assign_rhs_code (stmt
);
4668 if (gimple_assign_single_p (stmt
)
4669 || code
== PAREN_EXPR
4670 || CONVERT_EXPR_CODE_P (code
))
4671 op
= gimple_assign_rhs1 (stmt
);
4675 if (code
== VIEW_CONVERT_EXPR
)
4676 op
= TREE_OPERAND (op
, 0);
4678 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
4679 unsigned int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
4687 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
4689 gcc_assert (ncopies
>= 1);
4691 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
[0], &vectype_in
))
4693 if (dump_enabled_p ())
4694 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4695 "use not simple.\n");
4699 /* We can handle NOP_EXPR conversions that do not change the number
4700 of elements or the vector size. */
4701 if ((CONVERT_EXPR_CODE_P (code
)
4702 || code
== VIEW_CONVERT_EXPR
)
4704 || TYPE_VECTOR_SUBPARTS (vectype_in
) != nunits
4705 || (GET_MODE_SIZE (TYPE_MODE (vectype
))
4706 != GET_MODE_SIZE (TYPE_MODE (vectype_in
)))))
4709 /* We do not handle bit-precision changes. */
4710 if ((CONVERT_EXPR_CODE_P (code
)
4711 || code
== VIEW_CONVERT_EXPR
)
4712 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest
))
4713 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest
))
4714 || !type_has_mode_precision_p (TREE_TYPE (op
)))
4715 /* But a conversion that does not change the bit-pattern is ok. */
4716 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest
))
4717 > TYPE_PRECISION (TREE_TYPE (op
)))
4718 && TYPE_UNSIGNED (TREE_TYPE (op
)))
4719 /* Conversion between boolean types of different sizes is
4720 a simple assignment in case their vectypes are same
4722 && (!VECTOR_BOOLEAN_TYPE_P (vectype
)
4723 || !VECTOR_BOOLEAN_TYPE_P (vectype_in
)))
4725 if (dump_enabled_p ())
4726 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4727 "type conversion to/from bit-precision "
4732 if (!vec_stmt
) /* transformation not required. */
4734 STMT_VINFO_TYPE (stmt_info
) = assignment_vec_info_type
;
4735 if (dump_enabled_p ())
4736 dump_printf_loc (MSG_NOTE
, vect_location
,
4737 "=== vectorizable_assignment ===\n");
4738 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
4743 if (dump_enabled_p ())
4744 dump_printf_loc (MSG_NOTE
, vect_location
, "transform assignment.\n");
4747 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
4750 for (j
= 0; j
< ncopies
; j
++)
4754 vect_get_vec_defs (op
, NULL
, stmt
, &vec_oprnds
, NULL
, slp_node
);
4756 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds
, NULL
);
4758 /* Arguments are ready. create the new vector stmt. */
4759 FOR_EACH_VEC_ELT (vec_oprnds
, i
, vop
)
4761 if (CONVERT_EXPR_CODE_P (code
)
4762 || code
== VIEW_CONVERT_EXPR
)
4763 vop
= build1 (VIEW_CONVERT_EXPR
, vectype
, vop
);
4764 new_stmt
= gimple_build_assign (vec_dest
, vop
);
4765 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4766 gimple_assign_set_lhs (new_stmt
, new_temp
);
4767 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4769 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4776 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4778 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4780 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4783 vec_oprnds
.release ();

/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
          || (optab_handler (optab, TYPE_MODE (vectype))
              == CODE_FOR_nothing))
        return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
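
/* For illustration only (not part of the vectorizer): the two optab kinds
   queried above correspond to the two shapes of shift that can reach the
   vectorizer, e.g.

     for (i = 0; i < n; i++) out[i] = in[i] << 3;         amount is a scalar
     for (i = 0; i < n; i++) out[i] = in[i] << amt[i];    amount is a vector

   A target may provide either form (optab_scalar vs. optab_vector);
   vectorizable_shift () below picks between them per statement.  */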

/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
                    gimple **vec_stmt, slp_tree slp_node)
{
4837 tree op0
, op1
= NULL
;
4838 tree vec_oprnd1
= NULL_TREE
;
4839 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4841 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4842 enum tree_code code
;
4843 machine_mode vec_mode
;
4847 machine_mode optab_op2_mode
;
4849 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
4851 gimple
*new_stmt
= NULL
;
4852 stmt_vec_info prev_stmt_info
;
4859 vec
<tree
> vec_oprnds0
= vNULL
;
4860 vec
<tree
> vec_oprnds1
= vNULL
;
4863 bool scalar_shift_arg
= true;
4864 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4865 vec_info
*vinfo
= stmt_info
->vinfo
;
4868 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4871 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
4875 /* Is STMT a vectorizable binary/unary operation? */
4876 if (!is_gimple_assign (stmt
))
4879 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
4882 code
= gimple_assign_rhs_code (stmt
);
4884 if (!(code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
4885 || code
== RROTATE_EXPR
))
4888 scalar_dest
= gimple_assign_lhs (stmt
);
4889 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
4890 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest
)))
4892 if (dump_enabled_p ())
4893 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4894 "bit-precision shifts not supported.\n");
4898 op0
= gimple_assign_rhs1 (stmt
);
4899 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype
))
4901 if (dump_enabled_p ())
4902 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4903 "use not simple.\n");
4906 /* If op0 is an external or constant def use a vector type with
4907 the same size as the output vector type. */
4909 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
4911 gcc_assert (vectype
);
4914 if (dump_enabled_p ())
4915 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4916 "no vectype for scalar type\n");
4920 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
4921 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
4922 if (nunits_out
!= nunits_in
)
4925 op1
= gimple_assign_rhs2 (stmt
);
4926 if (!vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1], &op1_vectype
))
4928 if (dump_enabled_p ())
4929 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4930 "use not simple.\n");
4935 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
4945 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
4947 gcc_assert (ncopies
>= 1);
4949 /* Determine whether the shift amount is a vector, or scalar. If the
4950 shift/rotate amount is a vector, use the vector/vector shift optabs. */
4952 if ((dt
[1] == vect_internal_def
4953 || dt
[1] == vect_induction_def
)
4955 scalar_shift_arg
= false;
4956 else if (dt
[1] == vect_constant_def
4957 || dt
[1] == vect_external_def
4958 || dt
[1] == vect_internal_def
)
      /* In SLP, need to check whether the shift count is the same,
         in loops if it is a constant or invariant, it is always
         a scalar shift.  */
4965 vec
<gimple
*> stmts
= SLP_TREE_SCALAR_STMTS (slp_node
);
4968 FOR_EACH_VEC_ELT (stmts
, k
, slpstmt
)
4969 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt
), op1
, 0))
4970 scalar_shift_arg
= false;
      /* If the shift amount is computed by a pattern stmt we cannot
         use the scalar amount directly thus give up and use a vector
         shift.  */
4976 if (dt
[1] == vect_internal_def
)
4978 gimple
*def
= SSA_NAME_DEF_STMT (op1
);
4979 if (is_pattern_stmt_p (vinfo_for_stmt (def
)))
4980 scalar_shift_arg
= false;
4985 if (dump_enabled_p ())
4986 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4987 "operand mode requires invariant argument.\n");
4991 /* Vector shifted by vector. */
4992 if (!scalar_shift_arg
)
4994 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
4995 if (dump_enabled_p ())
4996 dump_printf_loc (MSG_NOTE
, vect_location
,
4997 "vector/vector shift/rotate found.\n");
5000 op1_vectype
= get_same_sized_vectype (TREE_TYPE (op1
), vectype_out
);
5001 if (op1_vectype
== NULL_TREE
5002 || TYPE_MODE (op1_vectype
) != TYPE_MODE (vectype
))
5004 if (dump_enabled_p ())
5005 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5006 "unusable type for last operand in"
5007 " vector/vector shift/rotate.\n");
5011 /* See if the machine has a vector shifted by scalar insn and if not
5012 then see if it has a vector shifted by vector insn. */
5015 optab
= optab_for_tree_code (code
, vectype
, optab_scalar
);
5017 && optab_handler (optab
, TYPE_MODE (vectype
)) != CODE_FOR_nothing
)
5019 if (dump_enabled_p ())
5020 dump_printf_loc (MSG_NOTE
, vect_location
,
5021 "vector/scalar shift/rotate found.\n");
5025 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
5027 && (optab_handler (optab
, TYPE_MODE (vectype
))
5028 != CODE_FOR_nothing
))
5030 scalar_shift_arg
= false;
5032 if (dump_enabled_p ())
5033 dump_printf_loc (MSG_NOTE
, vect_location
,
5034 "vector/vector shift/rotate found.\n");
5036 /* Unlike the other binary operators, shifts/rotates have
5037 the rhs being int, instead of the same type as the lhs,
5038 so make sure the scalar is the right type if we are
5039 dealing with vectors of long long/long/short/char. */
5040 if (dt
[1] == vect_constant_def
)
5041 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
5042 else if (!useless_type_conversion_p (TREE_TYPE (vectype
),
5046 && TYPE_MODE (TREE_TYPE (vectype
))
5047 != TYPE_MODE (TREE_TYPE (op1
)))
5049 if (dump_enabled_p ())
5050 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5051 "unusable type for last operand in"
5052 " vector/vector shift/rotate.\n");
5055 if (vec_stmt
&& !slp_node
)
5057 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
5058 op1
= vect_init_vector (stmt
, op1
,
5059 TREE_TYPE (vectype
), NULL
);
5066 /* Supportable by target? */
5069 if (dump_enabled_p ())
5070 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5074 vec_mode
= TYPE_MODE (vectype
);
5075 icode
= (int) optab_handler (optab
, vec_mode
);
5076 if (icode
== CODE_FOR_nothing
)
5078 if (dump_enabled_p ())
5079 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5080 "op not supported by target.\n");
5081 /* Check only during analysis. */
5082 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
5083 || (vf
< vect_min_worthwhile_factor (code
)
5086 if (dump_enabled_p ())
5087 dump_printf_loc (MSG_NOTE
, vect_location
,
5088 "proceeding using word mode.\n");
5091 /* Worthwhile without SIMD support? Check only during analysis. */
5092 if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
5093 && vf
< vect_min_worthwhile_factor (code
)
5096 if (dump_enabled_p ())
5097 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5098 "not worthwhile without SIMD support.\n");
5102 if (!vec_stmt
) /* transformation not required. */
5104 STMT_VINFO_TYPE (stmt_info
) = shift_vec_info_type
;
5105 if (dump_enabled_p ())
5106 dump_printf_loc (MSG_NOTE
, vect_location
,
5107 "=== vectorizable_shift ===\n");
5108 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
5114 if (dump_enabled_p ())
5115 dump_printf_loc (MSG_NOTE
, vect_location
,
5116 "transform binary/unary operation.\n");
5119 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
5121 prev_stmt_info
= NULL
;
5122 for (j
= 0; j
< ncopies
; j
++)
5127 if (scalar_shift_arg
)
          /* Vector shl and shr insn patterns can be defined with scalar
             operand 2 (shift operand).  In this case, use constant or loop
             invariant op1 directly, without extending it to vector mode
             first.  */
5133 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
5134 if (!VECTOR_MODE_P (optab_op2_mode
))
5136 if (dump_enabled_p ())
5137 dump_printf_loc (MSG_NOTE
, vect_location
,
5138 "operand 1 using scalar mode.\n");
5140 vec_oprnds1
.create (slp_node
? slp_node
->vec_stmts_size
: 1);
5141 vec_oprnds1
.quick_push (vec_oprnd1
);
5144 /* Store vec_oprnd1 for every vector stmt to be created
5145 for SLP_NODE. We check during the analysis that all
5146 the shift arguments are the same.
5147 TODO: Allow different constants for different vector
5148 stmts generated for an SLP instance. */
5149 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
5150 vec_oprnds1
.quick_push (vec_oprnd1
);
5155 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
5156 (a special case for certain kind of vector shifts); otherwise,
5157 operand 1 should be of a vector type (the usual case). */
5159 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
5162 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
5166 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
5168 /* Arguments are ready. Create the new vector stmt. */
5169 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
5171 vop1
= vec_oprnds1
[i
];
5172 new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
);
5173 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5174 gimple_assign_set_lhs (new_stmt
, new_temp
);
5175 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5177 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
5184 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5186 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5187 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5190 vec_oprnds0
.release ();
5191 vec_oprnds1
.release ();

/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, slp_tree slp_node)
{
5211 tree op0
, op1
= NULL_TREE
, op2
= NULL_TREE
;
5212 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5214 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5215 enum tree_code code
;
5216 machine_mode vec_mode
;
5220 bool target_support_p
;
5222 enum vect_def_type dt
[3]
5223 = {vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
};
5225 gimple
*new_stmt
= NULL
;
5226 stmt_vec_info prev_stmt_info
;
5232 vec
<tree
> vec_oprnds0
= vNULL
;
5233 vec
<tree
> vec_oprnds1
= vNULL
;
5234 vec
<tree
> vec_oprnds2
= vNULL
;
5235 tree vop0
, vop1
, vop2
;
5236 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5237 vec_info
*vinfo
= stmt_info
->vinfo
;
5240 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5243 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
5247 /* Is STMT a vectorizable binary/unary operation? */
5248 if (!is_gimple_assign (stmt
))
5251 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
5254 code
= gimple_assign_rhs_code (stmt
);
5256 /* For pointer addition, we should use the normal plus for
5257 the vector addition. */
5258 if (code
== POINTER_PLUS_EXPR
)
5261 /* Support only unary or binary operations. */
5262 op_type
= TREE_CODE_LENGTH (code
);
5263 if (op_type
!= unary_op
&& op_type
!= binary_op
&& op_type
!= ternary_op
)
5265 if (dump_enabled_p ())
5266 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5267 "num. args = %d (not unary/binary/ternary op).\n",
5272 scalar_dest
= gimple_assign_lhs (stmt
);
5273 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
5277 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out
)
5278 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest
))
5279 /* Exception are bitwise binary operations. */
5280 && code
!= BIT_IOR_EXPR
5281 && code
!= BIT_XOR_EXPR
5282 && code
!= BIT_AND_EXPR
)
5284 if (dump_enabled_p ())
5285 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5286 "bit-precision arithmetic not supported.\n");
5290 op0
= gimple_assign_rhs1 (stmt
);
5291 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype
))
5293 if (dump_enabled_p ())
5294 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5295 "use not simple.\n");
5298 /* If op0 is an external or constant def use a vector type with
5299 the same size as the output vector type. */
      /* For boolean type we cannot determine vectype by
         invariant value (don't know whether it is a vector
         of booleans or vector of integers).  We use output
         vectype because operations on boolean don't change
         type.  */
5307 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0
)))
5309 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest
)))
5311 if (dump_enabled_p ())
5312 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5313 "not supported operation on bool value.\n");
5316 vectype
= vectype_out
;
5319 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
5322 gcc_assert (vectype
);
5325 if (dump_enabled_p ())
5327 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5328 "no vectype for scalar type ");
5329 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
5331 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
5337 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
5338 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
5339 if (nunits_out
!= nunits_in
)
5342 if (op_type
== binary_op
|| op_type
== ternary_op
)
5344 op1
= gimple_assign_rhs2 (stmt
);
5345 if (!vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1]))
5347 if (dump_enabled_p ())
5348 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5349 "use not simple.\n");
5353 if (op_type
== ternary_op
)
5355 op2
= gimple_assign_rhs3 (stmt
);
5356 if (!vect_is_simple_use (op2
, vinfo
, &def_stmt
, &dt
[2]))
5358 if (dump_enabled_p ())
5359 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5360 "use not simple.\n");
5366 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
5376 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
5378 gcc_assert (ncopies
>= 1);
5380 /* Shifts are handled in vectorizable_shift (). */
5381 if (code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
5382 || code
== RROTATE_EXPR
)
5385 /* Supportable by target? */
5387 vec_mode
= TYPE_MODE (vectype
);
5388 if (code
== MULT_HIGHPART_EXPR
)
5389 target_support_p
= can_mult_highpart_p (vec_mode
, TYPE_UNSIGNED (vectype
));
5392 optab
= optab_for_tree_code (code
, vectype
, optab_default
);
5395 if (dump_enabled_p ())
5396 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5400 target_support_p
= (optab_handler (optab
, vec_mode
)
5401 != CODE_FOR_nothing
);
5404 if (!target_support_p
)
5406 if (dump_enabled_p ())
5407 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5408 "op not supported by target.\n");
5409 /* Check only during analysis. */
5410 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
5411 || (!vec_stmt
&& vf
< vect_min_worthwhile_factor (code
)))
5413 if (dump_enabled_p ())
5414 dump_printf_loc (MSG_NOTE
, vect_location
,
5415 "proceeding using word mode.\n");
5418 /* Worthwhile without SIMD support? Check only during analysis. */
5419 if (!VECTOR_MODE_P (vec_mode
)
5421 && vf
< vect_min_worthwhile_factor (code
))
5423 if (dump_enabled_p ())
5424 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5425 "not worthwhile without SIMD support.\n");
5429 if (!vec_stmt
) /* transformation not required. */
5431 STMT_VINFO_TYPE (stmt_info
) = op_vec_info_type
;
5432 if (dump_enabled_p ())
5433 dump_printf_loc (MSG_NOTE
, vect_location
,
5434 "=== vectorizable_operation ===\n");
5435 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
5441 if (dump_enabled_p ())
5442 dump_printf_loc (MSG_NOTE
, vect_location
,
5443 "transform binary/unary operation.\n");
5446 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
5501 prev_stmt_info
= NULL
;
5502 for (j
= 0; j
< ncopies
; j
++)
5507 if (op_type
== binary_op
|| op_type
== ternary_op
)
5508 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
5511 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
5513 if (op_type
== ternary_op
)
5514 vect_get_vec_defs (op2
, NULL_TREE
, stmt
, &vec_oprnds2
, NULL
,
5519 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
5520 if (op_type
== ternary_op
)
5522 tree vec_oprnd
= vec_oprnds2
.pop ();
5523 vec_oprnds2
.quick_push (vect_get_vec_def_for_stmt_copy (dt
[2],
5528 /* Arguments are ready. Create the new vector stmt. */
5529 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
5531 vop1
= ((op_type
== binary_op
|| op_type
== ternary_op
)
5532 ? vec_oprnds1
[i
] : NULL_TREE
);
5533 vop2
= ((op_type
== ternary_op
)
5534 ? vec_oprnds2
[i
] : NULL_TREE
);
5535 new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
, vop2
);
5536 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5537 gimple_assign_set_lhs (new_stmt
, new_temp
);
5538 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5540 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
5547 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5549 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5550 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5553 vec_oprnds0
.release ();
5554 vec_oprnds1
.release ();
5555 vec_oprnds2
.release ();

/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
5569 if (DR_VECT_AUX (dr
)->base_misaligned
)
5571 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
5572 tree base_decl
= DR_VECT_AUX (dr
)->base_decl
;
5574 if (decl_in_symtab_p (base_decl
))
5575 symtab_node::get (base_decl
)->increase_alignment (TYPE_ALIGN (vectype
));
5578 SET_DECL_ALIGN (base_decl
, TYPE_ALIGN (vectype
));
5579 DECL_USER_ALIGN (base_decl
) = 1;
5581 DR_VECT_AUX (dr
)->base_misaligned
= false;

/* Function get_group_alias_ptr_type.

   Return the alias type for the group starting at FIRST_STMT.  */

static tree
get_group_alias_ptr_type (gimple *first_stmt)
{
  struct data_reference *first_dr, *next_dr;
  gimple *next_stmt;

  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
  while (next_stmt)
    {
      next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
      if (get_alias_set (DR_REF (first_dr))
          != get_alias_set (DR_REF (next_dr)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "conflicting alias set types.\n");
          return ptr_type_node;
        }
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
  return reference_alias_ptr_type (DR_REF (first_dr));
}

/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                    slp_tree slp_node)
{
5630 tree vec_oprnd
= NULL_TREE
;
5631 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5632 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
5634 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5635 struct loop
*loop
= NULL
;
5636 machine_mode vec_mode
;
5638 enum dr_alignment_support alignment_support_scheme
;
5640 enum vect_def_type dt
;
5641 stmt_vec_info prev_stmt_info
= NULL
;
5642 tree dataref_ptr
= NULL_TREE
;
5643 tree dataref_offset
= NULL_TREE
;
5644 gimple
*ptr_incr
= NULL
;
5647 gimple
*next_stmt
, *first_stmt
;
5649 unsigned int group_size
, i
;
5650 vec
<tree
> oprnds
= vNULL
;
5651 vec
<tree
> result_chain
= vNULL
;
5653 tree offset
= NULL_TREE
;
5654 vec
<tree
> vec_oprnds
= vNULL
;
5655 bool slp
= (slp_node
!= NULL
);
5656 unsigned int vec_num
;
5657 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5658 vec_info
*vinfo
= stmt_info
->vinfo
;
5660 gather_scatter_info gs_info
;
5661 enum vect_def_type scatter_src_dt
= vect_unknown_def_type
;
5664 vec_load_store_type vls_type
;
5667 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5670 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
5674 /* Is vectorizable store? */
5676 if (!is_gimple_assign (stmt
))
5679 scalar_dest
= gimple_assign_lhs (stmt
);
5680 if (TREE_CODE (scalar_dest
) == VIEW_CONVERT_EXPR
5681 && is_pattern_stmt_p (stmt_info
))
5682 scalar_dest
= TREE_OPERAND (scalar_dest
, 0);
5683 if (TREE_CODE (scalar_dest
) != ARRAY_REF
5684 && TREE_CODE (scalar_dest
) != BIT_FIELD_REF
5685 && TREE_CODE (scalar_dest
) != INDIRECT_REF
5686 && TREE_CODE (scalar_dest
) != COMPONENT_REF
5687 && TREE_CODE (scalar_dest
) != IMAGPART_EXPR
5688 && TREE_CODE (scalar_dest
) != REALPART_EXPR
5689 && TREE_CODE (scalar_dest
) != MEM_REF
)
5692 /* Cannot have hybrid store SLP -- that would mean storing to the
5693 same location twice. */
5694 gcc_assert (slp
== PURE_SLP_STMT (stmt_info
));
5696 gcc_assert (gimple_assign_single_p (stmt
));
5698 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
), rhs_vectype
= NULL_TREE
;
5699 unsigned int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
5703 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
5704 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
5715 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
5717 gcc_assert (ncopies
>= 1);
5719 /* FORNOW. This restriction should be relaxed. */
5720 if (loop
&& nested_in_vect_loop_p (loop
, stmt
) && ncopies
> 1)
5722 if (dump_enabled_p ())
5723 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5724 "multiple types in nested loop.\n");
5728 op
= gimple_assign_rhs1 (stmt
);
5730 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
, &rhs_vectype
))
5732 if (dump_enabled_p ())
5733 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5734 "use not simple.\n");
5738 if (dt
== vect_constant_def
|| dt
== vect_external_def
)
5739 vls_type
= VLS_STORE_INVARIANT
;
5741 vls_type
= VLS_STORE
;
5743 if (rhs_vectype
&& !useless_type_conversion_p (vectype
, rhs_vectype
))
5746 elem_type
= TREE_TYPE (vectype
);
5747 vec_mode
= TYPE_MODE (vectype
);
5749 /* FORNOW. In some cases can vectorize even if data-type not supported
5750 (e.g. - array initialization with 0). */
5751 if (optab_handler (mov_optab
, vec_mode
) == CODE_FOR_nothing
)
5754 if (!STMT_VINFO_DATA_REF (stmt_info
))
5757 vect_memory_access_type memory_access_type
;
5758 if (!get_load_store_type (stmt
, vectype
, slp
, vls_type
, ncopies
,
5759 &memory_access_type
, &gs_info
))
5762 if (!vec_stmt
) /* transformation not required. */
5764 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
) = memory_access_type
;
5765 STMT_VINFO_TYPE (stmt_info
) = store_vec_info_type
;
5766 /* The SLP costs are calculated during SLP analysis. */
5767 if (!PURE_SLP_STMT (stmt_info
))
5768 vect_model_store_cost (stmt_info
, ncopies
, memory_access_type
, dt
,
5772 gcc_assert (memory_access_type
== STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
));
5776 ensure_base_align (stmt_info
, dr
);
5778 if (memory_access_type
== VMAT_GATHER_SCATTER
)
5780 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
, op
, src
;
5781 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
.decl
));
5782 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
5783 tree ptr
, mask
, var
, scale
, perm_mask
= NULL_TREE
;
5784 edge pe
= loop_preheader_edge (loop
);
5787 enum { NARROW
, NONE
, WIDEN
} modifier
;
5788 int scatter_off_nunits
= TYPE_VECTOR_SUBPARTS (gs_info
.offset_vectype
);
5790 if (nunits
== (unsigned int) scatter_off_nunits
)
5792 else if (nunits
== (unsigned int) scatter_off_nunits
/ 2)
5794 unsigned char *sel
= XALLOCAVEC (unsigned char, scatter_off_nunits
);
5797 for (i
= 0; i
< (unsigned int) scatter_off_nunits
; ++i
)
5798 sel
[i
] = i
| nunits
;
5800 perm_mask
= vect_gen_perm_mask_checked (gs_info
.offset_vectype
, sel
);
5801 gcc_assert (perm_mask
!= NULL_TREE
);
5803 else if (nunits
== (unsigned int) scatter_off_nunits
* 2)
5805 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
5808 for (i
= 0; i
< (unsigned int) nunits
; ++i
)
5809 sel
[i
] = i
| scatter_off_nunits
;
5811 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
5812 gcc_assert (perm_mask
!= NULL_TREE
);
5818 rettype
= TREE_TYPE (TREE_TYPE (gs_info
.decl
));
5819 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5820 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5821 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5822 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5823 scaletype
= TREE_VALUE (arglist
);
5825 gcc_checking_assert (TREE_CODE (masktype
) == INTEGER_TYPE
5826 && TREE_CODE (rettype
) == VOID_TYPE
);
5828 ptr
= fold_convert (ptrtype
, gs_info
.base
);
5829 if (!is_gimple_min_invariant (ptr
))
5831 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
5832 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
5833 gcc_assert (!new_bb
);
5836 /* Currently we support only unconditional scatter stores,
5837 so mask should be all ones. */
5838 mask
= build_int_cst (masktype
, -1);
5839 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
5841 scale
= build_int_cst (scaletype
, gs_info
.scale
);
5843 prev_stmt_info
= NULL
;
5844 for (j
= 0; j
< ncopies
; ++j
)
5849 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt
), stmt
);
5851 = vect_get_vec_def_for_operand (gs_info
.offset
, stmt
);
5853 else if (modifier
!= NONE
&& (j
& 1))
5855 if (modifier
== WIDEN
)
5858 = vect_get_vec_def_for_stmt_copy (scatter_src_dt
, vec_oprnd1
);
5859 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
, perm_mask
,
5862 else if (modifier
== NARROW
)
5864 src
= permute_vec_elements (vec_oprnd1
, vec_oprnd1
, perm_mask
,
5867 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
,
5876 = vect_get_vec_def_for_stmt_copy (scatter_src_dt
, vec_oprnd1
);
5878 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
,
5882 if (!useless_type_conversion_p (srctype
, TREE_TYPE (src
)))
5884 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src
))
5885 == TYPE_VECTOR_SUBPARTS (srctype
));
5886 var
= vect_get_new_ssa_name (srctype
, vect_simple_var
);
5887 src
= build1 (VIEW_CONVERT_EXPR
, srctype
, src
);
5888 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, src
);
5889 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5893 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
5895 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
5896 == TYPE_VECTOR_SUBPARTS (idxtype
));
5897 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
5898 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
5899 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
5900 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5905 = gimple_build_call (gs_info
.decl
, 5, ptr
, mask
, op
, src
, scale
);
5907 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5909 if (prev_stmt_info
== NULL
)
5910 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5912 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5913 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5918 grouped_store
= STMT_VINFO_GROUPED_ACCESS (stmt_info
);
5921 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
5922 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
5923 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
5925 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))++;
5928 gcc_assert (!loop
|| !nested_in_vect_loop_p (loop
, stmt
));
5930 /* We vectorize all the stmts of the interleaving group when we
5931 reach the last stmt in the group. */
5932 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))
5933 < GROUP_SIZE (vinfo_for_stmt (first_stmt
))
5942 grouped_store
= false;
      /* VEC_NUM is the number of vect stmts to be created for this
         group.  */
5945 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
5946 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
5947 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt
)) == first_stmt
);
5948 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
5949 op
= gimple_assign_rhs1 (first_stmt
);
        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
5954 vec_num
= group_size
;
5956 ref_type
= get_group_alias_ptr_type (first_stmt
);
5962 group_size
= vec_num
= 1;
5963 ref_type
= reference_alias_ptr_type (DR_REF (first_dr
));
5966 if (dump_enabled_p ())
5967 dump_printf_loc (MSG_NOTE
, vect_location
,
5968 "transform store. ncopies = %d\n", ncopies
);
5970 if (memory_access_type
== VMAT_ELEMENTWISE
5971 || memory_access_type
== VMAT_STRIDED_SLP
)
5973 gimple_stmt_iterator incr_gsi
;
5979 gimple_seq stmts
= NULL
;
5980 tree stride_base
, stride_step
, alias_off
;
5984 gcc_assert (!nested_in_vect_loop_p (loop
, stmt
));
5987 = fold_build_pointer_plus
5988 (unshare_expr (DR_BASE_ADDRESS (first_dr
)),
5989 size_binop (PLUS_EXPR
,
5990 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr
))),
5991 convert_to_ptrofftype (DR_INIT (first_dr
))));
5992 stride_step
= fold_convert (sizetype
, unshare_expr (DR_STEP (first_dr
)));
      /* For a store with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             array[i] = ...;

         we generate a new induction variable and new stores from
         the components of the (vectorized) rhs:

           for (j = 0; ; j += VF*stride)
             vectemp = ...;
             tmp1 = vectemp[0];
             array[j] = tmp1;
             tmp2 = vectemp[1];
             array[j + stride] = tmp2;
             ...
         */
6012 unsigned nstores
= nunits
;
6014 tree ltype
= elem_type
;
6015 tree lvectype
= vectype
;
6018 if (group_size
< nunits
6019 && nunits
% group_size
== 0)
6021 nstores
= nunits
/ group_size
;
6023 ltype
= build_vector_type (elem_type
, group_size
);
6026 /* First check if vec_extract optab doesn't support extraction
6027 of vector elts directly. */
6028 scalar_mode elmode
= SCALAR_TYPE_MODE (elem_type
);
6029 machine_mode vmode
= mode_for_vector (elmode
, group_size
);
6030 if (! VECTOR_MODE_P (vmode
)
6031 || (convert_optab_handler (vec_extract_optab
,
6032 TYPE_MODE (vectype
), vmode
)
6033 == CODE_FOR_nothing
))
                  /* Try to avoid emitting an extract of vector elements
                     by performing the extracts using an integer type of the
                     same size, extracting from a vector of those and then
                     re-interpreting it as the original vector type if
                     supported.  */
                  unsigned lsize
                    = group_size * GET_MODE_BITSIZE (elmode);
* GET_MODE_BITSIZE (elmode
);
6042 elmode
= int_mode_for_size (lsize
, 0).require ();
6043 vmode
= mode_for_vector (elmode
, nunits
/ group_size
);
6044 /* If we can't construct such a vector fall back to
6045 element extracts from the original vector type and
6046 element size stores. */
6047 if (VECTOR_MODE_P (vmode
)
6048 && (convert_optab_handler (vec_extract_optab
,
6050 != CODE_FOR_nothing
))
6052 nstores
= nunits
/ group_size
;
6054 ltype
= build_nonstandard_integer_type (lsize
, 1);
6055 lvectype
= build_vector_type (ltype
, nstores
);
6057 /* Else fall back to vector extraction anyway.
6058 Fewer stores are more important than avoiding spilling
6059 of the vector we extract from. Compared to the
6060 construction case in vectorizable_load no store-forwarding
6061 issue exists here for reasonable archs. */
6064 else if (group_size
>= nunits
6065 && group_size
% nunits
== 0)
6072 ltype
= build_aligned_type (ltype
, TYPE_ALIGN (elem_type
));
6073 ncopies
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
6076 ivstep
= stride_step
;
6077 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (ivstep
), ivstep
,
6078 build_int_cst (TREE_TYPE (ivstep
), vf
));
6080 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
6082 create_iv (stride_base
, ivstep
, NULL
,
6083 loop
, &incr_gsi
, insert_after
,
6085 incr
= gsi_stmt (incr_gsi
);
6086 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
));
6088 stride_step
= force_gimple_operand (stride_step
, &stmts
, true, NULL_TREE
);
6090 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop
), stmts
);
6092 prev_stmt_info
= NULL
;
6093 alias_off
= build_int_cst (ref_type
, 0);
6094 next_stmt
= first_stmt
;
6095 for (g
= 0; g
< group_size
; g
++)
6097 running_off
= offvar
;
6100 tree size
= TYPE_SIZE_UNIT (ltype
);
6101 tree pos
= fold_build2 (MULT_EXPR
, sizetype
, size_int (g
),
6103 tree newoff
= copy_ssa_name (running_off
, NULL
);
6104 incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
6106 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6107 running_off
= newoff
;
6109 unsigned int group_el
= 0;
6110 unsigned HOST_WIDE_INT
6111 elsz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)));
6112 for (j
= 0; j
< ncopies
; j
++)
6114 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
6115 and first_stmt == stmt. */
6120 vect_get_vec_defs (op
, NULL_TREE
, stmt
, &vec_oprnds
, NULL
,
6122 vec_oprnd
= vec_oprnds
[0];
6126 gcc_assert (gimple_assign_single_p (next_stmt
));
6127 op
= gimple_assign_rhs1 (next_stmt
);
6128 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
);
6134 vec_oprnd
= vec_oprnds
[j
];
6137 vect_is_simple_use (vec_oprnd
, vinfo
, &def_stmt
, &dt
);
6138 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, vec_oprnd
);
6141 /* Pun the vector to extract from if necessary. */
6142 if (lvectype
!= vectype
)
6144 tree tem
= make_ssa_name (lvectype
);
6146 = gimple_build_assign (tem
, build1 (VIEW_CONVERT_EXPR
,
6147 lvectype
, vec_oprnd
));
6148 vect_finish_stmt_generation (stmt
, pun
, gsi
);
6151 for (i
= 0; i
< nstores
; i
++)
6153 tree newref
, newoff
;
6154 gimple
*incr
, *assign
;
6155 tree size
= TYPE_SIZE (ltype
);
6156 /* Extract the i'th component. */
6157 tree pos
= fold_build2 (MULT_EXPR
, bitsizetype
,
6158 bitsize_int (i
), size
);
6159 tree elem
= fold_build3 (BIT_FIELD_REF
, ltype
, vec_oprnd
,
6162 elem
= force_gimple_operand_gsi (gsi
, elem
, true,
6166 tree this_off
= build_int_cst (TREE_TYPE (alias_off
),
6168 newref
= build2 (MEM_REF
, ltype
,
6169 running_off
, this_off
);
6171 /* And store it to *running_off. */
6172 assign
= gimple_build_assign (newref
, elem
);
6173 vect_finish_stmt_generation (stmt
, assign
, gsi
);
6177 || group_el
== group_size
)
6179 newoff
= copy_ssa_name (running_off
, NULL
);
6180 incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
6181 running_off
, stride_step
);
6182 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6184 running_off
= newoff
;
6187 if (g
== group_size
- 1
6190 if (j
== 0 && i
== 0)
6191 STMT_VINFO_VEC_STMT (stmt_info
)
6192 = *vec_stmt
= assign
;
6194 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = assign
;
6195 prev_stmt_info
= vinfo_for_stmt (assign
);
6199 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
6204 vec_oprnds
.release ();
6208 auto_vec
<tree
> dr_chain (group_size
);
6209 oprnds
.create (group_size
);
6211 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
6212 gcc_assert (alignment_support_scheme
);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
6215 gcc_assert (memory_access_type
!= VMAT_LOAD_STORE_LANES
6216 || alignment_support_scheme
== dr_aligned
6217 || alignment_support_scheme
== dr_unaligned_supported
);
6219 if (memory_access_type
== VMAT_CONTIGUOUS_DOWN
6220 || memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
6221 offset
= size_int (-TYPE_VECTOR_SUBPARTS (vectype
) + 1);
6223 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
6224 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
6226 aggr_type
= vectype
;
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

	S1:  &base + 2 = x2
	S2:  &base = x0
	S3:  &base + 1 = x1
	S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

	VS1: &base = vx2
	VS2: &base + vec_size*1 = vx0
	VS3: &base + vec_size*2 = vx1
	VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

	VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
	VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
	...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.  */
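  /* Illustrative sketch (assumed, not from the original comment): for a group
     of two interleaved V4SI stores a[2*i] = x and a[2*i+1] = y, with vector
     defs vx = {x0,x1,x2,x3} and vy = {y0,y1,y2,y3}, vect_permute_store_chain
     would produce

	lo = VEC_PERM_EXPR < vx, vy, { 0, 4, 1, 5 } >   i.e. {x0,y0,x1,y1}
	hi = VEC_PERM_EXPR < vx, vy, { 2, 6, 3, 7 } >   i.e. {x2,y2,x3,y3}

     and the two results are stored to consecutive vector-sized locations
     starting at &a[2*i].  */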
6267 prev_stmt_info
= NULL
;
6268 for (j
= 0; j
< ncopies
; j
++)
6275 /* Get vectorized arguments for SLP_NODE. */
6276 vect_get_vec_defs (op
, NULL_TREE
, stmt
, &vec_oprnds
,
6279 vec_oprnd
= vec_oprnds
[0];
6283 /* For interleaved stores we collect vectorized defs for all the
6284 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6285 used as an input to vect_permute_store_chain(), and OPRNDS as
6286 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6288 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6289 OPRNDS are of size 1. */
6290 next_stmt
= first_stmt
;
6291 for (i
= 0; i
< group_size
; i
++)
6293 /* Since gaps are not supported for interleaved stores,
6294 GROUP_SIZE is the exact number of stmts in the chain.
6295 Therefore, NEXT_STMT can't be NULL_TREE. In case that
6296 there is no interleaving, GROUP_SIZE is 1, and only one
6297 iteration of the loop will be executed. */
6298 gcc_assert (next_stmt
6299 && gimple_assign_single_p (next_stmt
));
6300 op
= gimple_assign_rhs1 (next_stmt
);
6302 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
);
6303 dr_chain
.quick_push (vec_oprnd
);
6304 oprnds
.quick_push (vec_oprnd
);
6305 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
	  /* We should have caught mismatched types earlier.  */
6310 gcc_assert (useless_type_conversion_p (vectype
,
6311 TREE_TYPE (vec_oprnd
)));
6312 bool simd_lane_access_p
6313 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
6314 if (simd_lane_access_p
6315 && TREE_CODE (DR_BASE_ADDRESS (first_dr
)) == ADDR_EXPR
6316 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr
), 0))
6317 && integer_zerop (DR_OFFSET (first_dr
))
6318 && integer_zerop (DR_INIT (first_dr
))
6319 && alias_sets_conflict_p (get_alias_set (aggr_type
),
6320 get_alias_set (TREE_TYPE (ref_type
))))
6322 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr
));
6323 dataref_offset
= build_int_cst (ref_type
, 0);
6328 = vect_create_data_ref_ptr (first_stmt
, aggr_type
,
6329 simd_lane_access_p
? loop
: NULL
,
6330 offset
, &dummy
, gsi
, &ptr_incr
,
6331 simd_lane_access_p
, &inv_p
);
6332 gcc_assert (bb_vinfo
|| !inv_p
);
6336 /* For interleaved stores we created vectorized defs for all the
6337 defs stored in OPRNDS in the previous iteration (previous copy).
6338 DR_CHAIN is then used as an input to vect_permute_store_chain(),
6339 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
6341 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6342 OPRNDS are of size 1. */
6343 for (i
= 0; i
< group_size
; i
++)
6346 vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
);
6347 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, op
);
6348 dr_chain
[i
] = vec_oprnd
;
6349 oprnds
[i
] = vec_oprnd
;
6353 = int_const_binop (PLUS_EXPR
, dataref_offset
,
6354 TYPE_SIZE_UNIT (aggr_type
));
6356 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
6357 TYPE_SIZE_UNIT (aggr_type
));
6360 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
6364 /* Combine all the vectors into an array. */
6365 vec_array
= create_vector_array (vectype
, vec_num
);
6366 for (i
= 0; i
< vec_num
; i
++)
6368 vec_oprnd
= dr_chain
[i
];
6369 write_vector_array (stmt
, gsi
, vec_oprnd
, vec_array
, i
);
6373 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
6374 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, ref_type
);
6375 gcall
*call
= gimple_build_call_internal (IFN_STORE_LANES
, 1,
6377 gimple_call_set_lhs (call
, data_ref
);
6378 gimple_call_set_nothrow (call
, true);
6380 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6388 result_chain
.create (group_size
);
6390 vect_permute_store_chain (dr_chain
, group_size
, stmt
, gsi
,
6394 next_stmt
= first_stmt
;
6395 for (i
= 0; i
< vec_num
; i
++)
6397 unsigned align
, misalign
;
6400 /* Bump the vector pointer. */
6401 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
6405 vec_oprnd
= vec_oprnds
[i
];
6406 else if (grouped_store
)
6407 /* For grouped stores vectorized defs are interleaved in
6408 vect_permute_store_chain(). */
6409 vec_oprnd
= result_chain
[i
];
6411 data_ref
= fold_build2 (MEM_REF
, vectype
,
6415 : build_int_cst (ref_type
, 0));
6416 align
= TYPE_ALIGN_UNIT (vectype
);
6417 if (aligned_access_p (first_dr
))
6419 else if (DR_MISALIGNMENT (first_dr
) == -1)
6421 align
= dr_alignment (vect_dr_behavior (first_dr
));
6423 TREE_TYPE (data_ref
)
6424 = build_aligned_type (TREE_TYPE (data_ref
),
6425 align
* BITS_PER_UNIT
);
6429 TREE_TYPE (data_ref
)
6430 = build_aligned_type (TREE_TYPE (data_ref
),
6431 TYPE_ALIGN (elem_type
));
6432 misalign
= DR_MISALIGNMENT (first_dr
);
6434 if (dataref_offset
== NULL_TREE
6435 && TREE_CODE (dataref_ptr
) == SSA_NAME
)
6436 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
6439 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
6441 tree perm_mask
= perm_mask_for_reverse (vectype
);
6443 = vect_create_destination_var (gimple_assign_rhs1 (stmt
),
6445 tree new_temp
= make_ssa_name (perm_dest
);
6447 /* Generate the permute statement. */
6449 = gimple_build_assign (new_temp
, VEC_PERM_EXPR
, vec_oprnd
,
6450 vec_oprnd
, perm_mask
);
6451 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
6453 perm_stmt
= SSA_NAME_DEF_STMT (new_temp
);
6454 vec_oprnd
= new_temp
;
6457 /* Arguments are ready. Create the new vector stmt. */
6458 new_stmt
= gimple_build_assign (data_ref
, vec_oprnd
);
6459 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6464 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
6472 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
6474 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
6475 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
6480 result_chain
.release ();
6481 vec_oprnds
.release ();
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  mask_elt_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))).require (), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
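/* Usage sketch (illustrative, not part of the original code): a mask that
   reverses a four-element vector could be built as

     unsigned char sel[4] = { 3, 2, 1, 0 };
     tree mask = vect_gen_perm_mask_checked (vectype, sel);

   which is essentially how perm_mask_for_reverse builds the mask used for
   VMAT_CONTIGUOUS_REVERSE accesses, after verifying the permutation with
   can_vec_perm_p.  */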
/* Given vector variables X and Y that were generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
		      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loop's preheader edge.  Returns true if we
   were successful in doing so (in which case STMT can then be moved),
   otherwise returns false.  */
6550 hoist_defs_of_uses (gimple
*stmt
, struct loop
*loop
)
6556 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_USE
)
6558 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
6559 if (!gimple_nop_p (def_stmt
)
6560 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt
)))
6562 /* Make sure we don't need to recurse. While we could do
6563 so in simple cases when there are more complex use webs
6564 we don't have an easy way to preserve stmt order to fulfil
6565 dependencies within them. */
6568 if (gimple_code (def_stmt
) == GIMPLE_PHI
)
6570 FOR_EACH_SSA_TREE_OPERAND (op2
, def_stmt
, i2
, SSA_OP_USE
)
6572 gimple
*def_stmt2
= SSA_NAME_DEF_STMT (op2
);
6573 if (!gimple_nop_p (def_stmt2
)
6574 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt2
)))
6584 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_USE
)
6586 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
6587 if (!gimple_nop_p (def_stmt
)
6588 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt
)))
6590 gimple_stmt_iterator gsi
= gsi_for_stmt (def_stmt
);
6591 gsi_remove (&gsi
, false);
6592 gsi_insert_on_edge_immediate (loop_preheader_edge (loop
), def_stmt
);
/* vectorizable_load.

   Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
6608 vectorizable_load (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
6609 slp_tree slp_node
, slp_instance slp_node_instance
)
6612 tree vec_dest
= NULL
;
6613 tree data_ref
= NULL
;
6614 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
6615 stmt_vec_info prev_stmt_info
;
6616 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
6617 struct loop
*loop
= NULL
;
6618 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
6619 bool nested_in_vect_loop
= false;
6620 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
6624 gimple
*new_stmt
= NULL
;
6626 enum dr_alignment_support alignment_support_scheme
;
6627 tree dataref_ptr
= NULL_TREE
;
6628 tree dataref_offset
= NULL_TREE
;
6629 gimple
*ptr_incr
= NULL
;
6631 int i
, j
, group_size
, group_gap_adj
;
6632 tree msq
= NULL_TREE
, lsq
;
6633 tree offset
= NULL_TREE
;
6634 tree byte_offset
= NULL_TREE
;
6635 tree realignment_token
= NULL_TREE
;
6637 vec
<tree
> dr_chain
= vNULL
;
6638 bool grouped_load
= false;
6640 gimple
*first_stmt_for_drptr
= NULL
;
6642 bool compute_in_loop
= false;
6643 struct loop
*at_loop
;
6645 bool slp
= (slp_node
!= NULL
);
6646 bool slp_perm
= false;
6647 enum tree_code code
;
6648 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
6651 gather_scatter_info gs_info
;
6652 vec_info
*vinfo
= stmt_info
->vinfo
;
6655 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
6658 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
6662 /* Is vectorizable load? */
6663 if (!is_gimple_assign (stmt
))
6666 scalar_dest
= gimple_assign_lhs (stmt
);
6667 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
6670 code
= gimple_assign_rhs_code (stmt
);
6671 if (code
!= ARRAY_REF
6672 && code
!= BIT_FIELD_REF
6673 && code
!= INDIRECT_REF
6674 && code
!= COMPONENT_REF
6675 && code
!= IMAGPART_EXPR
6676 && code
!= REALPART_EXPR
6678 && TREE_CODE_CLASS (code
) != tcc_declaration
)
6681 if (!STMT_VINFO_DATA_REF (stmt_info
))
6684 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
6685 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
6689 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
6690 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
6691 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
6696 /* Multiple types in SLP are handled by creating the appropriate number of
6697 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6702 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
6704 gcc_assert (ncopies
>= 1);
6706 /* FORNOW. This restriction should be relaxed. */
6707 if (nested_in_vect_loop
&& ncopies
> 1)
6709 if (dump_enabled_p ())
6710 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6711 "multiple types in nested loop.\n");
6715 /* Invalidate assumptions made by dependence analysis when vectorization
6716 on the unrolled body effectively re-orders stmts. */
6718 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6719 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6720 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6722 if (dump_enabled_p ())
6723 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6724 "cannot perform implicit CSE when unrolling "
6725 "with negative dependence distance\n");
6729 elem_type
= TREE_TYPE (vectype
);
6730 mode
= TYPE_MODE (vectype
);
6732 /* FORNOW. In some cases can vectorize even if data-type not supported
6733 (e.g. - data copies). */
6734 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
6736 if (dump_enabled_p ())
6737 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6738 "Aligned load, but unsupported type.\n");
6742 /* Check if the load is a part of an interleaving chain. */
6743 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
6745 grouped_load
= true;
6747 gcc_assert (!nested_in_vect_loop
);
6748 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info
));
6750 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6751 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
6753 if (slp
&& SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
6756 /* Invalidate assumptions made by dependence analysis when vectorization
6757 on the unrolled body effectively re-orders stmts. */
6758 if (!PURE_SLP_STMT (stmt_info
)
6759 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6760 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6761 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6763 if (dump_enabled_p ())
6764 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6765 "cannot perform implicit CSE when performing "
6766 "group loads with negative dependence distance\n");
6770 /* Similarly when the stmt is a load that is both part of a SLP
6771 instance and a loop vectorized stmt via the same-dr mechanism
6772 we have to give up. */
6773 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)
6774 && (STMT_SLP_TYPE (stmt_info
)
6775 != STMT_SLP_TYPE (vinfo_for_stmt
6776 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)))))
6778 if (dump_enabled_p ())
6779 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6780 "conflicting SLP types for CSEd load\n");
6785 vect_memory_access_type memory_access_type
;
6786 if (!get_load_store_type (stmt
, vectype
, slp
, VLS_LOAD
, ncopies
,
6787 &memory_access_type
, &gs_info
))
6790 if (!vec_stmt
) /* transformation not required. */
6793 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
) = memory_access_type
;
6794 STMT_VINFO_TYPE (stmt_info
) = load_vec_info_type
;
6795 /* The SLP costs are calculated during SLP analysis. */
6796 if (!PURE_SLP_STMT (stmt_info
))
6797 vect_model_load_cost (stmt_info
, ncopies
, memory_access_type
,
6803 gcc_assert (memory_access_type
6804 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
));
6806 if (dump_enabled_p ())
6807 dump_printf_loc (MSG_NOTE
, vect_location
,
6808 "transform load. ncopies = %d\n", ncopies
);
6812 ensure_base_align (stmt_info
, dr
);
6814 if (memory_access_type
== VMAT_GATHER_SCATTER
)
6816 tree vec_oprnd0
= NULL_TREE
, op
;
6817 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
.decl
));
6818 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
6819 tree ptr
, mask
, var
, scale
, merge
, perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
6820 edge pe
= loop_preheader_edge (loop
);
6823 enum { NARROW
, NONE
, WIDEN
} modifier
;
6824 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gs_info
.offset_vectype
);
6826 if (nunits
== gather_off_nunits
)
6828 else if (nunits
== gather_off_nunits
/ 2)
6830 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
6833 for (i
= 0; i
< gather_off_nunits
; ++i
)
6834 sel
[i
] = i
| nunits
;
6836 perm_mask
= vect_gen_perm_mask_checked (gs_info
.offset_vectype
, sel
);
6838 else if (nunits
== gather_off_nunits
* 2)
6840 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
6843 for (i
= 0; i
< nunits
; ++i
)
6844 sel
[i
] = i
< gather_off_nunits
6845 ? i
: i
+ nunits
- gather_off_nunits
;
6847 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
6853 rettype
= TREE_TYPE (TREE_TYPE (gs_info
.decl
));
6854 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6855 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6856 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6857 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6858 scaletype
= TREE_VALUE (arglist
);
6859 gcc_checking_assert (types_compatible_p (srctype
, rettype
));
6861 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
6863 ptr
= fold_convert (ptrtype
, gs_info
.base
);
6864 if (!is_gimple_min_invariant (ptr
))
6866 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
6867 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
6868 gcc_assert (!new_bb
);
6871 /* Currently we support only unconditional gather loads,
6872 so mask should be all ones. */
6873 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
6874 mask
= build_int_cst (masktype
, -1);
6875 else if (TREE_CODE (TREE_TYPE (masktype
)) == INTEGER_TYPE
)
6877 mask
= build_int_cst (TREE_TYPE (masktype
), -1);
6878 mask
= build_vector_from_val (masktype
, mask
);
6879 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
6881 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype
)))
6885 for (j
= 0; j
< 6; ++j
)
6887 real_from_target (&r
, tmp
, TYPE_MODE (TREE_TYPE (masktype
)));
6888 mask
= build_real (TREE_TYPE (masktype
), r
);
6889 mask
= build_vector_from_val (masktype
, mask
);
6890 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
6895 scale
= build_int_cst (scaletype
, gs_info
.scale
);
6897 if (TREE_CODE (TREE_TYPE (rettype
)) == INTEGER_TYPE
)
6898 merge
= build_int_cst (TREE_TYPE (rettype
), 0);
6899 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype
)))
6903 for (j
= 0; j
< 6; ++j
)
6905 real_from_target (&r
, tmp
, TYPE_MODE (TREE_TYPE (rettype
)));
6906 merge
= build_real (TREE_TYPE (rettype
), r
);
6910 merge
= build_vector_from_val (rettype
, merge
);
6911 merge
= vect_init_vector (stmt
, merge
, rettype
, NULL
);
6913 prev_stmt_info
= NULL
;
6914 for (j
= 0; j
< ncopies
; ++j
)
6916 if (modifier
== WIDEN
&& (j
& 1))
6917 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
6918 perm_mask
, stmt
, gsi
);
6921 = vect_get_vec_def_for_operand (gs_info
.offset
, stmt
);
6924 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
, vec_oprnd0
);
6926 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
6928 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
6929 == TYPE_VECTOR_SUBPARTS (idxtype
));
6930 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
6931 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
6933 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
6934 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6939 = gimple_build_call (gs_info
.decl
, 5, merge
, ptr
, op
, mask
, scale
);
6941 if (!useless_type_conversion_p (vectype
, rettype
))
6943 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
6944 == TYPE_VECTOR_SUBPARTS (rettype
));
6945 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
6946 gimple_call_set_lhs (new_stmt
, op
);
6947 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6948 var
= make_ssa_name (vec_dest
);
6949 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
6951 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
6955 var
= make_ssa_name (vec_dest
, new_stmt
);
6956 gimple_call_set_lhs (new_stmt
, var
);
6959 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6961 if (modifier
== NARROW
)
6968 var
= permute_vec_elements (prev_res
, var
,
6969 perm_mask
, stmt
, gsi
);
6970 new_stmt
= SSA_NAME_DEF_STMT (var
);
6973 if (prev_stmt_info
== NULL
)
6974 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
6976 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
6977 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
6982 if (memory_access_type
== VMAT_ELEMENTWISE
6983 || memory_access_type
== VMAT_STRIDED_SLP
)
6985 gimple_stmt_iterator incr_gsi
;
6991 vec
<constructor_elt
, va_gc
> *v
= NULL
;
6992 gimple_seq stmts
= NULL
;
6993 tree stride_base
, stride_step
, alias_off
;
6995 gcc_assert (!nested_in_vect_loop
);
6997 if (slp
&& grouped_load
)
6999 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
7000 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
7001 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
7002 ref_type
= get_group_alias_ptr_type (first_stmt
);
7009 ref_type
= reference_alias_ptr_type (DR_REF (first_dr
));
7013 = fold_build_pointer_plus
7014 (DR_BASE_ADDRESS (first_dr
),
7015 size_binop (PLUS_EXPR
,
7016 convert_to_ptrofftype (DR_OFFSET (first_dr
)),
7017 convert_to_ptrofftype (DR_INIT (first_dr
))));
7018 stride_step
= fold_convert (sizetype
, DR_STEP (first_dr
));
      /* For a load with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     ... = array[i];

	 we generate a new induction variable and new accesses to
	 form a new vector (or vectors, depending on ncopies):

	   for (j = 0; ; j += VF*stride)
	     tmp1 = array[j];
	     tmp2 = array[j + stride];
	     ...
	     vectemp = {tmp1, tmp2, ...}
       */
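      /* Illustrative sketch (assumed): with V4SI, VF == 4 and ncopies == 1
	 the generated body loads array[j], array[j + stride],
	 array[j + 2*stride] and array[j + 3*stride] through the running_off
	 pointer, which is advanced by stride_step between the element loads,
	 and collects the four scalars in a CONSTRUCTOR that initializes
	 vectemp; the induction variable created below advances the base by
	 VF * stride elements per vector iteration.  */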
7036 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (stride_step
), stride_step
,
7037 build_int_cst (TREE_TYPE (stride_step
), vf
));
7039 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
7041 create_iv (unshare_expr (stride_base
), unshare_expr (ivstep
), NULL
,
7042 loop
, &incr_gsi
, insert_after
,
7044 incr
= gsi_stmt (incr_gsi
);
7045 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
));
7047 stride_step
= force_gimple_operand (unshare_expr (stride_step
),
7048 &stmts
, true, NULL_TREE
);
7050 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop
), stmts
);
7052 prev_stmt_info
= NULL
;
7053 running_off
= offvar
;
7054 alias_off
= build_int_cst (ref_type
, 0);
7055 int nloads
= nunits
;
7057 tree ltype
= TREE_TYPE (vectype
);
7058 tree lvectype
= vectype
;
7059 auto_vec
<tree
> dr_chain
;
7060 if (memory_access_type
== VMAT_STRIDED_SLP
)
7062 if (group_size
< nunits
)
7064 /* First check if vec_init optab supports construction from
7065 vector elts directly. */
7066 scalar_mode elmode
= SCALAR_TYPE_MODE (TREE_TYPE (vectype
));
7067 machine_mode vmode
= mode_for_vector (elmode
, group_size
);
7068 if (VECTOR_MODE_P (vmode
)
7069 && (convert_optab_handler (vec_init_optab
,
7070 TYPE_MODE (vectype
), vmode
)
7071 != CODE_FOR_nothing
))
7073 nloads
= nunits
/ group_size
;
7075 ltype
= build_vector_type (TREE_TYPE (vectype
), group_size
);
7079 /* Otherwise avoid emitting a constructor of vector elements
7080 by performing the loads using an integer type of the same
7081 size, constructing a vector of those and then
7082 re-interpreting it as the original vector type.
7083 This avoids a huge runtime penalty due to the general
7084 inability to perform store forwarding from smaller stores
7085 to a larger load. */
7087 = group_size
* TYPE_PRECISION (TREE_TYPE (vectype
));
7088 elmode
= int_mode_for_size (lsize
, 0).require ();
7089 vmode
= mode_for_vector (elmode
, nunits
/ group_size
);
7090 /* If we can't construct such a vector fall back to
7091 element loads of the original vector type. */
7092 if (VECTOR_MODE_P (vmode
)
7093 && (convert_optab_handler (vec_init_optab
, vmode
, elmode
)
7094 != CODE_FOR_nothing
))
7096 nloads
= nunits
/ group_size
;
7098 ltype
= build_nonstandard_integer_type (lsize
, 1);
7099 lvectype
= build_vector_type (ltype
, nloads
);
7109 ltype
= build_aligned_type (ltype
, TYPE_ALIGN (TREE_TYPE (vectype
)));
	  /* For SLP permutation support we need to load the whole group,
	     not only the number of vector stmts the permutation result
	     fits in.  */
	  if (slp_perm)
	    {
	      ncopies = (group_size * vf + nunits - 1) / nunits;
	      dr_chain.create (ncopies);
	    }
	  else
	    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7125 unsigned HOST_WIDE_INT
7126 elsz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)));
7127 for (j
= 0; j
< ncopies
; j
++)
7130 vec_alloc (v
, nloads
);
7131 for (i
= 0; i
< nloads
; i
++)
7133 tree this_off
= build_int_cst (TREE_TYPE (alias_off
),
7135 new_stmt
= gimple_build_assign (make_ssa_name (ltype
),
7136 build2 (MEM_REF
, ltype
,
7137 running_off
, this_off
));
7138 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7140 CONSTRUCTOR_APPEND_ELT (v
, NULL_TREE
,
7141 gimple_assign_lhs (new_stmt
));
7145 || group_el
== group_size
)
7147 tree newoff
= copy_ssa_name (running_off
);
7148 gimple
*incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
7149 running_off
, stride_step
);
7150 vect_finish_stmt_generation (stmt
, incr
, gsi
);
7152 running_off
= newoff
;
7158 tree vec_inv
= build_constructor (lvectype
, v
);
7159 new_temp
= vect_init_vector (stmt
, vec_inv
, lvectype
, gsi
);
7160 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7161 if (lvectype
!= vectype
)
7163 new_stmt
= gimple_build_assign (make_ssa_name (vectype
),
7165 build1 (VIEW_CONVERT_EXPR
,
7166 vectype
, new_temp
));
7167 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7174 dr_chain
.quick_push (gimple_assign_lhs (new_stmt
));
7176 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
7181 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7183 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7184 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7190 vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
7191 slp_node_instance
, false, &n_perms
);
7198 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
7199 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
7200 int group_gap
= GROUP_GAP (vinfo_for_stmt (first_stmt
));
7201 /* For SLP vectorization we directly vectorize a subchain
7202 without permutation. */
7203 if (slp
&& ! SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
7204 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
7205 /* For BB vectorization always use the first stmt to base
7206 the data ref pointer on. */
7208 first_stmt_for_drptr
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
7210 /* Check if the chain of loads is already vectorized. */
7211 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt
))
7212 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7213 ??? But we can only do so if there is exactly one
7214 as we have no way to get at the rest. Leave the CSE
7216 ??? With the group load eventually participating
7217 in multiple different permutations (having multiple
7218 slp nodes which refer to the same group) the CSE
7219 is even wrong code. See PR56270. */
7222 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
7225 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
7228 /* VEC_NUM is the number of vect stmts to be created for this group. */
7231 grouped_load
= false;
	  /* For SLP permutation support we need to load the whole group,
	     not only the number of vector stmts the permutation result
	     fits in.  */
	  if (slp_perm)
	    {
	      vec_num = (group_size * vf + nunits - 1) / nunits;
	      group_gap_adj = vf * group_size - nunits * vec_num;
	    }
	  else
	    {
	      vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	      group_gap_adj = group_gap;
	    }
7247 vec_num
= group_size
;
7249 ref_type
= get_group_alias_ptr_type (first_stmt
);
7255 group_size
= vec_num
= 1;
7257 ref_type
= reference_alias_ptr_type (DR_REF (first_dr
));
7260 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
7261 gcc_assert (alignment_support_scheme
);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
7264 gcc_assert (memory_access_type
!= VMAT_LOAD_STORE_LANES
7265 || alignment_support_scheme
== dr_aligned
7266 || alignment_support_scheme
== dr_unaligned_supported
);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See the documentation of vect_get_vec_def_for_stmt_copy for how the
     information we recorded in the RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
     VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
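  /* Illustrative sketch (assumed, not from the original comment): for a group
     of two interleaved V4SI loads x = a[2*i], y = a[2*i+1], the two memory
     loads give vx0 = {x0,y0,x1,y1} and vx1 = {x2,y2,x3,y3}, and the even/odd
     extraction permutes are

	VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, 4, 6 } >   i.e. {x0,x1,x2,x3}
	VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, 5, 7 } >   i.e. {y0,y1,y2,y3}

     matching the { 0, 2, ..., i*2 } / { 1, 3, ..., i*2+1 } masks above.  */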
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

         p = initial_addr;
         indx = 0;
         loop {
	   p = p + indx * vectype_size;
           vec_dest = *(p);
           indx = indx + 1;
         }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
	   p2 = p2 + indx * vectype_size
	   lsq = *(floor(p2))
	   vec_dest = realign_load (msq, lsq, realignment_token)
	   indx = indx + 1;
	   msq = lsq;
         }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
7371 if (nested_in_vect_loop
7372 && (DR_STEP_ALIGNMENT (dr
) % GET_MODE_SIZE (TYPE_MODE (vectype
))) != 0)
7374 gcc_assert (alignment_support_scheme
!= dr_explicit_realign_optimized
);
7375 compute_in_loop
= true;
7378 if ((alignment_support_scheme
== dr_explicit_realign_optimized
7379 || alignment_support_scheme
== dr_explicit_realign
)
7380 && !compute_in_loop
)
7382 msq
= vect_setup_realignment (first_stmt
, gsi
, &realignment_token
,
7383 alignment_support_scheme
, NULL_TREE
,
7385 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
7387 phi
= as_a
<gphi
*> (SSA_NAME_DEF_STMT (msq
));
7388 byte_offset
= size_binop (MINUS_EXPR
, TYPE_SIZE_UNIT (vectype
),
7395 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
7396 offset
= size_int (-TYPE_VECTOR_SUBPARTS (vectype
) + 1);
7398 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
7399 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
7401 aggr_type
= vectype
;
7403 prev_stmt_info
= NULL
;
7405 for (j
= 0; j
< ncopies
; j
++)
7407 /* 1. Create the vector or array pointer update chain. */
7410 bool simd_lane_access_p
7411 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
7412 if (simd_lane_access_p
7413 && TREE_CODE (DR_BASE_ADDRESS (first_dr
)) == ADDR_EXPR
7414 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr
), 0))
7415 && integer_zerop (DR_OFFSET (first_dr
))
7416 && integer_zerop (DR_INIT (first_dr
))
7417 && alias_sets_conflict_p (get_alias_set (aggr_type
),
7418 get_alias_set (TREE_TYPE (ref_type
)))
7419 && (alignment_support_scheme
== dr_aligned
7420 || alignment_support_scheme
== dr_unaligned_supported
))
7422 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr
));
7423 dataref_offset
= build_int_cst (ref_type
, 0);
7426 else if (first_stmt_for_drptr
7427 && first_stmt
!= first_stmt_for_drptr
)
7430 = vect_create_data_ref_ptr (first_stmt_for_drptr
, aggr_type
,
7431 at_loop
, offset
, &dummy
, gsi
,
7432 &ptr_incr
, simd_lane_access_p
,
7433 &inv_p
, byte_offset
);
7434 /* Adjust the pointer by the difference to first_stmt. */
7435 data_reference_p ptrdr
7436 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr
));
7437 tree diff
= fold_convert (sizetype
,
7438 size_binop (MINUS_EXPR
,
7441 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7446 = vect_create_data_ref_ptr (first_stmt
, aggr_type
, at_loop
,
7447 offset
, &dummy
, gsi
, &ptr_incr
,
7448 simd_lane_access_p
, &inv_p
,
7451 else if (dataref_offset
)
7452 dataref_offset
= int_const_binop (PLUS_EXPR
, dataref_offset
,
7453 TYPE_SIZE_UNIT (aggr_type
));
7455 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
7456 TYPE_SIZE_UNIT (aggr_type
));
7458 if (grouped_load
|| slp_perm
)
7459 dr_chain
.create (vec_num
);
7461 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
7465 vec_array
= create_vector_array (vectype
, vec_num
);
7468 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
7469 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, ref_type
);
7470 gcall
*call
= gimple_build_call_internal (IFN_LOAD_LANES
, 1,
7472 gimple_call_set_lhs (call
, vec_array
);
7473 gimple_call_set_nothrow (call
, true);
7475 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7477 /* Extract each vector into an SSA_NAME. */
7478 for (i
= 0; i
< vec_num
; i
++)
7480 new_temp
= read_vector_array (stmt
, gsi
, scalar_dest
,
7482 dr_chain
.quick_push (new_temp
);
7485 /* Record the mapping between SSA_NAMEs and statements. */
7486 vect_record_grouped_load_vectors (stmt
, dr_chain
);
7490 for (i
= 0; i
< vec_num
; i
++)
7493 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7496 /* 2. Create the vector-load in the loop. */
7497 switch (alignment_support_scheme
)
7500 case dr_unaligned_supported
:
7502 unsigned int align
, misalign
;
7505 = fold_build2 (MEM_REF
, vectype
, dataref_ptr
,
7508 : build_int_cst (ref_type
, 0));
7509 align
= TYPE_ALIGN_UNIT (vectype
);
7510 if (alignment_support_scheme
== dr_aligned
)
7512 gcc_assert (aligned_access_p (first_dr
));
7515 else if (DR_MISALIGNMENT (first_dr
) == -1)
7517 align
= dr_alignment (vect_dr_behavior (first_dr
));
7519 TREE_TYPE (data_ref
)
7520 = build_aligned_type (TREE_TYPE (data_ref
),
7521 align
* BITS_PER_UNIT
);
7525 TREE_TYPE (data_ref
)
7526 = build_aligned_type (TREE_TYPE (data_ref
),
7527 TYPE_ALIGN (elem_type
));
7528 misalign
= DR_MISALIGNMENT (first_dr
);
7530 if (dataref_offset
== NULL_TREE
7531 && TREE_CODE (dataref_ptr
) == SSA_NAME
)
7532 set_ptr_info_alignment (get_ptr_info (dataref_ptr
),
7536 case dr_explicit_realign
:
7540 tree vs
= size_int (TYPE_VECTOR_SUBPARTS (vectype
));
7542 if (compute_in_loop
)
7543 msq
= vect_setup_realignment (first_stmt
, gsi
,
7545 dr_explicit_realign
,
7548 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
7549 ptr
= copy_ssa_name (dataref_ptr
);
7551 ptr
= make_ssa_name (TREE_TYPE (dataref_ptr
));
7552 new_stmt
= gimple_build_assign
7553 (ptr
, BIT_AND_EXPR
, dataref_ptr
,
7555 (TREE_TYPE (dataref_ptr
),
7556 -(HOST_WIDE_INT
)TYPE_ALIGN_UNIT (vectype
)));
7557 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7559 = build2 (MEM_REF
, vectype
, ptr
,
7560 build_int_cst (ref_type
, 0));
7561 vec_dest
= vect_create_destination_var (scalar_dest
,
7563 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
7564 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7565 gimple_assign_set_lhs (new_stmt
, new_temp
);
7566 gimple_set_vdef (new_stmt
, gimple_vdef (stmt
));
7567 gimple_set_vuse (new_stmt
, gimple_vuse (stmt
));
7568 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7571 bump
= size_binop (MULT_EXPR
, vs
,
7572 TYPE_SIZE_UNIT (elem_type
));
7573 bump
= size_binop (MINUS_EXPR
, bump
, size_one_node
);
7574 ptr
= bump_vector_ptr (dataref_ptr
, NULL
, gsi
, stmt
, bump
);
7575 new_stmt
= gimple_build_assign
7576 (NULL_TREE
, BIT_AND_EXPR
, ptr
,
7579 -(HOST_WIDE_INT
)TYPE_ALIGN_UNIT (vectype
)));
7580 ptr
= copy_ssa_name (ptr
, new_stmt
);
7581 gimple_assign_set_lhs (new_stmt
, ptr
);
7582 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7584 = build2 (MEM_REF
, vectype
, ptr
,
7585 build_int_cst (ref_type
, 0));
7588 case dr_explicit_realign_optimized
:
7589 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
7590 new_temp
= copy_ssa_name (dataref_ptr
);
7592 new_temp
= make_ssa_name (TREE_TYPE (dataref_ptr
));
7593 new_stmt
= gimple_build_assign
7594 (new_temp
, BIT_AND_EXPR
, dataref_ptr
,
7596 (TREE_TYPE (dataref_ptr
),
7597 -(HOST_WIDE_INT
)TYPE_ALIGN_UNIT (vectype
)));
7598 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7600 = build2 (MEM_REF
, vectype
, new_temp
,
7601 build_int_cst (ref_type
, 0));
7606 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
7607 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
7608 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7609 gimple_assign_set_lhs (new_stmt
, new_temp
);
7610 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7612 /* 3. Handle explicit realignment if necessary/supported.
7614 vec_dest = realign_load (msq, lsq, realignment_token) */
7615 if (alignment_support_scheme
== dr_explicit_realign_optimized
7616 || alignment_support_scheme
== dr_explicit_realign
)
7618 lsq
= gimple_assign_lhs (new_stmt
);
7619 if (!realignment_token
)
7620 realignment_token
= dataref_ptr
;
7621 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
7622 new_stmt
= gimple_build_assign (vec_dest
, REALIGN_LOAD_EXPR
,
7623 msq
, lsq
, realignment_token
);
7624 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7625 gimple_assign_set_lhs (new_stmt
, new_temp
);
7626 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7628 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
7631 if (i
== vec_num
- 1 && j
== ncopies
- 1)
7632 add_phi_arg (phi
, lsq
,
7633 loop_latch_edge (containing_loop
),
7639 /* 4. Handle invariant-load. */
7640 if (inv_p
&& !bb_vinfo
)
7642 gcc_assert (!grouped_load
);
	  /* If we have versioned for aliasing or the loop doesn't
	     have any data dependencies that would preclude this,
	     then we are sure this is a loop invariant load and
	     thus we can insert it on the preheader edge.  */
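	  /* Illustrative example (assumed): for an invariant load such as
	     x = a[7] inside the loop, hoist_defs_of_uses first moves the
	     defining statements of the load's SSA uses to the preheader,
	     then the scalar load itself is emitted on the preheader edge,
	     and the splat of the loaded value into a vector is created
	     there as well, so nothing remains in the loop body.  */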
7647 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo
)
7648 && !nested_in_vect_loop
7649 && hoist_defs_of_uses (stmt
, loop
))
7651 if (dump_enabled_p ())
7653 dump_printf_loc (MSG_NOTE
, vect_location
,
7654 "hoisting out of the vectorized "
7656 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
7658 tree tem
= copy_ssa_name (scalar_dest
);
7659 gsi_insert_on_edge_immediate
7660 (loop_preheader_edge (loop
),
7661 gimple_build_assign (tem
,
7663 (gimple_assign_rhs1 (stmt
))));
7664 new_temp
= vect_init_vector (stmt
, tem
, vectype
, NULL
);
7665 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7666 set_vinfo_for_stmt (new_stmt
,
7667 new_stmt_vec_info (new_stmt
, vinfo
));
7671 gimple_stmt_iterator gsi2
= *gsi
;
7673 new_temp
= vect_init_vector (stmt
, scalar_dest
,
7675 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7679 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
7681 tree perm_mask
= perm_mask_for_reverse (vectype
);
7682 new_temp
= permute_vec_elements (new_temp
, new_temp
,
7683 perm_mask
, stmt
, gsi
);
7684 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7687 /* Collect vector loads and later create their permutation in
7688 vect_transform_grouped_load (). */
7689 if (grouped_load
|| slp_perm
)
7690 dr_chain
.quick_push (new_temp
);
7692 /* Store vector loads in the corresponding SLP_NODE. */
7693 if (slp
&& !slp_perm
)
7694 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
	  /* With SLP permutation we load the gaps as well; without it
	     we need to skip the gaps after we manage to fully load
	     all elements.  group_gap_adj is GROUP_SIZE here.  */
	  group_elt += nunits;
7700 if (group_gap_adj
!= 0 && ! slp_perm
7701 && group_elt
== group_size
- group_gap_adj
)
7705 = wide_int_to_tree (sizetype
,
7706 wi::smul (TYPE_SIZE_UNIT (elem_type
),
7707 group_gap_adj
, &ovf
));
7708 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7713 /* Bump the vector pointer to account for a gap or for excess
7714 elements loaded for a permuted SLP load. */
7715 if (group_gap_adj
!= 0 && slp_perm
)
7719 = wide_int_to_tree (sizetype
,
7720 wi::smul (TYPE_SIZE_UNIT (elem_type
),
7721 group_gap_adj
, &ovf
));
7722 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7727 if (slp
&& !slp_perm
)
7733 if (!vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
7734 slp_node_instance
, false,
7737 dr_chain
.release ();
7745 if (memory_access_type
!= VMAT_LOAD_STORE_LANES
)
7746 vect_transform_grouped_load (stmt
, dr_chain
, group_size
, gsi
);
7747 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
7752 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7754 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7755 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7758 dr_chain
.release ();
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.
   *DTS - The def types for the arguments of the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */
7778 vect_is_simple_cond (tree cond
, vec_info
*vinfo
,
7779 tree
*comp_vectype
, enum vect_def_type
*dts
)
7782 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
7785 if (TREE_CODE (cond
) == SSA_NAME
7786 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond
)))
7788 gimple
*lhs_def_stmt
= SSA_NAME_DEF_STMT (cond
);
7789 if (!vect_is_simple_use (cond
, vinfo
, &lhs_def_stmt
,
7790 &dts
[0], comp_vectype
)
7792 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype
))
7797 if (!COMPARISON_CLASS_P (cond
))
7800 lhs
= TREE_OPERAND (cond
, 0);
7801 rhs
= TREE_OPERAND (cond
, 1);
7803 if (TREE_CODE (lhs
) == SSA_NAME
)
7805 gimple
*lhs_def_stmt
= SSA_NAME_DEF_STMT (lhs
);
7806 if (!vect_is_simple_use (lhs
, vinfo
, &lhs_def_stmt
, &dts
[0], &vectype1
))
7809 else if (TREE_CODE (lhs
) == INTEGER_CST
|| TREE_CODE (lhs
) == REAL_CST
7810 || TREE_CODE (lhs
) == FIXED_CST
)
7811 dts
[0] = vect_constant_def
;
7815 if (TREE_CODE (rhs
) == SSA_NAME
)
7817 gimple
*rhs_def_stmt
= SSA_NAME_DEF_STMT (rhs
);
7818 if (!vect_is_simple_use (rhs
, vinfo
, &rhs_def_stmt
, &dts
[1], &vectype2
))
7821 else if (TREE_CODE (rhs
) == INTEGER_CST
|| TREE_CODE (rhs
) == REAL_CST
7822 || TREE_CODE (rhs
) == FIXED_CST
)
7823 dts
[1] = vect_constant_def
;
7827 if (vectype1
&& vectype2
7828 && TYPE_VECTOR_SUBPARTS (vectype1
) != TYPE_VECTOR_SUBPARTS (vectype2
))
7831 *comp_vectype
= vectype1
? vectype1
: vectype2
;
/* vectorizable_condition.

   Check if STMT is a conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
   the else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
7849 vectorizable_condition (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
7850 gimple
**vec_stmt
, tree reduc_def
, int reduc_index
,
7853 tree scalar_dest
= NULL_TREE
;
7854 tree vec_dest
= NULL_TREE
;
7855 tree cond_expr
, cond_expr0
= NULL_TREE
, cond_expr1
= NULL_TREE
;
7856 tree then_clause
, else_clause
;
7857 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
7858 tree comp_vectype
= NULL_TREE
;
7859 tree vec_cond_lhs
= NULL_TREE
, vec_cond_rhs
= NULL_TREE
;
7860 tree vec_then_clause
= NULL_TREE
, vec_else_clause
= NULL_TREE
;
7863 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
7864 enum vect_def_type dts
[4]
7865 = {vect_unknown_def_type
, vect_unknown_def_type
,
7866 vect_unknown_def_type
, vect_unknown_def_type
};
7869 enum tree_code code
, cond_code
, bitop1
= NOP_EXPR
, bitop2
= NOP_EXPR
;
7870 stmt_vec_info prev_stmt_info
= NULL
;
7872 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
7873 vec
<tree
> vec_oprnds0
= vNULL
;
7874 vec
<tree
> vec_oprnds1
= vNULL
;
7875 vec
<tree
> vec_oprnds2
= vNULL
;
7876 vec
<tree
> vec_oprnds3
= vNULL
;
7878 bool masked
= false;
7880 if (reduc_index
&& STMT_SLP_TYPE (stmt_info
))
7883 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
) == TREE_CODE_REDUCTION
)
7885 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
7888 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
7889 && !(STMT_VINFO_DEF_TYPE (stmt_info
) == vect_nested_cycle
7893 /* FORNOW: not yet supported. */
7894 if (STMT_VINFO_LIVE_P (stmt_info
))
7896 if (dump_enabled_p ())
7897 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7898 "value used after loop.\n");
7903 /* Is vectorizable conditional operation? */
7904 if (!is_gimple_assign (stmt
))
7907 code
= gimple_assign_rhs_code (stmt
);
7909 if (code
!= COND_EXPR
)
7912 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
7913 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
7914 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
7919 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
7921 gcc_assert (ncopies
>= 1);
7922 if (reduc_index
&& ncopies
> 1)
7923 return false; /* FORNOW */
7925 cond_expr
= gimple_assign_rhs1 (stmt
);
7926 then_clause
= gimple_assign_rhs2 (stmt
);
7927 else_clause
= gimple_assign_rhs3 (stmt
);
7929 if (!vect_is_simple_cond (cond_expr
, stmt_info
->vinfo
,
7930 &comp_vectype
, &dts
[0])
7935 if (!vect_is_simple_use (then_clause
, stmt_info
->vinfo
, &def_stmt
, &dts
[2],
7938 if (!vect_is_simple_use (else_clause
, stmt_info
->vinfo
, &def_stmt
, &dts
[3],
7942 if (vectype1
&& !useless_type_conversion_p (vectype
, vectype1
))
7945 if (vectype2
&& !useless_type_conversion_p (vectype
, vectype2
))
7948 masked
= !COMPARISON_CLASS_P (cond_expr
);
7949 vec_cmp_type
= build_same_sized_truth_vector_type (comp_vectype
);
7951 if (vec_cmp_type
== NULL_TREE
)
7954 cond_code
= TREE_CODE (cond_expr
);
7957 cond_expr0
= TREE_OPERAND (cond_expr
, 0);
7958 cond_expr1
= TREE_OPERAND (cond_expr
, 1);
7961 if (!masked
&& VECTOR_BOOLEAN_TYPE_P (comp_vectype
))
	  /* Boolean values may have another representation in vectors
	     and therefore we prefer bit operations over comparison for
	     them (which also works for scalar masks).  We store opcodes
	     to use in bitop1 and bitop2.  Statement is vectorized as
	     BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
	     depending on bitop1 and bitop2 arity.  */
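	  /* For example (illustrative): with single-bit boolean elements
	     a > b is lowered to a & ~b (bitop1 == BIT_NOT_EXPR applied to b,
	     bitop2 == BIT_AND_EXPR), a >= b to a | ~b and a != b to a ^ b;
	     a == b uses a ^ b as well, with the then and else operands of the
	     VEC_COND_EXPR swapped instead of negating the mask, and LT/LE
	     reuse the GT/GE lowerings by swapping the comparison operands.  */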
7972 bitop1
= BIT_NOT_EXPR
;
7973 bitop2
= BIT_AND_EXPR
;
7976 bitop1
= BIT_NOT_EXPR
;
7977 bitop2
= BIT_IOR_EXPR
;
7980 bitop1
= BIT_NOT_EXPR
;
7981 bitop2
= BIT_AND_EXPR
;
7982 std::swap (cond_expr0
, cond_expr1
);
7985 bitop1
= BIT_NOT_EXPR
;
7986 bitop2
= BIT_IOR_EXPR
;
7987 std::swap (cond_expr0
, cond_expr1
);
7990 bitop1
= BIT_XOR_EXPR
;
7993 bitop1
= BIT_XOR_EXPR
;
7994 bitop2
= BIT_NOT_EXPR
;
7999 cond_code
= SSA_NAME
;
8004 STMT_VINFO_TYPE (stmt_info
) = condition_vec_info_type
;
8005 if (bitop1
!= NOP_EXPR
)
8007 machine_mode mode
= TYPE_MODE (comp_vectype
);
8010 optab
= optab_for_tree_code (bitop1
, comp_vectype
, optab_default
);
8011 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
8014 if (bitop2
!= NOP_EXPR
)
8016 optab
= optab_for_tree_code (bitop2
, comp_vectype
,
8018 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
8022 if (expand_vec_cond_expr_p (vectype
, comp_vectype
,
8025 vect_model_simple_cost (stmt_info
, ncopies
, dts
, ndts
, NULL
, NULL
);
8035 vec_oprnds0
.create (1);
8036 vec_oprnds1
.create (1);
8037 vec_oprnds2
.create (1);
8038 vec_oprnds3
.create (1);
8042 scalar_dest
= gimple_assign_lhs (stmt
);
8043 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
8045 /* Handle cond expr. */
8046 for (j
= 0; j
< ncopies
; j
++)
8048 gassign
*new_stmt
= NULL
;
8053 auto_vec
<tree
, 4> ops
;
8054 auto_vec
<vec
<tree
>, 4> vec_defs
;
8057 ops
.safe_push (cond_expr
);
8060 ops
.safe_push (cond_expr0
);
8061 ops
.safe_push (cond_expr1
);
8063 ops
.safe_push (then_clause
);
8064 ops
.safe_push (else_clause
);
8065 vect_get_slp_defs (ops
, slp_node
, &vec_defs
);
8066 vec_oprnds3
= vec_defs
.pop ();
8067 vec_oprnds2
= vec_defs
.pop ();
8069 vec_oprnds1
= vec_defs
.pop ();
8070 vec_oprnds0
= vec_defs
.pop ();
8078 = vect_get_vec_def_for_operand (cond_expr
, stmt
,
8080 vect_is_simple_use (cond_expr
, stmt_info
->vinfo
,
8086 = vect_get_vec_def_for_operand (cond_expr0
,
8087 stmt
, comp_vectype
);
8088 vect_is_simple_use (cond_expr0
, loop_vinfo
, >emp
, &dts
[0]);
8091 = vect_get_vec_def_for_operand (cond_expr1
,
8092 stmt
, comp_vectype
);
8093 vect_is_simple_use (cond_expr1
, loop_vinfo
, >emp
, &dts
[1]);
8095 if (reduc_index
== 1)
8096 vec_then_clause
= reduc_def
;
8099 vec_then_clause
= vect_get_vec_def_for_operand (then_clause
,
8101 vect_is_simple_use (then_clause
, loop_vinfo
,
8104 if (reduc_index
== 2)
8105 vec_else_clause
= reduc_def
;
8108 vec_else_clause
= vect_get_vec_def_for_operand (else_clause
,
8110 vect_is_simple_use (else_clause
, loop_vinfo
, >emp
, &dts
[3]);
8117 = vect_get_vec_def_for_stmt_copy (dts
[0],
8118 vec_oprnds0
.pop ());
8121 = vect_get_vec_def_for_stmt_copy (dts
[1],
8122 vec_oprnds1
.pop ());
8124 vec_then_clause
= vect_get_vec_def_for_stmt_copy (dts
[2],
8125 vec_oprnds2
.pop ());
8126 vec_else_clause
= vect_get_vec_def_for_stmt_copy (dts
[3],
8127 vec_oprnds3
.pop ());
8132 vec_oprnds0
.quick_push (vec_cond_lhs
);
8134 vec_oprnds1
.quick_push (vec_cond_rhs
);
8135 vec_oprnds2
.quick_push (vec_then_clause
);
8136 vec_oprnds3
.quick_push (vec_else_clause
);
8139 /* Arguments are ready. Create the new vector stmt. */
8140 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_cond_lhs
)
8142 vec_then_clause
= vec_oprnds2
[i
];
8143 vec_else_clause
= vec_oprnds3
[i
];
8146 vec_compare
= vec_cond_lhs
;
8149 vec_cond_rhs
= vec_oprnds1
[i
];
8150 if (bitop1
== NOP_EXPR
)
8151 vec_compare
= build2 (cond_code
, vec_cmp_type
,
8152 vec_cond_lhs
, vec_cond_rhs
);
8155 new_temp
= make_ssa_name (vec_cmp_type
);
8156 if (bitop1
== BIT_NOT_EXPR
)
8157 new_stmt
= gimple_build_assign (new_temp
, bitop1
,
8161 = gimple_build_assign (new_temp
, bitop1
, vec_cond_lhs
,
8163 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8164 if (bitop2
== NOP_EXPR
)
8165 vec_compare
= new_temp
;
8166 else if (bitop2
== BIT_NOT_EXPR
)
8168 /* Instead of doing ~x ? y : z do x ? z : y. */
8169 vec_compare
= new_temp
;
8170 std::swap (vec_then_clause
, vec_else_clause
);
8174 vec_compare
= make_ssa_name (vec_cmp_type
);
8176 = gimple_build_assign (vec_compare
, bitop2
,
8177 vec_cond_lhs
, new_temp
);
8178 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8182 new_temp
= make_ssa_name (vec_dest
);
8183 new_stmt
= gimple_build_assign (new_temp
, VEC_COND_EXPR
,
8184 vec_compare
, vec_then_clause
,
8186 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8188 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
8195 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
8197 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
8199 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
8202 vec_oprnds0
.release ();
8203 vec_oprnds1
.release ();
8204 vec_oprnds2
.release ();
8205 vec_oprnds3
.release ();
/* vectorizable_comparison.

   Check if STMT is a comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, tree reduc_def,
  tree lhs, rhs1, rhs2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
  stmt_vec_info prev_stmt_info = NULL;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)

  if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle

  if (STMT_VINFO_LIVE_P (stmt_info))
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "value used after loop.\n");

  if (!is_gimple_assign (stmt))

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
			   &dts[0], &vectype1))

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
			   &dts[1], &vectype2))

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
      vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
      if (TYPE_VECTOR_SUBPARTS (vectype) != nunits)
  else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))

  /* Can't compare mask and non-mask types.  */
  if (vectype1 && vectype2
      && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))

  /* Boolean values may have another representation in vectors
     and therefore we prefer bit operations over comparison for
     them (which also works for scalar masks).  We store opcodes
     to use in bitop1 and bitop2.  Statement is vectorized as
       BITOP2 (rhs1 BITOP1 rhs2) or
       rhs1 BITOP2 (BITOP1 rhs2)
     depending on bitop1 and bitop2 arity.  */
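  /* Concretely, with the bitop1/bitop2 choices made just below, a comparison
     of boolean vector operands is emitted as:
       a >  b  ->  a & ~b
       a >= b  ->  a | ~b
       a <  b  ->  b & ~a   (operands swapped, then handled like >)
       a <= b  ->  b | ~a   (operands swapped, then handled like >=)
       a != b  ->  a ^ b
       a == b  ->  ~(a ^ b)  */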
  if (VECTOR_BOOLEAN_TYPE_P (vectype))
      if (code == GT_EXPR)
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
      else if (code == GE_EXPR)
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
      else if (code == LT_EXPR)
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	  std::swap (rhs1, rhs2);
	  std::swap (dts[0], dts[1]);
      else if (code == LE_EXPR)
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	  std::swap (rhs1, rhs2);
	  std::swap (dts[0], dts[1]);
	  bitop1 = BIT_XOR_EXPR;
	  if (code == EQ_EXPR)
	    bitop2 = BIT_NOT_EXPR;

      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
			      dts, ndts, NULL, NULL);
      if (bitop1 == NOP_EXPR)
	return expand_vec_cmp_expr_p (vectype, mask_type, code);

	  machine_mode mode = TYPE_MODE (vectype);

	  optab = optab_for_tree_code (bitop1, vectype, optab_default);
	  if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)

	  if (bitop2 != NOP_EXPR)
	      optab = optab_for_tree_code (bitop2, vectype, optab_default);
	      if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)

      vec_oprnds0.create (1);
      vec_oprnds1.create (1);

  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);

  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
      gassign *new_stmt = NULL;
	      auto_vec<tree, 2> ops;
	      auto_vec<vec<tree>, 2> vec_defs;

	      ops.safe_push (rhs1);
	      ops.safe_push (rhs2);
	      vect_get_slp_defs (ops, slp_node, &vec_defs);
	      vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();

	      vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
	      vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);

	  vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
						     vec_oprnds0.pop ());
	  vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
						     vec_oprnds1.pop ());

	  vec_oprnds0.quick_push (vec_rhs1);
	  vec_oprnds1.quick_push (vec_rhs2);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
	  vec_rhs2 = vec_oprnds1[i];

	  new_temp = make_ssa_name (mask);
	  if (bitop1 == NOP_EXPR)
	      new_stmt = gimple_build_assign (new_temp, code,
					      vec_rhs1, vec_rhs2);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (bitop1 == BIT_NOT_EXPR)
		new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
		new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (bitop2 != NOP_EXPR)
		  tree res = make_ssa_name (mask);
		  if (bitop2 == BIT_NOT_EXPR)
		    new_stmt = gimple_build_assign (res, bitop2, new_temp);
		    new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);

	  STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);

  vec_oprnds0.release ();
  vec_oprnds1.release ();
/* Make sure the statement is vectorizable.  */

vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
		   slp_instance node_instance)
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);

  if (gimple_has_volatile_ops (stmt))
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: stmt has volatile operands\n");

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal; there we don't analyze pattern stmts, because the pattern
     stmts will already be part of an SLP instance.  */
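  /* As an example of the "used only for array indexing or loop control"
     case above: in

       for (i = 0; i < n; i++)
	 a[i] = b[i] + c[i];

     the increment of I and the exit test I < N only control the loop and
     compute addresses, so they are not vectorized themselves.  */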
  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	  /* Analyze PATTERN_STMT instead of the original stmt.  */
	  stmt = pattern_stmt;
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  if (dump_enabled_p ())
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,

  if (is_pattern_stmt_p (stmt_info)
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
	  gimple *pattern_def_stmt = gsi_stmt (si);
	  if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
	      /* Analyze def stmt of STMT if it's a pattern stmt.  */
	      if (dump_enabled_p ())
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "==> examining pattern def statement: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);

	      if (!vect_analyze_stmt (pattern_def_stmt,
				      need_to_vectorize, node, node_instance))

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    case vect_internal_def:

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
		  && (relevance == vect_used_in_outer
		      || relevance == vect_used_in_outer_by_reduction
		      || relevance == vect_used_by_reduction
		      || relevance == vect_unused_in_scope
		      || relevance == vect_used_only_live));

    case vect_induction_def:
      gcc_assert (!bb_vinfo);

    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:

  if (STMT_VINFO_RELEVANT_P (stmt_info))
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
		  || (is_gimple_call (stmt)
		      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;

  if (PURE_SLP_STMT (stmt_info) && !node)
      dump_printf_loc (MSG_NOTE, vect_location,
		       "handled only by SLP analysis\n");

      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	  || vectorizable_conversion (stmt, NULL, NULL, node)
	  || vectorizable_shift (stmt, NULL, NULL, node)
	  || vectorizable_operation (stmt, NULL, NULL, node)
	  || vectorizable_assignment (stmt, NULL, NULL, node)
	  || vectorizable_load (stmt, NULL, NULL, node, NULL)
	  || vectorizable_call (stmt, NULL, NULL, node)
	  || vectorizable_store (stmt, NULL, NULL, node)
	  || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
	  || vectorizable_induction (stmt, NULL, NULL, node)
	  || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	  || vectorizable_comparison (stmt, NULL, NULL, NULL, node));

      ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	    || vectorizable_conversion (stmt, NULL, NULL, node)
	    || vectorizable_shift (stmt, NULL, NULL, node)
	    || vectorizable_operation (stmt, NULL, NULL, node)
	    || vectorizable_assignment (stmt, NULL, NULL, node)
	    || vectorizable_load (stmt, NULL, NULL, node, NULL)
	    || vectorizable_call (stmt, NULL, NULL, node)
	    || vectorizable_store (stmt, NULL, NULL, node)
	    || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	    || vectorizable_comparison (stmt, NULL, NULL, NULL, node));

      if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: relevant stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL, -1, NULL);

      if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: live stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at GSI.  */
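/* The switch below dispatches on STMT_VINFO_TYPE, which was recorded during
   analysis, and invokes the matching vectorizable_* routine in "transform"
   mode (with a non-null VEC_STMT) so that it emits the vector statements.  */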
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
		     bool *grouped_store, slp_tree slp_node,
		     slp_instance slp_node_instance)
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);

    case induc_vec_info_type:
      done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their stmt_vec_info shouldn't be freed
	     meanwhile.  */
	  *grouped_store = true;
	  if (STMT_VINFO_VEC_STMT (stmt_info))

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (gimple_call_internal_p (stmt, IFN_MASK_STORE))

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,

      if (!STMT_VINFO_LIVE_P (stmt_info))
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "stmt not supported.\n");

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
    gcc_assert (!vec_stmt
		&& STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
				  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info) ==
	     vect_used_in_outer_by_reduction))
      struct loop *innerloop = LOOP_VINFO_LOOP (
				 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
	scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	  if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	      exit_phi = USE_STMT (use_p);
	      STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
      if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
	FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
	    stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
	    if (STMT_VINFO_LIVE_P (slp_stmt_info))
		done = vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
  else if (STMT_VINFO_LIVE_P (stmt_info)
	   && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
      done = vectorizable_live_operation (stmt, gsi, slp_node, -1, &vec_stmt);

    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

vect_remove_stores (gimple *first_stmt)
  gimple *next = first_stmt;
  gimple_stmt_iterator next_si;

      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
	next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  STMT_VINFO_NUM_SLP_USES (res) = 0;

  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;
/* Create a hash table for stmt_vec_info. */

init_stmt_vec_info_vec (void)
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);


/* Free hash table for stmt_vec_info. */

free_stmt_vec_info_vec (void)
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
/* Free stmt vectorization related info.  */

free_stmt_vec_info (gimple *stmt)
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
      stmt_vec_info patt_info
	= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
	  gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
	  gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
	  gimple_set_bb (patt_stmt, NULL);
	  tree lhs = gimple_get_lhs (patt_stmt);
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    release_ssa_name (lhs);
	      gimple_stmt_iterator si;
	      for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
		  gimple *seq_stmt = gsi_stmt (si);
		  gimple_set_bb (seq_stmt, NULL);
		  lhs = gimple_get_lhs (seq_stmt);
		  if (lhs && TREE_CODE (lhs) == SSA_NAME)
		    release_ssa_name (lhs);
		  free_stmt_vec_info (seq_stmt);
	  free_stmt_vec_info (patt_stmt);

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
  tree orig_scalar_type = scalar_type;
  scalar_mode inner_mode;
  machine_mode simd_mode;

  if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
      && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))

  unsigned int nbytes = GET_MODE_SIZE (inner_mode);

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
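  /* For instance, a Boolean or enumeral SCALAR_TYPE whose mode is QImode
     but whose TYPE_PRECISION is 1 is replaced here by an 8-bit INTEGER_TYPE
     of matching signedness, so the vector elements have full mode
     precision.  */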
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)

  /* If no size was supplied use the mode the target prefers.   Otherwise
     lookup a vector mode of the specified size.  */
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))

  /* Re-attach the address-space qualifier if we canonicalized the scalar
     type.  */
  if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
    return build_qualified_type
	     (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));


unsigned int current_vector_size;
/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

get_vectype_for_scalar_type (tree scalar_type)
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));


/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by the target.  */

get_mask_type_for_scalar_type (tree scalar_type)
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
				  current_vector_size);


/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

get_same_sized_vectype (tree scalar_type, tree vector_type)
  if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
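/* For instance, when vectorizing

     for (i = 0; i < n; i++)
       a[i] = x_1 * b[i] + 5;

   the constant 5 is classified as vect_constant_def, the loop invariant x_1
   (defined before the loop) as vect_external_def, and the value loaded from
   b[i] (defined by a stmt inside the loop) as vect_internal_def.  */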
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt)
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");

  if (CONSTANT_CLASS_P (operand))
      *dt = vect_constant_def;

  if (is_gimple_min_invariant (operand))
      *dt = vect_external_def;

  if (TREE_CODE (operand) != SSA_NAME)
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
      *dt = vect_external_def;

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);

  if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");

  if (*dt == vect_unknown_def_type)
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");

  switch (gimple_code (*def_stmt))

      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported defining stmt:\n");
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   use.  */

vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && !STMT_VINFO_RELEVANT (stmt_info)
	  && !STMT_VINFO_LIVE_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */
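/* For example, widening a V16QI vector to V8HI with a NOP conversion is done
   with the pair VEC_UNPACK_LO_EXPR / VEC_UNPACK_HI_EXPR (see the switch
   below); each of the two codes produces one V8HI vector holding half of the
   widened elements.  */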
supportable_widening_operation (enum tree_code code, gimple *stmt,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
    vect_loop = LOOP_VINFO_LOOP (loop_info);

    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow changing the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt)
	  && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt, vectype_out, vectype_in,
					     code1, code2, multi_step_cvt,

      /* Elements in a vector with vect_used_by_reduction property cannot
	 be reordered if the use chain with this property does not have the
	 same operation.  One such example is s += a * b, where elements
	 in a and b cannot be reordered.  Here we check if the vector defined
	 by STMT is only directly used in the reduction statement.  */
      tree lhs = gimple_assign_lhs (stmt);
      use_operand_p dummy;
      stmt_vec_info use_stmt_info = NULL;
      if (single_imm_use (lhs, &dummy, &use_stmt)
	  && (use_stmt_info = vinfo_for_stmt (use_stmt))
	  && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)

      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;

      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;

      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)

  if (code == FIX_TRUNC_EXPR)
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);

  if (!optab1 || !optab2)

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || (TYPE_VECTOR_SUBPARTS (vectype) / 2
		== TYPE_VECTOR_SUBPARTS (wide_vectype)));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	  = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2,
				     current_vector_size);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	  = lang_hooks.types.type_for_mode (intermediate_mode,
					    TYPE_UNSIGNED (prev_type));

      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
	  || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode2].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (optab3, intermediate_mode))
	      == CODE_FOR_nothing)
	  || ((icode2 = optab_handler (optab4, intermediate_mode))
	      == CODE_FOR_nothing))

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	  && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| (TYPE_VECTOR_SUBPARTS (intermediate_type) / 2
		    == TYPE_VECTOR_SUBPARTS (wide_vectype)));

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;

  interm_types->release ();
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */
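/* For example, narrowing V8HI results back to V16QI with a NOP conversion
   uses VEC_PACK_TRUNC_EXPR (see the switch below); each emitted statement
   packs two V8HI input vectors into one V16QI result, and a multi-step
   int->short->char conversion repeats this once per intermediate type.  */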
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 vec<tree> *interm_types)
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  tree intermediate_type, prev_type;
  machine_mode intermediate_mode, prev_mode;

  *multi_step_cvt = 0;

      c1 = VEC_PACK_TRUNC_EXPR;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;

      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
	 tree code and optabs used for computing the operation.  */

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || (TYPE_VECTOR_SUBPARTS (vectype) * 2
		== TYPE_VECTOR_SUBPARTS (narrow_vectype)));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  prev_type = vectype;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
      enum insn_code icode2;

	= lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
	= optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
	  && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
	  && insn_data[icode1].operand[0].mode
	     == insn_data[icode2].operand[0].mode)
	  optab1 = interm_optab;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	  = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
				     current_vector_size);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	= lang_hooks.types.type_for_mode (intermediate_mode, uns);
	= optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
	  || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (interm_optab, intermediate_mode))
	      == CODE_FOR_nothing))

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2
		    == TYPE_VECTOR_SUBPARTS (narrow_vectype)));

      prev_mode = intermediate_mode;
      prev_type = intermediate_type;
      optab1 = interm_optab;

  interm_types->release ();