/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}

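/* Illustrative sketch (added, not in the original source) of the two
   modes of record_stmt_cost; the variables below are hypothetical.  */
#if 0
  /* Deferred: the cost is queued in BODY_COST_VEC and replayed into the
     target model later; the return value is a preliminary estimate.  */
  unsigned estimate = record_stmt_cost (&body_cost_vec, ncopies,
					vector_stmt, stmt_info, 0, vect_body);
  /* Immediate: with no cost vector, add_stmt_cost informs the target
     model right away.  */
  unsigned cost = record_stmt_cost (NULL, 1, scalar_to_vec,
				    stmt_info, 0, vect_prologue);
#endif
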
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p,
		    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
	{
	  imm_use_iterator imm_iter;
	  use_operand_p use_p;
	  gimple *use_stmt;
	  tree lhs;
	  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

	  if (is_gimple_assign (stmt))
	    lhs = gimple_assign_lhs (stmt);
	  else
	    lhs = gimple_call_lhs (stmt);

	  /* This use is outside the pattern; if LHS has other uses that
	     are pattern uses, we should mark the stmt itself, and not the
	     pattern stmt.  */
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	      {
		if (is_gimple_debug (USE_STMT (use_p)))
		  continue;
		use_stmt = USE_STMT (use_p);

		if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
		  continue;

		if (vinfo_for_stmt (use_stmt)
		    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
		  {
		    found = true;
		    break;
		  }
	      }
	}

      if (!found)
	{
	  /* This is the last stmt in a sequence that was detected as a
	     pattern that can potentially be vectorized.  Don't mark the stmt
	     as relevant/live because it's not going to be vectorized.
	     Instead mark the pattern-stmt that replaces it.  */

	  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "last stmt in pattern. don't mark"
			     " relevant/live.\n");
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
	  save_relevant = STMT_VINFO_RELEVANT (stmt_info);
	  save_live_p = STMT_VINFO_LIVE_P (stmt_info);
	  stmt = pattern_stmt;
	}
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}

/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}

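/* Worked example (added for illustration).  In the scalar loop

     s = 0;
     for (i = 0; i < n; i++)
       {
	 a[i] = b[i] + 1;	// relevant: has a vdef (alters memory)
	 s += b[i];		// live: s is used outside the loop
       }
     ... = s;

   the store is marked vect_used_in_scope because of its vdef, and the
   reduction update is marked live because its def reaches the loop
   exit phi, matching the criteria listed above.  */
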
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's one of
     the forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}

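/* Worked example (added for illustration): for the statement a[i] = x,
   the use of 'i' only feeds the address computation, so the function
   returns false for it; the use of 'x' is the stored value itself, so
   the function returns true and the def of 'x' stays relevant.  */
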
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
		      is_pattern_stmt_p (stmt_vinfo));
  return true;
}

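/* Illustrative example (added, not in the original source) of case 3a:

	outer-loop:
	  d = ...		<-- def_stmt, in the outer loop
	  inner-loop:
	    ... = ... d ...	<-- stmt, in the inner loop

   If STMT was so far vect_unused_in_scope, the def is promoted to
   vect_used_in_scope only when STMT takes part in a nested cycle;
   otherwise the outer-loop def stays irrelevant for the inner loop.  */
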
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p, false);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	  live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	  relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the liveness/relevance as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
	{
	  case vect_reduction_def:
	    switch (tmp_relevant)
	      {
	        case vect_unused_in_scope:
	          relevant = vect_used_by_reduction;
	          break;

	        case vect_used_by_reduction:
	          if (gimple_code (stmt) == GIMPLE_PHI)
	            break;
	          /* fall through */

	        default:
	          if (dump_enabled_p ())
	            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
	                             "unsupported use of reduction.\n");
	          return false;
	      }

	    live_p = false;
	    break;

	  case vect_nested_cycle:
	    if (tmp_relevant != vect_unused_in_scope
	        && tmp_relevant != vect_used_in_outer_by_reduction
	        && tmp_relevant != vect_used_in_outer)
	      {
	        if (dump_enabled_p ())
	          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
	                           "unsupported use of nested cycle.\n");

	        return false;
	      }

	    live_p = false;
	    break;

	  case vect_double_reduction_def:
	    if (tmp_relevant != vect_unused_in_scope
	        && tmp_relevant != vect_used_by_reduction)
	      {
	        if (dump_enabled_p ())
	          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
	                           "unsupported use of double reduction.\n");

	        return false;
	      }

	    live_p = false;
	    break;

	  default:
	    break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    live_p, relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       live_p, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

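/* Worked example (added for illustration): for a two-step promotion
   (PWR == 1) the loop above charges vect_pow2 (1) + vect_pow2 (2)
   = 2 + 4 vec_promote_demote operations, whereas the corresponding
   demotion charges vect_pow2 (0) + vect_pow2 (1) = 1 + 2; each extra
   step doubles the number of instructions required.  */
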
/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}

/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple *first_stmt;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
	{
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  group_size = 1;
	}
      else
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  group_size = vect_cost_group_size (stmt_info);
	}

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses high and low interleave or shuffle operations for each
	 needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar stores plus extracting the elements.  */
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

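/* Worked example (added for illustration): interleaving a group of
   GROUP_SIZE == 4 stores with NCOPIES == 1 charges
   nstmts = 1 * ceil_log2 (4) * 4 = 8 vec_perm operations in addition
   to the stores themselves.  */
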
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple *first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

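/* Summary (added, not in the original source) of the per-copy body
   costs charged above for the supported schemes:

     dr_aligned:			1 vector_load
     dr_unaligned_supported:		1 unaligned_load
     dr_explicit_realign:		2 vector_load + 1 vec_perm
					(+ 1 vector_stmt if the target has
					builtin_mask_for_load)
     dr_explicit_realign_optimized:	1 vector_load + 1 vec_perm, plus
					one-off prologue statements.  */
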
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}

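/* A minimal usage sketch (added; V4SI_TYPE is a placeholder for some
   vector type).  Splatting the scalar constant 42 builds the vector
   constant {42, 42, 42, 42}; because GSI is NULL, the init stmt is
   placed on the loop preheader edge.  */
#if 0
  tree vec_cst
    = vect_init_vector (stmt, build_int_cst (integer_type_node, 42),
			v4si_type, NULL);
#endif
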
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  gimple *def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  enum vect_def_type dt;
  bool is_simple_use;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def_stmt)
	{
	  if (loc_printed)
	    dump_printf (MSG_NOTE, "  def_stmt =  ");
	  else
	    dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
	}
    }

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      {
	tree vector_type;

	if (vectype)
	  vector_type = vectype;
	else if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
		 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	  vector_type = build_same_sized_truth_vector_type (stmt_vectype);
	else
	  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

	gcc_assert (vector_type);
	return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      /* Code should use get_initial_def_for_reduction.  */
      gcc_unreachable ();

    /* operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}

/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}

/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}

/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}

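/* Illustrative note (added): when the new stmt is a store inserted
   right before AT_STMT, the code above splices it into the virtual SSA
   chain by giving it AT_STMT's vuse, creating a fresh vdef for it, and
   rewriting AT_STMT to use that vdef, so the SSA renamer need not
   run.  */
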
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
							vectype_in);
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);

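/* Usage sketch (added for illustration; V2DF_TYPE is a placeholder):
   ask the target whether the scalar call has a vectorized counterpart
   for the given vector types; NULL_TREE means it does not.  */
#if 0
  tree vfn = vectorizable_function (call, v2df_type, v2df_type);
  if (vfn != NULL_TREE)
    {
      /* The scalar call can be replaced by a call to VFN.  */
    }
#endif
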
1672 /* Function vectorizable_mask_load_store.
1674 Check if STMT performs a conditional load or store that can be vectorized.
1675 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1676 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1677 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1680 vectorizable_mask_load_store (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
1681 gimple
**vec_stmt
, slp_tree slp_node
)
1683 tree vec_dest
= NULL
;
1684 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1685 stmt_vec_info prev_stmt_info
;
1686 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1687 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1688 bool nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
1689 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
1690 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1695 tree dataref_ptr
= NULL_TREE
;
1697 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1701 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
1702 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
1703 int gather_scale
= 1;
1704 enum vect_def_type gather_dt
= vect_unknown_def_type
;
1708 enum vect_def_type dt
;
1710 if (slp_node
!= NULL
)
1713 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
1714 gcc_assert (ncopies
>= 1);
1716 is_store
= gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
;
1717 mask
= gimple_call_arg (stmt
, 2);
1719 if (TREE_CODE (TREE_TYPE (mask
)) != BOOLEAN_TYPE
)
1722 /* FORNOW. This restriction should be relaxed. */
1723 if (nested_in_vect_loop
&& ncopies
> 1)
1725 if (dump_enabled_p ())
1726 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1727 "multiple types in nested loop.");
1731 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
1734 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
1737 if (!STMT_VINFO_DATA_REF (stmt_info
))
1740 elem_type
= TREE_TYPE (vectype
);
1742 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1745 if (STMT_VINFO_STRIDED_P (stmt_info
))
1748 if (TREE_CODE (mask
) != SSA_NAME
)
1751 if (!vect_is_simple_use (mask
, loop_vinfo
, &def_stmt
, &dt
, &mask_vectype
))
1755 mask_vectype
= get_mask_type_for_scalar_type (TREE_TYPE (vectype
));
1760 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1763 gather_decl
= vect_check_gather_scatter (stmt
, loop_vinfo
, &gather_base
,
1764 &gather_off
, &gather_scale
);
1765 gcc_assert (gather_decl
);
1766 if (!vect_is_simple_use (gather_off
, loop_vinfo
, &def_stmt
, &gather_dt
,
1767 &gather_off_vectype
))
1769 if (dump_enabled_p ())
1770 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1771 "gather index use not simple.");
1775 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1777 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
1778 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
1780 if (dump_enabled_p ())
1781 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1782 "masked gather with integer mask not supported.");
1786 else if (tree_int_cst_compare (nested_in_vect_loop
1787 ? STMT_VINFO_DR_STEP (stmt_info
)
1788 : DR_STEP (dr
), size_zero_node
) <= 0)
1790 else if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
1791 || !can_vec_mask_load_store_p (TYPE_MODE (vectype
),
1792 TYPE_MODE (mask_vectype
),
1798 tree rhs
= gimple_call_arg (stmt
, 3);
1799 if (!vect_is_simple_use (rhs
, loop_vinfo
, &def_stmt
, &dt
))
1803 if (!vec_stmt
) /* transformation not required. */
1805 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
1807 vect_model_store_cost (stmt_info
, ncopies
, false, dt
,
1810 vect_model_load_cost (stmt_info
, ncopies
, false, NULL
, NULL
, NULL
);
1816 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1818 tree vec_oprnd0
= NULL_TREE
, op
;
1819 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1820 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
1821 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
1822 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
1823 tree mask_perm_mask
= NULL_TREE
;
1824 edge pe
= loop_preheader_edge (loop
);
1827 enum { NARROW
, NONE
, WIDEN
} modifier
;
1828 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
1830 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
1831 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1832 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1833 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1834 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1835 scaletype
= TREE_VALUE (arglist
);
1836 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
1837 && types_compatible_p (srctype
, masktype
));
1839 if (nunits
== gather_off_nunits
)
1841 else if (nunits
== gather_off_nunits
/ 2)
1843 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
1846 for (i
= 0; i
< gather_off_nunits
; ++i
)
1847 sel
[i
] = i
| nunits
;
1849 perm_mask
= vect_gen_perm_mask_checked (gather_off_vectype
, sel
);
1851 else if (nunits
== gather_off_nunits
* 2)
1853 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
1856 for (i
= 0; i
< nunits
; ++i
)
1857 sel
[i
] = i
< gather_off_nunits
1858 ? i
: i
+ nunits
- gather_off_nunits
;
1860 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
1862 for (i
= 0; i
< nunits
; ++i
)
1863 sel
[i
] = i
| gather_off_nunits
;
1864 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, sel
);
1869 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
1871 ptr
= fold_convert (ptrtype
, gather_base
);
1872 if (!is_gimple_min_invariant (ptr
))
1874 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
1875 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
1876 gcc_assert (!new_bb
);
1879 scale
= build_int_cst (scaletype
, gather_scale
);
1881 prev_stmt_info
= NULL
;
1882 for (j
= 0; j
< ncopies
; ++j
)
1884 if (modifier
== WIDEN
&& (j
& 1))
1885 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
1886 perm_mask
, stmt
, gsi
);
1889 = vect_get_vec_def_for_operand (gather_off
, stmt
);
1892 = vect_get_vec_def_for_stmt_copy (gather_dt
, vec_oprnd0
);
1894 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
1896 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
1897 == TYPE_VECTOR_SUBPARTS (idxtype
));
1898 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
1899 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
1901 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1902 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1906 if (mask_perm_mask
&& (j
& 1))
1907 mask_op
= permute_vec_elements (mask_op
, mask_op
,
1908 mask_perm_mask
, stmt
, gsi
);
1912 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
1915 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
1916 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
1920 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
1922 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
))
1923 == TYPE_VECTOR_SUBPARTS (masktype
));
1924 var
= vect_get_new_ssa_name (masktype
, vect_simple_var
);
1925 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
1927 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, mask_op
);
1928 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1934 = gimple_build_call (gather_decl
, 5, mask_op
, ptr
, op
, mask_op
,
1937 if (!useless_type_conversion_p (vectype
, rettype
))
1939 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
1940 == TYPE_VECTOR_SUBPARTS (rettype
));
1941 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
1942 gimple_call_set_lhs (new_stmt
, op
);
1943 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1944 var
= make_ssa_name (vec_dest
);
1945 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
1946 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1950 var
= make_ssa_name (vec_dest
, new_stmt
);
1951 gimple_call_set_lhs (new_stmt
, var
);
1954 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1956 if (modifier
== NARROW
)
1963 var
= permute_vec_elements (prev_res
, var
,
1964 perm_mask
, stmt
, gsi
);
1965 new_stmt
= SSA_NAME_DEF_STMT (var
);
1968 if (prev_stmt_info
== NULL
)
1969 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
1971 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
1972 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
1975 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
1977 if (STMT_VINFO_RELATED_STMT (stmt_info
))
1979 stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
1980 stmt_info
= vinfo_for_stmt (stmt
);
1982 tree lhs
= gimple_call_lhs (stmt
);
1983 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (TREE_TYPE (lhs
)));
1984 set_vinfo_for_stmt (new_stmt
, stmt_info
);
1985 set_vinfo_for_stmt (stmt
, NULL
);
1986 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
1987 gsi_replace (gsi
, new_stmt
, true);
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      /* We should have caught mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask, vec_rhs);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask);
	  gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
	{
	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	  stmt_info = vinfo_for_stmt (stmt);
	}
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
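/* Illustrative sketch, not part of the pass itself: a conditional loop
   such as

     for (i = 0; i < n; i++)
       if (cond[i])
	 a[i] = b[i];

   is if-converted into scalar MASK_LOAD/MASK_STORE internal calls, which
   the code above widens into IFN_MASK_LOAD/IFN_MASK_STORE on whole
   vectors, using the vectorized comparison result as the mask.  The
   exact GIMPLE produced depends on target support for masked moves.  */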
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is GS a vectorizable call?   */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments; we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (gimple_call_internal_p (stmt)
	  && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  gcc_assert (!gimple_vuse (stmt));

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_call ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree *v = XALLOCAVEC (tree, nunits_out);
	      int k;
	      for (k = 0; k < nunits_out; ++k)
		v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
	      tree cst = build_vector (vectype_out, v);
	      tree new_var
		= vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
	      gimple *init_stmt = gimple_build_assign (new_var, cst);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, new_var);
	    }
	  else
	    {
	      new_stmt = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, new_temp);
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
	 with vf - 1 rather than 0, that is the last iteration of the
	 vectorized loop.  */
      imm_use_iterator iter;
      use_operand_p use_p;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	{
	  basic_block use_bb = gimple_bb (use_stmt);
	  if (use_bb
	      && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
					       ncopies * nunits_out - 1));
	      update_stmt (use_stmt);
	    }
	}
    }

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
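/* Illustrative sketch, not part of the pass itself: given a loop like

     for (i = 0; i < n; i++)
       y[i] = sqrt (x[i]);

   vectorizable_call replaces each scalar call with a call to the
   target's vector builtin returned by vectorizable_function, emitting
   one builtin call per VF/nunits copy.  When the target provides no
   such builtin, the call is rejected as unvectorizable.  */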
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
  bool simd_lane_linear;
};

/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    if (!linear_step)
	      linear_step = 1;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (is_gimple_call (def_stmt)
	       && gimple_call_internal_p (def_stmt)
	       && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
    }
}
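/* Illustrative sketch, not part of the pass itself: the pattern
   recognized above typically comes from an OpenMP simd loop such as

     #pragma omp simd
     for (i = 0; i < n; i++)
       foo (&a[i]);

   where the address is computed as base + _simd_lane * step.  Such an
   address is linear within one simd lane even when the loop as a whole
   yields no usable scalar evolution.  */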
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp;
  gimple *def_stmt;
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<simd_call_arg_info> arginfo = vNULL;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.create (nargs);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
			       &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  arginfo.release ();
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }

  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen
	    > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
	  this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }

  if (bestn == NULL)
    {
      arginfo.release ();
      return false;
    }

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  {
	    arginfo.release ();
	    return false;
	  }
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    {
      arginfo.release ();
      return false;
    }

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if (bestn->simdclone->args[i].arg_type
	    == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      arginfo.release ();
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / TYPE_VECTOR_SUBPARTS (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (TYPE_VECTOR_SUBPARTS (atype)
		      < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
		    {
		      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
			   / TYPE_VECTOR_SUBPARTS (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  size_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (TYPE_VECTOR_SUBPARTS (atype)
			   / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  if (arginfo[i].simd_lane_linear)
		    {
		      vargs.safe_push (arginfo[i].op);
		      break;
		    }
		  tree phi_res = copy_ssa_name (op);
		  gphi *new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op);
		  new_stmt
		    = gimple_build_assign (phi_arg, code, phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt,
							 loop_vinfo));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op));
		  new_stmt = gimple_build_assign (new_temp, code,
						  arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}

      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (TYPE_VECTOR_SUBPARTS (vectype)
		   == TYPE_VECTOR_SUBPARTS (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
	    {
	      unsigned int k, l;
	      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t),
						 l * prec / BITS_PER_UNIT));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				size_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
	    {
	      unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
				/ TYPE_VECTOR_SUBPARTS (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber),
					   gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
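/* Illustrative sketch, not part of the pass itself: with a declaration
   such as

     #pragma omp declare simd simdlen(4) uniform(s) linear(i)
     int foo (int x, int s, int i);

   a call foo (a[i], s, i) inside a vectorized loop is replaced by a
   call to the best-matching clone selected above, passing a whole
   vector for X, the scalar S once, and just the base and step of I.
   The clone's mangled name follows the vector function ABI (for this
   signature something like _ZGVbN4vul_foo on x86; the exact name is
   target-dependent).  */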
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
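/* Illustrative sketch, not part of the pass itself: a widening multiply

     short a[N], b[N]; int c[N];  c[i] = a[i] * b[i];

   produces two such halves per input vector pair, e.g.
   VEC_WIDEN_MULT_LO_EXPR and VEC_WIDEN_MULT_HI_EXPR, each yielding a
   vector of ints covering half of the short elements.  */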
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
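/* Illustrative sketch, not part of the pass itself: for a two-step
   narrowing with VF/nunits requiring four input vectors, the recursion
   above collects the chain

     def0 = vect_get_vec_def_for_operand (oprnd, stmt)
     def1..def3 = vect_get_vec_def_for_stmt_copy of the previous def

   so VEC_OPRNDS ends up holding the four vector defs that the demotion
   statements below consume pairwise.  */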
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple *stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
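/* Illustrative sketch, not part of the pass itself: narrowing int to
   char goes through short as an intermediate type, e.g.

     V8SI x0, x1, x2, x3;
     V16HI y0 = VEC_PACK_TRUNC_EXPR <x0, x1>;
     V16HI y1 = VEC_PACK_TRUNC_EXPR <x2, x3>;
     V32QI z  = VEC_PACK_TRUNC_EXPR <y0, y1>;

   which is exactly the recursion above: each level halves the number
   of operand vectors.  */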
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple *stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> vec_dsts = vNULL;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  machine_mode rhs_mode;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?   */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && ((INTEGRAL_TYPE_P (lhs_type)
	   && (TYPE_PRECISION (lhs_type)
	       != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
	  || (INTEGRAL_TYPE_P (rhs_type)
	      && (TYPE_PRECISION (rhs_type)
		  != GET_MODE_PRECISION (TYPE_MODE (rhs_type))))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision unsupported."
			 "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	 OP1.  */
      if (CONSTANT_CLASS_P (op0))
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
      else
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);

      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "can't convert between boolean and non "
			   "boolean vectors ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in < nunits_out)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else
    modifier = WIDEN;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
					  &code1, &code2, &multi_step_cvt,
					  &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
      for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
	   rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
	   rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
	{
	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt, vectype_out,
						    cvt_type, &codecvt1,
						    &codecvt2, &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    break;
	}

      if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }

  if (!vec_stmt)		/* transformation not required.  */
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_conversion ===\n");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
	  vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
	}
      else if (modifier == NARROW)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      else
	{
	  STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      interm_types.release ();
      return true;
    }

  /** Transform.  **/
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform conversion. ncopies = %d.\n", ncopies);

  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
	op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
	op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
  vec_dsts.create (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
					  (cvt_type && modifier == WIDEN)
					  ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);

  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
	   interm_types.iterate (i, &intermediate_type); i--)
	{
	  vec_dest = vect_create_destination_var (scalar_dest,
						  intermediate_type);
	  vec_dsts.quick_push (vec_dest);
	}
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
					    modifier == WIDEN
					    ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
	{
	  vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
	  if (op_type == binary_op)
	    vec_oprnds1.create (1);
	}
      else if (modifier == NARROW)
	vec_oprnds0.create (
		   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
			       -1);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  new_stmt = gimple_build_assign (vec_dest, code1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		}

	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (j == 0)
	    {
	      if (slp_node)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    {
		      unsigned int k;

		      vec_oprnd1 = op1;
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);

		      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0,
					 NULL, slp_node, -1);
		    }
		  else
		    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
				       &vec_oprnds1, slp_node, -1);
		}
	      else
		{
		  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
		  vec_oprnds0.quick_push (vec_oprnd0);
		  if (op_type == binary_op)
		    {
		      if (code == WIDEN_LSHIFT_EXPR)
			vec_oprnd1 = op1;
		      else
			vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
		      vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	      vec_oprnds0.truncate (0);
	      vec_oprnds0.quick_push (vec_oprnd0);
	      if (op_type == binary_op)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    vec_oprnd1 = op1;
		  else
		    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
								 vec_oprnd1);
		  vec_oprnds1.truncate (0);
		  vec_oprnds1.quick_push (vec_oprnd1);
		}
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  for (i = multi_step_cvt; i >= 0; i--)
	    {
	      tree this_dest = vec_dsts[i];
	      enum tree_code c1 = code1, c2 = code2;
	      if (i == 0 && codecvt2 != ERROR_MARK)
		{
		  c1 = codecvt1;
		  c2 = codecvt2;
		}
	      vect_create_vectorized_promotion_stmts (&vec_oprnds0,
						      &vec_oprnds1,
						      stmt, this_dest, gsi,
						      c1, c2, decl1, decl2,
						      op_type);
	    }

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      if (cvt_type)
		{
		  if (codecvt1 == CALL_EXPR)
		    {
		      new_stmt = gimple_build_call (decl1, 1, vop0);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  else
		    {
		      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, codecvt1,
						      vop0);
		    }

		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		new_stmt = SSA_NAME_DEF_STMT (vop0);

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (slp_node)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    {
	      vec_oprnds0.truncate (0);
	      vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
					vect_pow2 (multi_step_cvt) - 1);
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  if (cvt_type)
	    FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	      {
		if (codecvt1 == CALL_EXPR)
		  {
		    new_stmt = gimple_build_call (decl1, 1, vop0);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_call_set_lhs (new_stmt, new_temp);
		  }
		else
		  {
		    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		    new_temp = make_ssa_name (vec_dest);
		    new_stmt = gimple_build_assign (new_temp, codecvt1,
						    vop0);
		  }

		vect_finish_stmt_generation (stmt, new_stmt, gsi);
		vec_oprnds0[i] = new_temp;
	      }

	  vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
						 stmt, vec_dsts, gsi,
						 slp_node, code1,
						 &prev_stmt_info);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_dsts.release ();
  interm_types.release ();

  return true;
}
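/* Illustrative sketch, not part of the pass itself: converting unsigned
   char to float is a multi-step WIDEN case: unpack V16QI to V8HI, unpack
   again to V4SI, then apply FLOAT_EXPR to each V4SI yielding V4SF.  The
   intermediate integer vector types are the INTERM_TYPES collected
   during the analysis above.  */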
4088 /* Function vectorizable_assignment.
4090 Check if STMT performs an assignment (copy) that can be vectorized.
4091 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4092 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4093 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
	  || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
	  || (GET_MODE_SIZE (TYPE_MODE (vectype))
	      != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	   != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
	  || ((TYPE_PRECISION (TREE_TYPE (op))
	       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	    > TYPE_PRECISION (TREE_TYPE (op)))
	   && TYPE_UNSIGNED (TREE_TYPE (op))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "type conversion to/from bit-precision "
                         "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready. create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  if (CONVERT_EXPR_CODE_P (code)
	      || code == VIEW_CONVERT_EXPR)
	    vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
	  new_stmt = gimple_build_assign (vec_dest, vop);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
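
/* Illustrative sketch (not part of the vectorizer): a copy loop that
   vectorizable_assignment handles.  The unsigned->signed conversion
   below changes neither the bit pattern nor the number of elements,
   so it becomes a plain vector copy through a VIEW_CONVERT_EXPR:

     void
     copy (int *restrict d, const unsigned int *restrict s, int n)
     {
       for (int i = 0; i < n; i++)
	 d[i] = (int) s[i];
     }
*/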

/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
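
/* Illustrative sketch (not part of the vectorizer): the two shift
   shapes probed above.  The first loop shifts every element by the
   same invariant amount and can use a vector-by-scalar pattern
   (optab_scalar); the second uses a per-element amount and needs a
   vector-by-vector pattern (optab_vector):

     void
     shift_uniform (int *restrict a, int s, int n)
     {
       for (int i = 0; i < n; i++)
	 a[i] <<= s;		-- optab_scalar candidate
     }

     void
     shift_elementwise (int *restrict a, const int *restrict s, int n)
     {
       for (int i = 0; i < n; i++)
	 a[i] <<= s[i];		-- optab_vector candidate
     }
*/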

/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
                    gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
	|| code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
      != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
	   || dt[1] == vect_external_def
	   || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
	 in loops if it is a constant or invariant, it is always
	 a scalar shift.  */
      if (slp_node)
	{
	  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
	  gimple *slpstmt;

	  FOR_EACH_VEC_ELT (stmts, k, slpstmt)
	    if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
	      scalar_shift_arg = false;
	}
    }
  else
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
	op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
	  || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unusable type for last operand in"
                             " vector/vector shift/rotate.\n");
	  return false;
	}
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
          && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "vector/scalar shift/rotate found.\n");
        }
      else
        {
          optab = optab_for_tree_code (code, vectype, optab_vector);
          if (optab
	      && (optab_handler (optab, TYPE_MODE (vectype))
		  != CODE_FOR_nothing))
            {
	      scalar_shift_arg = false;

              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vector/vector shift/rotate found.\n");

              /* Unlike the other binary operators, shifts/rotates have
                 the rhs being int, instead of the same type as the lhs,
                 so make sure the scalar is the right type if we are
		 dealing with vectors of long long/long/short/char.  */
              if (dt[1] == vect_constant_def)
                op1 = fold_convert (TREE_TYPE (vectype), op1);
	      else if (!useless_type_conversion_p (TREE_TYPE (vectype),
						   TREE_TYPE (op1)))
		{
		  if (slp_node
		      && TYPE_MODE (TREE_TYPE (vectype))
			 != TYPE_MODE (TREE_TYPE (op1)))
		    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                         vect_location,
                                         "unusable type for last operand in"
                                         " vector/vector shift/rotate.\n");
		      return false;
		    }
		  if (vec_stmt && !slp_node)
		    {
		      op1 = fold_convert (TREE_TYPE (vectype), op1);
		      op1 = vect_init_vector (stmt, op1,
					      TREE_TYPE (vectype), NULL);
		    }
		}
            }
        }
    }

  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (vf < vect_min_worthwhile_factor (code)
              && !vec_stmt))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (scalar_shift_arg)
            {
              /* Vector shl and shr insn patterns can be defined with scalar
                 operand 2 (shift operand).  In this case, use constant or loop
                 invariant op1 directly, without extending it to vector mode
                 first.  */
              optab_op2_mode = insn_data[icode].operand[2].mode;
              if (!VECTOR_MODE_P (optab_op2_mode))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "operand 1 using scalar mode.\n");
                  vec_oprnd1 = op1;
                  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
                  vec_oprnds1.quick_push (vec_oprnd1);
                  if (slp_node)
                    {
                      /* Store vec_oprnd1 for every vector stmt to be created
                         for SLP_NODE.  We check during the analysis that all
                         the shift arguments are the same.
                         TODO: Allow different constants for different vector
                         stmts generated for an SLP instance.  */
                      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                        vec_oprnds1.quick_push (vec_oprnd1);
                    }
                }
            }

          /* vec_oprnd1 is available if operand 1 should be of a scalar-type
             (a special case for certain kind of vector shifts); otherwise,
             operand 1 should be of a vector type (the usual case).  */
          if (vec_oprnd1)
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node, -1);
          else
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
        }
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = vec_oprnds1[i];
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
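
/* Illustrative sketch (not part of the vectorizer): shifts are the one
   binary operation whose rhs type may differ from the lhs type, e.g.:

     void
     shift_ll (long long *restrict a, int s, int n)
     {
       for (int i = 0; i < n; i++)
	 a[i] <<= s;
     }

   Here op1 has type int while the vector elements are long long; the
   transform above fold_converts the invariant amount to the element
   type before building the vector shift.  */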

/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  bool target_support_p;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "num. args = %d (not unary/binary/ternary op).\n",
                         op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && (TYPE_PRECISION (TREE_TYPE (scalar_dest))
	  != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    {
      /* For boolean type we cannot determine vectype by
	 invariant value (don't know whether it is a vector
	 of booleans or vector of integers).  We use output
	 vectype because operations on boolean don't change
	 type.  */
      if (TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE)
	{
	  if (TREE_CODE (TREE_TYPE (scalar_dest)) != BOOLEAN_TYPE)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not supported operation on bool value.\n");
	      return false;
	    }
	  vectype = vectype_out;
	}
      else
	vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
    }
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             TREE_TYPE (op0));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
	  return false;
	}
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
	  return false;
	}
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
	{
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab.\n");
	  return false;
	}
      target_support_p = (optab_handler (optab, vec_mode)
			  != CODE_FOR_nothing);
    }

  if (!target_support_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
	  || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
        return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && vf < vect_min_worthwhile_factor (code))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
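
  /* A minimal sketch (illustrative only, under the macros used above)
     of how a later copy finds its operand: the first vector def comes
     from STMT_VINFO_VEC_STMT of the scalar def stmt, and copy K follows
     K RELATED_STMT links:

       gimple *vs = STMT_VINFO_VEC_STMT (vinfo_for_stmt (scalar_def));
       for (int k = 0; k < copy_index; k++)
	 vs = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (vs));

     which is the walk vect_get_vec_def_for_stmt_copy performs one step
     at a time.  */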
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (op_type == binary_op || op_type == ternary_op)
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node, -1);
	  else
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  if (op_type == ternary_op)
	    {
	      vec_oprnds2.create (1);
	      vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
								    stmt));
	    }
	}
      else
	{
	  vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
	  if (op_type == ternary_op)
	    {
	      tree vec_oprnd = vec_oprnds2.pop ();
	      vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
								      vec_oprnd));
	    }
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
	  vop1 = ((op_type == binary_op || op_type == ternary_op)
		  ? vec_oprnds1[i] : NULL_TREE);
	  vop2 = ((op_type == ternary_op)
		  ? vec_oprnds2[i] : NULL_TREE);
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}

/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      if (decl_in_symtab_p (base_decl))
	symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
      else
	{
	  DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
	  DECL_USER_ALIGN (base_decl) = 1;
	}
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
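
/* Illustrative sketch (not part of the vectorizer): the effect of
   ensure_base_align on a misaligned base declaration.  Given

     static double a[256];

   and V2DF accesses, the declaration's alignment is raised to
   TYPE_ALIGN (vectype) (16 bytes here), as if it had been written

     static double a[256] __attribute__ ((aligned (16)));

   so that the aligned vector load/store patterns can be used.  */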

/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}
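
/* Illustrative example (not from the source): for a 4-element vectype
   the loop above builds sel = {3, 2, 1, 0}, i.e. the VEC_PERM_EXPR
   mask that turns {a, b, c, d} into {d, c, b, a}.  It is used below
   when a negative-step access is implemented as a contiguous access
   followed by a reversal.  */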

/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  gimple *def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int j;
  gimple *next_stmt, *first_stmt = NULL;
  bool grouped_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  vec<tree> dr_chain = vNULL;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  bool negative = false;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree aggr_type;
  tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
  tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
  int scatter_scale = 1;
  enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
  enum vect_def_type scatter_src_dt = vect_unknown_def_type;
  gimple *new_stmt;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable store? */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.\n");
      return false;
    }

  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  if (!STMT_VINFO_STRIDED_P (stmt_info))
    {
      negative =
	tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
			      ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
			      size_zero_node) < 0;
      if (negative && ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types with negative step.\n");
	  return false;
	}
      if (negative)
	{
	  gcc_assert (!grouped_store);
	  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
	  if (alignment_support_scheme != dr_aligned
	      && alignment_support_scheme != dr_unaligned_supported)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step but alignment required.\n");
	      return false;
	    }
	  if (dt != vect_constant_def
	      && dt != vect_external_def
	      && !perm_mask_for_reverse (vectype))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step and reversing not supported.\n");
	      return false;
	    }
	}
    }

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      if (!slp
	  && !PURE_SLP_STMT (stmt_info)
	  && !STMT_VINFO_STRIDED_P (stmt_info))
	{
	  if (vect_store_lanes_supported (vectype, group_size))
	    store_lanes_p = true;
	  else if (!vect_grouped_store_supported (vectype, group_size))
	    return false;
	}

      if (STMT_VINFO_STRIDED_P (stmt_info)
	  && (slp || PURE_SLP_STMT (stmt_info))
	  && (group_size > nunits
	      || nunits % group_size != 0))
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "unhandled strided group store\n");
	  return false;
	}

      if (first_stmt == stmt)
	{
          /* STMT is the leader of the group. Check the operands of all the
             stmts of the group.  */
          next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
          while (next_stmt)
            {
	      gcc_assert (gimple_assign_single_p (next_stmt));
	      op = gimple_assign_rhs1 (next_stmt);
              if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "use not simple.\n");
                  return false;
                }
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
            }
        }
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
						&scatter_off, &scatter_scale);
      gcc_assert (scatter_decl);
      if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
			       &scatter_off_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "scatter index use not simple.");
	  return false;
	}
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
	vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
			       NULL, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  ensure_base_align (stmt_info, dr);

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);

      if (nunits == (unsigned int) scatter_off_nunits)
	modifier = NONE;
      else if (nunits == (unsigned int) scatter_off_nunits / 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
	  modifier = WIDEN;

	  for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
	    sel[i] = i | nunits;

	  perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
	  gcc_assert (perm_mask != NULL_TREE);
	}
      else if (nunits == (unsigned int) scatter_off_nunits * 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
	  modifier = NARROW;

	  for (i = 0; i < (unsigned int) nunits; ++i)
	    sel[i] = i | scatter_off_nunits;

	  perm_mask = vect_gen_perm_mask_checked (vectype, sel);
	  gcc_assert (perm_mask != NULL_TREE);
	  ncopies *= 2;
	}
      else
	gcc_unreachable ();

      rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);

      gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
			   && TREE_CODE (rettype) == VOID_TYPE);

      ptr = fold_convert (ptrtype, scatter_base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      /* Currently we support only unconditional scatter stores,
	 so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
      mask = vect_init_vector (stmt, mask, masktype, NULL);

      scale = build_int_cst (scaletype, scatter_scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (j == 0)
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
	      op = vec_oprnd0
		= vect_get_vec_def_for_operand (scatter_off, stmt);
	    }
	  else if (modifier != NONE && (j & 1))
	    {
	      if (modifier == WIDEN)
		{
		  src = vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
		  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
					     stmt, gsi);
		}
	      else if (modifier == NARROW)
		{
		  src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
					      stmt, gsi);
		  op = vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
		}
	      else
		gcc_unreachable ();
	    }
	  else
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
	      op = vec_oprnd0
		= vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
	    }

	  if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
			  == TYPE_VECTOR_SUBPARTS (srctype));
	      var = vect_get_new_ssa_name (srctype, vect_simple_var);
	      src = build1 (VIEW_CONVERT_EXPR, srctype, src);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      src = var;
	    }

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
			  == TYPE_VECTOR_SUBPARTS (idxtype));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  new_stmt
	    = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      return true;
    }

  if (grouped_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
	 reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
	  < GROUP_SIZE (vinfo_for_stmt (first_stmt))
	  && !slp)
	{
	  *vec_stmt = NULL;
	  return true;
	}

      if (slp)
        {
          grouped_store = false;
          /* VEC_NUM is the number of vect stmts to be created for this
             group.  */
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
	  op = gimple_assign_rhs1 (first_stmt);
        }
      else
        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
	vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform store. ncopies = %d\n", ncopies);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;
      tree vec_oprnd;
      unsigned int g;

      gcc_assert (!nested_in_vect_loop_p (loop, stmt));

      stride_base
	= fold_build_pointer_plus
	    (unshare_expr (DR_BASE_ADDRESS (first_dr)),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
			 convert_to_ptrofftype (DR_INIT(first_dr))));
      stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));

      /* For a store with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     array[i] = ...;

	 we generate a new induction variable and new stores from
	 the components of the (vectorized) rhs:

	   for (j = 0; ; j += VF*stride)
	     vectemp = ...;
	     tmp1 = vectemp[0];
	     array[j] = tmp1;
	     tmp2 = vectemp[1];
	     array[j + stride] = tmp2;
	     ...
         */

      unsigned nstores = nunits;
      tree ltype = elem_type;
      if (slp)
	{
	  nstores = nunits / group_size;
	  if (group_size < nunits)
	    ltype = build_vector_type (elem_type, group_size);
	  else
	    ltype = vectype;
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
	  ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  group_size = 1;
	}

      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
			    build_int_cst (TREE_TYPE (ivstep),
					   ncopies * nstores));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (stride_base, ivstep, NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
      next_stmt = first_stmt;
      for (g = 0; g < group_size; g++)
	{
	  running_off = offvar;
	  if (g)
	    {
	      tree size = TYPE_SIZE_UNIT (ltype);
	      tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
				      size);
	      tree newoff = copy_ssa_name (running_off, NULL);
	      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					  running_off, pos);
	      vect_finish_stmt_generation (stmt, incr, gsi);
	      running_off = newoff;
	    }
	  for (j = 0; j < ncopies; j++)
	    {
	      /* We've set op and dt above, from gimple_assign_rhs1(stmt),
		 and first_stmt == stmt.  */
	      if (j == 0)
		{
		  if (slp)
		    {
		      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
					 slp_node, -1);
		      vec_oprnd = vec_oprnds[0];
		    }
		  else
		    {
		      gcc_assert (gimple_assign_single_p (next_stmt));
		      op = gimple_assign_rhs1 (next_stmt);
		      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		    }
		}
	      else
		{
		  if (slp)
		    vec_oprnd = vec_oprnds[j];
		  else
		    {
		      vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
		      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
		    }
		}

	      for (i = 0; i < nstores; i++)
		{
		  tree newref, newoff;
		  gimple *incr, *assign;
		  tree size = TYPE_SIZE (ltype);
		  /* Extract the i'th component.  */
		  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
					  bitsize_int (i), size);
		  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
					   size, pos);

		  elem = force_gimple_operand_gsi (gsi, elem, true,
						   NULL_TREE, true,
						   GSI_SAME_STMT);

		  newref = build2 (MEM_REF, ltype,
				   running_off, alias_off);

		  /* And store it to *running_off.  */
		  assign = gimple_build_assign (newref, elem);
		  vect_finish_stmt_generation (stmt, assign, gsi);

		  newoff = copy_ssa_name (running_off, NULL);
		  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					      running_off, stride_step);
		  vect_finish_stmt_generation (stmt, incr, gsi);

		  running_off = newoff;
		  if (g == group_size - 1
		      && !slp)
		    {
		      if (j == 0 && i == 0)
			STMT_VINFO_VEC_STMT (stmt_info)
			  = *vec_stmt = assign;
		      else
			STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
		      prev_stmt_info = vinfo_for_stmt (assign);
		    }
		}
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	}
      return true;
    }

  dr_chain.create (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
	VS2: &base + vec_size*1 = vx0
	VS3: &base + vec_size*2 = vx1
	VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

	VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
	VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
	...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {

      if (j == 0)
	{
          if (slp)
            {
	      /* Get vectorized arguments for SLP_NODE.  */
              vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
                                 NULL, slp_node, -1);

              vec_oprnd = vec_oprnds[0];
            }
          else
            {
	      /* For interleaved stores we collect vectorized defs for all the
		 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
		 used as an input to vect_permute_store_chain(), and OPRNDS as
		 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

		 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
		 OPRNDS are of size 1.  */
	      next_stmt = first_stmt;
	      for (i = 0; i < group_size; i++)
		{
		  /* Since gaps are not supported for interleaved stores,
		     GROUP_SIZE is the exact number of stmts in the chain.
		     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
		     there is no interleaving, GROUP_SIZE is 1, and only one
		     iteration of the loop will be executed.  */
		  gcc_assert (next_stmt
			      && gimple_assign_single_p (next_stmt));
		  op = gimple_assign_rhs1 (next_stmt);

		  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		  dr_chain.quick_push (vec_oprnd);
		  oprnds.quick_push (vec_oprnd);
		  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
		}
	    }

	  /* We should have catched mismatched types earlier.  */
	  gcc_assert (useless_type_conversion_p (vectype,
						 TREE_TYPE (vec_oprnd)));
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr))
	      && integer_zerop (DR_INIT (first_dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (DR_REF (first_dr))))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
	      dataref_offset = build_int_cst (reference_alias_ptr_type
					      (DR_REF (first_dr)), 0);
	      inv_p = false;
	    }
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt, aggr_type,
					  simd_lane_access_p ? loop : NULL,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, &inv_p);
	  gcc_assert (bb_vinfo || !inv_p);
	}
      else
	{
	  /* For interleaved stores we created vectorized defs for all the
	     defs stored in OPRNDS in the previous iteration (previous copy).
	     DR_CHAIN is then used as an input to vect_permute_store_chain(),
	     and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
	     next copy.
	     If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
	     OPRNDS are of size 1.  */
	  for (i = 0; i < group_size; i++)
	    {
	      op = oprnds[i];
	      vect_is_simple_use (op, vinfo, &def_stmt, &dt);
	      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
	      dr_chain[i] = vec_oprnd;
	      oprnds[i] = vec_oprnd;
	    }
	  if (dataref_offset)
	    dataref_offset
	      = int_const_binop (PLUS_EXPR, dataref_offset,
				 TYPE_SIZE_UNIT (aggr_type));
	  else
	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					   TYPE_SIZE_UNIT (aggr_type));
	}

      if (store_lanes_p)
	{
	  tree vec_array;

	  /* Combine all the vectors into an array.  */
	  vec_array = create_vector_array (vectype, vec_num);
	  for (i = 0; i < vec_num; i++)
	    {
	      vec_oprnd = dr_chain[i];
	      write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
	    }

	  /* Emit:
	       MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
	  data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
	  new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
	  gimple_call_set_lhs (new_stmt, data_ref);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	}
      else
	{
	  new_stmt = NULL;
	  if (grouped_store)
	    {
	      if (j == 0)
		result_chain.create (group_size);
	      /* Permute.  */
	      vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
					&result_chain);
	    }

	  next_stmt = first_stmt;
	  for (i = 0; i < vec_num; i++)
	    {
	      unsigned align, misalign;

	      if (i > 0)
		/* Bump the vector pointer.  */
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt, NULL_TREE);

	      if (slp)
		vec_oprnd = vec_oprnds[i];
	      else if (grouped_store)
		/* For grouped stores vectorized defs are interleaved in
		   vect_permute_store_chain().  */
		vec_oprnd = result_chain[i];

	      data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
				      dataref_ptr,
				      dataref_offset
				      ? dataref_offset
				      : build_int_cst (reference_alias_ptr_type
						       (DR_REF (first_dr)), 0));
	      align = TYPE_ALIGN_UNIT (vectype);
	      if (aligned_access_p (first_dr))
		misalign = 0;
	      else if (DR_MISALIGNMENT (first_dr) == -1)
		{
		  if (DR_VECT_AUX (first_dr)->base_element_aligned)
		    align = TYPE_ALIGN_UNIT (elem_type);
		  else
		    align = get_object_alignment (DR_REF (first_dr))
			    / BITS_PER_UNIT;
		  misalign = 0;
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  align * BITS_PER_UNIT);
		}
	      else
		{
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  TYPE_ALIGN (elem_type));
		  misalign = DR_MISALIGNMENT (first_dr);
		}
	      if (dataref_offset == NULL_TREE
		  && TREE_CODE (dataref_ptr) == SSA_NAME)
		set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
					misalign);

	      if (negative
		  && dt != vect_constant_def
		  && dt != vect_external_def)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  tree perm_dest
		    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
						   vectype);
		  tree new_temp = make_ssa_name (perm_dest);

		  /* Generate the permute statement.  */
		  gimple *perm_stmt
		    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
					   vec_oprnd, perm_mask);
		  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

		  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
		  vec_oprnd = new_temp;
		}

	      /* Arguments are ready.  Create the new vector stmt.  */
	      new_stmt = gimple_build_assign (data_ref, vec_oprnd);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if (slp)
		continue;

	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	      if (!next_stmt)
		break;
	    }
	}
      if (!slp)
	{
	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  dr_chain.release ();
  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
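
/* Illustrative sketch (not part of the vectorizer): a loop with an
   invariant, non-unit stride that takes the STMT_VINFO_STRIDED_P path
   in vectorizable_store above - the rhs is vectorized and the lanes
   are stored one by one through a strided induction variable:

     void
     store_strided (double *restrict a, int stride, int n)
     {
       for (int i = 0; i < n; i++)
	 a[i * stride] = 1.0;
     }
*/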

/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  mask_elt_type = lang_hooks.types.type_for_mode
		    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}

/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
  return vect_gen_perm_mask_any (vectype, sel);
}

/* Given a vector variable X and Y, that was generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
		      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}

/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loops preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  /* Make sure we don't need to recurse.  While we could do
	     so in simple cases when there are more complex use webs
	     we don't have an easy way to preserve stmt order to fulfil
	     dependencies within them.  */
	  tree op2;
	  ssa_op_iter i2;
	  if (gimple_code (def_stmt) == GIMPLE_PHI)
	    return false;
	  FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
	      if (!gimple_nop_p (def_stmt2)
		  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
		return false;
	    }
	  any = true;
	}
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
	  gsi_remove (&gsi, false);
	  gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
	}
    }

  return true;
}
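
/* Illustrative sketch (not part of the vectorizer): the kind of motion
   hoist_defs_of_uses enables.  If an invariant load is to be moved out
   of the loop, the single-stmt defs feeding it must move first:

     before                          after
       loop:                           t = b + 4;
         t = b + 4;                    x = *t;
         x = *t;                     loop:
         ... use of x ...              ... use of x ...

   A PHI def or a deeper use web would make the stmt order hard to
   preserve, so the function refuses those cases.  */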

/* vectorizable_load.

   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
6052 vectorizable_load (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
6053 slp_tree slp_node
, slp_instance slp_node_instance
)
6056 tree vec_dest
= NULL
;
6057 tree data_ref
= NULL
;
6058 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
6059 stmt_vec_info prev_stmt_info
;
6060 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
6061 struct loop
*loop
= NULL
;
6062 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
6063 bool nested_in_vect_loop
= false;
6064 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
6068 gimple
*new_stmt
= NULL
;
6070 enum dr_alignment_support alignment_support_scheme
;
6071 tree dataref_ptr
= NULL_TREE
;
6072 tree dataref_offset
= NULL_TREE
;
6073 gimple
*ptr_incr
= NULL
;
6075 int i
, j
, group_size
= -1, group_gap_adj
;
6076 tree msq
= NULL_TREE
, lsq
;
6077 tree offset
= NULL_TREE
;
6078 tree byte_offset
= NULL_TREE
;
6079 tree realignment_token
= NULL_TREE
;
6081 vec
<tree
> dr_chain
= vNULL
;
6082 bool grouped_load
= false;
6083 bool load_lanes_p
= false;
6086 bool negative
= false;
6087 bool compute_in_loop
= false;
6088 struct loop
*at_loop
;
6090 bool slp
= (slp_node
!= NULL
);
6091 bool slp_perm
= false;
6092 enum tree_code code
;
6093 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
6096 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
6097 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
6098 int gather_scale
= 1;
6099 enum vect_def_type gather_dt
= vect_unknown_def_type
;
6100 vec_info
*vinfo
= stmt_info
->vinfo
;
6102 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
6105 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
6108 /* Is vectorizable load? */
6109 if (!is_gimple_assign (stmt
))
6112 scalar_dest
= gimple_assign_lhs (stmt
);
6113 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
6116 code
= gimple_assign_rhs_code (stmt
);
6117 if (code
!= ARRAY_REF
6118 && code
!= BIT_FIELD_REF
6119 && code
!= INDIRECT_REF
6120 && code
!= COMPONENT_REF
6121 && code
!= IMAGPART_EXPR
6122 && code
!= REALPART_EXPR
6124 && TREE_CODE_CLASS (code
) != tcc_declaration
)
6127 if (!STMT_VINFO_DATA_REF (stmt_info
))
6130 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
6131 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
6135 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
6136 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
6137 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
6142 /* Multiple types in SLP are handled by creating the appropriate number of
6143 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6145 if (slp
|| PURE_SLP_STMT (stmt_info
))
6148 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
6150 gcc_assert (ncopies
>= 1);
6152 /* FORNOW. This restriction should be relaxed. */
6153 if (nested_in_vect_loop
&& ncopies
> 1)
6155 if (dump_enabled_p ())
6156 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6157 "multiple types in nested loop.\n");
6161 /* Invalidate assumptions made by dependence analysis when vectorization
6162 on the unrolled body effectively re-orders stmts. */
6164 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6165 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6166 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6168 if (dump_enabled_p ())
6169 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6170 "cannot perform implicit CSE when unrolling "
6171 "with negative dependence distance\n");
6175 elem_type
= TREE_TYPE (vectype
);
6176 mode
= TYPE_MODE (vectype
);
6178 /* FORNOW. In some cases can vectorize even if data-type not supported
6179 (e.g. - data copies). */
6180 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
6182 if (dump_enabled_p ())
6183 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6184 "Aligned load, but unsupported type.\n");
6188 /* Check if the load is a part of an interleaving chain. */
6189 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
6191 grouped_load
= true;
6193 gcc_assert (!nested_in_vect_loop
&& !STMT_VINFO_GATHER_SCATTER_P (stmt_info
));
6195 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6197 /* If this is single-element interleaving with an element distance
6198 that leaves unused vector loads around punt - we at least create
6199 very sub-optimal code in that case (and blow up memory,
6201 if (first_stmt
== stmt
6202 && !GROUP_NEXT_ELEMENT (stmt_info
)
6203 && GROUP_SIZE (stmt_info
) > TYPE_VECTOR_SUBPARTS (vectype
))
6205 if (dump_enabled_p ())
6206 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6207 "single-element interleaving not supported "
6208 "for not adjacent vector loads\n");
6212 if (slp
&& SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
6215 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
6217 && !PURE_SLP_STMT (stmt_info
)
6218 && !STMT_VINFO_STRIDED_P (stmt_info
))
6220 if (vect_load_lanes_supported (vectype
, group_size
))
6221 load_lanes_p
= true;
6222 else if (!vect_grouped_load_supported (vectype
, group_size
))
      /* Invalidate assumptions made by dependence analysis when vectorization
	 on the unrolled body effectively re-orders stmts.  */
      if (!PURE_SLP_STMT (stmt_info)
	  && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
	  && ((unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	      > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "cannot perform implicit CSE when performing "
			     "group loads with negative dependence distance\n");
	  return false;
	}

      /* Similarly when the stmt is a load that is both part of a SLP
	 instance and a loop vectorized stmt via the same-dr mechanism
	 we have to give up.  */
      if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
	  && (STMT_SLP_TYPE (stmt_info)
	      != STMT_SLP_TYPE (vinfo_for_stmt
				  (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "conflicting SLP types for CSEd load\n");
	  return false;
	}
    }
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
					       &gather_off, &gather_scale);
      gcc_assert (gather_decl);
      if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
			       &gather_off_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "gather index use not simple.\n");
	  return false;
	}
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      if ((grouped_load
	   && (slp || PURE_SLP_STMT (stmt_info)))
	  && (group_size > nunits
	      || nunits % group_size != 0))
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "unhandled strided group load\n");
	  return false;
	}
    }
  else
    {
      negative = tree_int_cst_compare (nested_in_vect_loop
				       ? STMT_VINFO_DR_STEP (stmt_info)
				       : DR_STEP (dr),
				       size_zero_node) < 0;
      if (negative && ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types with negative step.\n");
	  return false;
	}

      if (negative)
	{
	  if (grouped_load)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step for group load not supported"
				 "\n");
	      return false;
	    }
	  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
	  if (alignment_support_scheme != dr_aligned
	      && alignment_support_scheme != dr_unaligned_supported)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step but alignment required.\n");
	      return false;
	    }
	  if (!perm_mask_for_reverse (vectype))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step and reversing not supported."
				 "\n");
	      return false;
	    }
	}
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
	vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
			      NULL, NULL, NULL);
      return true;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform load. ncopies = %d\n", ncopies);
  /* Transform.  */

  ensure_base_align (stmt_info, dr);

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE;
      tree prev_res = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
      if (nunits == gather_off_nunits)
	modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
	  modifier = WIDEN;

	  for (i = 0; i < gather_off_nunits; ++i)
	    sel[i] = i | nunits;

	  perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
	}
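      /* Illustration of the WIDEN selector (values for exposition): with
	 nunits == 4 and gather_off_nunits == 8, sel becomes
	 { 4, 5, 6, 7, 4, 5, 6, 7 }, so the permutation moves the high
	 half of the offset vector into its low half; every odd copy of
	 the gather then consumes those offsets (see the modifier == WIDEN
	 handling in the transform loop below).  */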
      else if (nunits == gather_off_nunits * 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
	  modifier = NARROW;

	  for (i = 0; i < nunits; ++i)
	    sel[i] = i < gather_off_nunits
		     ? i : i + nunits - gather_off_nunits;

	  perm_mask = vect_gen_perm_mask_checked (vectype, sel);
	  ncopies *= 2;
	}
      else
	gcc_unreachable ();
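      /* Illustration of the NARROW selector (values for exposition): with
	 nunits == 8 and gather_off_nunits == 4, sel becomes
	 { 0, 1, 2, 3, 8, 9, 10, 11 }, i.e. the two-input permutation
	 concatenates the meaningful low halves of two successive gather
	 results into one full result vector.  */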
      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));
      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}
      /* Currently we support only unconditional gather loads,
	 so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
	mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
	{
	  mask = build_int_cst (TREE_TYPE (masktype), -1);
	  mask = build_vector_from_val (masktype, mask);
	  mask = vect_init_vector (stmt, mask, masktype, NULL);
	}
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
	{
	  REAL_VALUE_TYPE r;
	  long tmp[6];
	  for (j = 0; j < 6; ++j)
	    tmp[j] = -1;
	  real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
	  mask = build_real (TREE_TYPE (masktype), r);
	  mask = build_vector_from_val (masktype, mask);
	  mask = vect_init_vector (stmt, mask, masktype, NULL);
	}
      else
	gcc_unreachable ();
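      /* Note the floating-point case above builds the all-ones mask by
	 reinterpreting an all-ones bit pattern in the element mode as a
	 real constant (a NaN); gather instructions inspect the mask bits
	 rather than the floating-point value, so this acts as the
	 all-true mask.  */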
      scale = build_int_cst (scaletype, gather_scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
	merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
	{
	  REAL_VALUE_TYPE r;
	  long tmp[6];
	  for (j = 0; j < 6; ++j)
	    tmp[j] = 0;
	  real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
	  merge = build_real (TREE_TYPE (rettype), r);
	}
      else
	gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (modifier == WIDEN && (j & 1))
	    op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
				       perm_mask, stmt, gsi);
	  else if (j == 0)
	    op = vec_oprnd0
	      = vect_get_vec_def_for_operand (gather_off, stmt);
	  else
	    op = vec_oprnd0
	      = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
			  == TYPE_VECTOR_SUBPARTS (idxtype));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  new_stmt
	    = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);

	  if (!useless_type_conversion_p (vectype, rettype))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
			  == TYPE_VECTOR_SUBPARTS (rettype));
	      op = vect_get_new_ssa_name (rettype, vect_simple_var);
	      gimple_call_set_lhs (new_stmt, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      var = make_ssa_name (vec_dest);
	      op = build1 (VIEW_CONVERT_EXPR, vectype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	    }
	  else
	    {
	      var = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, var);
	    }

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (modifier == NARROW)
	    {
	      if ((j & 1) == 0)
		{
		  prev_res = var;
		  continue;
		}
	      var = permute_vec_elements (prev_res, var,
					  perm_mask, stmt, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (var);
	    }

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      return true;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;

      gcc_assert (!nested_in_vect_loop);

      if (slp && grouped_load)
	first_dr = STMT_VINFO_DATA_REF
	    (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
      else
	first_dr = dr;

      stride_base
	= fold_build_pointer_plus
	    (DR_BASE_ADDRESS (first_dr),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (DR_OFFSET (first_dr)),
			 convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr));
      /* For a load with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     ... = array[i];

	 we generate a new induction variable and new accesses to
	 form a new vector (or vectors, depending on ncopies):

	   for (j = 0; ; j += VF*stride)
	     tmp1 = array[j];
	     tmp2 = array[j + stride];
	     ...
	     vectemp = {tmp1, tmp2, ...}
	     ...  */

      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
			    build_int_cst (TREE_TYPE (stride_step), vf));
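      /* For example (exposition only): with vf == 4 and a byte step S,
	 the induction variable created below advances by 4*S per vector
	 iteration, while the four scalar loads forming one vector are
	 each S apart.  */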
      standard_iv_increment_position (loop, &incr_gsi, &insert_after);
      create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (unshare_expr (stride_step),
					  &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)),
				 0);
      int nloads = nunits;
      tree ltype = TREE_TYPE (vectype);
      auto_vec<tree> dr_chain;
      if (slp)
	{
	  nloads = nunits / group_size;
	  if (group_size < nunits)
	    ltype = build_vector_type (TREE_TYPE (vectype), group_size);
	  else
	    ltype = vectype;
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
	  ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  if (slp_perm)
	    dr_chain.create (ncopies);
	}
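      /* For instance (exposition only), an SLP group of size 2 with a
	 V4SI vectype gives nloads == 4 / 2 == 2 and ltype a two-element
	 vector type, so each vector stmt below is assembled from two
	 two-element loads via a CONSTRUCTOR.  */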
      for (j = 0; j < ncopies; j++)
	{
	  if (nloads > 1)
	    {
	      vec_alloc (v, nloads);
	      for (i = 0; i < nloads; i++)
		{
		  tree newref, newoff;

		  newref = build2 (MEM_REF, ltype, running_off, alias_off);
		  newref = force_gimple_operand_gsi (gsi, newref, true,
						     NULL_TREE, true,
						     GSI_SAME_STMT);
		  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
		  newoff = copy_ssa_name (running_off);
		  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					      running_off, stride_step);
		  vect_finish_stmt_generation (stmt, incr, gsi);

		  running_off = newoff;
		}

	      tree vec_inv = build_constructor (vectype, v);
	      new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (new_temp);
	    }
	  else
	    {
	      new_stmt = gimple_build_assign (make_ssa_name (ltype),
					      build2 (MEM_REF, ltype,
						      running_off, alias_off));
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      tree newoff = copy_ssa_name (running_off);
	      gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
						  running_off, stride_step);
	      vect_finish_stmt_generation (stmt, incr, gsi);

	      running_off = newoff;
	    }
	  if (slp)
	    {
	      SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      if (slp_perm)
		dr_chain.quick_push (gimple_assign_lhs (new_stmt));
	    }
	  else
	    {
	      if (j == 0)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
      if (slp_perm)
	vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
				      slp_node_instance, false);
      return true;
    }
  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      /* For SLP vectorization we directly vectorize a subchain
	 without permutation.  */
      if (slp
	  && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
	  && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
	first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
	  /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
	     ??? But we can only do so if there is exactly one
	     as we have no way to get at the rest.  Leave the CSE
	     opportunity alone.
	     ??? With the group load eventually participating
	     in multiple different permutations (having multiple
	     slp nodes which refer to the same group) the CSE
	     is even wrong code.  See PR56270.  */
	  && !slp)
	{
	  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	  return true;
	}
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
	{
	  grouped_load = false;
	  /* For SLP permutation support we need to load the whole group,
	     not only the number of vector stmts the permutation result
	     fits in.  */
	  if (slp_perm)
	    vec_num = (group_size * vf + nunits - 1) / nunits;
	  else
	    vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  group_gap_adj = vf * group_size - nunits * vec_num;
	}
      else
	vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
    }
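  /* A worked example of the SLP permutation case (numbers for exposition):
     with group_size == 3, vf == 4 and a V8HI vectype (nunits == 8) this
     gives vec_num == (3*4 + 7) / 8 == 2 and
     group_gap_adj == 12 - 16 == -4: two vectors cover the twelve needed
     elements with four loaded in excess, which the pointer bump at the
     end of the copy loop compensates for.  */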
  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */
  /* In case of interleaving (non-unit grouped access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
     VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

         p = initial_addr;
         indx = 0;
         loop {
           p = p + indx * vectype_size;
           vec_dest = *(p);
           indx = indx + 1;
         }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

         p1 = initial_addr;
         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
           p2 = p2 + indx * vectype_size
           lsq = *(floor(p2))
           vec_dest = realign_load (msq, lsq, realignment_token)
           indx = indx + 1;
           msq = lsq;
         }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
  if (nested_in_vect_loop
      && (TREE_INT_CST_LOW (DR_STEP (dr))
	  % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }
  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
				    alignment_support_scheme, NULL_TREE,
				    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
	{
	  phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
	  byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
				    size_one_node);
	}
    }
  else
    at_loop = loop;

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
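  /* With load-lanes, e.g. a group of three V4SI loads (exposition only),
     the pointer update chain below steps through a single int[12] array
     object per copy; the IFN_LOAD_LANES call fills it and the individual
     vectors are then extracted from it.  */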
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
	{
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr))
	      && integer_zerop (DR_INIT (first_dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (DR_REF (first_dr)))
	      && (alignment_support_scheme == dr_aligned
		  || alignment_support_scheme == dr_unaligned_supported))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
	      dataref_offset = build_int_cst (reference_alias_ptr_type
					      (DR_REF (first_dr)), 0);
	      inv_p = false;
	    }
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, &inv_p,
					  byte_offset);
	}
      else if (dataref_offset)
	dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
					  TYPE_SIZE_UNIT (aggr_type));
      else
	dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
				       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
	dr_chain.create (vec_num);
      if (load_lanes_p)
	{
	  tree vec_array;

	  vec_array = create_vector_array (vectype, vec_num);

	  /* Emit:
	       VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
	  data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
	  new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
	  gimple_call_set_lhs (new_stmt, vec_array);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  /* Extract each vector into an SSA_NAME.  */
	  for (i = 0; i < vec_num; i++)
	    {
	      new_temp = read_vector_array (stmt, gsi, scalar_dest,
					    vec_array, i);
	      dr_chain.quick_push (new_temp);
	    }

	  /* Record the mapping between SSA_NAMEs and statements.  */
	  vect_record_grouped_load_vectors (stmt, dr_chain);
	}
      else
	{
	  for (i = 0; i < vec_num; i++)
	    {
	      if (i > 0)
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt, NULL_TREE);
	      /* 2. Create the vector-load in the loop.  */
	      switch (alignment_support_scheme)
		{
		case dr_aligned:
		case dr_unaligned_supported:
		  {
		    unsigned int align, misalign;

		    data_ref
		      = fold_build2 (MEM_REF, vectype, dataref_ptr,
				     dataref_offset
				     ? dataref_offset
				     : build_int_cst (reference_alias_ptr_type
						      (DR_REF (first_dr)), 0));
		    align = TYPE_ALIGN_UNIT (vectype);
		    if (alignment_support_scheme == dr_aligned)
		      {
			gcc_assert (aligned_access_p (first_dr));
			misalign = 0;
		      }
		    else if (DR_MISALIGNMENT (first_dr) == -1)
		      {
			if (DR_VECT_AUX (first_dr)->base_element_aligned)
			  align = TYPE_ALIGN_UNIT (elem_type);
			else
			  align = (get_object_alignment (DR_REF (first_dr))
				   / BITS_PER_UNIT);
			misalign = 0;
			TREE_TYPE (data_ref)
			  = build_aligned_type (TREE_TYPE (data_ref),
						align * BITS_PER_UNIT);
		      }
		    else
		      {
			TREE_TYPE (data_ref)
			  = build_aligned_type (TREE_TYPE (data_ref),
						TYPE_ALIGN (elem_type));
			misalign = DR_MISALIGNMENT (first_dr);
		      }
		    if (dataref_offset == NULL_TREE
			&& TREE_CODE (dataref_ptr) == SSA_NAME)
		      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
					      align, misalign);
		    break;
		  }
		case dr_explicit_realign:
		  {
		    tree ptr, bump;

		    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

		    if (compute_in_loop)
		      msq = vect_setup_realignment (first_stmt, gsi,
						    &realignment_token,
						    dr_explicit_realign,
						    dataref_ptr, NULL);

		    if (TREE_CODE (dataref_ptr) == SSA_NAME)
		      ptr = copy_ssa_name (dataref_ptr);
		    else
		      ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
		    new_stmt = gimple_build_assign
				 (ptr, BIT_AND_EXPR, dataref_ptr,
				  build_int_cst
				  (TREE_TYPE (dataref_ptr),
				   -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    data_ref
		      = build2 (MEM_REF, vectype, ptr,
				build_int_cst (reference_alias_ptr_type
						 (DR_REF (first_dr)), 0));
		    vec_dest = vect_create_destination_var (scalar_dest,
							    vectype);
		    new_stmt = gimple_build_assign (vec_dest, data_ref);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_assign_set_lhs (new_stmt, new_temp);
		    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
		    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    msq = new_temp;

		    bump = size_binop (MULT_EXPR, vs,
				       TYPE_SIZE_UNIT (elem_type));
		    bump = size_binop (MINUS_EXPR, bump, size_one_node);
		    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
		    new_stmt = gimple_build_assign
				 (NULL_TREE, BIT_AND_EXPR, ptr,
				  build_int_cst
				  (TREE_TYPE (ptr),
				   -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
		    ptr = copy_ssa_name (ptr, new_stmt);
		    gimple_assign_set_lhs (new_stmt, ptr);
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    data_ref
		      = build2 (MEM_REF, vectype, ptr,
				build_int_cst (reference_alias_ptr_type
						 (DR_REF (first_dr)), 0));
		    break;
		  }
		case dr_explicit_realign_optimized:
		  if (TREE_CODE (dataref_ptr) == SSA_NAME)
		    new_temp = copy_ssa_name (dataref_ptr);
		  else
		    new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
		  new_stmt = gimple_build_assign
			       (new_temp, BIT_AND_EXPR, dataref_ptr,
				build_int_cst
				(TREE_TYPE (dataref_ptr),
				 -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  data_ref
		    = build2 (MEM_REF, vectype, new_temp,
			      build_int_cst (reference_alias_ptr_type
					       (DR_REF (first_dr)), 0));
		  break;
		default:
		  gcc_unreachable ();
		}
	      vec_dest = vect_create_destination_var (scalar_dest, vectype);
	      new_stmt = gimple_build_assign (vec_dest, data_ref);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_assign_set_lhs (new_stmt, new_temp);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      /* 3. Handle explicit realignment if necessary/supported.
		 Create in loop:
		   vec_dest = realign_load (msq, lsq, realignment_token)  */
	      if (alignment_support_scheme == dr_explicit_realign_optimized
		  || alignment_support_scheme == dr_explicit_realign)
		{
		  lsq = gimple_assign_lhs (new_stmt);
		  if (!realignment_token)
		    realignment_token = dataref_ptr;
		  vec_dest = vect_create_destination_var (scalar_dest,
							  vectype);
		  new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
						  msq, lsq, realignment_token);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);

		  if (alignment_support_scheme == dr_explicit_realign_optimized)
		    {
		      gcc_assert (phi);
		      if (i == vec_num - 1 && j == ncopies - 1)
			add_phi_arg (phi, lsq,
				     loop_latch_edge (containing_loop),
				     UNKNOWN_LOCATION);
		      msq = lsq;
		    }
		}
	      /* 4. Handle invariant-load.  */
	      if (inv_p && !bb_vinfo)
		{
		  gcc_assert (!grouped_load);
		  /* If we have versioned for aliasing or the loop doesn't
		     have any data dependencies that would preclude this,
		     then we are sure this is a loop invariant load and
		     thus we can insert it on the preheader edge.  */
		  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
		      && !nested_in_vect_loop
		      && hoist_defs_of_uses (stmt, loop))
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_NOTE, vect_location,
					   "hoisting out of the vectorized "
					   "loop: ");
			  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
			}
		      tree tem = copy_ssa_name (scalar_dest);
		      gsi_insert_on_edge_immediate
			(loop_preheader_edge (loop),
			 gimple_build_assign (tem,
					      unshare_expr
					        (gimple_assign_rhs1 (stmt))));
		      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
		    }
		  else
		    {
		      gimple_stmt_iterator gsi2 = *gsi;
		      gsi_next (&gsi2);
		      new_temp = vect_init_vector (stmt, scalar_dest,
						   vectype, &gsi2);
		    }
		  new_stmt = SSA_NAME_DEF_STMT (new_temp);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt, vinfo));
		}
	      if (negative)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  new_temp = permute_vec_elements (new_temp, new_temp,
						   perm_mask, stmt, gsi);
		  new_stmt = SSA_NAME_DEF_STMT (new_temp);
		}
	      /* Collect vector loads and later create their permutation in
		 vect_transform_grouped_load ().  */
	      if (grouped_load || slp_perm)
		dr_chain.quick_push (new_temp);

	      /* Store vector loads in the corresponding SLP_NODE.  */
	      if (slp && !slp_perm)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	    }
	  /* Bump the vector pointer to account for a gap or for excess
	     elements loaded for a permuted SLP load.  */
	  if (group_gap_adj != 0)
	    {
	      bool ovf;
	      tree bump
		= wide_int_to_tree (sizetype,
				    wi::smul (TYPE_SIZE_UNIT (elem_type),
					      group_gap_adj, &ovf));
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					     stmt, bump);
	    }
	}
      if (slp && !slp_perm)
	continue;

      if (slp_perm)
	{
	  if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
					     slp_node_instance, false))
	    {
	      dr_chain.release ();
	      return false;
	    }
	}
      else
	{
	  if (grouped_load)
	    {
	      if (!load_lanes_p)
		vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
	      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	    }
	  else
	    {
	      if (j == 0)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
      dr_chain.release ();
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */
static bool
vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
{
  tree lhs, rhs;
  enum vect_def_type dt;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  /* Mask case.  */
  if (TREE_CODE (cond) == SSA_NAME
      && TREE_CODE (TREE_TYPE (cond)) == BOOLEAN_TYPE)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
      if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
			       &dt, comp_vectype)
	  || !*comp_vectype
	  || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
	return false;
      return true;
    }

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
	return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
	   && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
	return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
	   && TREE_CODE (rhs) != FIXED_CST)
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
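/* For instance, for the scalar statement
     x = a < b ? c : d;
   the condition "a < b" is simple when A and B are SSA names defined in
   the vectorized region (or invariants), and *COMP_VECTYPE is then the
   vector type of A and B that the VEC_COND_EXPR comparison will use.  */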
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
bool
vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, tree reduc_def, int reduc_index,
			slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dt, dts[4];
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;
  bool masked = false;
  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
    {
      if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
	return false;

      if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
	  && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	       && reduc_def))
	return false;

      /* FORNOW: not yet supported.  */
      if (STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "value used after loop.\n");
	  return false;
	}
    }
  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */
  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
      || !comp_vectype)
    return false;

  gimple *def_stmt;
  if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt))
    return false;
  if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt))
    return false;

  if (VECTOR_BOOLEAN_TYPE_P (comp_vectype))
    {
      vec_cmp_type = comp_vectype;
      masked = true;
    }
  else
    vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);

  if (vec_cmp_type == NULL_TREE)
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (vectype, comp_vectype);
    }
  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 4> ops;
	      auto_vec<vec<tree>, 4> vec_defs;

	      if (masked)
		ops.safe_push (cond_expr);
	      else
		{
		  ops.safe_push (TREE_OPERAND (cond_expr, 0));
		  ops.safe_push (TREE_OPERAND (cond_expr, 1));
		}
	      ops.safe_push (then_clause);
	      ops.safe_push (else_clause);
	      vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
	      vec_oprnds3 = vec_defs.pop ();
	      vec_oprnds2 = vec_defs.pop ();
	      if (!masked)
		vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();

	      ops.release ();
	      vec_defs.release ();
	    }
	  else
	    {
	      gimple *gtemp;
	      if (masked)
		{
		  vec_cond_lhs
		    = vect_get_vec_def_for_operand (cond_expr, stmt,
						    comp_vectype);
		  vect_is_simple_use (cond_expr, stmt_info->vinfo,
				      &gtemp, &dts[0]);
		}
	      else
		{
		  vec_cond_lhs =
		    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
						  stmt, comp_vectype);
		  vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
				      loop_vinfo, &gtemp, &dts[0]);

		  vec_cond_rhs =
		    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
						  stmt, comp_vectype);
		  vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
				      loop_vinfo, &gtemp, &dts[1]);
		}
	      if (reduc_index == 1)
		vec_then_clause = reduc_def;
	      else
		{
		  vec_then_clause
		    = vect_get_vec_def_for_operand (then_clause, stmt);
		  vect_is_simple_use (then_clause, loop_vinfo,
				      &gtemp, &dts[2]);
		}
	      if (reduc_index == 2)
		vec_else_clause = reduc_def;
	      else
		{
		  vec_else_clause
		    = vect_get_vec_def_for_operand (else_clause, stmt);
		  vect_is_simple_use (else_clause, loop_vinfo,
				      &gtemp, &dts[3]);
		}
	    }
	}
      else
	{
	  vec_cond_lhs
	    = vect_get_vec_def_for_stmt_copy (dts[0],
					      vec_oprnds0.pop ());
	  if (!masked)
	    vec_cond_rhs
	      = vect_get_vec_def_for_stmt_copy (dts[1],
						vec_oprnds1.pop ());

	  vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
							    vec_oprnds2.pop ());
	  vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
							    vec_oprnds3.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_cond_lhs);
	  if (!masked)
	    vec_oprnds1.quick_push (vec_cond_rhs);
	  vec_oprnds2.quick_push (vec_then_clause);
	  vec_oprnds3.quick_push (vec_else_clause);
	}
      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
	{
	  vec_then_clause = vec_oprnds2[i];
	  vec_else_clause = vec_oprnds3[i];

	  if (masked)
	    vec_compare = vec_cond_lhs;
	  else
	    {
	      vec_cond_rhs = vec_oprnds1[i];
	      vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
				    vec_cond_lhs, vec_cond_rhs);
	    }
	  vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
				  vec_compare, vec_then_clause,
				  vec_else_clause);

	  new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
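/* To illustrate the transform above: per copy, the scalar statement
     x = a < b ? c : d;
   becomes
     vec_compare = va < vb;
     vx = VEC_COND_EXPR <vec_compare, vc, vd>;
   (in the masked case the comparison is already available as a mask
   operand and only the VEC_COND_EXPR is built).  */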
/* vectorizable_comparison.

   Check if STMT is comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, tree reduc_def,
			 slp_tree slp_node)
{
  tree lhs, rhs1, rhs2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  unsigned nunits;
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  gimple *def_stmt;
  tree mask_type;
  tree mask;
  if (!VECTOR_BOOLEAN_TYPE_P (vectype))
    return false;

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	   && reduc_def))
    return false;

  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "value used after loop.\n");
      return false;
    }
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return false;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
			   &dts[0], &vectype1))
    return false;

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
			   &dts[1], &vectype2))
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
  if (!vectype)
    {
      vectype = build_vector_type (TREE_TYPE (rhs1), nunits);
      if (tree_to_shwi (TYPE_SIZE_UNIT (vectype)) != current_vector_size)
	return false;
    }
  else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies, dts, NULL, NULL);
      return expand_vec_cmp_expr_p (vectype, mask_type);
    }
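/* Note MASK_TYPE is a vector boolean type.  For example, comparing two
   V4SI vectors yields a narrow scalar mask type on targets with mask
   registers (AVX-512 style), and an integer vector of all-ones/all-zeros
   lanes of the same shape elsewhere.  */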
  /* Transform.  */
  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
    }

  /* Handle def.  */
  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);
  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 2> ops;
	      auto_vec<vec<tree>, 2> vec_defs;

	      ops.safe_push (rhs1);
	      ops.safe_push (rhs2);
	      vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
	      vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();
	    }
	  else
	    {
	      vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, NULL);
	      vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, NULL);
	    }
	}
      else
	{
	  vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
						     vec_oprnds0.pop ());
	  vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
						     vec_oprnds1.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_rhs1);
	  vec_oprnds1.quick_push (vec_rhs2);
	}
      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
	{
	  vec_rhs2 = vec_oprnds1[i];

	  new_temp = make_ssa_name (mask);
	  new_stmt = gimple_build_assign (new_temp, code, vec_rhs1, vec_rhs2);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: stmt has volatile operands\n");

      return false;
    }
  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */

  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && pattern_stmt
	  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	{
	  /* Analyze PATTERN_STMT instead of the original stmt.  */
	  stmt = pattern_stmt;
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

	  return true;
	}
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	   && node == NULL
	   && pattern_stmt
	   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
	return false;
    }
  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *pattern_def_stmt = gsi_stmt (si);
	  if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
	    {
	      /* Analyze def stmt of STMT if it's a pattern stmt.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "==> examining pattern def statement: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
		}

	      if (!vect_analyze_stmt (pattern_def_stmt,
				      need_to_vectorize, node))
		return false;
	    }
	}
    }
  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
		  && (relevance == vect_used_in_outer
		      || relevance == vect_used_in_outer_by_reduction
		      || relevance == vect_used_by_reduction
		      || relevance == vect_unused_in_scope));
      break;

    case vect_induction_def:
    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));
      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "get vectype for scalar type:  ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
	  dump_printf (MSG_NOTE, "\n");
	}

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "not SLPed: unsupported data-type ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				 scalar_type);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  return false;
	}

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "vectype:  ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
	  dump_printf (MSG_NOTE, "\n");
	}

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }
  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
		  || (is_gimple_call (stmt)
		      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "handled only by SLP analysis\n");
      return true;
    }
  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	  || vectorizable_conversion (stmt, NULL, NULL, node)
	  || vectorizable_shift (stmt, NULL, NULL, node)
	  || vectorizable_operation (stmt, NULL, NULL, node)
	  || vectorizable_assignment (stmt, NULL, NULL, node)
	  || vectorizable_load (stmt, NULL, NULL, node, NULL)
	  || vectorizable_call (stmt, NULL, NULL, node)
	  || vectorizable_store (stmt, NULL, NULL, node)
	  || vectorizable_reduction (stmt, NULL, NULL, node)
	  || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	  || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
  else
    {
      if (bb_vinfo)
	ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	      || vectorizable_conversion (stmt, NULL, NULL, node)
	      || vectorizable_shift (stmt, NULL, NULL, node)
	      || vectorizable_operation (stmt, NULL, NULL, node)
	      || vectorizable_assignment (stmt, NULL, NULL, node)
	      || vectorizable_load (stmt, NULL, NULL, node, NULL)
	      || vectorizable_call (stmt, NULL, NULL, node)
	      || vectorizable_store (stmt, NULL, NULL, node)
	      || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	      || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
    }
  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: relevant stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }
  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: live stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
		     bool *grouped_store, slp_tree slp_node,
		     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
				slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
	{
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and there vec_stmt_info shouldn't be freed
	     meanwhile.  */
	  *grouped_store = true;
	  if (STMT_VINFO_VEC_STMT (stmt_info))
	    is_store = true;
	}
      else
	is_store = true;
      break;
    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt)
	  && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
	is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "stmt not supported.\n");
	  gcc_unreachable ();
	}
    }
  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
		&& STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
				  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info) ==
	     vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
				 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
      else
	scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	{
	  if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	    {
	      exit_phi = USE_STMT (use_p);
	      STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
	    }
	}
    }
  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
	next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}

/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
	= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
	{
	  gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
	  gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
	  gimple_set_bb (patt_stmt, NULL);
	  tree lhs = gimple_get_lhs (patt_stmt);
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    release_ssa_name (lhs);
	  if (seq)
	    {
	      gimple_stmt_iterator si;
	      for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
		{
		  gimple *seq_stmt = gsi_stmt (si);
		  gimple_set_bb (seq_stmt, NULL);
		  lhs = gimple_get_lhs (seq_stmt);
		  if (lhs && TREE_CODE (lhs) == SSA_NAME)
		    release_ssa_name (lhs);
		  free_stmt_vec_info (seq_stmt);
		}
	    }
	  free_stmt_vec_info (patt_stmt);
	}
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  machine_mode inner_mode = TYPE_MODE (scalar_type);
  machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.   Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}
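/* For example, for SCALAR_TYPE int (a 4-byte SImode) and SIZE == 16 this
   returns the four-element type V4SI; with SIZE == 0 the number of lanes
   is instead determined by the target's preferred SIMD mode for SImode.  */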

unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
                                                  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
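
/* Usage sketch (illustrative, assuming a 16-byte-vector target): the
   first successful call latches the vector size for the whole region,
   so after

     tree v4si = get_vectype_for_scalar_type (integer_type_node);
     tree v8hi = get_vectype_for_scalar_type (short_integer_type_node);

   current_vector_size would be 16 and the second call would return an
   8-element type to match, rather than whatever narrower or wider mode
   the target might otherwise prefer for HImode.  */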

/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
                                  current_vector_size);
}
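
/* For illustration: on a target using 16-byte vectors, comparing two
   "vector(4) int" values yields four boolean results, so
   get_mask_type_for_scalar_type (integer_type_node) would build a truth
   vector type with TYPE_VECTOR_SUBPARTS == 4.  How the target represents
   each truth element (bit mask vs. integer mask) is target-defined; this
   example only assumes the subparts count.  */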

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
           (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
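
/* Worked example (hypothetical 16-byte-vector target): given a
   VECTOR_TYPE "vector(4) int", requesting a same-sized vector of
   "short" asks for 16 / 2 = 8 units, i.e. "vector(8) short":

     tree v8hi = get_same_sized_vectype (short_integer_type_node, v4si);

   where v4si is assumed here to be a 16-byte integer vector type.  */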

/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition.

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
                    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      if (is_a <bb_vec_info> (vinfo) && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
        *dt = vect_external_def;
      else
        *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
        {
        case vect_uninitialized_def:
          dump_printf (MSG_NOTE, "uninitialized\n");
          break;
        case vect_constant_def:
          dump_printf (MSG_NOTE, "constant\n");
          break;
        case vect_external_def:
          dump_printf (MSG_NOTE, "external\n");
          break;
        case vect_internal_def:
          dump_printf (MSG_NOTE, "internal\n");
          break;
        case vect_induction_def:
          dump_printf (MSG_NOTE, "induction\n");
          break;
        case vect_reduction_def:
          dump_printf (MSG_NOTE, "reduction\n");
          break;
        case vect_double_reduction_def:
          dump_printf (MSG_NOTE, "double reduction\n");
          break;
        case vect_nested_cycle:
          dump_printf (MSG_NOTE, "nested cycle\n");
          break;
        case vect_unknown_def_type:
          dump_printf (MSG_NOTE, "unknown\n");
          break;
        }
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unsupported defining stmt:\n");
      return false;
    }

  return true;
}

/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
                    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && !STMT_VINFO_RELEVANT (stmt_info)
          && !STMT_VINFO_LIVE_P (stmt_info))
        stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
           || *dt == vect_constant_def
           || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
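
/* Typical caller pattern (a sketch, assuming vectorizer context
   variables op and vinfo):

     gimple *def_stmt;
     enum vect_def_type dt;
     tree vectype;
     if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &vectype))
       return false;
     if (!vectype)
       vectype = get_vectype_for_scalar_type (TREE_TYPE (op));

   i.e. when the def is constant or external the caller picks a vector
   type itself, as the contract above describes.  */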

/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
                                tree vectype_out, tree vectype_in,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
         two vectors (because the widened results do not fit into one vector).
         The generated vector results would normally be expected to be
         generated in the same order as in the original scalar computation,
         i.e. if 8 results are generated in each vector iteration, they are
         to be organized as follows:
                vect1: [res1,res2,res3,res4],
                vect2: [res5,res6,res7,res8].

         However, in the special case that the result of the widening
         operation is used in a reduction computation only, the order doesn't
         matter (because when vectorizing a reduction we change the order of
         the computation).  Some targets can take advantage of this and
         generate more efficient code.  For example, targets like Altivec,
         that support widen_mult using a sequence of {mult_even,mult_odd}
         generate the following vectors:
                vect1: [res1,res3,res5,res7],
                vect2: [res2,res4,res6,res8].

         When vectorizing outer-loops, we execute the inner-loop sequentially
         (each vectorized inner-loop iteration contributes to VF outer-loop
         iterations in parallel).  We therefore do not allow changing the
         order of the computation in the inner-loop during outer-loop
         vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
         widen and then contract again, e.g. (short)((int)x * y >> 8).
         Normally, pack_trunc performs an even/odd permute, whereas the
         repack from an even/odd expansion would be an interleave, which
         would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
         on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
         are properly set up for the caller.  If we fail, we'll continue with
         a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
          && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
          && !nested_in_vect_loop_p (vect_loop, stmt)
          && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
                                             stmt, vectype_out, vectype_in,
                                             code1, code2, multi_step_cvt,
                                             interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have
             the same operation.  One such example is s += a * b, where
             elements in a and b cannot be reordered.  Here we check if the
             vector defined by STMT is only directly used in the reduction
             statement.  */
          tree lhs = gimple_assign_lhs (stmt);
          use_operand_p dummy;
          gimple *use_stmt;
          stmt_vec_info use_stmt_info = NULL;
          if (single_imm_use (lhs, &dummy, &use_stmt)
              && (use_stmt_info = vinfo_for_stmt (use_stmt))
              && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
            return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case DOT_PROD_EXPR:
      c1 = DOT_PROD_EXPR;
      break;

    case SAD_EXPR:
      c1 = SAD_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode,
                                          TYPE_UNSIGNED (prev_type));
      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
          || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode2].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (optab3, intermediate_mode))
              == CODE_FOR_nothing)
          || ((icode2 = optab_handler (optab4, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
          && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
        return true;

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}

/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
        = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
        = optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
          && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
          && insn_data[icode1].operand[0].mode
             == insn_data[icode2].operand[0].mode)
        {
          uns = false;
          optab1 = interm_optab;
          icode1 = icode2;
        }
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the narrowing sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
        = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
                               optab_default);
      if (!interm_optab
          || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (interm_optab, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
        return true;

      prev_mode = intermediate_mode;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}