/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}
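
/* A minimal usage sketch (illustrative only, not part of the original
   file): recording the body cost of one vector statement during
   analysis and reading back the preliminary estimate.

     stmt_vector_for_cost body_cost_vec;
     body_cost_vec.create (0);
     unsigned est = record_stmt_cost (&body_cost_vec, 1, vector_stmt,
				      stmt_info, 0, vect_body);
     ...
     body_cost_vec.release ();

   Passing a NULL cost vector instead forwards the cost immediately to
   the target via the add_stmt_cost hook.  */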
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p,
		    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
        {
          imm_use_iterator imm_iter;
          use_operand_p use_p;
          gimple *use_stmt;
          tree lhs;
          loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (is_gimple_assign (stmt))
            lhs = gimple_assign_lhs (stmt);
          else
            lhs = gimple_call_lhs (stmt);

          /* This use is out of pattern use, if LHS has other uses that are
             pattern uses, we should mark the stmt itself, and not the pattern
             stmt.  */
          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
              {
                if (is_gimple_debug (USE_STMT (use_p)))
                  continue;
                use_stmt = USE_STMT (use_p);

                if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
                  continue;

                if (vinfo_for_stmt (use_stmt)
                    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
                  {
                    found = true;
                    break;
                  }
              }
        }

      if (!found)
        {
          /* This is the last stmt in a sequence that was detected as a
             pattern that can potentially be vectorized.  Don't mark the stmt
             as relevant/live because it's not going to be vectorized.
             Instead mark the pattern-stmt that replaces it.  */

          pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "last stmt in pattern. don't mark"
                             " relevant/live.\n");
          stmt_info = vinfo_for_stmt (pattern_stmt);
          gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
          save_relevant = STMT_VINFO_RELEVANT (stmt_info);
          save_live_p = STMT_VINFO_LIVE_P (stmt_info);
          stmt = pattern_stmt;
        }
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}
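
/* For illustration (not part of the original file), in a loop like

     for (i = 0; i < n; i++)
       {
	 a[i] = b[i] + 1;	<-- relevant: has a vdef (alters memory)
	 sum += b[i];		<-- live: SUM is used after the loop
       }
     ... = sum;

   vect_stmt_relevant_p returns true for both statements: the store
   because of its vdef, and the reduction because its result is used
   outside the loop (through the loop-closed exit phi).  */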
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref. FORNOW this means that its of one of
     the following forms:
     -1- STMT: load (mem[index])
     -2- STMT: mem[index] = var
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
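
/* For illustration (not part of the original file): in the store
   "a[i_1] = x_3" the use of i_1 only feeds the array index, so this
   function returns false for i_1 and the definition of i_1 need not
   be vectorized, while the use of x_3 is a real operand of the store
   and the function returns true for it.  */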
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant

   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT cause it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected. Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
		      is_pattern_stmt_p (stmt_vinfo));
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized. For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p, false);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	  live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	  relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the liveness/relevance as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
	{
	case vect_reduction_def:
	  switch (tmp_relevant)
	    {
	    case vect_unused_in_scope:
	      relevant = vect_used_by_reduction;
	      break;

	    case vect_used_by_reduction:
	      if (gimple_code (stmt) == GIMPLE_PHI)
		break;
	      /* fall through */

	    default:
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }

	  live_p = false;
	  break;

	case vect_nested_cycle:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_in_outer_by_reduction
	      && tmp_relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	case vect_double_reduction_def:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_by_reduction)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    live_p, relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       live_p, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
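
/* For instance (illustrative only): a statement with ncopies == 4 and
   one constant operand records one prologue vector_stmt and four body
   vector_stmts; on a target whose cost model charges 1 for a
   vector_stmt, the dump above would show inside_cost = 4 and
   prologue_cost = 1.  */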
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple *first_stmt;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
	{
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  group_size = 1;
	}
      else
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  group_size = vect_cost_group_size (stmt_info);
	}

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses a high and low interleave or shuffle operations for each
	 needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar stores plus extracting the elements.  */
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple *first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses an even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  if (CONSTANT_CLASS_P (val))
	    val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
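
/* A minimal usage sketch (illustrative only, not part of the original
   file): splatting the scalar constant 3 across VECTYPE ahead of the
   loop, with STMT providing the insertion context.

     tree cst = build_int_cst (TREE_TYPE (vectype), 3);
     tree vec_cst = vect_init_vector (stmt, cst, vectype, NULL);

   Because GSI is NULL, the {3, 3, ...} initializer is emitted on the
   loop preheader edge and VEC_CST is available in every iteration.  */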
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  gimple *def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def_stmt)
	{
	  if (loc_printed)
	    dump_printf (MSG_NOTE, "  def_stmt =  ");
	  else
	    dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
	}
    }

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
	gcc_assert (vector_type);
	return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      /* Code should use get_initial_def_for_reduction.  */
      gcc_unreachable ();

    /* operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
							vectype_in);
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);
1663 /* Function vectorizable_mask_load_store.
1665 Check if STMT performs a conditional load or store that can be vectorized.
1666 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1667 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1668 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1671 vectorizable_mask_load_store (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
1672 gimple
**vec_stmt
, slp_tree slp_node
)
1674 tree vec_dest
= NULL
;
1675 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1676 stmt_vec_info prev_stmt_info
;
1677 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1678 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1679 bool nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
1680 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
1681 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1685 tree dataref_ptr
= NULL_TREE
;
1687 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1691 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
1692 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
1693 int gather_scale
= 1;
1694 enum vect_def_type gather_dt
= vect_unknown_def_type
;
1698 enum vect_def_type dt
;
1700 if (slp_node
!= NULL
)
1703 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
1704 gcc_assert (ncopies
>= 1);
1706 is_store
= gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
;
1707 mask
= gimple_call_arg (stmt
, 2);
1708 if (TYPE_PRECISION (TREE_TYPE (mask
))
1709 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype
))))
1712 /* FORNOW. This restriction should be relaxed. */
1713 if (nested_in_vect_loop
&& ncopies
> 1)
1715 if (dump_enabled_p ())
1716 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1717 "multiple types in nested loop.");
1721 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
1724 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
1727 if (!STMT_VINFO_DATA_REF (stmt_info
))
1730 elem_type
= TREE_TYPE (vectype
);
1732 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1735 if (STMT_VINFO_STRIDED_P (stmt_info
))
1738 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1741 gather_decl
= vect_check_gather_scatter (stmt
, loop_vinfo
, &gather_base
,
1742 &gather_off
, &gather_scale
);
1743 gcc_assert (gather_decl
);
1744 if (!vect_is_simple_use (gather_off
, loop_vinfo
, &def_stmt
, &gather_dt
,
1745 &gather_off_vectype
))
1747 if (dump_enabled_p ())
1748 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1749 "gather index use not simple.");
1753 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1755 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
1756 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
1758 if (dump_enabled_p ())
1759 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1760 "masked gather with integer mask not supported.");
1764 else if (tree_int_cst_compare (nested_in_vect_loop
1765 ? STMT_VINFO_DR_STEP (stmt_info
)
1766 : DR_STEP (dr
), size_zero_node
) <= 0)
1768 else if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
1769 || !can_vec_mask_load_store_p (TYPE_MODE (vectype
), !is_store
))
1772 if (TREE_CODE (mask
) != SSA_NAME
)
1775 if (!vect_is_simple_use (mask
, loop_vinfo
, &def_stmt
, &dt
))
1780 tree rhs
= gimple_call_arg (stmt
, 3);
1781 if (!vect_is_simple_use (rhs
, loop_vinfo
, &def_stmt
, &dt
))
1785 if (!vec_stmt
) /* transformation not required. */
1787 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
1789 vect_model_store_cost (stmt_info
, ncopies
, false, dt
,
1792 vect_model_load_cost (stmt_info
, ncopies
, false, NULL
, NULL
, NULL
);
1798 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1800 tree vec_oprnd0
= NULL_TREE
, op
;
1801 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1802 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
1803 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
1804 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
1805 tree mask_perm_mask
= NULL_TREE
;
1806 edge pe
= loop_preheader_edge (loop
);
1809 enum { NARROW
, NONE
, WIDEN
} modifier
;
1810 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
1812 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
1813 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1814 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1815 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1816 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1817 scaletype
= TREE_VALUE (arglist
);
1818 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
1819 && types_compatible_p (srctype
, masktype
));
1821 if (nunits
== gather_off_nunits
)
1823 else if (nunits
== gather_off_nunits
/ 2)
1825 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
1828 for (i
= 0; i
< gather_off_nunits
; ++i
)
1829 sel
[i
] = i
| nunits
;
1831 perm_mask
= vect_gen_perm_mask_checked (gather_off_vectype
, sel
);
1833 else if (nunits
== gather_off_nunits
* 2)
1835 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
1838 for (i
= 0; i
< nunits
; ++i
)
1839 sel
[i
] = i
< gather_off_nunits
1840 ? i
: i
+ nunits
- gather_off_nunits
;
1842 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
1844 for (i
= 0; i
< nunits
; ++i
)
1845 sel
[i
] = i
| gather_off_nunits
;
1846 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, sel
);
1851 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
1853 ptr
= fold_convert (ptrtype
, gather_base
);
1854 if (!is_gimple_min_invariant (ptr
))
1856 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
1857 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
1858 gcc_assert (!new_bb
);
1861 scale
= build_int_cst (scaletype
, gather_scale
);
1863 prev_stmt_info
= NULL
;
1864 for (j
= 0; j
< ncopies
; ++j
)
1866 if (modifier
== WIDEN
&& (j
& 1))
1867 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
1868 perm_mask
, stmt
, gsi
);
1871 = vect_get_vec_def_for_operand (gather_off
, stmt
);
1874 = vect_get_vec_def_for_stmt_copy (gather_dt
, vec_oprnd0
);
1876 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
1878 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
1879 == TYPE_VECTOR_SUBPARTS (idxtype
));
1880 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
1881 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
1883 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1884 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1888 if (mask_perm_mask
&& (j
& 1))
1889 mask_op
= permute_vec_elements (mask_op
, mask_op
,
1890 mask_perm_mask
, stmt
, gsi
);
1894 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
1897 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
1898 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
1902 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
1904 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
))
1905 == TYPE_VECTOR_SUBPARTS (masktype
));
1906 var
= vect_get_new_ssa_name (masktype
, vect_simple_var
);
1907 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
1909 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, mask_op
);
1910 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1916 = gimple_build_call (gather_decl
, 5, mask_op
, ptr
, op
, mask_op
,
1919 if (!useless_type_conversion_p (vectype
, rettype
))
1921 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
1922 == TYPE_VECTOR_SUBPARTS (rettype
));
1923 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
1924 gimple_call_set_lhs (new_stmt
, op
);
1925 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1926 var
= make_ssa_name (vec_dest
);
1927 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
1928 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1932 var
= make_ssa_name (vec_dest
, new_stmt
);
1933 gimple_call_set_lhs (new_stmt
, var
);
1936 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1938 if (modifier
== NARROW
)
1945 var
= permute_vec_elements (prev_res
, var
,
1946 perm_mask
, stmt
, gsi
);
1947 new_stmt
= SSA_NAME_DEF_STMT (var
);
1950 if (prev_stmt_info
== NULL
)
1951 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
1953 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
1954 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
1957 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
1959 tree lhs
= gimple_call_lhs (stmt
);
1960 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (TREE_TYPE (lhs
)));
1961 set_vinfo_for_stmt (new_stmt
, stmt_info
);
1962 set_vinfo_for_stmt (stmt
, NULL
);
1963 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
1964 gsi_replace (gsi
, new_stmt
, true);
  else if (is_store)
    {
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      /* We should have caught mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false,
						      &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask, vec_rhs);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
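  /* Illustrative only (a sketch, not output copied from a dump): each copy
     emitted by the store loop above is an internal call of roughly the form

	 MASK_STORE (dataref_ptr, align_arg, vec_mask, vec_rhs);

     i.e. IFN_MASK_STORE takes the vector data pointer, the original
     alignment argument of the scalar call, the vector mask and the rhs.  */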
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false,
						      &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask);
	  gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
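/* Illustrative only (hypothetical source, not from this file): for a scalar
   conditional load such as

       if (cond[i])
	 x = p[i];

   if-conversion rewrites the load as x = MASK_LOAD (&p[i], align, cond),
   and the load loop above replaces that scalar call with one vector
   IFN_MASK_LOAD per copy, roughly vect_x = MASK_LOAD (dataref_ptr, align,
   vec_mask).  */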
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is GS a vectorizable call?   */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (gimple_call_internal_p (stmt)
	  && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  gcc_assert (!gimple_vuse (stmt));

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
			 "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree *v = XALLOCAVEC (tree, nunits_out);
	      int k;
	      for (k = 0; k < nunits_out; ++k)
		v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
	      tree cst = build_vector (vectype_out, v);
	      tree new_var
		= vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
	      gimple *init_stmt = gimple_build_assign (new_var, cst);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, new_var);
	    }
	  else
	    {
	      new_stmt = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, new_temp);
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      break;
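      /* Note on the j != 0 path above: the operand for copy J is obtained
	 by chaining vect_get_vec_def_for_stmt_copy from the argument of the
	 previously generated call stmt, so each copy consumes the vector
	 def produced for the preceding copy of the same operand.  */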
    case NARROW:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;
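      /* For NARROW the callee produces a vector half as wide as its
	 inputs, so each vectorized call above consumes two adjacent input
	 vector defs (vec_oprnd0/vec_oprnd1, or vec_oprndsk[i] and
	 vec_oprndsk[i + 1] in the SLP path) to fill one output vector.  */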
    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
	 with vf - 1 rather than 0, that is the last iteration of the
	 vectorized loop.  */
      imm_use_iterator iter;
      use_operand_p use_p;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	{
	  basic_block use_bb = gimple_bb (use_stmt);
	  if (use_bb
	      && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
					       ncopies * nunits_out - 1));
	      update_stmt (use_stmt);
	    }
	}
    }

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
  bool simd_lane_linear;
};

/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   *ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    if (!linear_step)
	      linear_step = 1;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (is_gimple_call (def_stmt)
	       && gimple_call_internal_p (def_stmt)
	       && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
    }
}
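/* Illustrative (hypothetical) input for the walk above:

       _1 = _lane * 4;		<- MULT_EXPR, linear_step = 4
       _2 = (sizetype) _1;	<- conversion, precision checked
       op_3 = &base + _2;	<- POINTER_PLUS_EXPR on invariant base

   where _lane is the IFN_GOMP_SIMD_LANE result for this loop's simduid;
   OP_3 is then recorded as linear within the simd lane with step 4 and
   base &base.  */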
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp;
  gimple *def_stmt;
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<simd_call_arg_info> arginfo = vNULL;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.create (nargs);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
			       &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  arginfo.release ();
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }
  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen
	    > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
	  this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }

  if (bestn == NULL)
    {
      arginfo.release ();
      return false;
    }
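  /* Example of the badness scoring above (illustrative numbers): with
     LOOP_VINFO_VECT_FACTOR == 8, a clone with simdlen 4 is penalized by
     (log2 (8) - log2 (4)) * 1024 == 1024 and an inbranch clone by a
     further 2048, so a usable notinbranch clone with simdlen 8 wins.  */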
  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  {
	    arginfo.release ();
	    return false;
	  }
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    {
      arginfo.release ();
      return false;
    }

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if (bestn->simdclone->args[i].arg_type
	    == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      arginfo.release ();
      return true;
    }
  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / TYPE_VECTOR_SUBPARTS (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (TYPE_VECTOR_SUBPARTS (atype)
		      < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
		    {
		      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
			   / TYPE_VECTOR_SUBPARTS (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  size_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (TYPE_VECTOR_SUBPARTS (atype)
			   / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  if (arginfo[i].simd_lane_linear)
		    {
		      vargs.safe_push (arginfo[i].op);
		      break;
		    }
		  tree phi_res = copy_ssa_name (op);
		  gphi *new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op);
		  new_stmt
		    = gimple_build_assign (phi_arg, code, phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt,
							 loop_vinfo));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op));
		  new_stmt = gimple_build_assign (new_temp, code,
						  arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}
      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (TYPE_VECTOR_SUBPARTS (vectype)
		   == TYPE_VECTOR_SUBPARTS (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
	    {
	      unsigned int k, l;
	      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t),
						 l * prec / BITS_PER_UNIT));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				size_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
	    {
	      unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
				/ TYPE_VECTOR_SUBPARTS (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber),
					   gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds,
			      multi_step_cvt - 1);
}
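/* E.g. for MULTI_STEP_CVT == 1 the function above pushes four vector defs
   in total (two per recursion level), which is exactly what a two-step
   narrowing consumer needs to produce one final vector.  */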
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple *stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple *stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
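/* Each iteration above widens one input vector into a lo/hi pair, e.g.
   (illustrative) one V8HI operand becomes two V4SI results when CODE1 and
   CODE2 are the unpack-lo/unpack-hi style codes chosen by
   supportable_widening_operation, doubling the number of vectors held in
   *VEC_OPRNDS0.  */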
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> vec_dsts = vNULL;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  machine_mode rhs_mode;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?   */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if ((INTEGRAL_TYPE_P (lhs_type)
       && (TYPE_PRECISION (lhs_type)
	   != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
      || (INTEGRAL_TYPE_P (rhs_type)
	  && (TYPE_PRECISION (rhs_type)
	      != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision unsupported."
			 "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	 OP1.  */
      if (CONSTANT_CLASS_P (op0))
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
      else
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);

      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in < nunits_out)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else
    modifier = WIDEN;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
					  &code1, &code2, &multi_step_cvt,
					  &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
      for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
	   rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
	   rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
	{
	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt, vectype_out,
						    cvt_type, &codecvt1,
						    &codecvt2, &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    break;
	}

      if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }
  if (!vec_stmt)		/* transformation not required.  */
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_conversion ===\n");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
	  vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
	}
      else if (modifier == NARROW)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      else
	{
	  STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      interm_types.release ();
      return true;
    }

  /** Transform.  **/
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform conversion. ncopies = %d.\n", ncopies);
  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
	op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
	op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from that types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
  vec_dsts.create (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
					  (cvt_type && modifier == WIDEN)
					  ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);

  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
	   interm_types.iterate (i, &intermediate_type); i--)
	{
	  vec_dest = vect_create_destination_var (scalar_dest,
						  intermediate_type);
	  vec_dsts.quick_push (vec_dest);
	}
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
					    modifier == WIDEN
					    ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
	{
	  vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
	  if (op_type == binary_op)
	    vec_oprnds1.create (1);
	}
      else if (modifier == NARROW)
	vec_oprnds0.create (
		   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
			       -1);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  new_stmt = gimple_build_assign (vec_dest, code1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		}

	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}
      break;
    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (j == 0)
	    {
	      if (slp_node)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    {
		      unsigned int k;

		      vec_oprnd1 = op1;
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);

		      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0,
					 NULL, slp_node, -1);
		    }
		  else
		    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
				       &vec_oprnds1, slp_node, -1);
		}
	      else
		{
		  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
		  vec_oprnds0.quick_push (vec_oprnd0);
		  if (op_type == binary_op)
		    {
		      if (code == WIDEN_LSHIFT_EXPR)
			vec_oprnd1 = op1;
		      else
			vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
		      vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	      vec_oprnds0.truncate (0);
	      vec_oprnds0.quick_push (vec_oprnd0);
	      if (op_type == binary_op)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    vec_oprnd1 = op1;
		  else
		    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
								 vec_oprnd1);
		  vec_oprnds1.truncate (0);
		  vec_oprnds1.quick_push (vec_oprnd1);
		}
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  for (i = multi_step_cvt; i >= 0; i--)
	    {
	      tree this_dest = vec_dsts[i];
	      enum tree_code c1 = code1, c2 = code2;
	      if (i == 0 && codecvt2 != ERROR_MARK)
		{
		  c1 = codecvt1;
		  c2 = codecvt2;
		}
	      vect_create_vectorized_promotion_stmts (&vec_oprnds0,
						      &vec_oprnds1,
						      stmt, this_dest, gsi,
						      c1, c2, decl1, decl2,
						      op_type);
	    }

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      if (cvt_type)
		{
		  if (codecvt1 == CALL_EXPR)
		    {
		      new_stmt = gimple_build_call (decl1, 1, vop0);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  else
		    {
		      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, codecvt1,
						      vop0);
		    }

		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		new_stmt = SSA_NAME_DEF_STMT (vop0);

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (slp_node)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    {
	      vec_oprnds0.truncate (0);
	      vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
					vect_pow2 (multi_step_cvt) - 1);
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  if (cvt_type)
	    FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	      {
		if (codecvt1 == CALL_EXPR)
		  {
		    new_stmt = gimple_build_call (decl1, 1, vop0);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_call_set_lhs (new_stmt, new_temp);
		  }
		else
		  {
		    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		    new_temp = make_ssa_name (vec_dest);
		    new_stmt = gimple_build_assign (new_temp, codecvt1,
						    vop0);
		  }

		vect_finish_stmt_generation (stmt, new_stmt, gsi);
		vec_oprnds0[i] = new_temp;
	      }

	  vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
						 stmt, vec_dsts, gsi,
						 slp_node, code1,
						 &prev_stmt_info);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_dsts.release ();
  interm_types.release ();

  return true;
}
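/* Illustrative multi-step example for the NARROW path above: a
   double -> short conversion can be carried out as a FIX_TRUNC into an
   intermediate integer vector type (CVT_TYPE) followed by one or more
   VEC_PACK_TRUNC steps, with the intermediate types recorded in
   INTERM_TYPES by supportable_narrowing_operation during analysis.  */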
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
	  || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
	  || (GET_MODE_SIZE (TYPE_MODE (vectype))
	      != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	   != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
	  || ((TYPE_PRECISION (TREE_TYPE (op))
	       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	    > TYPE_PRECISION (TREE_TYPE (op)))
	   && TYPE_UNSIGNED (TREE_TYPE (op))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision "
			 "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready. create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  if (CONVERT_EXPR_CODE_P (code)
	      || code == VIEW_CONVERT_EXPR)
	    vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
	  new_stmt = gimple_build_assign (vec_dest, vop);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
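/* Two shapes of shift are distinguished below.  Illustrative example
   (not from the original sources):

     x[i] << 3      the shift amount is a loop-invariant scalar; the
		    vector/scalar (optab_scalar) form applies if the
		    target provides it
     x[i] << y[i]   the shift amount varies per element; only the
		    vector/vector (optab_vector) form can be used.  */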
static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
		    gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
	|| code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
      != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
	   || dt[1] == vect_external_def
	   || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
	 in loops if it is a constant or invariant, it is always
	 a scalar shift.  */
      if (slp_node)
	{
	  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
	  gimple *slpstmt;

	  FOR_EACH_VEC_ELT (stmts, k, slpstmt)
	    if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
	      scalar_shift_arg = false;
	}
    }
  else
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
	op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
	  || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unusable type for last operand in"
			     " vector/vector shift/rotate.\n");
	  return false;
	}
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
	  && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "vector/scalar shift/rotate found.\n");
	}
      else
	{
	  optab = optab_for_tree_code (code, vectype, optab_vector);
	  if (optab
	      && (optab_handler (optab, TYPE_MODE (vectype))
		  != CODE_FOR_nothing))
	    {
	      scalar_shift_arg = false;

	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vector/vector shift/rotate found.\n");

	      /* Unlike the other binary operators, shifts/rotates have
		 the rhs being int, instead of the same type as the lhs,
		 so make sure the scalar is the right type if we are
		 dealing with vectors of long long/long/short/char.  */
	      if (dt[1] == vect_constant_def)
		op1 = fold_convert (TREE_TYPE (vectype), op1);
	      else if (!useless_type_conversion_p (TREE_TYPE (vectype),
						   TREE_TYPE (op1)))
		{
		  if (slp_node
		      && TYPE_MODE (TREE_TYPE (vectype))
			 != TYPE_MODE (TREE_TYPE (op1)))
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					 vect_location,
					 "unusable type for last operand in"
					 " vector/vector shift/rotate.\n");
		      return false;
		    }
		  if (vec_stmt && !slp_node)
		    {
		      op1 = fold_convert (TREE_TYPE (vectype), op1);
		      op1 = vect_init_vector (stmt, op1,
					      TREE_TYPE (vectype), NULL);
		    }
		}
	    }
	}
    }

  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
	  || (vf < vect_min_worthwhile_factor (code)
	      && !vec_stmt))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (scalar_shift_arg)
	    {
	      /* Vector shl and shr insn patterns can be defined with scalar
		 operand 2 (shift operand).  In this case, use constant or loop
		 invariant op1 directly, without extending it to vector mode
		 first.  */
	      optab_op2_mode = insn_data[icode].operand[2].mode;
	      if (!VECTOR_MODE_P (optab_op2_mode))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "operand 1 using scalar mode.\n");
		  vec_oprnd1 = op1;
		  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
		  vec_oprnds1.quick_push (vec_oprnd1);
		  if (slp_node)
		    {
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.
			 TODO: Allow different constants for different vector
			 stmts generated for an SLP instance.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }

	  /* vec_oprnd1 is available if operand 1 should be of a scalar-type
	     (a special case for certain kind of vector shifts); otherwise,
	     operand 1 should be of a vector type (the usual case).  */
	  if (vec_oprnd1)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node, -1);
	}
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = vec_oprnds1[i];
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
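/* Illustrative example (not from the original sources): with V4SI
   vectors the scalar statement

     z_1 = x_2 + y_3;

   becomes one PLUS_EXPR vector statement per copy,

     vect_z.0 = vect_x.1 + vect_y.2;

   The worked VF=16 example inside the function below shows how multiple
   copies are chained through STMT_VINFO_RELATED_STMT.  */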
static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  bool target_support_p;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "num. args = %d (not unary/binary/ternary op).\n",
			 op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			     TREE_TYPE (op0));
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "no optab.\n");
	  return false;
	}
      target_support_p = (optab_handler (optab, vec_mode)
			  != CODE_FOR_nothing);
    }

  if (!target_support_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
	  || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && vf < vect_min_worthwhile_factor (code))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load.  See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (op_type == binary_op || op_type == ternary_op)
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node, -1);
	  else
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  if (op_type == ternary_op)
	    {
	      vec_oprnds2.create (1);
	      vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
								    stmt));
	    }
	}
      else
	{
	  vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
	  if (op_type == ternary_op)
	    {
	      tree vec_oprnd = vec_oprnds2.pop ();
	      vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
								      vec_oprnd));
	    }
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = ((op_type == binary_op || op_type == ternary_op)
		  ? vec_oprnds1[i] : NULL_TREE);
	  vop2 = ((op_type == ternary_op)
		  ? vec_oprnds2[i] : NULL_TREE);
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      if (decl_in_symtab_p (base_decl))
	symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
      else
	{
	  DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
	  DECL_USER_ALIGN (base_decl) = 1;
	}
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
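/* Illustrative example (not from the original sources): in

     for (i = 0; i < n; i++)
       a[i] = x;

   the store a[i] = x is replaced by one vector store per copy,

     MEM[vect_pa.0] = vect_cst.1;

   Grouped, strided and scatter stores take the more involved paths
   below.  */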
static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  gimple *def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int j;
  gimple *next_stmt, *first_stmt = NULL;
  bool grouped_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  vec<tree> dr_chain = vNULL;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  bool negative = false;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree aggr_type;
  gimple *new_stmt;
  tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
  tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
  int scatter_scale = 1;
  enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
  enum vect_def_type scatter_src_dt = vect_unknown_def_type;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable store?  */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.\n");
      return false;
    }

  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  if (!STMT_VINFO_STRIDED_P (stmt_info))
    {
      negative =
	tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
			      ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
			      size_zero_node) < 0;
      if (negative && ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types with negative step.\n");
	  return false;
	}
      if (negative)
	{
	  gcc_assert (!grouped_store);
	  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
	  if (alignment_support_scheme != dr_aligned
	      && alignment_support_scheme != dr_unaligned_supported)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step but alignment required.\n");
	      return false;
	    }
	  if (dt != vect_constant_def
	      && dt != vect_external_def
	      && !perm_mask_for_reverse (vectype))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step and reversing not supported.\n");
	      return false;
	    }
	}
    }

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      if (!slp
	  && !PURE_SLP_STMT (stmt_info)
	  && !STMT_VINFO_STRIDED_P (stmt_info))
	{
	  if (vect_store_lanes_supported (vectype, group_size))
	    store_lanes_p = true;
	  else if (!vect_grouped_store_supported (vectype, group_size))
	    return false;
	}

      if (STMT_VINFO_STRIDED_P (stmt_info)
	  && (slp || PURE_SLP_STMT (stmt_info))
	  && (group_size > nunits
	      || nunits % group_size != 0))
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "unhandled strided group store\n");
	  return false;
	}

      if (first_stmt == stmt)
	{
	  /* STMT is the leader of the group.  Check the operands of all the
	     stmts of the group.  */
	  next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
	  while (next_stmt)
	    {
	      gcc_assert (gimple_assign_single_p (next_stmt));
	      op = gimple_assign_rhs1 (next_stmt);
	      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "use not simple.\n");
		  return false;
		}
	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	    }
	}
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
						&scatter_off, &scatter_scale);
      gcc_assert (scatter_decl);
      if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
			       &scatter_off_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "scatter index use not simple.");
	  return false;
	}
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
	vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
			       NULL, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  ensure_base_align (stmt_info, dr);

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);

      if (nunits == (unsigned int) scatter_off_nunits)
	modifier = NONE;
      else if (nunits == (unsigned int) scatter_off_nunits / 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
	  modifier = WIDEN;

	  for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
	    sel[i] = i | nunits;

	  perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
	  gcc_assert (perm_mask != NULL_TREE);
	}
      else if (nunits == (unsigned int) scatter_off_nunits * 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
	  modifier = NARROW;

	  for (i = 0; i < (unsigned int) nunits; ++i)
	    sel[i] = i | scatter_off_nunits;

	  perm_mask = vect_gen_perm_mask_checked (vectype, sel);
	  gcc_assert (perm_mask != NULL_TREE);
	  ncopies *= 2;
	}
      else
	gcc_unreachable ();

      rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);

      gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
			   && TREE_CODE (rettype) == VOID_TYPE);

      ptr = fold_convert (ptrtype, scatter_base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      /* Currently we support only unconditional scatter stores,
	 so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
      mask = vect_init_vector (stmt, mask, masktype, NULL);

      scale = build_int_cst (scaletype, scatter_scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (j == 0)
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
	      op = vec_oprnd0
		= vect_get_vec_def_for_operand (scatter_off, stmt);
	    }
	  else if (modifier != NONE && (j & 1))
	    {
	      if (modifier == WIDEN)
		{
		  src = vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
		  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
					     stmt, gsi);
		}
	      else if (modifier == NARROW)
		{
		  src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
					      stmt, gsi);
		  op = vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
		}
	      else
		gcc_unreachable ();
	    }
	  else
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
	      op = vec_oprnd0
		= vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
	    }

	  if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
			  == TYPE_VECTOR_SUBPARTS (srctype));
	      var = vect_get_new_ssa_name (srctype, vect_simple_var);
	      src = build1 (VIEW_CONVERT_EXPR, srctype, src);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      src = var;
	    }

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
			  == TYPE_VECTOR_SUBPARTS (idxtype));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  new_stmt
	    = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      return true;
    }

  if (grouped_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
	 reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
	  < GROUP_SIZE (vinfo_for_stmt (first_stmt))
	  && !slp)
	{
	  *vec_stmt = NULL;
	  return true;
	}

      if (slp)
	{
	  grouped_store = false;
	  /* VEC_NUM is the number of vect stmts to be created for this
	     group.  */
	  vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
	  op = gimple_assign_rhs1 (first_stmt);
	}
      else
	/* VEC_NUM is the number of vect stmts to be created for this
	   group.  */
	vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform store. ncopies = %d\n", ncopies);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;
      tree vec_oprnd;
      unsigned int g;

      gcc_assert (!nested_in_vect_loop_p (loop, stmt));

      stride_base
	= fold_build_pointer_plus
	    (unshare_expr (DR_BASE_ADDRESS (first_dr)),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
			 convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));

      /* For a store with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     array[i] = ...;

	 we generate a new induction variable and new stores from
	 the components of the (vectorized) rhs:

	   for (j = 0; ; j += VF*stride)
	     vectemp = ...;
	     tmp1 = vectemp[0];
	     array[j] = tmp1;
	     tmp2 = vectemp[1];
	     array[j + stride] = tmp2;
	     ...
	 */

      unsigned nstores = nunits;
      tree ltype = elem_type;
      if (slp)
	{
	  nstores = nunits / group_size;
	  if (group_size < nunits)
	    ltype = build_vector_type (elem_type, group_size);
	  else
	    ltype = vectype;
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
	  ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  group_size = 1;
	}

      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
			    build_int_cst (TREE_TYPE (ivstep),
					   ncopies * nstores));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (stride_base, ivstep, NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
      next_stmt = first_stmt;
      for (g = 0; g < group_size; g++)
	{
	  running_off = offvar;
	  if (g)
	    {
	      tree size = TYPE_SIZE_UNIT (ltype);
	      tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
				      size);
	      tree newoff = copy_ssa_name (running_off, NULL);
	      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					  running_off, pos);
	      vect_finish_stmt_generation (stmt, incr, gsi);
	      running_off = newoff;
	    }
	  for (j = 0; j < ncopies; j++)
	    {
	      /* We've set op and dt above, from gimple_assign_rhs1(stmt),
		 and first_stmt == stmt.  */
	      if (j == 0)
		{
		  if (slp)
		    {
		      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
					 slp_node, -1);
		      vec_oprnd = vec_oprnds[0];
		    }
		  else
		    {
		      gcc_assert (gimple_assign_single_p (next_stmt));
		      op = gimple_assign_rhs1 (next_stmt);
		      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		    }
		}
	      else
		{
		  if (slp)
		    vec_oprnd = vec_oprnds[j];
		  else
		    {
		      vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
		      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
		    }
		}

	      for (i = 0; i < nstores; i++)
		{
		  tree newref, newoff;
		  gimple *incr, *assign;
		  tree size = TYPE_SIZE (ltype);
		  /* Extract the i'th component.  */
		  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
					  bitsize_int (i), size);
		  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
					   size, pos);

		  elem = force_gimple_operand_gsi (gsi, elem, true,
						   NULL_TREE, true,
						   GSI_SAME_STMT);

		  newref = build2 (MEM_REF, ltype,
				   running_off, alias_off);

		  /* And store it to *running_off.  */
		  assign = gimple_build_assign (newref, elem);
		  vect_finish_stmt_generation (stmt, assign, gsi);

		  newoff = copy_ssa_name (running_off, NULL);
		  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					      running_off, stride_step);
		  vect_finish_stmt_generation (stmt, incr, gsi);

		  running_off = newoff;
		  if (g == group_size - 1
		      && !slp)
		    {
		      if (j == 0 && i == 0)
			STMT_VINFO_VEC_STMT (stmt_info)
			  = *vec_stmt = assign;
		      else
			STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
		      prev_stmt_info = vinfo_for_stmt (assign);
		    }
		}
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	}
      return true;
    }

  dr_chain.create (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

	S1:  &base + 2 = x2
	S2:  &base = x0
	S3:  &base + 1 = x1
	S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

	VS1: &base = vx2
	VS2: &base + vec_size*1 = vx0
	VS3: &base + vec_size*2 = vx1
	VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

	VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
	VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
	...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {

      if (j == 0)
	{
	  if (slp)
	    {
	      /* Get vectorized arguments for SLP_NODE.  */
	      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
				 NULL, slp_node, -1);

	      vec_oprnd = vec_oprnds[0];
	    }
	  else
	    {
	      /* For interleaved stores we collect vectorized defs for all the
		 stores in the group in DR_CHAIN and OPRNDS.  DR_CHAIN is then
		 used as an input to vect_permute_store_chain(), and OPRNDS as
		 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

		 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
		 OPRNDS are of size 1.  */
	      next_stmt = first_stmt;
	      for (i = 0; i < group_size; i++)
		{
		  /* Since gaps are not supported for interleaved stores,
		     GROUP_SIZE is the exact number of stmts in the chain.
		     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
		     there is no interleaving, GROUP_SIZE is 1, and only one
		     iteration of the loop will be executed.  */
		  gcc_assert (next_stmt
			      && gimple_assign_single_p (next_stmt));
		  op = gimple_assign_rhs1 (next_stmt);

		  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		  dr_chain.quick_push (vec_oprnd);
		  oprnds.quick_push (vec_oprnd);
		  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
		}
	    }

	  /* We should have caught mismatched types earlier.  */
	  gcc_assert (useless_type_conversion_p (vectype,
						 TREE_TYPE (vec_oprnd)));
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr))
	      && integer_zerop (DR_INIT (first_dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (DR_REF (first_dr))))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
	      dataref_offset = build_int_cst (reference_alias_ptr_type
					      (DR_REF (first_dr)), 0);
	      inv_p = false;
	    }
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt, aggr_type,
					  simd_lane_access_p ? loop : NULL,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, &inv_p);
	  gcc_assert (bb_vinfo || !inv_p);
	}
      else
	{
	  /* For interleaved stores we created vectorized defs for all the
	     defs stored in OPRNDS in the previous iteration (previous copy).
	     DR_CHAIN is then used as an input to vect_permute_store_chain(),
	     and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
	     next copy.
	     If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
	     OPRNDS are of size 1.  */
	  for (i = 0; i < group_size; i++)
	    {
	      op = oprnds[i];
	      vect_is_simple_use (op, vinfo, &def_stmt, &dt);
	      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
	      dr_chain[i] = vec_oprnd;
	      oprnds[i] = vec_oprnd;
	    }
	  if (dataref_offset)
	    dataref_offset
	      = int_const_binop (PLUS_EXPR, dataref_offset,
				 TYPE_SIZE_UNIT (aggr_type));
	  else
	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					   TYPE_SIZE_UNIT (aggr_type));
	}

      if (store_lanes_p)
	{
	  tree vec_array;

	  /* Combine all the vectors into an array.  */
	  vec_array = create_vector_array (vectype, vec_num);
	  for (i = 0; i < vec_num; i++)
	    {
	      vec_oprnd = dr_chain[i];
	      write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
	    }

	  /* Emit:
	       MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
	  data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
	  new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
	  gimple_call_set_lhs (new_stmt, data_ref);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	}
      else
	{
	  new_stmt = NULL;
	  if (grouped_store)
	    {
	      if (j == 0)
		result_chain.create (group_size);
	      /* Permute.  */
	      vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
					&result_chain);
	    }

	  next_stmt = first_stmt;
	  for (i = 0; i < vec_num; i++)
	    {
	      unsigned align, misalign;

	      if (i > 0)
		/* Bump the vector pointer.  */
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt, NULL_TREE);

	      if (slp)
		vec_oprnd = vec_oprnds[i];
	      else if (grouped_store)
		/* For grouped stores vectorized defs are interleaved in
		   vect_permute_store_chain().  */
		vec_oprnd = result_chain[i];

	      data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
				      dataref_ptr,
				      dataref_offset
				      ? dataref_offset
				      : build_int_cst (reference_alias_ptr_type
						       (DR_REF (first_dr)), 0));
	      align = TYPE_ALIGN_UNIT (vectype);
	      if (aligned_access_p (first_dr))
		misalign = 0;
	      else if (DR_MISALIGNMENT (first_dr) == -1)
		{
		  if (DR_VECT_AUX (first_dr)->base_element_aligned)
		    align = TYPE_ALIGN_UNIT (elem_type);
		  else
		    align = get_object_alignment (DR_REF (first_dr))
			    / BITS_PER_UNIT;
		  misalign = 0;
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  align * BITS_PER_UNIT);
		}
	      else
		{
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  TYPE_ALIGN (elem_type));
		  misalign = DR_MISALIGNMENT (first_dr);
		}
	      if (dataref_offset == NULL_TREE
		  && TREE_CODE (dataref_ptr) == SSA_NAME)
		set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
					misalign);

	      if (negative
		  && dt != vect_constant_def
		  && dt != vect_external_def)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  tree perm_dest
		    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
						   vectype);
		  tree new_temp = make_ssa_name (perm_dest);

		  /* Generate the permute statement.  */
		  gimple *perm_stmt
		    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
					   vec_oprnd, perm_mask);
		  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

		  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
		  vec_oprnd = new_temp;
		}

	      /* Arguments are ready.  Create the new vector stmt.  */
	      new_stmt = gimple_build_assign (data_ref, vec_oprnd);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if (slp)
		continue;

	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	      if (!next_stmt)
		break;
	    }
	}
      if (!slp)
	{
	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  dr_chain.release ();
  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  mask_elt_type = lang_hooks.types.type_for_mode
		    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
/* Given vector variables X and Y that were generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
		      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loop's preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  /* Make sure we don't need to recurse.  While we could do
	     so in simple cases when there are more complex use webs
	     we don't have an easy way to preserve stmt order to fulfil
	     dependencies within them.  */
	  tree op2;
	  ssa_op_iter i2;
	  if (gimple_code (def_stmt) == GIMPLE_PHI)
	    return false;
	  FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
	      if (!gimple_nop_p (def_stmt2)
		  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
		return false;
	    }
	  any = true;
	}
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
	  gsi_remove (&gsi, false);
	  gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
	}
    }

  return true;
}
/* vectorizable_load.

   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
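/* Illustrative example (not from the original sources): the scalar load
   x = a[i] with V4SI vectors becomes

     vect_x.0 = MEM[vect_pa.1];

   one vector load per copy; grouped, strided and gather loads take the
   more involved paths below.  */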
5988 vectorizable_load (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
5989 slp_tree slp_node
, slp_instance slp_node_instance
)
5992 tree vec_dest
= NULL
;
5993 tree data_ref
= NULL
;
5994 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5995 stmt_vec_info prev_stmt_info
;
5996 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5997 struct loop
*loop
= NULL
;
5998 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
5999 bool nested_in_vect_loop
= false;
6000 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
6004 gimple
*new_stmt
= NULL
;
6006 enum dr_alignment_support alignment_support_scheme
;
6007 tree dataref_ptr
= NULL_TREE
;
6008 tree dataref_offset
= NULL_TREE
;
6009 gimple
*ptr_incr
= NULL
;
6011 int i
, j
, group_size
= -1, group_gap_adj
;
6012 tree msq
= NULL_TREE
, lsq
;
6013 tree offset
= NULL_TREE
;
6014 tree byte_offset
= NULL_TREE
;
6015 tree realignment_token
= NULL_TREE
;
6017 vec
<tree
> dr_chain
= vNULL
;
6018 bool grouped_load
= false;
6019 bool load_lanes_p
= false;
6022 bool negative
= false;
6023 bool compute_in_loop
= false;
6024 struct loop
*at_loop
;
6026 bool slp
= (slp_node
!= NULL
);
6027 bool slp_perm
= false;
6028 enum tree_code code
;
6029 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
6032 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
6033 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
6034 int gather_scale
= 1;
6035 enum vect_def_type gather_dt
= vect_unknown_def_type
;
6036 vec_info
*vinfo
= stmt_info
->vinfo
;
6038 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
6041 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
6044 /* Is vectorizable load? */
6045 if (!is_gimple_assign (stmt
))
6048 scalar_dest
= gimple_assign_lhs (stmt
);
6049 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
6052 code
= gimple_assign_rhs_code (stmt
);
6053 if (code
!= ARRAY_REF
6054 && code
!= BIT_FIELD_REF
6055 && code
!= INDIRECT_REF
6056 && code
!= COMPONENT_REF
6057 && code
!= IMAGPART_EXPR
6058 && code
!= REALPART_EXPR
6060 && TREE_CODE_CLASS (code
) != tcc_declaration
)
6063 if (!STMT_VINFO_DATA_REF (stmt_info
))
6066 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
6067 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
6071 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
6072 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
6073 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
6078 /* Multiple types in SLP are handled by creating the appropriate number of
6079 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6081 if (slp
|| PURE_SLP_STMT (stmt_info
))
6084 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
6086 gcc_assert (ncopies
>= 1);
6088 /* FORNOW. This restriction should be relaxed. */
6089 if (nested_in_vect_loop
&& ncopies
> 1)
6091 if (dump_enabled_p ())
6092 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6093 "multiple types in nested loop.\n");
6097 /* Invalidate assumptions made by dependence analysis when vectorization
6098 on the unrolled body effectively re-orders stmts. */
6100 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6101 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6102 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6104 if (dump_enabled_p ())
6105 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6106 "cannot perform implicit CSE when unrolling "
6107 "with negative dependence distance\n");
6111 elem_type
= TREE_TYPE (vectype
);
6112 mode
= TYPE_MODE (vectype
);
6114 /* FORNOW. In some cases can vectorize even if data-type not supported
6115 (e.g. - data copies). */
6116 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
6118 if (dump_enabled_p ())
6119 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6120 "Aligned load, but unsupported type.\n");
6124 /* Check if the load is a part of an interleaving chain. */
6125 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
6127 grouped_load
= true;
6129 gcc_assert (!nested_in_vect_loop
&& !STMT_VINFO_GATHER_SCATTER_P (stmt_info
));
6131 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6133 /* If this is single-element interleaving with an element distance
6134 that leaves unused vector loads around punt - we at least create
6135 very sub-optimal code in that case (and blow up memory,
6137 if (first_stmt
== stmt
6138 && !GROUP_NEXT_ELEMENT (stmt_info
)
6139 && GROUP_SIZE (stmt_info
) > TYPE_VECTOR_SUBPARTS (vectype
))
6141 if (dump_enabled_p ())
6142 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6143 "single-element interleaving not supported "
6144 "for not adjacent vector loads\n");
6148 if (slp
&& SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
6151 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
6153 && !PURE_SLP_STMT (stmt_info
)
6154 && !STMT_VINFO_STRIDED_P (stmt_info
))
6156 if (vect_load_lanes_supported (vectype
, group_size
))
6157 load_lanes_p
= true;
6158 else if (!vect_grouped_load_supported (vectype
, group_size
))
6162 /* Invalidate assumptions made by dependence analysis when vectorization
6163 on the unrolled body effectively re-orders stmts. */
6164 if (!PURE_SLP_STMT (stmt_info
)
6165 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6166 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6167 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6169 if (dump_enabled_p ())
6170 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6171 "cannot perform implicit CSE when performing "
6172 "group loads with negative dependence distance\n");
6176 /* Similarly when the stmt is a load that is both part of a SLP
6177 instance and a loop vectorized stmt via the same-dr mechanism
6178 we have to give up. */
6179 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)
6180 && (STMT_SLP_TYPE (stmt_info
)
6181 != STMT_SLP_TYPE (vinfo_for_stmt
6182 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)))))
6184 if (dump_enabled_p ())
6185 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6186 "conflicting SLP types for CSEd load\n");
6192 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
6195 gather_decl
= vect_check_gather_scatter (stmt
, loop_vinfo
, &gather_base
,
6196 &gather_off
, &gather_scale
);
6197 gcc_assert (gather_decl
);
6198 if (!vect_is_simple_use (gather_off
, vinfo
, &def_stmt
, &gather_dt
,
6199 &gather_off_vectype
))
6201 if (dump_enabled_p ())
6202 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6203 "gather index use not simple.\n");
6207 else if (STMT_VINFO_STRIDED_P (stmt_info
))
6210 && (slp
|| PURE_SLP_STMT (stmt_info
)))
6211 && (group_size
> nunits
6212 || nunits
% group_size
!= 0))
6214 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6215 "unhandled strided group load\n");
  else
    {
      negative = tree_int_cst_compare (nested_in_vect_loop
                                       ? STMT_VINFO_DR_STEP (stmt_info)
                                       : DR_STEP (dr),
                                       size_zero_node) < 0;
      if (negative && ncopies > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types with negative step.\n");
          return false;
        }

      if (negative)
        {
          if (grouped_load)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step for group load not supported"
                                 "\n");
              return false;
            }

          alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
          if (alignment_support_scheme != dr_aligned
              && alignment_support_scheme != dr_unaligned_supported)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step but alignment required.\n");
              return false;
            }

          if (!perm_mask_for_reverse (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step and reversing not supported."
                                 "\n");
              return false;
            }
        }
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
                              NULL, NULL, NULL);
      return true;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform load. ncopies = %d\n", ncopies);

  /* Transform.  */

  ensure_base_align (stmt_info, dr);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE,
           prev_res = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);

      if (nunits == gather_off_nunits)
        modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < gather_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
        }
      else if (nunits == gather_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < nunits; ++i)
            sel[i] = i < gather_off_nunits
                     ? i : i + nunits - gather_off_nunits;

          perm_mask = vect_gen_perm_mask_checked (vectype, sel);
          ncopies *= 2;
        }
      else
        gcc_unreachable ();

      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));

      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      /* Currently we support only unconditional gather loads,
         so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
        mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
        {
          mask = build_int_cst (TREE_TYPE (masktype), -1);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = -1;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
          mask = build_real (TREE_TYPE (masktype), r);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else
        gcc_unreachable ();

      scale = build_int_cst (scaletype, gather_scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
        merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = 0;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
          merge = build_real (TREE_TYPE (rettype), r);
        }
      else
        gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (modifier == WIDEN && (j & 1))
            op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
                                       perm_mask, stmt, gsi);
          else if (j == 0)
            op = vec_oprnd0
              = vect_get_vec_def_for_operand (gather_off, stmt);
          else
            op = vec_oprnd0
              = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          new_stmt
            = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);

          if (!useless_type_conversion_p (vectype, rettype))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
                          == TYPE_VECTOR_SUBPARTS (rettype));
              op = vect_get_new_ssa_name (rettype, vect_simple_var);
              gimple_call_set_lhs (new_stmt, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              var = make_ssa_name (vec_dest);
              op = build1 (VIEW_CONVERT_EXPR, vectype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
            }
          else
            {
              var = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, var);
            }

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (modifier == NARROW)
            {
              if ((j & 1) == 0)
                {
                  prev_res = var;
                  continue;
                }
              var = permute_vec_elements (prev_res, var,
                                          perm_mask, stmt, gsi);
              new_stmt = SSA_NAME_DEF_STMT (var);
            }

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;

      gcc_assert (!nested_in_vect_loop);

      if (slp && grouped_load)
        first_dr = STMT_VINFO_DATA_REF
            (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
      else
        first_dr = dr;

      stride_base
        = fold_build_pointer_plus
            (DR_BASE_ADDRESS (first_dr),
             size_binop (PLUS_EXPR,
                         convert_to_ptrofftype (DR_OFFSET (first_dr)),
                         convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr));

      /* For a load with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             ... = array[i];

         we generate a new induction variable and new accesses to
         form a new vector (or vectors, depending on ncopies):

           for (j = 0; ; j += VF*stride)
             tmp1 = array[j];
             tmp2 = array[j + stride];
             ...
             vectemp = {tmp1, tmp2, ...}  */
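      /* As a concrete (illustrative) instance: with stride == 3 and four
         elements per vector, each vector iteration loads

           tmp1 = array[j];
           tmp2 = array[j + 3];
           tmp3 = array[j + 6];
           tmp4 = array[j + 9];
           vectemp = {tmp1, tmp2, tmp3, tmp4}

         and j advances by 4*3 elements per iteration.  */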
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
                            build_int_cst (TREE_TYPE (stride_step), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
                 loop, &incr_gsi, insert_after,
                 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (unshare_expr (stride_step),
                                          &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
      int nloads = nunits;
      tree ltype = TREE_TYPE (vectype);
      auto_vec<tree> dr_chain;
      if (slp)
        {
          nloads = nunits / group_size;
          if (group_size < nunits)
            ltype = build_vector_type (TREE_TYPE (vectype), group_size);
          else
            ltype = vectype;
          ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
          ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          if (slp_perm)
            dr_chain.create (ncopies);
        }
      for (j = 0; j < ncopies; j++)
        {
          if (nloads > 1)
            {
              vec_alloc (v, nloads);
              for (i = 0; i < nloads; i++)
                {
                  tree newref, newoff;

                  newref = build2 (MEM_REF, ltype, running_off, alias_off);
                  newref = force_gimple_operand_gsi (gsi, newref, true,
                                                     NULL_TREE, true,
                                                     GSI_SAME_STMT);
                  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
                  newoff = copy_ssa_name (running_off);
                  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                              running_off, stride_step);
                  vect_finish_stmt_generation (stmt, incr, gsi);

                  running_off = newoff;
                }

              tree vec_inv = build_constructor (vectype, v);
              new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
              new_stmt = SSA_NAME_DEF_STMT (new_temp);
            }
          else
            {
              new_stmt = gimple_build_assign (make_ssa_name (ltype),
                                              build2 (MEM_REF, ltype,
                                                      running_off, alias_off));
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              tree newoff = copy_ssa_name (running_off);
              gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                                  running_off, stride_step);
              vect_finish_stmt_generation (stmt, incr, gsi);

              running_off = newoff;
            }

          if (slp)
            {
              SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
              if (slp_perm)
                dr_chain.quick_push (gimple_assign_lhs (new_stmt));
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      if (slp_perm)
        vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                      slp_node_instance, false);
      return true;
    }
  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      /* For SLP vectorization we directly vectorize a subchain
         without permutation.  */
      if (slp
          && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
          && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
        first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
          /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
             ??? But we can only do so if there is exactly one
             as we have no way to get at the rest.  Leave the CSE
             opportunity alone.
             ??? With the group load eventually participating
             in multiple different permutations (having multiple
             slp nodes which refer to the same group) the CSE
             is even wrong code.  See PR56270.  */
          && !slp)
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          grouped_load = false;
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            vec_num = (group_size * vf + nunits - 1) / nunits;
          else
            vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          group_gap_adj = vf * group_size - nunits * vec_num;
        }
      else
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See the documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in the RELATED_STMT field is used to vectorize
     stmt S2.  */
  /* In case of interleaving (non-unit grouped access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
     VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
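  /* As an illustrative instance: a group of size 2 loaded into two V4SI
     vectors vx0 = {a0,b0,a1,b1} and vx1 = {a2,b2,a3,b3} uses the masks
     { 0, 2, 4, 6 } and { 1, 3, 5, 7 } above, yielding {a0,a1,a2,a3} and
     {b0,b1,b2,b3}.  */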
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = initial_addr;
       indx = 0;
       loop {
         p = p + indx * vectype_size;
         vec_dest = *(p);
         indx = indx + 1;
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

       msq_init = *(floor(p1))
       p2 = initial_addr + VS - 1;
       realignment_token = call target_builtin;
       indx = 0;
       loop {
         p2 = p2 + indx * vectype_size
         lsq = *(floor(p2))
         vec_dest = realign_load (msq, lsq, realignment_token)
         indx = indx + 1;
         msq = lsq;
       }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
  if (nested_in_vect_loop
      && (TREE_INT_CST_LOW (DR_STEP (dr))
          % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
          byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
                                    size_one_node);
        }
    }
  else
    at_loop = loop;

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        {
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (DR_REF (first_dr)))
              && (alignment_support_scheme == dr_aligned
                  || alignment_support_scheme == dr_unaligned_supported))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (reference_alias_ptr_type
                                              (DR_REF (first_dr)), 0);
              inv_p = false;
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p,
                                          byte_offset);
        }
      else if (dataref_offset)
        dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
                                          TYPE_SIZE_UNIT (aggr_type));
      else
        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
        dr_chain.create (vec_num);

      if (load_lanes_p)
        {
          tree vec_array;

          vec_array = create_vector_array (vectype, vec_num);

          /* Emit:
               VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
          gimple_call_set_lhs (new_stmt, vec_array);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt, gsi, scalar_dest,
                                            vec_array, i);
              dr_chain.quick_push (new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_grouped_load_vectors (stmt, dr_chain);
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              /* 2. Create the vector-load in the loop.  */
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    unsigned int align, misalign;

                    data_ref
                      = fold_build2 (MEM_REF, vectype, dataref_ptr,
                                     dataref_offset
                                     ? dataref_offset
                                     : build_int_cst (reference_alias_ptr_type
                                                      (DR_REF (first_dr)), 0));
                    align = TYPE_ALIGN_UNIT (vectype);
                    if (alignment_support_scheme == dr_aligned)
                      {
                        gcc_assert (aligned_access_p (first_dr));
                        misalign = 0;
                      }
                    else if (DR_MISALIGNMENT (first_dr) == -1)
                      {
                        if (DR_VECT_AUX (first_dr)->base_element_aligned)
                          align = TYPE_ALIGN_UNIT (elem_type);
                        else
                          align = (get_object_alignment (DR_REF (first_dr))
                                   / BITS_PER_UNIT);
                        misalign = 0;
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                align * BITS_PER_UNIT);
                      }
                    else
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        misalign = DR_MISALIGNMENT (first_dr);
                      }
                    if (dataref_offset == NULL_TREE
                        && TREE_CODE (dataref_ptr) == SSA_NAME)
                      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
                                              align, misalign);
                    break;
                  }
                case dr_explicit_realign:
                  {
                    tree ptr, bump;

                    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

                    if (compute_in_loop)
                      msq = vect_setup_realignment (first_stmt, gsi,
                                                    &realignment_token,
                                                    dr_explicit_realign,
                                                    dataref_ptr, NULL);

                    if (TREE_CODE (dataref_ptr) == SSA_NAME)
                      ptr = copy_ssa_name (dataref_ptr);
                    else
                      ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
                    new_stmt = gimple_build_assign
                                 (ptr, BIT_AND_EXPR, dataref_ptr,
                                  build_int_cst
                                  (TREE_TYPE (dataref_ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    vec_dest = vect_create_destination_var (scalar_dest,
                                                            vectype);
                    new_stmt = gimple_build_assign (vec_dest, data_ref);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_assign_set_lhs (new_stmt, new_temp);
                    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    msq = new_temp;

                    bump = size_binop (MULT_EXPR, vs,
                                       TYPE_SIZE_UNIT (elem_type));
                    bump = size_binop (MINUS_EXPR, bump, size_one_node);
                    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                    new_stmt = gimple_build_assign
                                 (NULL_TREE, BIT_AND_EXPR, ptr,
                                  build_int_cst
                                  (TREE_TYPE (ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    ptr = copy_ssa_name (ptr, new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    break;
                  }
                case dr_explicit_realign_optimized:
                  if (TREE_CODE (dataref_ptr) == SSA_NAME)
                    new_temp = copy_ssa_name (dataref_ptr);
                  else
                    new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
                  new_stmt = gimple_build_assign
                               (new_temp, BIT_AND_EXPR, dataref_ptr,
                                build_int_cst
                                  (TREE_TYPE (dataref_ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  data_ref
                    = build2 (MEM_REF, vectype, new_temp,
                              build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                  break;
                default:
                  gcc_unreachable ();
                }
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              /* 3. Handle explicit realignment if necessary/supported.
                 Create in loop:
                   vec_dest = realign_load (msq, lsq, realignment_token)  */
              if (alignment_support_scheme == dr_explicit_realign_optimized
                  || alignment_support_scheme == dr_explicit_realign)
                {
                  lsq = gimple_assign_lhs (new_stmt);
                  if (!realignment_token)
                    realignment_token = dataref_ptr;
                  vec_dest = vect_create_destination_var (scalar_dest, vectype);
                  new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
                                                  msq, lsq, realignment_token);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  if (alignment_support_scheme == dr_explicit_realign_optimized)
                    {
                      gcc_assert (phi);
                      if (i == vec_num - 1 && j == ncopies - 1)
                        add_phi_arg (phi, lsq,
                                     loop_latch_edge (containing_loop),
                                     UNKNOWN_LOCATION);
                      msq = lsq;
                    }
                }

              /* 4. Handle invariant-load.  */
              if (inv_p && !bb_vinfo)
                {
                  gcc_assert (!grouped_load);
                  /* If we have versioned for aliasing or the loop doesn't
                     have any data dependencies that would preclude this,
                     then we are sure this is a loop invariant load and
                     thus we can insert it on the preheader edge.  */
                  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
                      && !nested_in_vect_loop
                      && hoist_defs_of_uses (stmt, loop))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "hoisting out of the vectorized "
                                           "loop: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                        }
                      tree tem = copy_ssa_name (scalar_dest);
                      gsi_insert_on_edge_immediate
                        (loop_preheader_edge (loop),
                         gimple_build_assign (tem,
                                              unshare_expr
                                                (gimple_assign_rhs1 (stmt))));
                      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
                    }
                  else
                    {
                      gimple_stmt_iterator gsi2 = *gsi;
                      gsi_next (&gsi2);
                      new_temp = vect_init_vector (stmt, scalar_dest,
                                                   vectype, &gsi2);
                    }
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                  set_vinfo_for_stmt (new_stmt,
                                      new_stmt_vec_info (new_stmt, vinfo));
                }

              if (negative)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  new_temp = permute_vec_elements (new_temp, new_temp,
                                                   perm_mask, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }

              /* Collect vector loads and later create their permutation in
                 vect_transform_grouped_load ().  */
              if (grouped_load || slp_perm)
                dr_chain.quick_push (new_temp);

              /* Store vector loads in the corresponding SLP_NODE.  */
              if (slp && !slp_perm)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
            }
          /* Bump the vector pointer to account for a gap or for excess
             elements loaded for a permuted SLP load.  */
          if (group_gap_adj != 0)
            {
              bool ovf;
              tree bump
                = wide_int_to_tree (sizetype,
                                    wi::smul (TYPE_SIZE_UNIT (elem_type),
                                              group_gap_adj, &ovf));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, bump);
            }
        }

      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                             slp_node_instance, false))
            {
              dr_chain.release ();
              return false;
            }
        }
      else
        {
          if (grouped_load)
            {
              if (!load_lanes_p)
                vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      dr_chain.release ();
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */
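/* E.g. (illustrative): for a condition a_1 < b_2 both operands are checked
   here; each must be an SSA_NAME with a vectorizable definition or an
   integer/real/fixed-point constant, and the comparison vector type is
   taken from whichever operand provides one.  */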
static bool
vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
{
  tree lhs, rhs;
  enum vect_def_type dt;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
        return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
           && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
        return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
           && TREE_CODE (rhs) != FIXED_CST)
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
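/* Illustrative sketch of the transformation performed below:

     S:  x = (a < b) ? c : d;

   becomes

     VS: vx = VEC_COND_EXPR <va < vb, vc, vd>

   where the comparison is carried out in a truth vector type of the same
   size as the comparison operands' vector type.  */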
bool
vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, tree reduc_def, int reduc_index,
                        slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dt, dts[4];
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;

  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
    {
      if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
        return false;

      if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
          && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
               && reduc_def))
        return false;

      /* FORNOW: not yet supported.  */
      if (STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "value used after loop.\n");
          return false;
        }
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
      || !comp_vectype)
    return false;

  gimple *def_stmt;
  if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt))
    return false;
  if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt))
    return false;

  vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
  if (vec_cmp_type == NULL_TREE)
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (vectype, comp_vectype);
    }

  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 4> ops;
              auto_vec<vec<tree>, 4> vec_defs;

              ops.safe_push (TREE_OPERAND (cond_expr, 0));
              ops.safe_push (TREE_OPERAND (cond_expr, 1));
              ops.safe_push (then_clause);
              ops.safe_push (else_clause);
              vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
              vec_oprnds3 = vec_defs.pop ();
              vec_oprnds2 = vec_defs.pop ();
              vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();

              ops.release ();
              vec_defs.release ();
            }
          else
            {
              gimple *gtemp;
              vec_cond_lhs =
                vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt);
              vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
                                  loop_vinfo, &gtemp, &dts[0]);

              vec_cond_rhs =
                vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
                                              stmt);
              vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
                                  loop_vinfo, &gtemp, &dts[1]);
              if (reduc_index == 1)
                vec_then_clause = reduc_def;
              else
                {
                  vec_then_clause = vect_get_vec_def_for_operand (then_clause,
                                                                  stmt);
                  vect_is_simple_use (then_clause, loop_vinfo,
                                      &gtemp, &dts[2]);
                }
              if (reduc_index == 2)
                vec_else_clause = reduc_def;
              else
                {
                  vec_else_clause = vect_get_vec_def_for_operand (else_clause,
                                                                  stmt);
                  vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
                }
            }
        }
      else
        {
          vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
                                                         vec_oprnds0.pop ());
          vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
                                                         vec_oprnds1.pop ());
          vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
                                                            vec_oprnds2.pop ());
          vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
                                                            vec_oprnds3.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_cond_lhs);
          vec_oprnds1.quick_push (vec_cond_rhs);
          vec_oprnds2.quick_push (vec_then_clause);
          vec_oprnds3.quick_push (vec_else_clause);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
        {
          vec_cond_rhs = vec_oprnds1[i];
          vec_then_clause = vec_oprnds2[i];
          vec_else_clause = vec_oprnds3[i];

          vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
                                vec_cond_lhs, vec_cond_rhs);
          vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
                                  vec_compare, vec_then_clause, vec_else_clause);

          new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: stmt has volatile operands\n");

      return false;
    }

  /* Skip stmts that do not need to be vectorized. In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */

  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && pattern_stmt
          && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
        {
          /* Analyze PATTERN_STMT instead of the original stmt.  */
          stmt = pattern_stmt;
          stmt_info = vinfo_for_stmt (pattern_stmt);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining pattern statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

          return true;
        }
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
           && node == NULL
           && pattern_stmt
           && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
               || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "==> examining pattern statement: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
        return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *pattern_def_stmt = gsi_stmt (si);
          if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
            {
              /* Analyze def stmt of STMT if it's a pattern stmt.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "==> examining pattern def statement: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
                }

              if (!vect_analyze_stmt (pattern_def_stmt,
                                      need_to_vectorize, node))
                return false;
            }
        }
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
                  && (relevance == vect_used_in_outer
                      || relevance == vect_used_in_outer_by_reduction
                      || relevance == vect_used_by_reduction
                      || relevance == vect_unused_in_scope));
      break;

    case vect_induction_def:
    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));

      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "get vectype for scalar type:  ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
          dump_printf (MSG_NOTE, "\n");
        }

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "not SLPed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return false;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "vectype:  ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
          dump_printf (MSG_NOTE, "\n");
        }

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
                  || (is_gimple_call (stmt)
                      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "handled only by SLP analysis\n");
      return true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
          || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
          || vectorizable_conversion (stmt, NULL, NULL, node)
          || vectorizable_shift (stmt, NULL, NULL, node)
          || vectorizable_operation (stmt, NULL, NULL, node)
          || vectorizable_assignment (stmt, NULL, NULL, node)
          || vectorizable_load (stmt, NULL, NULL, node, NULL)
          || vectorizable_call (stmt, NULL, NULL, node)
          || vectorizable_store (stmt, NULL, NULL, node)
          || vectorizable_reduction (stmt, NULL, NULL, node)
          || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
  else
    {
      if (bb_vinfo)
        ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
              || vectorizable_conversion (stmt, NULL, NULL, node)
              || vectorizable_shift (stmt, NULL, NULL, node)
              || vectorizable_operation (stmt, NULL, NULL, node)
              || vectorizable_assignment (stmt, NULL, NULL, node)
              || vectorizable_load (stmt, NULL, NULL, node, NULL)
              || vectorizable_call (stmt, NULL, NULL, node)
              || vectorizable_store (stmt, NULL, NULL, node)
              || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: relevant stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
        }

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: live stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
        }

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
                     bool *grouped_store, slp_tree slp_node,
                     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
                                slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
        {
          /* In case of interleaving, the whole chain is vectorized when the
             last store in the chain is reached.  Store stmts before the last
             one are skipped, and their vec_stmt_info shouldn't be freed
             meanwhile.  */
          *grouped_store = true;
          if (STMT_VINFO_VEC_STMT (stmt_info))
            is_store = true;
        }
      else
        is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt)
          && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
        is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "stmt not supported.\n");
          gcc_unreachable ();
        }
    }

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
                && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
                                  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
          || STMT_VINFO_RELEVANT (stmt_info) ==
             vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
                                 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
         (to be used when vectorizing outer-loop stmts that use the DEF of
         STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
        scalar_dest = PHI_RESULT (stmt);
      else
        scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
            }
        }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
        next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info. */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free hash table for stmt_vec_info. */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
        = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
        {
          gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
          gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
          gimple_set_bb (patt_stmt, NULL);
          tree lhs = gimple_get_lhs (patt_stmt);
          if (TREE_CODE (lhs) == SSA_NAME)
            release_ssa_name (lhs);
          if (seq)
            {
              gimple_stmt_iterator si;
              for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
                {
                  gimple *seq_stmt = gsi_stmt (si);
                  gimple_set_bb (seq_stmt, NULL);
                  lhs = gimple_get_lhs (seq_stmt);
                  if (TREE_CODE (lhs) == SSA_NAME)
                    release_ssa_name (lhs);
                  free_stmt_vec_info (seq_stmt);
                }
            }
          free_stmt_vec_info (patt_stmt);
        }
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  machine_mode inner_mode = TYPE_MODE (scalar_type);
  machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their types precision we use a element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
          || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
                                                  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
           && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
                                                  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.   Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}

unsigned int current_vector_size;
/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
                                                  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
           (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
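/* E.g. (illustrative): in

     loop {
       b = a + 1;    <-- 'a' was defined by a previous iteration
       a = x[i];
     }

   the use of 'a' in 'b = a + 1' is not a simple use for the loop
   vectorizer, whereas the constant 1 and the current-iteration load
   result are.  */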
bool
vect_is_simple_use (tree operand, vec_info *vinfo,
                    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      if (is_a <bb_vec_info> (vinfo) && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
        *dt = vect_external_def;
      else
        *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
        {
        case vect_uninitialized_def:
          dump_printf (MSG_NOTE, "uninitialized\n");
          break;
        case vect_constant_def:
          dump_printf (MSG_NOTE, "constant\n");
          break;
        case vect_external_def:
          dump_printf (MSG_NOTE, "external\n");
          break;
        case vect_internal_def:
          dump_printf (MSG_NOTE, "internal\n");
          break;
        case vect_induction_def:
          dump_printf (MSG_NOTE, "induction\n");
          break;
        case vect_reduction_def:
          dump_printf (MSG_NOTE, "reduction\n");
          break;
        case vect_double_reduction_def:
          dump_printf (MSG_NOTE, "double reduction\n");
          break;
        case vect_nested_cycle:
          dump_printf (MSG_NOTE, "nested cycle\n");
          break;
        case vect_unknown_def_type:
          dump_printf (MSG_NOTE, "unknown\n");
          break;
        }
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   use.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
                    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && !STMT_VINFO_RELEVANT (stmt_info)
          && !STMT_VINFO_LIVE_P (stmt_info))
        stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
           || *dt == vect_constant_def
           || *dt == vect_external_def)
    *vectype = NULL_TREE;

  return true;
}
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */
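/* Illustrative example of a multi-step conversion: widening a char operand
   to an int result goes char->short->int, so *MULTI_STEP_CVT is 1 and
   *INTERM_TYPES holds the single intermediate vector type (e.g. V8HI
   between V16QI and V4SI).  */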
bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one
	 vector).  The generated vector results would normally be expected
	 to be generated in the same order as in the original scalar
	 computation, i.e. if 8 results are generated in each vector
	 iteration, they are to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow changing the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt)
	  && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt, vectype_out, vectype_in,
					     code1, code2, multi_step_cvt,
					     interm_types))
	{
	  /* Elements in a vector with vect_used_by_reduction property cannot
	     be reordered if the use chain with this property does not have
	     the same operation.  One such example is s += a * b, where
	     elements in a and b cannot be reordered.  Here we check if the
	     vector defined by STMT is only directly used in the reduction
	     statement.  */
	  tree lhs = gimple_assign_lhs (stmt);
	  use_operand_p dummy;
	  gimple *use_stmt;
	  stmt_vec_info use_stmt_info = NULL;
	  if (single_imm_use (lhs, &dummy, &use_stmt)
	      && (use_stmt_info = vinfo_for_stmt (use_stmt))
	      && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
	    return true;
	}
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

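  /* On big-endian targets the low and high halves of a vector register
     hold the opposite ends of the element sequence relative to
     little-endian, so the LO/HI code pair is swapped below to keep the
     results in scalar order.  The even/odd codes select elements by
     position and are endian-neutral.  */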
  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using
     intermediate types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
	= lang_hooks.types.type_for_mode (intermediate_mode,
					  TYPE_UNSIGNED (prev_type));
      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
	  || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode2].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (optab3, intermediate_mode))
	      == CODE_FOR_nothing)
	  || ((icode2 = optab_handler (optab4, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	  && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	return true;

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}

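/* Tracing the char->int example from before the function through the loop
   above: the QI->HI unpacks selected before the loop produce V8HImode, not
   WIDE_VECTYPE's V4SImode, so the first iteration pushes "vector(8) short"
   onto INTERM_TYPES, finds the V8HI->V4SI unpacks via OPTAB3/OPTAB4, and
   succeeds with *MULTI_STEP_CVT == 1.  Mode names assume the same
   hypothetical 128-bit target as in that example.  */
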
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */

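/* A concrete instance of the int->short->char example above, on the same
   hypothetical 128-bit target: for VECTYPE_IN "vector(4) int" and
   VECTYPE_OUT "vector(16) char", the function below returns true with
   *CODE1 set to VEC_PACK_TRUNC_EXPR, *MULTI_STEP_CVT set to 1 and
   *INTERM_TYPES containing "vector(8) short" - two V4SI vectors pack into
   one V8HI vector, and two V8HI vectors pack into one V16QI vector.  */
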
bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
	 tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using
     intermediate types.  */
  prev_mode = vec_mode;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
	= lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
	= optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
	  && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
	  && insn_data[icode1].operand[0].mode
	     == insn_data[icode2].operand[0].mode)
	{
	  uns = false;
	  optab1 = interm_optab;
	  icode1 = icode2;
	}
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the demotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
	= lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
	= optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
			       optab_default);
      if (!interm_optab
	  || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (interm_optab, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	return true;

      prev_mode = intermediate_mode;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}

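/* Tracing the int->char example from before the function through the loop
   above: the V4SI pack selected before the loop produces V8HImode, not
   NARROW_VECTYPE's V16QImode, so the first iteration pushes
   "vector(8) short" onto INTERM_TYPES, finds the V8HI->V16QI pack as
   INTERM_OPTAB, and succeeds with *MULTI_STEP_CVT == 1.  Mode names again
   assume the hypothetical 128-bit target from that example.  */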