/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "dumpfile.h"
#include "gimple-pretty-print.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "insn-codes.h"
#include "optabs-tree.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"
#include "builtins.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}
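
/* Illustrative sketch (not part of the original file): queueing one
   unaligned store with a misalignment of 4 against a body cost vector
   only returns a preliminary estimate; the entry is costed for real
   when the vector is later replayed through add_stmt_cost:

     unsigned est = record_stmt_cost (&body_cost_vec, 1, unaligned_store,
				      stmt_info, 4, vect_body);

   With BODY_COST_VEC == NULL the cost goes straight to the target's
   cost model instead.  */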
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
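
/* Illustrative sketch (not in the original file): the helpers above
   cooperate when a caller emits a load-lanes style sequence, where
   DATAREF_PTR and FIRST_DR stand for the caller's data-ref pointer and
   first data reference:

     tree vec_array = create_vector_array (vectype, group_size);
     tree data_ref = create_array_ref (TREE_TYPE (vec_array),
				       dataref_ptr, first_dr);
     ... emit vec_array = LOAD_LANES (data_ref) ...
     for (i = 0; i < group_size; i++)
       new_temp = read_vector_array (stmt, gsi, scalar_dest, vec_array, i);

   write_vector_array is the mirror helper on the store side.  */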
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p,
		    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
	{
	  imm_use_iterator imm_iter;
	  use_operand_p use_p;
	  gimple *use_stmt;
	  tree lhs;
	  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

	  if (is_gimple_assign (stmt))
	    lhs = gimple_assign_lhs (stmt);
	  else
	    lhs = gimple_call_lhs (stmt);

	  /* This use is outside of any pattern; if LHS has other uses that
	     are pattern uses, we should mark the stmt itself, and not the
	     pattern stmt.  */
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	      {
		if (is_gimple_debug (USE_STMT (use_p)))
		  continue;
		use_stmt = USE_STMT (use_p);

		if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
		  continue;

		if (vinfo_for_stmt (use_stmt)
		    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
		  {
		    found = true;
		    break;
		  }
	      }
	}

      if (!found)
	{
	  /* This is the last stmt in a sequence that was detected as a
	     pattern that can potentially be vectorized.  Don't mark the stmt
	     as relevant/live because it's not going to be vectorized.
	     Instead mark the pattern-stmt that replaces it.  */

	  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "last stmt in pattern. don't mark"
			     " relevant/live.\n");
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
	  save_relevant = STMT_VINFO_RELEVANT (stmt_info);
	  save_live_p = STMT_VINFO_LIVE_P (stmt_info);
	  stmt = pattern_stmt;
	}
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}
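
/* For example (an illustrative sketch, not in the original file):

     loop:
       s_1 = a[i] + b[i];
       c[i] = s_1;	<-- has a vdef, so *relevant becomes
			    vect_used_in_scope
     after the loop:
       use (s_1);	<-- reached through the loop-closed exit phi,
			    so *live_p becomes true for the add

   Either condition alone is enough for this function to return true.  */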
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is one of
     the following forms:
     -1- STMT is an assignment -- var = mem_ref
     -2- STMT is an assignment -- mem_ref = var

     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
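
/* For example (an illustrative sketch, not in the original file): in
   the load

     x_1 = a[i_2];

   the only SSA use is i_2, and it only feeds the address computation,
   so this function returns false for it and process_use leaves the
   relevance of i_2's defining stmt untouched.  */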
/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
		      is_pattern_stmt_p (stmt_vinfo));
  return true;
}
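
/* Concrete shape of case 3a (an illustrative sketch, not in the
   original file):

     outer:   d_1 = ...		<-- def_stmt, in the outer loop
       inner:   x_2 = d_1 + 1;	<-- stmt that uses d_1

   Here flow_loop_nested_p (def_bb->loop_father, bb->loop_father) holds,
   so RELEVANT is remapped before DEF_STMT is marked and queued.  */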
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p, false);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	   live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	   relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the liveness/relevance as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
	{
	case vect_reduction_def:
	  switch (tmp_relevant)
	    {
	    case vect_unused_in_scope:
	      relevant = vect_used_by_reduction;
	      break;

	    case vect_used_by_reduction:
	      if (gimple_code (stmt) == GIMPLE_PHI)
		break;
	      /* fall through */

	    default:
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }

	  live_p = false;
	  break;

	case vect_nested_cycle:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_in_outer_by_reduction
	      && tmp_relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	case vect_double_reduction_def:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_by_reduction)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    live_p, relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       live_p, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
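
/* Worked example (a sketch, not in the original file): NCOPIES == 2
   with dt[0] == vect_constant_def and dt[1] == vect_internal_def yields
   one vector_stmt in the prologue (materializing the constant vector)
   plus two vector_stmt entries in the loop body.  */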
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
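
/* Worked example (a sketch, not in the original file): a two-step
   demotion (PWR == 1) of a non-promotion stmt accumulates
   vect_pow2 (0) + vect_pow2 (1) == 1 + 2 == 3 vec_promote_demote stmts,
   while the corresponding promotion counts
   vect_pow2 (1) + vect_pow2 (2) == 2 + 4 == 6, reflecting that each
   promotion step produces a high and a low result vector.  */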
/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple *first_stmt;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
	{
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  group_size = 1;
	}
      else
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  group_size = vect_cost_group_size (stmt_info);
	}

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses high and low interleave or shuffle operations for each
	 needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar stores plus extracting the elements.  */
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
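
/* Worked example (a sketch, not in the original file): an interleaved
   store group with GROUP_SIZE == 4 and NCOPIES == 1 that is not using
   store-lanes charges 1 * ceil_log2 (4) * 4 == 8 vec_perm stmts for the
   permute network, on top of the stores themselves.  */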
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple *first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  if (CONSTANT_CLASS_P (val))
	    val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
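
/* Illustrative sketch (not in the original file): splatting the scalar
   constant 3 into a vector in the loop preheader:

     tree cst = build_int_cst (integer_type_node, 3);
     tree def = vect_init_vector (stmt, cst, vectype, NULL);

   Passing a NULL iterator makes vect_init_vector_1 place INIT_STMT on
   the preheader edge rather than at a specific point in the body.  */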
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  gimple *def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def_stmt)
	{
	  if (loc_printed)
	    dump_printf (MSG_NOTE, "  def_stmt =  ");
	  else
	    dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
	}
    }

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
	gcc_assert (vector_type);
	return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      /* Code should use get_initial_def_for_reduction.  */
      gcc_unreachable ();

    /* operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
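
/* Illustrative sketch (not in the original file): a two-operand stmt in
   the non-SLP path fetches its defs once and then re-fetches copies for
   each additional vector stmt:

     vec<tree> v0 = vNULL, v1 = vNULL;
     vect_get_vec_defs (op0, op1, stmt, &v0, &v1, NULL, -1);
     ...
     vect_get_vec_defs_for_stmt_copy (dt, &v0, &v1);  */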
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
							vectype_in);
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);
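
/* For example (an illustrative sketch, not in the original file): on a
   target whose builtin_vectorized_function hook knows sqrt, a call to
   the const builtin sqrt in a double loop comes back as a vector sqrt
   variant, whereas a call that reads or clobbers memory is rejected up
   front by the ECF_CONST | ECF_NOVOPS test above.  */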
1668 /* Function vectorizable_mask_load_store.
1670 Check if STMT performs a conditional load or store that can be vectorized.
1671 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1672 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1673 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1676 vectorizable_mask_load_store (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
1677 gimple
**vec_stmt
, slp_tree slp_node
)
1679 tree vec_dest
= NULL
;
1680 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1681 stmt_vec_info prev_stmt_info
;
1682 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1683 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1684 bool nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
1685 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
1686 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1690 tree dataref_ptr
= NULL_TREE
;
1692 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1696 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
1697 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
1698 int gather_scale
= 1;
1699 enum vect_def_type gather_dt
= vect_unknown_def_type
;
1703 enum vect_def_type dt
;
1705 if (slp_node
!= NULL
)
1708 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
1709 gcc_assert (ncopies
>= 1);
1711 is_store
= gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
;
1712 mask
= gimple_call_arg (stmt
, 2);
1713 if (TYPE_PRECISION (TREE_TYPE (mask
))
1714 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype
))))
1717 /* FORNOW. This restriction should be relaxed. */
1718 if (nested_in_vect_loop
&& ncopies
> 1)
1720 if (dump_enabled_p ())
1721 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1722 "multiple types in nested loop.");
1726 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
1729 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
1732 if (!STMT_VINFO_DATA_REF (stmt_info
))
1735 elem_type
= TREE_TYPE (vectype
);
1737 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1740 if (STMT_VINFO_STRIDED_P (stmt_info
))
1743 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1746 gather_decl
= vect_check_gather_scatter (stmt
, loop_vinfo
, &gather_base
,
1747 &gather_off
, &gather_scale
);
1748 gcc_assert (gather_decl
);
1749 if (!vect_is_simple_use (gather_off
, loop_vinfo
, &def_stmt
, &gather_dt
,
1750 &gather_off_vectype
))
1752 if (dump_enabled_p ())
1753 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1754 "gather index use not simple.");
1758 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1760 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
1761 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
1763 if (dump_enabled_p ())
1764 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1765 "masked gather with integer mask not supported.");
1769 else if (tree_int_cst_compare (nested_in_vect_loop
1770 ? STMT_VINFO_DR_STEP (stmt_info
)
1771 : DR_STEP (dr
), size_zero_node
) <= 0)
1773 else if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
1774 || !can_vec_mask_load_store_p (TYPE_MODE (vectype
), !is_store
))
1777 if (TREE_CODE (mask
) != SSA_NAME
)
1780 if (!vect_is_simple_use (mask
, loop_vinfo
, &def_stmt
, &dt
))
1785 tree rhs
= gimple_call_arg (stmt
, 3);
1786 if (!vect_is_simple_use (rhs
, loop_vinfo
, &def_stmt
, &dt
))
1790 if (!vec_stmt
) /* transformation not required. */
1792 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
1794 vect_model_store_cost (stmt_info
, ncopies
, false, dt
,
1797 vect_model_load_cost (stmt_info
, ncopies
, false, NULL
, NULL
, NULL
);
1803 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1805 tree vec_oprnd0
= NULL_TREE
, op
;
1806 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1807 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
1808 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
1809 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
1810 tree mask_perm_mask
= NULL_TREE
;
1811 edge pe
= loop_preheader_edge (loop
);
1814 enum { NARROW
, NONE
, WIDEN
} modifier
;
1815 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
1817 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
1818 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1819 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1820 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1821 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1822 scaletype
= TREE_VALUE (arglist
);
1823 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
1824 && types_compatible_p (srctype
, masktype
));
1826 if (nunits
== gather_off_nunits
)
1828 else if (nunits
== gather_off_nunits
/ 2)
1830 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
1833 for (i
= 0; i
< gather_off_nunits
; ++i
)
1834 sel
[i
] = i
| nunits
;
1836 perm_mask
= vect_gen_perm_mask_checked (gather_off_vectype
, sel
);
1838 else if (nunits
== gather_off_nunits
* 2)
1840 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
1843 for (i
= 0; i
< nunits
; ++i
)
1844 sel
[i
] = i
< gather_off_nunits
1845 ? i
: i
+ nunits
- gather_off_nunits
;
1847 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
1849 for (i
= 0; i
< nunits
; ++i
)
1850 sel
[i
] = i
| gather_off_nunits
;
1851 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, sel
);
1856 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
1858 ptr
= fold_convert (ptrtype
, gather_base
);
1859 if (!is_gimple_min_invariant (ptr
))
1861 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
1862 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
1863 gcc_assert (!new_bb
);
1866 scale
= build_int_cst (scaletype
, gather_scale
);
1868 prev_stmt_info
= NULL
;
1869 for (j
= 0; j
< ncopies
; ++j
)
1871 if (modifier
== WIDEN
&& (j
& 1))
1872 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
1873 perm_mask
, stmt
, gsi
);
1876 = vect_get_vec_def_for_operand (gather_off
, stmt
);
1879 = vect_get_vec_def_for_stmt_copy (gather_dt
, vec_oprnd0
);
1881 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
1883 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
1884 == TYPE_VECTOR_SUBPARTS (idxtype
));
1885 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
1886 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
1888 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1889 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1893 if (mask_perm_mask
&& (j
& 1))
1894 mask_op
= permute_vec_elements (mask_op
, mask_op
,
1895 mask_perm_mask
, stmt
, gsi
);
1899 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
1902 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
1903 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
1907 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
1909 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
))
1910 == TYPE_VECTOR_SUBPARTS (masktype
));
1911 var
= vect_get_new_ssa_name (masktype
, vect_simple_var
);
1912 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
1914 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, mask_op
);
1915 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1921 = gimple_build_call (gather_decl
, 5, mask_op
, ptr
, op
, mask_op
,
1924 if (!useless_type_conversion_p (vectype
, rettype
))
1926 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
1927 == TYPE_VECTOR_SUBPARTS (rettype
));
1928 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
1929 gimple_call_set_lhs (new_stmt
, op
);
1930 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1931 var
= make_ssa_name (vec_dest
);
1932 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
1933 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1937 var
= make_ssa_name (vec_dest
, new_stmt
);
1938 gimple_call_set_lhs (new_stmt
, var
);
1941 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1943 if (modifier
== NARROW
)
1950 var
= permute_vec_elements (prev_res
, var
,
1951 perm_mask
, stmt
, gsi
);
1952 new_stmt
= SSA_NAME_DEF_STMT (var
);
1955 if (prev_stmt_info
== NULL
)
1956 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
1958 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
1959 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
1962 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
1964 tree lhs
= gimple_call_lhs (stmt
);
1965 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (TREE_TYPE (lhs
)));
1966 set_vinfo_for_stmt (new_stmt
, stmt_info
);
1967 set_vinfo_for_stmt (stmt
, NULL
);
1968 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
1969 gsi_replace (gsi
, new_stmt
, true);
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      /* We should have caught mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false,
						      &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask, vec_rhs);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false,
						      &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask);
	  gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
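
/* Illustrative note (not part of the original sources): a conditional
   load such as

     if (cond[i]) x = a[i];

   reaches the function above as a scalar internal call

     x_1 = MASK_LOAD (&a[i], align, cond_1);

   and the loops above replace it with one IFN_MASK_LOAD (or, for a
   conditional store, IFN_MASK_STORE) per vector copy, operating on
   the vectorized data pointer and mask.  */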
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
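
/* Illustrative sketch (not from the original sources): assuming the
   target provides a vector square-root builtin, the scalar statement

     a_1 = sqrtf (b_1);

   inside a vectorizable loop becomes, conceptually,

     vect_a = VFN (vect_b);

   where VFN stands for whatever decl vectorizable_function returns;
   the name is a placeholder, not an actual GCC builtin.  */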
static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is GS a vectorizable call?   */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (gimple_call_internal_p (stmt)
	  && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  gcc_assert (!gimple_vuse (stmt));

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
			 "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree *v = XALLOCAVEC (tree, nunits_out);
	      int k;
	      for (k = 0; k < nunits_out; ++k)
		v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
	      tree cst = build_vector (vectype_out, v);
	      tree new_var
		= vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
	      gimple *init_stmt = gimple_build_assign (new_var, cst);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, new_var);
	    }
	  else
	    {
	      new_stmt = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, new_temp);
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
	 with vf - 1 rather than 0, that is the last iteration of the
	 vectorized loop.  */
      imm_use_iterator iter;
      use_operand_p use_p;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	{
	  basic_block use_bb = gimple_bb (use_stmt);
	  if (use_bb
	      && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
					       ncopies * nunits_out - 1));
	      update_stmt (use_stmt);
	    }
	}
    }

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
  /* True if the argument is linear within the simd lane rather than
     within the whole vectorized loop (see vect_simd_lane_linear).  */
  bool simd_lane_linear;
};
/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   ARGINFO.  */
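
/* Illustrative note (not part of the original sources): the pattern
   recognized below is an address computed from the simd lane, e.g.

     _1 = GOMP_SIMD_LANE (simduid.0_5(D));
     _2 = _1 * 4;
     p_3 = &base + _2;

   Here p_3 is linear in the simd lane with step 4 even though it is
   not an induction variable of the loop itself.  */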
static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    if (!linear_step)
	      linear_step = 1;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (is_gimple_call (def_stmt)
	       && gimple_call_internal_p (def_stmt)
	       && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
      else
	return;
    }
}
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
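
/* Illustrative sketch (not from the original sources): for

     #pragma omp declare simd
     int f (int);

   the front end emits simd clones of f, e.g. a clone taking and
   returning V4SI.  A statement a_1 = f (b_1) in a vectorizable loop
   can then be replaced by a call to that clone on the vectorized
   operands; the best available clone is picked below by the
   badness metric.  */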
static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp;
  gimple *def_stmt;
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<simd_call_arg_info> arginfo = vNULL;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.create (nargs);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
			       &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  arginfo.release ();
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }
  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen
	    > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
	  this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }
  if (bestn == NULL)
    {
      arginfo.release ();
      return false;
    }

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  {
	    arginfo.release ();
	    return false;
	  }
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    {
      arginfo.release ();
      return false;
    }

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if (bestn->simdclone->args[i].arg_type
	    == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      arginfo.release ();
      return true;
    }
  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / TYPE_VECTOR_SUBPARTS (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (TYPE_VECTOR_SUBPARTS (atype)
		      < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
		    {
		      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
			   / TYPE_VECTOR_SUBPARTS (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  size_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (TYPE_VECTOR_SUBPARTS (atype)
			   / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  if (arginfo[i].simd_lane_linear)
		    {
		      vargs.safe_push (arginfo[i].op);
		      break;
		    }
		  tree phi_res = copy_ssa_name (op);
		  gphi *new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op);
		  new_stmt
		    = gimple_build_assign (phi_arg, code, phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt,
							 loop_vinfo));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op));
		  new_stmt = gimple_build_assign (new_temp, code,
						  arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}

      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (TYPE_VECTOR_SUBPARTS (vectype)
		   == TYPE_VECTOR_SUBPARTS (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
	    {
	      unsigned int k, l;
	      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t),
						 l * prec / BITS_PER_UNIT));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				size_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
	    {
	      unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
				/ TYPE_VECTOR_SUBPARTS (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber), gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);
  arginfo.release ();

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */
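
/* Illustrative note (not part of the original sources): a widening
   multiply from V8HI operands to V4SI results is emitted as two
   halves,

     vect_lo = WIDEN_MULT_LO_EXPR <vect_b, vect_c>;
     vect_hi = WIDEN_MULT_HI_EXPR <vect_b, vect_c>;

   and this helper generates one such half per invocation.  */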
static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */
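
/* Illustrative note (not part of the original sources): invoked with
   MULTI_STEP_CVT == 1 this pushes four vector defs in total -- the
   def of the operand itself followed by three successive
   vect_get_vec_def_for_stmt_copy copies -- which a two-step
   narrowing then consumes in pairs.  */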
static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */
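
/* Illustrative note (not part of the original sources): demoting two
   V4SI operands to one V8HI result packs them pairwise,

     vect_h = VEC_PACK_TRUNC_EXPR <vect_0, vect_1>;

   so each level of the recursion below halves the number of
   operands.  */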
static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple *stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */
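
/* Illustrative note (not part of the original sources): promoting a
   V8HI operand to V4SI results unpacks it into two halves,

     vect_lo = VEC_UNPACK_LO_EXPR <vect_0>;
     vect_hi = VEC_UNPACK_HI_EXPR <vect_0>;

   so each step doubles the number of result vectors.  */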
static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple *stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
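
/* Illustrative sketch (not from the original sources): for int-typed
   d_1, the statement

     f_2 = (float) d_1;

   with V4SI input and V4SF output has modifier NONE and maps to a
   single vector FLOAT_EXPR.  Converting short to double instead needs
   a multi-step widening (short -> int -> double), which the
   interm_types machinery below arranges.  */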
static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> vec_dsts = vNULL;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  machine_mode rhs_mode;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?   */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if ((INTEGRAL_TYPE_P (lhs_type)
       && (TYPE_PRECISION (lhs_type)
	   != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
      || (INTEGRAL_TYPE_P (rhs_type)
	  && (TYPE_PRECISION (rhs_type)
	      != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision unsupported."
			 "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	 OP1.  */
      if (CONSTANT_CLASS_P (op0))
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
      else
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);

      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in < nunits_out)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else
    modifier = WIDEN;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
					  &code1, &code2, &multi_step_cvt,
					  &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
      for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
	   rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
	   rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
	{
	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt, vectype_out,
						    cvt_type, &codecvt1,
						    &codecvt2, &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    break;
	}

      if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }
) /* transformation not required. */
3745 if (dump_enabled_p ())
3746 dump_printf_loc (MSG_NOTE
, vect_location
,
3747 "=== vectorizable_conversion ===\n");
3748 if (code
== FIX_TRUNC_EXPR
|| code
== FLOAT_EXPR
)
3750 STMT_VINFO_TYPE (stmt_info
) = type_conversion_vec_info_type
;
3751 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
3753 else if (modifier
== NARROW
)
3755 STMT_VINFO_TYPE (stmt_info
) = type_demotion_vec_info_type
;
3756 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
3760 STMT_VINFO_TYPE (stmt_info
) = type_promotion_vec_info_type
;
3761 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
3763 interm_types
.release ();
3768 if (dump_enabled_p ())
3769 dump_printf_loc (MSG_NOTE
, vect_location
,
3770 "transform conversion. ncopies = %d.\n", ncopies
);
3772 if (op_type
== binary_op
)
3774 if (CONSTANT_CLASS_P (op0
))
3775 op0
= fold_convert (TREE_TYPE (op1
), op0
);
3776 else if (CONSTANT_CLASS_P (op1
))
3777 op1
= fold_convert (TREE_TYPE (op0
), op1
);
3780 /* In case of multi-step conversion, we first generate conversion operations
3781 to the intermediate types, and then from that types to the final one.
3782 We create vector destinations for the intermediate type (TYPES) received
3783 from supportable_*_operation, and store them in the correct order
3784 for future use in vect_create_vectorized_*_stmts (). */
3785 vec_dsts
.create (multi_step_cvt
+ 1);
3786 vec_dest
= vect_create_destination_var (scalar_dest
,
3787 (cvt_type
&& modifier
== WIDEN
)
3788 ? cvt_type
: vectype_out
);
3789 vec_dsts
.quick_push (vec_dest
);
3793 for (i
= interm_types
.length () - 1;
3794 interm_types
.iterate (i
, &intermediate_type
); i
--)
3796 vec_dest
= vect_create_destination_var (scalar_dest
,
3798 vec_dsts
.quick_push (vec_dest
);
3803 vec_dest
= vect_create_destination_var (scalar_dest
,
3805 ? vectype_out
: cvt_type
);
3809 if (modifier
== WIDEN
)
3811 vec_oprnds0
.create (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1);
3812 if (op_type
== binary_op
)
3813 vec_oprnds1
.create (1);
3815 else if (modifier
== NARROW
)
3816 vec_oprnds0
.create (
3817 2 * (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1));
3819 else if (code
== WIDEN_LSHIFT_EXPR
)
3820 vec_oprnds1
.create (slp_node
->vec_stmts_size
);
3823 prev_stmt_info
= NULL
;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
			       -1);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  new_stmt = gimple_build_assign (vec_dest, code1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		}

	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (j == 0)
	    {
	      if (slp_node)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    {
		      unsigned int k;

		      vec_oprnd1 = op1;
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);

		      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0,
					 NULL, slp_node, -1);
		    }
		  else
		    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
				       &vec_oprnds1, slp_node, -1);
		}
	      else
		{
		  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
		  vec_oprnds0.quick_push (vec_oprnd0);
		  if (op_type == binary_op)
		    {
		      if (code == WIDEN_LSHIFT_EXPR)
			vec_oprnd1 = op1;
		      else
			vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
		      vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	      vec_oprnds0.truncate (0);
	      vec_oprnds0.quick_push (vec_oprnd0);
	      if (op_type == binary_op)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    vec_oprnd1 = op1;
		  else
		    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
								 vec_oprnd1);
		  vec_oprnds1.truncate (0);
		  vec_oprnds1.quick_push (vec_oprnd1);
		}
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  for (i = multi_step_cvt; i >= 0; i--)
	    {
	      tree this_dest = vec_dsts[i];
	      enum tree_code c1 = code1, c2 = code2;
	      if (i == 0 && codecvt2 != ERROR_MARK)
		{
		  c1 = codecvt1;
		  c2 = codecvt2;
		}
	      vect_create_vectorized_promotion_stmts (&vec_oprnds0,
						      &vec_oprnds1,
						      stmt, this_dest, gsi,
						      c1, c2, decl1, decl2,
						      op_type);
	    }

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      if (cvt_type)
		{
		  if (codecvt1 == CALL_EXPR)
		    {
		      new_stmt = gimple_build_call (decl1, 1, vop0);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  else
		    {
		      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, codecvt1,
						      vop0);
		    }

		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		new_stmt = SSA_NAME_DEF_STMT (vop0);

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (slp_node)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    {
	      vec_oprnds0.truncate (0);
	      vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
					vect_pow2 (multi_step_cvt) - 1);
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  if (cvt_type)
	    FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	      {
		if (codecvt1 == CALL_EXPR)
		  {
		    new_stmt = gimple_build_call (decl1, 1, vop0);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_call_set_lhs (new_stmt, new_temp);
		  }
		else
		  {
		    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		    new_temp = make_ssa_name (vec_dest);
		    new_stmt = gimple_build_assign (new_temp, codecvt1,
						    vop0);
		  }

		vect_finish_stmt_generation (stmt, new_stmt, gsi);
		vec_oprnds0[i] = new_temp;
	      }

	  vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
						 stmt, vec_dsts, gsi,
						 slp_node, code1,
						 &prev_stmt_info);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_dsts.release ();
  interm_types.release ();

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
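
/* Illustrative note (not part of the original sources): a plain copy
   or a same-sized conversion such as

     b_2 = (unsigned int) a_1;

   becomes a single vector copy per ncopies, with a VIEW_CONVERT_EXPR
   wrapped around the operand when the source and destination vector
   types differ only in representation.  */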
4051 vectorizable_assignment (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
4052 gimple
**vec_stmt
, slp_tree slp_node
)
4057 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4058 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4061 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
4064 vec
<tree
> vec_oprnds
= vNULL
;
4066 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4067 vec_info
*vinfo
= stmt_info
->vinfo
;
4068 gimple
*new_stmt
= NULL
;
4069 stmt_vec_info prev_stmt_info
= NULL
;
4070 enum tree_code code
;
4073 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4076 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
4079 /* Is vectorizable assignment? */
4080 if (!is_gimple_assign (stmt
))
4083 scalar_dest
= gimple_assign_lhs (stmt
);
4084 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
4087 code
= gimple_assign_rhs_code (stmt
);
4088 if (gimple_assign_single_p (stmt
)
4089 || code
== PAREN_EXPR
4090 || CONVERT_EXPR_CODE_P (code
))
4091 op
= gimple_assign_rhs1 (stmt
);
4095 if (code
== VIEW_CONVERT_EXPR
)
4096 op
= TREE_OPERAND (op
, 0);
4098 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
4099 unsigned int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
4101 /* Multiple types in SLP are handled by creating the appropriate number of
4102 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4104 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
4107 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
4109 gcc_assert (ncopies
>= 1);
4111 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
[0], &vectype_in
))
4113 if (dump_enabled_p ())
4114 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4115 "use not simple.\n");
4119 /* We can handle NOP_EXPR conversions that do not change the number
4120 of elements or the vector size. */
4121 if ((CONVERT_EXPR_CODE_P (code
)
4122 || code
== VIEW_CONVERT_EXPR
)
4124 || TYPE_VECTOR_SUBPARTS (vectype_in
) != nunits
4125 || (GET_MODE_SIZE (TYPE_MODE (vectype
))
4126 != GET_MODE_SIZE (TYPE_MODE (vectype_in
)))))
4129 /* We do not handle bit-precision changes. */
4130 if ((CONVERT_EXPR_CODE_P (code
)
4131 || code
== VIEW_CONVERT_EXPR
)
4132 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest
))
4133 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest
))
4134 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest
))))
4135 || ((TYPE_PRECISION (TREE_TYPE (op
))
4136 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op
))))))
4137 /* But a conversion that does not change the bit-pattern is ok. */
4138 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest
))
4139 > TYPE_PRECISION (TREE_TYPE (op
)))
4140 && TYPE_UNSIGNED (TREE_TYPE (op
))))
4142 if (dump_enabled_p ())
4143 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4144 "type conversion to/from bit-precision "
4149 if (!vec_stmt
) /* transformation not required. */
4151 STMT_VINFO_TYPE (stmt_info
) = assignment_vec_info_type
;
4152 if (dump_enabled_p ())
4153 dump_printf_loc (MSG_NOTE
, vect_location
,
4154 "=== vectorizable_assignment ===\n");
4155 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
4160 if (dump_enabled_p ())
4161 dump_printf_loc (MSG_NOTE
, vect_location
, "transform assignment.\n");
4164 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
4167 for (j
= 0; j
< ncopies
; j
++)
4171 vect_get_vec_defs (op
, NULL
, stmt
, &vec_oprnds
, NULL
, slp_node
, -1);
4173 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds
, NULL
);
4175 /* Arguments are ready. create the new vector stmt. */
4176 FOR_EACH_VEC_ELT (vec_oprnds
, i
, vop
)
4178 if (CONVERT_EXPR_CODE_P (code
)
4179 || code
== VIEW_CONVERT_EXPR
)
4180 vop
= build1 (VIEW_CONVERT_EXPR
, vectype
, vop
);
4181 new_stmt
= gimple_build_assign (vec_dest
, vop
);
4182 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4183 gimple_assign_set_lhs (new_stmt
, new_temp
);
4184 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4186 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4193 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4195 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4197 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4200 vec_oprnds
.release ();
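
/* Illustrative sketch (ours, not from the original sources): given the
   scalar loop

       for (i = 0; i < n; i++)
         b[i] = (unsigned int) a[i];

   where A has element type int, each copy "b_1 = (unsigned int) a_1"
   becomes "vb_1 = VIEW_CONVERT_EXPR <vector unsigned int> (va_1)" here,
   since a NOP conversion that preserves the element count and vector
   size is just a reinterpretation of the same bits.  */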
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
          || (optab_handler (optab, TYPE_MODE (vectype))
              == CODE_FOR_nothing))
        return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
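
/* Usage sketch (hypothetical caller, for illustration only): pattern
   recognition can ask

       if (vect_supportable_shift (RSHIFT_EXPR, TREE_TYPE (oprnd0)))
         ...emit a shift-based pattern...

   i.e. "can the target shift a vector of this element type at all",
   trying the vector-by-scalar optab first and falling back to the
   vector-by-vector one.  */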
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
                    gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
        || code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
      != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
           || dt[1] == vect_external_def
           || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
         in loops if it is a constant or invariant, it is always
         a scalar shift.  */
      if (slp_node)
        {
          vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
          gimple *slpstmt;

          FOR_EACH_VEC_ELT (stmts, k, slpstmt)
            if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
              scalar_shift_arg = false;
        }
    }
  else
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
        op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
          || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unusable type for last operand in"
                             " vector/vector shift/rotate.\n");
          return false;
        }
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
          && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "vector/scalar shift/rotate found.\n");
        }
      else
        {
          optab = optab_for_tree_code (code, vectype, optab_vector);
          if (optab
              && (optab_handler (optab, TYPE_MODE (vectype))
                  != CODE_FOR_nothing))
            {
              scalar_shift_arg = false;

              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vector/vector shift/rotate found.\n");

              /* Unlike the other binary operators, shifts/rotates have
                 the rhs being int, instead of the same type as the lhs,
                 so make sure the scalar is the right type if we are
                 dealing with vectors of long long/long/short/char.  */
              if (dt[1] == vect_constant_def)
                op1 = fold_convert (TREE_TYPE (vectype), op1);
              else if (!useless_type_conversion_p (TREE_TYPE (vectype),
                                                   TREE_TYPE (op1)))
                {
                  if (slp_node
                      && TYPE_MODE (TREE_TYPE (vectype))
                         != TYPE_MODE (TREE_TYPE (op1)))
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                         vect_location,
                                         "unusable type for last operand in"
                                         " vector/vector shift/rotate.\n");
                      return false;
                    }
                  if (vec_stmt && !slp_node)
                    {
                      op1 = fold_convert (TREE_TYPE (vectype), op1);
                      op1 = vect_init_vector (stmt, op1,
                                              TREE_TYPE (vectype), NULL);
                    }
                }
            }
        }
    }

  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (vf < vect_min_worthwhile_factor (code)
              && !vec_stmt))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (scalar_shift_arg)
            {
              /* Vector shl and shr insn patterns can be defined with scalar
                 operand 2 (shift operand).  In this case, use constant or loop
                 invariant op1 directly, without extending it to vector mode
                 first.  */
              optab_op2_mode = insn_data[icode].operand[2].mode;
              if (!VECTOR_MODE_P (optab_op2_mode))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "operand 1 using scalar mode.\n");
                  vec_oprnd1 = op1;
                  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
                  vec_oprnds1.quick_push (vec_oprnd1);

                  /* Store vec_oprnd1 for every vector stmt to be created
                     for SLP_NODE.  We check during the analysis that all
                     the shift arguments are the same.
                     TODO: Allow different constants for different vector
                     stmts generated for an SLP instance.  */
                  if (slp_node)
                    for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                      vec_oprnds1.quick_push (vec_oprnd1);
                }
            }

          /* vec_oprnd1 is available if operand 1 should be of a scalar-type
             (a special case for certain kind of vector shifts); otherwise,
             operand 1 should be of a vector type (the usual case).  */
          if (vec_oprnd1)
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node, -1);
          else
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
        }
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = vec_oprnds1[i];
          new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
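
/* Illustrative sketch (assumed example, not from this file): for

       for (i = 0; i < n; i++)
         a[i] = b[i] << c[i];

   the shift amount is a vect_internal_def, so only the vector/vector
   optab applies and each copy becomes "va = vb << vc".  For
   "a[i] = b[i] << 3" the amount is invariant; if insn_data for the
   chosen icode takes a scalar operand 2, the constant 3 is used
   directly, otherwise it is broadcast with vect_init_vector.  */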
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  bool target_support_p;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "num. args = %d (not unary/binary/ternary op).\n",
                         op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             TREE_TYPE (op0));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab.\n");
          return false;
        }
      target_support_p = (optab_handler (optab, vec_mode)
                          != CODE_FOR_nothing);
    }

  if (!target_support_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && vf < vect_min_worthwhile_factor (code))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (op_type == binary_op || op_type == ternary_op)
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
          else
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node, -1);
          if (op_type == ternary_op)
            {
              vec_oprnds2.create (1);
              vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
                                                                    stmt));
            }
        }
      else
        {
          vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
          if (op_type == ternary_op)
            {
              tree vec_oprnd = vec_oprnds2.pop ();
              vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
                                                                      vec_oprnd));
            }
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = ((op_type == binary_op || op_type == ternary_op)
                  ? vec_oprnds1[i] : NULL_TREE);
          vop2 = ((op_type == ternary_op)
                  ? vec_oprnds2[i] : NULL_TREE);
          new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
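
/* Illustrative sketch (our assumption about a typical input; whether a
   given operation is expressed this way depends on earlier passes): a
   ternary gimple assign such as

       x_1 = FMA_EXPR <a_2, b_3, c_4>;

   reaches this function with op_type == ternary_op; all three operands
   get vector defs and each copy is emitted as a single three-operand
   vector assign via gimple_build_assign (vec_dest, code, vop0, vop1,
   vop2).  */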
/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      if (decl_in_symtab_p (base_decl))
        symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
      else
        {
          DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
          DECL_USER_ALIGN (base_decl) = 1;
        }
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
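
/* Sketch (hypothetical scenario): if the loop stores into a file-scope
   "int a[256];" whose default alignment is below TYPE_ALIGN of the
   chosen vectype, the code above raises the decl's alignment, much as
   if the user had written

       int a[256] __attribute__ ((aligned (16)));

   (16 assumes a 128-bit vectype), so dr_aligned accesses can be used.  */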
/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}
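
/* Example (ours): for a 4-element vectype the selector built above is
   {3, 2, 1, 0}, so the generated

       vX = VEC_PERM_EXPR <v, v, {3, 2, 1, 0}>;

   yields the element-reversed vector used by negative-step loads and
   stores.  */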
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  gimple *def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int j;
  gimple *next_stmt, *first_stmt = NULL;
  bool grouped_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  vec<tree> dr_chain = vNULL;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  bool negative = false;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree aggr_type;
  tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
  tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
  int scatter_scale = 1;
  enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
  enum vect_def_type scatter_src_dt = vect_unknown_def_type;
  gimple *new_stmt;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable store? */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  if (!STMT_VINFO_STRIDED_P (stmt_info))
    {
      negative =
        tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
                              ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
                              size_zero_node) < 0;
      if (negative && ncopies > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types with negative step.\n");
          return false;
        }
      if (negative)
        {
          gcc_assert (!grouped_store);
          alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
          if (alignment_support_scheme != dr_aligned
              && alignment_support_scheme != dr_unaligned_supported)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step but alignment required.\n");
              return false;
            }
          if (dt != vect_constant_def
              && dt != vect_external_def
              && !perm_mask_for_reverse (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step and reversing not supported.\n");
              return false;
            }
        }
    }

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      if (!slp
          && !PURE_SLP_STMT (stmt_info)
          && !STMT_VINFO_STRIDED_P (stmt_info))
        {
          if (vect_store_lanes_supported (vectype, group_size))
            store_lanes_p = true;
          else if (!vect_grouped_store_supported (vectype, group_size))
            return false;
        }

      if (STMT_VINFO_STRIDED_P (stmt_info)
          && (slp || PURE_SLP_STMT (stmt_info))
          && (group_size > nunits
              || nunits % group_size != 0))
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "unhandled strided group store\n");
          return false;
        }

      if (first_stmt == stmt)
        {
          /* STMT is the leader of the group. Check the operands of all the
             stmts of the group.  */
          next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
          while (next_stmt)
            {
              gcc_assert (gimple_assign_single_p (next_stmt));
              op = gimple_assign_rhs1 (next_stmt);
              if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "use not simple.\n");
                  return false;
                }
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
            }
        }
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
                                                &scatter_off, &scatter_scale);
      gcc_assert (scatter_decl);
      if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
                               &scatter_off_vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "scatter index use not simple.");
          return false;
        }
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
                               NULL, NULL, NULL);
      return true;
    }

  /* Transform.  */

  ensure_base_align (stmt_info, dr);

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);

      if (nunits == (unsigned int) scatter_off_nunits)
        modifier = NONE;
      else if (nunits == (unsigned int) scatter_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
          gcc_assert (perm_mask != NULL_TREE);
        }
      else if (nunits == (unsigned int) scatter_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < (unsigned int) nunits; ++i)
            sel[i] = i | scatter_off_nunits;

          perm_mask = vect_gen_perm_mask_checked (vectype, sel);
          gcc_assert (perm_mask != NULL_TREE);
          ncopies *= 2;
        }
      else
        gcc_unreachable ();

      rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);

      gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
                           && TREE_CODE (rettype) == VOID_TYPE);

      ptr = fold_convert (ptrtype, scatter_base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      /* Currently we support only unconditional scatter stores,
         so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
      mask = vect_init_vector (stmt, mask, masktype, NULL);

      scale = build_int_cst (scaletype, scatter_scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (j == 0)
            {
              src = vec_oprnd1
                = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
              op = vec_oprnd0
                = vect_get_vec_def_for_operand (scatter_off, stmt);
            }
          else if (modifier != NONE && (j & 1))
            {
              if (modifier == WIDEN)
                {
                  src = vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
                  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
                                             stmt, gsi);
                }
              else if (modifier == NARROW)
                {
                  src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
                                              stmt, gsi);
                  op = vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
                }
              else
                gcc_unreachable ();
            }
          else
            {
              src = vec_oprnd1
                = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
              op = vec_oprnd0
                = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
            }

          if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
                          == TYPE_VECTOR_SUBPARTS (srctype));
              var = vect_get_new_ssa_name (srctype, vect_simple_var);
              src = build1 (VIEW_CONVERT_EXPR, srctype, src);
              new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              src = var;
            }

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          new_stmt
            = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }

  if (grouped_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
         reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
          < GROUP_SIZE (vinfo_for_stmt (first_stmt))
          && !slp)
        {
          *vec_stmt = NULL;
          return true;
        }

      if (slp)
        {
          grouped_store = false;
          /* VEC_NUM is the number of vect stmts to be created for this
             group.  */
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
          op = gimple_assign_rhs1 (first_stmt);
        }
      else
        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform store. ncopies = %d\n", ncopies);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;
      tree vec_oprnd;
      unsigned int g;

      gcc_assert (!nested_in_vect_loop_p (loop, stmt));

      stride_base
        = fold_build_pointer_plus
            (unshare_expr (DR_BASE_ADDRESS (first_dr)),
             size_binop (PLUS_EXPR,
                         convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
                         convert_to_ptrofftype (DR_INIT(first_dr))));
      stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));

      /* For a store with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             array[i] = ...;

         we generate a new induction variable and new stores from
         the components of the (vectorized) rhs:

           for (j = 0; ; j += VF*stride)
             vectemp = ...;
             tmp1 = vectemp[0];
             array[j] = tmp1;
             tmp2 = vectemp[1];
             array[j + stride] = tmp2;
             ...
         */

      unsigned nstores = nunits;
      tree ltype = elem_type;
      if (slp)
        {
          nstores = nunits / group_size;
          if (group_size < nunits)
            ltype = build_vector_type (elem_type, group_size);
          else
            ltype = vectype;
          ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
          ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          group_size = 1;
        }

      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
                            build_int_cst (TREE_TYPE (ivstep),
                                           ncopies * nstores));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (stride_base, ivstep, NULL,
                 loop, &incr_gsi, insert_after,
                 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
      next_stmt = first_stmt;
      for (g = 0; g < group_size; g++)
        {
          running_off = offvar;
          if (g)
            {
              tree size = TYPE_SIZE_UNIT (ltype);
              tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
                                      size);
              tree newoff = copy_ssa_name (running_off, NULL);
              incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                          running_off, pos);
              vect_finish_stmt_generation (stmt, incr, gsi);
              running_off = newoff;
            }
          for (j = 0; j < ncopies; j++)
            {
              /* We've set op and dt above, from gimple_assign_rhs1(stmt),
                 and first_stmt == stmt.  */
              if (j == 0)
                {
                  if (slp)
                    {
                      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
                                         slp_node, -1);
                      vec_oprnd = vec_oprnds[0];
                    }
                  else
                    {
                      gcc_assert (gimple_assign_single_p (next_stmt));
                      op = gimple_assign_rhs1 (next_stmt);
                      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
                    }
                }
              else
                {
                  if (slp)
                    vec_oprnd = vec_oprnds[j];
                  else
                    {
                      vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
                      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
                    }
                }

              for (i = 0; i < nstores; i++)
                {
                  tree newref, newoff;
                  gimple *incr, *assign;
                  tree size = TYPE_SIZE (ltype);
                  /* Extract the i'th component.  */
                  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
                                          bitsize_int (i), size);
                  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
                                           size, pos);

                  elem = force_gimple_operand_gsi (gsi, elem, true,
                                                   NULL_TREE, true,
                                                   GSI_SAME_STMT);

                  newref = build2 (MEM_REF, ltype,
                                   running_off, alias_off);

                  /* And store it to *running_off.  */
                  assign = gimple_build_assign (newref, elem);
                  vect_finish_stmt_generation (stmt, assign, gsi);

                  newoff = copy_ssa_name (running_off, NULL);
                  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                              running_off, stride_step);
                  vect_finish_stmt_generation (stmt, incr, gsi);

                  running_off = newoff;
                  if (g == group_size - 1
                      && !slp)
                    {
                      if (j == 0 && i == 0)
                        STMT_VINFO_VEC_STMT (stmt_info)
                          = *vec_stmt = assign;
                      else
                        STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
                      prev_stmt_info = vinfo_for_stmt (assign);
                    }
                }
            }
          next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
        }
      return true;
    }

  dr_chain.create (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {

      if (j == 0)
        {
          if (slp)
            {
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
                                 NULL, slp_node, -1);

              vec_oprnd = vec_oprnds[0];
            }
          else
            {
              /* For interleaved stores we collect vectorized defs for all the
                 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
                 used as an input to vect_permute_store_chain(), and OPRNDS as
                 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

                 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
                 OPRNDS are of size 1.  */
              next_stmt = first_stmt;
              for (i = 0; i < group_size; i++)
                {
                  /* Since gaps are not supported for interleaved stores,
                     GROUP_SIZE is the exact number of stmts in the chain.
                     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
                     there is no interleaving, GROUP_SIZE is 1, and only one
                     iteration of the loop will be executed.  */
                  gcc_assert (next_stmt
                              && gimple_assign_single_p (next_stmt));
                  op = gimple_assign_rhs1 (next_stmt);

                  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
                  dr_chain.quick_push (vec_oprnd);
                  oprnds.quick_push (vec_oprnd);
                  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
                }
            }

          /* We should have catched mismatched types earlier.  */
          gcc_assert (useless_type_conversion_p (vectype,
                                                 TREE_TYPE (vec_oprnd)));
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (DR_REF (first_dr))))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (reference_alias_ptr_type
                                              (DR_REF (first_dr)), 0);
              inv_p = false;
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type,
                                          simd_lane_access_p ? loop : NULL,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p);
          gcc_assert (bb_vinfo || !inv_p);
        }
      else
        {
          /* For interleaved stores we created vectorized defs for all the
             defs stored in OPRNDS in the previous iteration (previous copy).
             DR_CHAIN is then used as an input to vect_permute_store_chain(),
             and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
             next copy.
             If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
          for (i = 0; i < group_size; i++)
            {
              op = oprnds[i];
              vect_is_simple_use (op, vinfo, &def_stmt, &dt);
              vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
              dr_chain[i] = vec_oprnd;
              oprnds[i] = vec_oprnd;
            }
          if (dataref_offset)
            dataref_offset
              = int_const_binop (PLUS_EXPR, dataref_offset,
                                 TYPE_SIZE_UNIT (aggr_type));
          else
            dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                           TYPE_SIZE_UNIT (aggr_type));
        }

      if (store_lanes_p)
        {
          tree vec_array;

          /* Combine all the vectors into an array.  */
          vec_array = create_vector_array (vectype, vec_num);
          for (i = 0; i < vec_num; i++)
            {
              vec_oprnd = dr_chain[i];
              write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
            }

          /* Emit:
               MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
          gimple_call_set_lhs (new_stmt, data_ref);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
        }
      else
        {
          new_stmt = NULL;
          if (grouped_store)
            {
              result_chain.create (group_size);
              /* Permute.  */
              vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
                                        &result_chain);
            }

          next_stmt = first_stmt;
          for (i = 0; i < vec_num; i++)
            {
              unsigned align, misalign;

              if (i > 0)
                /* Bump the vector pointer.  */
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              if (slp)
                vec_oprnd = vec_oprnds[i];
              else if (grouped_store)
                /* For grouped stores vectorized defs are interleaved in
                   vect_permute_store_chain().  */
                vec_oprnd = result_chain[i];

              data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
                                      dataref_ptr,
                                      dataref_offset
                                      ? dataref_offset
                                      : build_int_cst (reference_alias_ptr_type
                                                       (DR_REF (first_dr)), 0));
              align = TYPE_ALIGN_UNIT (vectype);
              if (aligned_access_p (first_dr))
                misalign = 0;
              else if (DR_MISALIGNMENT (first_dr) == -1)
                {
                  if (DR_VECT_AUX (first_dr)->base_element_aligned)
                    align = TYPE_ALIGN_UNIT (elem_type);
                  else
                    align = get_object_alignment (DR_REF (first_dr))
                            / BITS_PER_UNIT;
                  misalign = 0;
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          align * BITS_PER_UNIT);
                }
              else
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  misalign = DR_MISALIGNMENT (first_dr);
                }
              if (dataref_offset == NULL_TREE
                  && TREE_CODE (dataref_ptr) == SSA_NAME)
                set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
                                        misalign);

              if (negative
                  && dt != vect_constant_def
                  && dt != vect_external_def)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  tree perm_dest
                    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
                                                   vectype);
                  tree new_temp = make_ssa_name (perm_dest);

                  /* Generate the permute statement.  */
                  gimple *perm_stmt
                    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
                                           vec_oprnd, perm_mask);
                  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

                  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
                  vec_oprnd = new_temp;
                }

              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_assign (data_ref, vec_oprnd);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              if (slp)
                continue;

              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              if (!next_stmt)
                break;
            }
        }
      if (!slp)
        {
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }

  dr_chain.release ();
  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
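
/* Illustrative sketch (assumed example) of the scatter path above: for

       for (i = 0; i < n; i++)
         p[idx[i]] = x[i];

   each copy emits a call to the target's scatter built-in, conceptually

       SCATTER_DECL (ptr, all_ones_mask, vec_idx, vec_src, scale);

   with VIEW_CONVERT_EXPRs inserted when the built-in's index or source
   vector types match the stmt's vectypes in size but not in sign.  */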
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  mask_elt_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
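
/* Usage sketch (hypothetical values): to interleave the low halves of
   two 8-element vectors one would fill SEL with

       {0, 8, 1, 9, 2, 10, 3, 11}

   and call vect_gen_perm_mask_checked (vectype, sel); the checked
   variant asserts can_vec_perm_p rather than leaving the check to the
   caller.  */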
/* Given a vector variable X and Y, that was generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
                      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
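
/* For instance (illustrative), reversing a negative-step access can be
   emitted as

       new_temp = permute_vec_elements (vec, vec,
                                        perm_mask_for_reverse (vectype),
                                        stmt, gsi);

   i.e. a two-input VEC_PERM_EXPR with both inputs the same vector.  */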
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loops preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          /* Make sure we don't need to recurse.  While we could do
             so in simple cases when there are more complex use webs
             we don't have an easy way to preserve stmt order to fulfil
             dependencies within them.  */
          tree op2;
          ssa_op_iter i2;
          if (gimple_code (def_stmt) == GIMPLE_PHI)
            return false;
          FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
            {
              gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
              if (!gimple_nop_p (def_stmt2)
                  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
                return false;
            }
          any = true;
        }
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
          gsi_remove (&gsi, false);
          gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
        }
    }

  return true;
}
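
/* Sketch (assumed example): for an invariant load in

       for (i = 0; i < n; i++)
         a[i] = *p;

   the address computation feeding "*p" may be defined inside the loop
   while depending only on loop invariants; hoist_defs_of_uses moves
   those single-level defs to the preheader so the load itself can be
   hoisted and the loaded value broadcast into a vector.  */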
5978 /* vectorizable_load.
5980 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
5982 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5983 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5984 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5987 vectorizable_load (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
5988 slp_tree slp_node
, slp_instance slp_node_instance
)
5991 tree vec_dest
= NULL
;
5992 tree data_ref
= NULL
;
5993 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5994 stmt_vec_info prev_stmt_info
;
5995 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5996 struct loop
*loop
= NULL
;
5997 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
5998 bool nested_in_vect_loop
= false;
5999 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
6003 gimple
*new_stmt
= NULL
;
6005 enum dr_alignment_support alignment_support_scheme
;
6006 tree dataref_ptr
= NULL_TREE
;
6007 tree dataref_offset
= NULL_TREE
;
6008 gimple
*ptr_incr
= NULL
;
6010 int i
, j
, group_size
= -1, group_gap_adj
;
6011 tree msq
= NULL_TREE
, lsq
;
6012 tree offset
= NULL_TREE
;
6013 tree byte_offset
= NULL_TREE
;
6014 tree realignment_token
= NULL_TREE
;
6016 vec
<tree
> dr_chain
= vNULL
;
6017 bool grouped_load
= false;
6018 bool load_lanes_p
= false;
6021 bool negative
= false;
6022 bool compute_in_loop
= false;
6023 struct loop
*at_loop
;
6025 bool slp
= (slp_node
!= NULL
);
6026 bool slp_perm
= false;
6027 enum tree_code code
;
6028 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
6031 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
6032 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
6033 int gather_scale
= 1;
6034 enum vect_def_type gather_dt
= vect_unknown_def_type
;
6035 vec_info
*vinfo
= stmt_info
->vinfo
;
6037 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
6040 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
6043 /* Is vectorizable load? */
6044 if (!is_gimple_assign (stmt
))
6047 scalar_dest
= gimple_assign_lhs (stmt
);
6048 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
6051 code
= gimple_assign_rhs_code (stmt
);
6052 if (code
!= ARRAY_REF
6053 && code
!= BIT_FIELD_REF
6054 && code
!= INDIRECT_REF
6055 && code
!= COMPONENT_REF
6056 && code
!= IMAGPART_EXPR
6057 && code
!= REALPART_EXPR
6059 && TREE_CODE_CLASS (code
) != tcc_declaration
)
6062 if (!STMT_VINFO_DATA_REF (stmt_info
))
6065 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
6066 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
6070 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
6071 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
6072 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
6077 /* Multiple types in SLP are handled by creating the appropriate number of
6078 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6080 if (slp
|| PURE_SLP_STMT (stmt_info
))
6083 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
6085 gcc_assert (ncopies
>= 1);
6087 /* FORNOW. This restriction should be relaxed. */
6088 if (nested_in_vect_loop
&& ncopies
> 1)
6090 if (dump_enabled_p ())
6091 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6092 "multiple types in nested loop.\n");
6096 /* Invalidate assumptions made by dependence analysis when vectorization
6097 on the unrolled body effectively re-orders stmts. */
6099 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6100 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6101 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6103 if (dump_enabled_p ())
6104 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6105 "cannot perform implicit CSE when unrolling "
6106 "with negative dependence distance\n");
6110 elem_type
= TREE_TYPE (vectype
);
6111 mode
= TYPE_MODE (vectype
);
6113 /* FORNOW. In some cases can vectorize even if data-type not supported
6114 (e.g. - data copies). */
6115 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
6117 if (dump_enabled_p ())
6118 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6119 "Aligned load, but unsupported type.\n");
6123 /* Check if the load is a part of an interleaving chain. */
6124 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
6126 grouped_load
= true;
6128 gcc_assert (!nested_in_vect_loop
&& !STMT_VINFO_GATHER_SCATTER_P (stmt_info
));
6130 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6132 /* If this is single-element interleaving with an element distance
6133 that leaves unused vector loads around punt - we at least create
6134 very sub-optimal code in that case (and blow up memory,
6136 if (first_stmt
== stmt
6137 && !GROUP_NEXT_ELEMENT (stmt_info
)
6138 && GROUP_SIZE (stmt_info
) > TYPE_VECTOR_SUBPARTS (vectype
))
6140 if (dump_enabled_p ())
6141 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6142 "single-element interleaving not supported "
6143 "for not adjacent vector loads\n");
6147 if (slp
&& SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
6150 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
6152 && !PURE_SLP_STMT (stmt_info
)
6153 && !STMT_VINFO_STRIDED_P (stmt_info
))
6155 if (vect_load_lanes_supported (vectype
, group_size
))
6156 load_lanes_p
= true;
6157 else if (!vect_grouped_load_supported (vectype
, group_size
))
6161 /* Invalidate assumptions made by dependence analysis when vectorization
6162 on the unrolled body effectively re-orders stmts. */
6163 if (!PURE_SLP_STMT (stmt_info
)
6164 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6165 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6166 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6168 if (dump_enabled_p ())
6169 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6170 "cannot perform implicit CSE when performing "
6171 "group loads with negative dependence distance\n");
6175 /* Similarly when the stmt is a load that is both part of a SLP
6176 instance and a loop vectorized stmt via the same-dr mechanism
6177 we have to give up. */
6178 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)
6179 && (STMT_SLP_TYPE (stmt_info
)
6180 != STMT_SLP_TYPE (vinfo_for_stmt
6181 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)))))
6183 if (dump_enabled_p ())
6184 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6185 "conflicting SLP types for CSEd load\n");
6191 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
6194 gather_decl
= vect_check_gather_scatter (stmt
, loop_vinfo
, &gather_base
,
6195 &gather_off
, &gather_scale
);
6196 gcc_assert (gather_decl
);
6197 if (!vect_is_simple_use (gather_off
, vinfo
, &def_stmt
, &gather_dt
,
6198 &gather_off_vectype
))
6200 if (dump_enabled_p ())
6201 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6202 "gather index use not simple.\n");
6206 else if (STMT_VINFO_STRIDED_P (stmt_info
))
6209 && (slp
|| PURE_SLP_STMT (stmt_info
)))
6210 && (group_size
> nunits
6211 || nunits
% group_size
!= 0))
6213 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6214 "unhandled strided group load\n");
  else
    {
      negative = tree_int_cst_compare (nested_in_vect_loop
				       ? STMT_VINFO_DR_STEP (stmt_info)
				       : DR_STEP (dr),
				       size_zero_node) < 0;
      if (negative && ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types with negative step.\n");
	  return false;
	}

      if (negative)
	{
	  if (grouped_load)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step for group load not supported"
				 "\n");
	      return false;
	    }
	  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
	  if (alignment_support_scheme != dr_aligned
	      && alignment_support_scheme != dr_unaligned_supported)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step but alignment required.\n");
	      return false;
	    }
	  if (!perm_mask_for_reverse (vectype))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step and reversing not supported."
				 "\n");
	      return false;
	    }
	}
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
	vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
			      NULL, NULL, NULL);
      return true;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform load. ncopies = %d\n", ncopies);

  /* Transform.  */

  ensure_base_align (stmt_info, dr);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE;
      tree prev_res = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);

      if (nunits == gather_off_nunits)
	modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
	  modifier = WIDEN;

	  for (i = 0; i < gather_off_nunits; ++i)
	    sel[i] = i | nunits;

	  perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
	}
      else if (nunits == gather_off_nunits * 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
	  modifier = NARROW;

	  for (i = 0; i < nunits; ++i)
	    sel[i] = i < gather_off_nunits
		     ? i : i + nunits - gather_off_nunits;

	  perm_mask = vect_gen_perm_mask_checked (vectype, sel);
	  ncopies *= 2;
	}
      else
	gcc_unreachable ();
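
      /* Editor's note, an illustrative sketch not from the original sources:
	 with nunits == 4 and gather_off_nunits == 8, the WIDEN case above
	 builds sel = { 4, 5, 6, 7, 4, 5, 6, 7 }, a permutation exposing the
	 high half of the offset vector for the odd copies.  Conversely, with
	 nunits == 8 and gather_off_nunits == 4, the NARROW case builds
	 sel = { 0, 1, 2, 3, 8, 9, 10, 11 }, which concatenates the low halves
	 of two gather results into a single vector of type VECTYPE.  */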
      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));

      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}
      /* Currently we support only unconditional gather loads,
	 so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
	mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
	{
	  mask = build_int_cst (TREE_TYPE (masktype), -1);
	  mask = build_vector_from_val (masktype, mask);
	  mask = vect_init_vector (stmt, mask, masktype, NULL);
	}
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
	{
	  REAL_VALUE_TYPE r;
	  long tmp[6];
	  for (j = 0; j < 6; ++j)
	    tmp[j] = -1;
	  real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
	  mask = build_real (TREE_TYPE (masktype), r);
	  mask = build_vector_from_val (masktype, mask);
	  mask = vect_init_vector (stmt, mask, masktype, NULL);
	}
      else
	gcc_unreachable ();

      scale = build_int_cst (scaletype, gather_scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
	merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
	{
	  REAL_VALUE_TYPE r;
	  long tmp[6];
	  for (j = 0; j < 6; ++j)
	    tmp[j] = 0;
	  real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
	  merge = build_real (TREE_TYPE (rettype), r);
	}
      else
	gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);
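
      /* Editor's sketch (illustrative only, assuming an x86-style gather
	 builtin): the loop below emits a call of the shape

	   vect_dest = GATHER_DECL (merge, ptr, idx, mask, scale);

	 with MERGE a zero vector, MASK the all-ones vector built above
	 (the load is unconditional) and SCALE the element scale factor.  */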
      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (modifier == WIDEN && (j & 1))
	    op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
				       perm_mask, stmt, gsi);
	  else if (j == 0)
	    op = vec_oprnd0
	      = vect_get_vec_def_for_operand (gather_off, stmt);
	  else
	    op = vec_oprnd0
	      = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
			  == TYPE_VECTOR_SUBPARTS (idxtype));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  new_stmt
	    = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);

	  if (!useless_type_conversion_p (vectype, rettype))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
			  == TYPE_VECTOR_SUBPARTS (rettype));
	      op = vect_get_new_ssa_name (rettype, vect_simple_var);
	      gimple_call_set_lhs (new_stmt, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      var = make_ssa_name (vec_dest);
	      op = build1 (VIEW_CONVERT_EXPR, vectype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	    }
	  else
	    {
	      var = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, var);
	    }

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (modifier == NARROW)
	    {
	      if ((j & 1) == 0)
		{
		  prev_res = var;
		  continue;
		}
	      var = permute_vec_elements (prev_res, var,
					  perm_mask, stmt, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (var);
	    }

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      return true;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;

      gcc_assert (!nested_in_vect_loop);

      if (slp && grouped_load)
	first_dr = STMT_VINFO_DATA_REF
	    (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
      else
	first_dr = dr;

      stride_base
	= fold_build_pointer_plus
	    (DR_BASE_ADDRESS (first_dr),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (DR_OFFSET (first_dr)),
			 convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr));

      /* For a load with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     ... = array[i];

	 we generate a new induction variable and new accesses to
	 form a new vector (or vectors, depending on ncopies):

	   for (j = 0; ; j += VF*stride)
	     tmp1 = array[j];
	     tmp2 = array[j + stride];
	     ...
	     vectemp = {tmp1, tmp2, ...}
	     ...  */

      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
			    build_int_cst (TREE_TYPE (stride_step), vf));
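
      /* Editor's worked example: if the scalar step is S bytes and the
	 vectorization factor VF is 4, IVSTEP evaluates to 4*S, i.e. the new
	 induction variable advances by a whole vector's worth of strided
	 elements per vectorized iteration.  */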
      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (unshare_expr (stride_step),
					  &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)),
				 0);
      int nloads = nunits;
      tree ltype = TREE_TYPE (vectype);
      auto_vec<tree> dr_chain;
      if (slp)
	{
	  nloads = nunits / group_size;
	  if (group_size < nunits)
	    ltype = build_vector_type (TREE_TYPE (vectype), group_size);
	  else
	    ltype = vectype;
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
	  ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  if (slp_perm)
	    dr_chain.create (ncopies);
	}
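
      /* Editor's illustration: with nunits == 4 and group_size == 2 the SLP
	 path above sets nloads = 2 and ltype to a 2-element vector type, so
	 each copy issues two 2-element loads whose results are combined into
	 one 4-element vector by the constructor built below.  */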
      for (j = 0; j < ncopies; j++)
	{
	  tree vec_inv;

	  if (nloads > 1)
	    {
	      vec_alloc (v, nloads);
	      for (i = 0; i < nloads; i++)
		{
		  tree newref, newoff;

		  newref = build2 (MEM_REF, ltype, running_off, alias_off);
		  newref = force_gimple_operand_gsi (gsi, newref, true,
						     NULL_TREE, true,
						     GSI_SAME_STMT);
		  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
		  newoff = copy_ssa_name (running_off);
		  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					      running_off, stride_step);
		  vect_finish_stmt_generation (stmt, incr, gsi);

		  running_off = newoff;
		}

	      vec_inv = build_constructor (vectype, v);
	      new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (new_temp);
	    }
	  else
	    {
	      new_stmt = gimple_build_assign (make_ssa_name (ltype),
					      build2 (MEM_REF, ltype,
						      running_off, alias_off));
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      tree newoff = copy_ssa_name (running_off);
	      gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
						  running_off, stride_step);
	      vect_finish_stmt_generation (stmt, incr, gsi);

	      running_off = newoff;
	    }

	  if (slp)
	    {
	      SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      if (slp_perm)
		dr_chain.quick_push (gimple_assign_lhs (new_stmt));
	    }
	  else
	    {
	      if (j == 0)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
      if (slp_perm)
	vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
				      slp_node_instance, false);
      return true;
    }
  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      /* For SLP vectorization we directly vectorize a subchain
	 without permutation.  */
      if (slp
	  && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
	  && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
	first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
	  /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
	     ??? But we can only do so if there is exactly one
	     as we have no way to get at the rest.  Leave the CSE
	     opportunity alone.
	     ??? With the group load eventually participating
	     in multiple different permutations (having multiple
	     slp nodes which refer to the same group) the CSE
	     is even wrong code.  See PR56270.  */
	  && !slp)
	{
	  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	  return true;
	}
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      group_gap_adj = 0;

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
	{
	  grouped_load = false;
	  /* For SLP permutation support we need to load the whole group,
	     not only the number of vector stmts the permutation result
	     fits in.  */
	  if (slp_perm)
	    vec_num = (group_size * vf + nunits - 1) / nunits;
	  else
	    vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  group_gap_adj = vf * group_size - nunits * vec_num;
	}
      else
	vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
    }
  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
     VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */

  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:
         p = initial_addr;
         indx = 0;
         loop {
	   p = p + indx * vectype_size;
           vec_dest = *(p);
           indx = indx + 1;
         }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:
         p1 = initial_addr;
         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
           p2 = p2 + indx * vectype_size
           lsq = *(floor(p2))
           vec_dest = realign_load (msq, lsq, realignment_token)
           indx = indx + 1;
           msq = lsq;
         }   */
  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */

  if (nested_in_vect_loop
      && (TREE_INT_CST_LOW (DR_STEP (dr))
	  % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
				    alignment_support_scheme, NULL_TREE,
				    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
	{
	  phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
	  byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
				    size_one_node);
	}
    }
  else
    at_loop = loop;

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
	{
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr))
	      && integer_zerop (DR_INIT (first_dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (DR_REF (first_dr)))
	      && (alignment_support_scheme == dr_aligned
		  || alignment_support_scheme == dr_unaligned_supported))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
	      dataref_offset = build_int_cst (reference_alias_ptr_type
					      (DR_REF (first_dr)), 0);
	      inv_p = false;
	    }
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, &inv_p,
					  byte_offset);
	}
      else if (dataref_offset)
	dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
					  TYPE_SIZE_UNIT (aggr_type));
      else
	dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
				       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
	dr_chain.create (vec_num);
      if (load_lanes_p)
	{
	  tree vec_array;

	  vec_array = create_vector_array (vectype, vec_num);

	  /* Emit:
	       VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
	  data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
	  new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
	  gimple_call_set_lhs (new_stmt, vec_array);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  /* Extract each vector into an SSA_NAME.  */
	  for (i = 0; i < vec_num; i++)
	    {
	      new_temp = read_vector_array (stmt, gsi, scalar_dest,
					    vec_array, i);
	      dr_chain.quick_push (new_temp);
	    }

	  /* Record the mapping between SSA_NAMEs and statements.  */
	  vect_record_grouped_load_vectors (stmt, dr_chain);
	}
      else
	{
	  for (i = 0; i < vec_num; i++)
	    {
	      if (i > 0)
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt, NULL_TREE);
	      /* 2. Create the vector-load in the loop.  */
	      switch (alignment_support_scheme)
		{
		case dr_aligned:
		case dr_unaligned_supported:
		  {
		    unsigned int align, misalign;

		    data_ref
		      = fold_build2 (MEM_REF, vectype, dataref_ptr,
				     dataref_offset
				     ? dataref_offset
				     : build_int_cst (reference_alias_ptr_type
						      (DR_REF (first_dr)), 0));
		    align = TYPE_ALIGN_UNIT (vectype);
		    if (alignment_support_scheme == dr_aligned)
		      {
			gcc_assert (aligned_access_p (first_dr));
			misalign = 0;
		      }
		    else if (DR_MISALIGNMENT (first_dr) == -1)
		      {
			if (DR_VECT_AUX (first_dr)->base_element_aligned)
			  align = TYPE_ALIGN_UNIT (elem_type);
			else
			  align = (get_object_alignment (DR_REF (first_dr))
				   / BITS_PER_UNIT);
			misalign = 0;
			TREE_TYPE (data_ref)
			  = build_aligned_type (TREE_TYPE (data_ref),
						align * BITS_PER_UNIT);
		      }
		    else
		      {
			TREE_TYPE (data_ref)
			  = build_aligned_type (TREE_TYPE (data_ref),
						TYPE_ALIGN (elem_type));
			misalign = DR_MISALIGNMENT (first_dr);
		      }
		    if (dataref_offset == NULL_TREE
			&& TREE_CODE (dataref_ptr) == SSA_NAME)
		      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
					      align, misalign);
		    break;
		  }
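
		/* Editor's note, illustrative only: for a 16-byte V4SF
		   access with DR_MISALIGNMENT == 4, the final branch above
		   gives the MEM_REF only element alignment (4 bytes) and
		   records misalign = 4, so the expander emits an unaligned
		   vector load.  */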
		case dr_explicit_realign:
		  {
		    tree ptr, bump;

		    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

		    if (compute_in_loop)
		      msq = vect_setup_realignment (first_stmt, gsi,
						    &realignment_token,
						    dr_explicit_realign,
						    dataref_ptr, NULL);

		    if (TREE_CODE (dataref_ptr) == SSA_NAME)
		      ptr = copy_ssa_name (dataref_ptr);
		    else
		      ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
		    new_stmt = gimple_build_assign
				 (ptr, BIT_AND_EXPR, dataref_ptr,
				  build_int_cst
				    (TREE_TYPE (dataref_ptr),
				     -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    data_ref
		      = build2 (MEM_REF, vectype, ptr,
				build_int_cst (reference_alias_ptr_type
						 (DR_REF (first_dr)), 0));
		    vec_dest = vect_create_destination_var (scalar_dest,
							    vectype);
		    new_stmt = gimple_build_assign (vec_dest, data_ref);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_assign_set_lhs (new_stmt, new_temp);
		    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
		    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    msq = new_temp;

		    bump = size_binop (MULT_EXPR, vs,
				       TYPE_SIZE_UNIT (elem_type));
		    bump = size_binop (MINUS_EXPR, bump, size_one_node);
		    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
		    new_stmt = gimple_build_assign
				 (NULL_TREE, BIT_AND_EXPR, ptr,
				  build_int_cst
				    (TREE_TYPE (ptr),
				     -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
		    ptr = copy_ssa_name (ptr, new_stmt);
		    gimple_assign_set_lhs (new_stmt, ptr);
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    data_ref
		      = build2 (MEM_REF, vectype, ptr,
				build_int_cst (reference_alias_ptr_type
						 (DR_REF (first_dr)), 0));
		    break;
		  }
		case dr_explicit_realign_optimized:
		  if (TREE_CODE (dataref_ptr) == SSA_NAME)
		    new_temp = copy_ssa_name (dataref_ptr);
		  else
		    new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
		  new_stmt = gimple_build_assign
			       (new_temp, BIT_AND_EXPR, dataref_ptr,
				build_int_cst
				  (TREE_TYPE (dataref_ptr),
				   -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  data_ref
		    = build2 (MEM_REF, vectype, new_temp,
			      build_int_cst (reference_alias_ptr_type
					       (DR_REF (first_dr)), 0));
		  break;
		default:
		  gcc_unreachable ();
		}
	      vec_dest = vect_create_destination_var (scalar_dest, vectype);
	      new_stmt = gimple_build_assign (vec_dest, data_ref);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_assign_set_lhs (new_stmt, new_temp);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      /* 3. Handle explicit realignment if necessary/supported.
		 Create in loop:
		   vec_dest = realign_load (msq, lsq, realignment_token)  */
	      if (alignment_support_scheme == dr_explicit_realign_optimized
		  || alignment_support_scheme == dr_explicit_realign)
		{
		  lsq = gimple_assign_lhs (new_stmt);
		  if (!realignment_token)
		    realignment_token = dataref_ptr;
		  vec_dest = vect_create_destination_var (scalar_dest, vectype);
		  new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
						  msq, lsq, realignment_token);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);

		  if (alignment_support_scheme == dr_explicit_realign_optimized)
		    {
		      gcc_assert (phi);
		      if (i == vec_num - 1 && j == ncopies - 1)
			add_phi_arg (phi, lsq,
				     loop_latch_edge (containing_loop),
				     UNKNOWN_LOCATION);
		      msq = lsq;
		    }
		}
	      /* 4. Handle invariant-load.  */
	      if (inv_p && !bb_vinfo)
		{
		  gcc_assert (!grouped_load);
		  /* If we have versioned for aliasing or the loop doesn't
		     have any data dependencies that would preclude this,
		     then we are sure this is a loop invariant load and
		     thus we can insert it on the preheader edge.  */
		  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
		      && !nested_in_vect_loop
		      && hoist_defs_of_uses (stmt, loop))
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_NOTE, vect_location,
					   "hoisting out of the vectorized "
					   "loop: ");
			  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
			}
		      tree tem = copy_ssa_name (scalar_dest);
		      gsi_insert_on_edge_immediate
			(loop_preheader_edge (loop),
			 gimple_build_assign (tem,
					      unshare_expr
					        (gimple_assign_rhs1 (stmt))));
		      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
		    }
		  else
		    {
		      gimple_stmt_iterator gsi2 = *gsi;
		      gsi_next (&gsi2);
		      new_temp = vect_init_vector (stmt, scalar_dest,
						   vectype, &gsi2);
		    }
		  new_stmt = SSA_NAME_DEF_STMT (new_temp);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt, vinfo));
		}

	      if (negative)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  new_temp = permute_vec_elements (new_temp, new_temp,
						   perm_mask, stmt, gsi);
		  new_stmt = SSA_NAME_DEF_STMT (new_temp);
		}
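
	      /* Editor's sketch: for a loop-invariant load such as
		 x = a[0], the hoisting path above emits tem_1 = a[0] on the
		 preheader edge and then splats it, yielding roughly
		 vect_cst_ = {tem_1, tem_1, tem_1, tem_1} for V4SI.  */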
	      /* Collect vector loads and later create their permutation in
		 vect_transform_grouped_load ().  */
	      if (grouped_load || slp_perm)
		dr_chain.quick_push (new_temp);

	      /* Store vector loads in the corresponding SLP_NODE.  */
	      if (slp && !slp_perm)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	    }
	  /* Bump the vector pointer to account for a gap or for excess
	     elements loaded for a permuted SLP load.  */
	  if (group_gap_adj != 0)
	    {
	      bool ovf;
	      tree bump
		= wide_int_to_tree (sizetype,
				    wi::smul (TYPE_SIZE_UNIT (elem_type),
					      group_gap_adj, &ovf));
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					     stmt, bump);
	    }
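
	  /* Editor's note: the bump is plain element-size arithmetic; e.g.
	     with 4-byte elements and group_gap_adj == 2 the pointer is
	     advanced by 8 bytes past the elements just loaded.  */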
	}

      if (slp && !slp_perm)
	continue;

      if (slp_perm)
	{
	  if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
					     slp_node_instance, false))
	    {
	      dr_chain.release ();
	      return false;
	    }
	}
      else
	{
	  if (grouped_load)
	    {
	      if (!load_lanes_p)
		vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
	      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	    }
	  else
	    {
	      if (j == 0)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
      dr_chain.release ();
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vec_is_simple_use.  */

static bool
vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
{
  tree lhs, rhs;
  enum vect_def_type dt;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
	return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
	   && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
	return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
	   && TREE_CODE (rhs) != FIXED_CST)
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
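
/* Editor's illustration for vect_is_simple_cond: a condition like a_1 < b_2
   qualifies (both operands are SSA names with simple defs, and the vector
   type for the comparison is taken from whichever operand has one), whereas
   anything that is not a comparison tree is rejected up front by the
   COMPARISON_CLASS_P check.  */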
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, tree reduc_def, int reduc_index,
			slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dt, dts[4];
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;

  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
    {
      if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
	return false;

      if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
	  && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	       && reduc_def))
	return false;

      /* FORNOW: not yet supported.  */
      if (STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "value used after loop.\n");
	  return false;
	}
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
      || !comp_vectype)
    return false;

  gimple *def_stmt;
  if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt))
    return false;
  if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt))
    return false;

  vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
  if (vec_cmp_type == NULL_TREE)
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (vectype, comp_vectype);
    }

  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 4> ops;
	      auto_vec<vec<tree>, 4> vec_defs;

	      ops.safe_push (TREE_OPERAND (cond_expr, 0));
	      ops.safe_push (TREE_OPERAND (cond_expr, 1));
	      ops.safe_push (then_clause);
	      ops.safe_push (else_clause);
	      vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
	      vec_oprnds3 = vec_defs.pop ();
	      vec_oprnds2 = vec_defs.pop ();
	      vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();

	      ops.release ();
	      vec_defs.release ();
	    }
	  else
	    {
	      gimple *gtemp;
	      vec_cond_lhs =
		vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt);
	      vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
				  loop_vinfo, &gtemp, &dts[0]);

	      vec_cond_rhs =
		vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
					      stmt);
	      vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
				  loop_vinfo, &gtemp, &dts[1]);
	      if (reduc_index == 1)
		vec_then_clause = reduc_def;
	      else
		{
		  vec_then_clause = vect_get_vec_def_for_operand (then_clause,
								  stmt);
		  vect_is_simple_use (then_clause, loop_vinfo,
				      &gtemp, &dts[2]);
		}
	      if (reduc_index == 2)
		vec_else_clause = reduc_def;
	      else
		{
		  vec_else_clause = vect_get_vec_def_for_operand (else_clause,
								  stmt);
		  vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
		}
	    }
	}
      else
	{
	  vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
							 vec_oprnds0.pop ());
	  vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
							 vec_oprnds1.pop ());
	  vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
							    vec_oprnds2.pop ());
	  vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
							    vec_oprnds3.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_cond_lhs);
	  vec_oprnds1.quick_push (vec_cond_rhs);
	  vec_oprnds2.quick_push (vec_then_clause);
	  vec_oprnds3.quick_push (vec_else_clause);
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
	{
	  vec_cond_rhs = vec_oprnds1[i];
	  vec_then_clause = vec_oprnds2[i];
	  vec_else_clause = vec_oprnds3[i];

	  vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
				vec_cond_lhs, vec_cond_rhs);
	  vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
				  vec_compare, vec_then_clause,
				  vec_else_clause);

	  new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
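
/* Editor's sketch of the transformation performed by vectorizable_condition
   above (illustrative GIMPLE, assuming V4SI):

     scalar:  x_1 = a_2 < b_3 ? c_4 : d_5;
     vector:  vect_x = VEC_COND_EXPR <vect_a < vect_b, vect_c, vect_d>;

   where the comparison is built in VEC_CMP_TYPE and the selection in the
   statement's VECTYPE.  */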
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: stmt has volatile operands\n");
      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */

  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && pattern_stmt
	  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	{
	  /* Analyze PATTERN_STMT instead of the original stmt.  */
	  stmt = pattern_stmt;
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

	  return true;
	}
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	   && node == NULL
	   && pattern_stmt
	   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
	return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *pattern_def_stmt = gsi_stmt (si);
	  if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
	    {
	      /* Analyze def stmt of STMT if it's a pattern stmt.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "==> examining pattern def statement: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
		}

	      if (!vect_analyze_stmt (pattern_def_stmt,
				      need_to_vectorize, node))
		return false;
	    }
	}
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
		  && (relevance == vect_used_in_outer
		      || relevance == vect_used_in_outer_by_reduction
		      || relevance == vect_used_by_reduction
		      || relevance == vect_unused_in_scope));
      break;

    case vect_induction_def:
    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));

      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "get vectype for scalar type: ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
	  dump_printf (MSG_NOTE, "\n");
	}

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "not SLPed: unsupported data-type ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				 scalar_type);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  return false;
	}

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
	  dump_printf (MSG_NOTE, "\n");
	}

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
		  || (is_gimple_call (stmt)
		      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "handled only by SLP analysis\n");
      return true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	  || vectorizable_conversion (stmt, NULL, NULL, node)
	  || vectorizable_shift (stmt, NULL, NULL, node)
	  || vectorizable_operation (stmt, NULL, NULL, node)
	  || vectorizable_assignment (stmt, NULL, NULL, node)
	  || vectorizable_load (stmt, NULL, NULL, node, NULL)
	  || vectorizable_call (stmt, NULL, NULL, node)
	  || vectorizable_store (stmt, NULL, NULL, node)
	  || vectorizable_reduction (stmt, NULL, NULL, node)
	  || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
  else
    {
      if (bb_vinfo)
	ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	      || vectorizable_conversion (stmt, NULL, NULL, node)
	      || vectorizable_shift (stmt, NULL, NULL, node)
	      || vectorizable_operation (stmt, NULL, NULL, node)
	      || vectorizable_assignment (stmt, NULL, NULL, node)
	      || vectorizable_load (stmt, NULL, NULL, node, NULL)
	      || vectorizable_call (stmt, NULL, NULL, node)
	      || vectorizable_store (stmt, NULL, NULL, node)
	      || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: relevant stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: live stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
		     bool *grouped_store, slp_tree slp_node,
		     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
				slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
	{
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
	  *grouped_store = true;
	  if (STMT_VINFO_VEC_STMT (stmt_info))
	    is_store = true;
	}
      else
	is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt)
	  && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
	is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "stmt not supported.\n");
	  gcc_unreachable ();
	}
    }

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
		&& STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
				  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info) ==
	     vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
				 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
      else
	scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	{
	  if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	    {
	      exit_phi = USE_STMT (use_p);
	      STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
	    }
	}
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
	next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}

/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
	= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
	{
	  gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
	  gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
	  gimple_set_bb (patt_stmt, NULL);
	  tree lhs = gimple_get_lhs (patt_stmt);
	  if (TREE_CODE (lhs) == SSA_NAME)
	    release_ssa_name (lhs);
	  if (seq)
	    {
	      gimple_stmt_iterator si;
	      for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
		{
		  gimple *seq_stmt = gsi_stmt (si);
		  gimple_set_bb (seq_stmt, NULL);
		  lhs = gimple_get_lhs (seq_stmt);
		  if (TREE_CODE (lhs) == SSA_NAME)
		    release_ssa_name (lhs);
		  free_stmt_vec_info (seq_stmt);
		}
	    }
	  free_stmt_vec_info (patt_stmt);
	}
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  machine_mode inner_mode = TYPE_MODE (scalar_type);
  machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their types precision we use a element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}
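
/* Editor's example: on a target whose preferred SIMD mode for SImode is
   V4SImode, a call with SIZE == 0 and an int scalar type yields V4SI;
   asking for a 16-byte vector of double yields V2DF; and a scalar mode
   with no supported vector form makes the function return NULL_TREE.  */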
unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  basic_block bb = gimple_bb (*def_stmt);
  if ((is_a <loop_vec_info> (vinfo)
       && !flow_bb_inside_loop_p (as_a <loop_vec_info> (vinfo)->loop, bb))
      || (is_a <bb_vec_info> (vinfo)
	  && (bb != as_a <bb_vec_info> (vinfo)->bb
	      || gimple_code (*def_stmt) == GIMPLE_PHI)))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      if (is_a <bb_vec_info> (vinfo) && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
	*dt = vect_external_def;
      else
	*dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
	{
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	  break;
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	  break;
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	  break;
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	  break;
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	  break;
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	  break;
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	  break;
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	  break;
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");
	  break;
	}
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && !STMT_VINFO_RELEVANT (stmt_info)
	  && !STMT_VINFO_LIVE_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
8264 /* Function supportable_widening_operation
8266 Check whether an operation represented by the code CODE is a
8267 widening operation that is supported by the target platform in
8268 vector form (i.e., when operating on arguments of type VECTYPE_IN
8269 producing a result of type VECTYPE_OUT).
8271 Widening operations we currently support are NOP (CONVERT), FLOAT
8272 and WIDEN_MULT. This function checks if these operations are supported
8273 by the target platform either directly (via vector tree-codes), or via
8277 - CODE1 and CODE2 are codes of vector operations to be used when
8278 vectorizing the operation, if available.
8279 - MULTI_STEP_CVT determines the number of required intermediate steps in
8280 case of multi-step conversion (like char->short->int - in that case
8281 MULTI_STEP_CVT will be 1).
8282 - INTERM_TYPES contains the intermediate type required to perform the
8283 widening operation (short in the above example). */
bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
                                tree vectype_out, tree vectype_in,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
         two vectors (because the widened results do not fit into one vector).
         The generated vector results would normally be expected to be
         generated in the same order as in the original scalar computation,
         i.e. if 8 results are generated in each vector iteration, they are
         to be organized as follows:
                vect1: [res1,res2,res3,res4],
                vect2: [res5,res6,res7,res8].

         However, in the special case that the result of the widening
         operation is used in a reduction computation only, the order doesn't
         matter (because when vectorizing a reduction we change the order of
         the computation).  Some targets can take advantage of this and
         generate more efficient code.  For example, targets like Altivec,
         that support widen_mult using a sequence of {mult_even,mult_odd}
         generate the following vectors:
                vect1: [res1,res3,res5,res7],
                vect2: [res2,res4,res6,res8].

         When vectorizing outer-loops, we execute the inner-loop sequentially
         (each vectorized inner-loop iteration contributes to VF outer-loop
         iterations in parallel).  We therefore don't allow changing the
         order of the computation in the inner-loop during outer-loop
         vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
         widen and then contract again, e.g. (short)((int)x * y >> 8).
         Normally, pack_trunc performs an even/odd permute, whereas the
         repack from an even/odd expansion would be an interleave, which
         would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
         on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
         are properly set up for the caller.  If we fail, we'll continue with
         a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
          && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
          && !nested_in_vect_loop_p (vect_loop, stmt)
          && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
                                             stmt, vectype_out, vectype_in,
                                             code1, code2, multi_step_cvt,
                                             interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have
             the same operation.  One such example is s += a * b, where
             elements in a and b cannot be reordered.  Here we check if the
             vector defined by STMT is only directly used in the reduction
             statement.  */
          tree lhs = gimple_assign_lhs (stmt);
          use_operand_p dummy;
          gimple *use_stmt;
          stmt_vec_info use_stmt_info = NULL;
          if (single_imm_use (lhs, &dummy, &use_stmt)
              && (use_stmt_info = vinfo_for_stmt (use_stmt))
              && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
            return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;
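
      /* A concrete instance of the special case above (an illustrative
         sketch, not from the original sources):

            short a[N], b[N];
            int s = 0;
            for (i = 0; i < N; i++)
              s += a[i] * b[i];

         The widening multiply feeds only the reduction S, so the
         even/odd result ordering is acceptable and the recursion on
         VEC_WIDEN_MULT_EVEN_EXPR above can succeed.  */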

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using
     intermediate types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode,
                                          TYPE_UNSIGNED (prev_type));
      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
          || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode2].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (optab3, intermediate_mode))
              == CODE_FOR_nothing)
          || ((icode2 = optab_handler (optab4, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
          && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
        return true;

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
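
/* Illustrative example of a multi-step widening query (an editor's
   sketch; `stmt', `v4si_type' and `v16qi_type' are placeholder names):
   widening chars to ints where the target only provides adjacent-width
   unpacks:

     enum tree_code code1, code2;
     int steps;
     vec<tree> interm = vNULL;
     bool ok = supportable_widening_operation (NOP_EXPR, stmt, v4si_type,
                                               v16qi_type, &code1, &code2,
                                               &steps, &interm);

   On success CODE1/CODE2 are VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR,
   STEPS is 1 and INTERM holds the short vector type, i.e. the
   char->short->int chain described in the function comment.  */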

/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */
bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using
     intermediate types.  */
  prev_mode = vec_mode;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
        = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
        = optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
          && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
          && insn_data[icode1].operand[0].mode
             == insn_data[icode2].operand[0].mode)
        {
          uns = false;
          optab1 = interm_optab;
          icode1 = icode2;
        }
    }
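
  /* For example (illustrative): narrowing float to unsigned char can be
     done as float -> signed int -> pack-truncate; for every value that
     fits in the unsigned narrow type the low-order bits of the packed
     result match those of the unsigned conversion, and the signed
     float-to-int step is typically cheaper.  */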

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the demotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
        = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
                               optab_default);
      if (!interm_optab
          || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (interm_optab, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
        return true;

      prev_mode = intermediate_mode;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}
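
/* Illustrative example of a multi-step narrowing query (an editor's
   sketch; `v16qi_type' and `v4si_type' are placeholder names):
   narrowing ints to chars where the target only packs to the adjacent
   width:

     enum tree_code code1;
     int steps;
     vec<tree> interm = vNULL;
     bool ok = supportable_narrowing_operation (NOP_EXPR, v16qi_type,
                                                v4si_type, &code1,
                                                &steps, &interm);

   On success CODE1 is VEC_PACK_TRUNC_EXPR, STEPS is 1 and INTERM holds
   the short vector type, i.e. the int->short->char chain described in
   the function comment.  */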