/* Data References Analysis and Manipulation Utilities for Vectorization.
   Copyright (C) 2003-2014 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stor-layout.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "diagnostic-core.h"

/* Need to include rtl.h, expr.h, etc. for optabs.  */
#include "expr.h"
#include "optabs.h"

/* Return true if load- or store-lanes optab OPTAB is implemented for
   COUNT vectors of type VECTYPE.  NAME is the name of OPTAB.  */

static bool
vect_lanes_optab_supported_p (const char *name, convert_optab optab,
                              tree vectype, unsigned HOST_WIDE_INT count)
{
  enum machine_mode mode, array_mode;
  bool limit_p;

  mode = TYPE_MODE (vectype);
  limit_p = !targetm.array_mode_supported_p (mode, count);
  array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
                              MODE_INT, limit_p);

  if (array_mode == BLKmode)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
                         GET_MODE_NAME (mode), count);
      return false;
    }

  if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot use %s<%s><%s>\n", name,
                         GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
      return false;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
                     GET_MODE_NAME (mode));

  return true;
}
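
/* Illustrative example (not from the original sources): for an interleaved
   group of COUNT = 3 vectors of type V4SI this asks whether the target
   implements the vec_load_lanes / vec_store_lanes pattern (via OPTAB) for
   the corresponding 3 * 128-bit array mode; if the handler is
   CODE_FOR_nothing, the group has to be vectorized with separate vector
   loads/stores and permutations instead.  */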

/* Return the smallest scalar part of STMT.
   This is used to determine the vectype of the stmt.  We generally set the
   vectype according to the type of the result (lhs).  For stmts whose
   result-type is different than the type of the arguments (e.g., demotion,
   promotion), vectype will be reset appropriately (later).  Note that we have
   to visit the smallest datatype in this function, because that determines the
   VF.  If the smallest datatype in the loop is present only as the rhs of a
   promotion operation - we'd miss it.
   Such a case, where a variable of this datatype does not appear in the lhs
   anywhere in the loop, can only occur if it's an invariant: e.g.:
   'int_x = (int) short_inv', which we'd expect to have been optimized away by
   invariant motion.  However, we cannot rely on invariant motion to always
   take invariants out of the loop, and so in the case of promotion we also
   have to check the rhs.
   LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
   types.  */

tree
vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
                               HOST_WIDE_INT *rhs_size_unit)
{
  tree scalar_type = gimple_expr_type (stmt);
  HOST_WIDE_INT lhs, rhs;

  lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));

  if (is_gimple_assign (stmt)
      && (gimple_assign_cast_p (stmt)
          || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
          || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
          || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
    {
      tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
      if (rhs < lhs)
        scalar_type = rhs_type;
    }

  *lhs_size_unit = lhs;
  *rhs_size_unit = rhs;
  return scalar_type;
}
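
/* Illustrative example: for the widening statement
     int_x = (int) short_y;
   the lhs type has size 4 and the rhs type size 2, so *LHS_SIZE_UNIT = 4,
   *RHS_SIZE_UNIT = 2 and the short type is returned; the vectorization
   factor is then derived from the 2-byte element.  */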

/* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
   tested at run-time.  Return TRUE if DDR was successfully inserted.
   Return false if versioning is not supported.  */

static bool
vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
    return false;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "mark for run-time aliasing test between ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
      dump_printf (MSG_NOTE, " and ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
      dump_printf (MSG_NOTE, "\n");
    }

  if (optimize_loop_nest_for_size_p (loop))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not supported when optimizing"
                         " for size.\n");
      return false;
    }

  /* FORNOW: We don't support versioning with outer-loop vectorization.  */
  if (loop->inner)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not yet supported for outer-loops.\n");
      return false;
    }

  /* FORNOW: We don't support creating runtime alias tests for non-constant
     step.  */
  if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
      || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "versioning not yet supported for non-constant "
                         "step\n");
      return false;
    }

  LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
  return true;
}

/* Function vect_analyze_data_ref_dependence.

   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB.  When versioning for alias may check a
   dependence at run-time, return FALSE.  Adjust *MAX_VF according to
   the data dependence.  */

static bool
vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
                                  loop_vec_info loop_vinfo, int *max_vf)
{
  unsigned int i;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  lambda_vector dist_v;
  unsigned int loop_depth;

  /* In loop analysis all data references should be vectorizable.  */
  if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
      || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
    gcc_unreachable ();

  /* Independent data accesses.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return false;

  if (dra == drb
      || (DR_IS_READ (dra) && DR_IS_READ (drb)))
    return false;

  /* Even if we have an anti-dependence then, as the vectorized loop covers at
     least two scalar iterations, there is always also a true dependence.
     As the vectorizer does not re-order loads and stores we can ignore
     the anti-dependence if TBAA can disambiguate both DRs similar to the
     case with known negative distance anti-dependences (positive
     distance anti-dependences would violate TBAA constraints).  */
  if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
       || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
      && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
                                 get_alias_set (DR_REF (drb))))
    return false;

  /* Unknown data dependence.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      /* If user asserted safelen consecutive iterations can be
         executed concurrently, assume independence.  */
      if (loop->safelen >= 2)
        {
          if (loop->safelen < *max_vf)
            *max_vf = loop->safelen;
          LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
          return false;
        }

      if (STMT_VINFO_GATHER_P (stmtinfo_a)
          || STMT_VINFO_GATHER_P (stmtinfo_b))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "versioning for alias not supported for: "
                               "can't determine dependence between ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dra));
              dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (drb));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return true;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "versioning for alias required: "
                           "can't determine dependence between ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      /* Add to list of ddrs that need to be tested at run-time.  */
      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
    }

  /* Known data dependence.  */
  if (DDR_NUM_DIST_VECTS (ddr) == 0)
    {
      /* If user asserted safelen consecutive iterations can be
         executed concurrently, assume independence.  */
      if (loop->safelen >= 2)
        {
          if (loop->safelen < *max_vf)
            *max_vf = loop->safelen;
          LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
          return false;
        }

      if (STMT_VINFO_GATHER_P (stmtinfo_a)
          || STMT_VINFO_GATHER_P (stmtinfo_b))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "versioning for alias not supported for: "
                               "bad dist vector for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dra));
              dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (drb));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return true;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "versioning for alias required: "
                           "bad dist vector for ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      /* Add to list of ddrs that need to be tested at run-time.  */
      return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
    }

  loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
  FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
    {
      int dist = dist_v[loop_depth];

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "dependence distance  = %d.\n", dist);

      if (dist == 0)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "dependence distance == 0 between ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
              dump_printf (MSG_NOTE, " and ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          /* When we perform grouped accesses and perform implicit CSE
             by detecting equal accesses and doing disambiguation with
             runtime alias tests like for
                .. = a[i];
                .. = a[i+1];
                a[i] = ..;
                a[i+1] = ..;
                *p = ..;
                .. = a[i];
                .. = a[i+1];
             where we will end up loading { a[i], a[i+1] } once, make
             sure that inserting group loads before the first load and
             stores after the last store will do the right thing.  */
          if ((STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
               && GROUP_SAME_DR_STMT (stmtinfo_a))
              || (STMT_VINFO_GROUPED_ACCESS (stmtinfo_b)
                  && GROUP_SAME_DR_STMT (stmtinfo_b)))
            {
              gimple earlier_stmt;
              earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
              if (DR_IS_WRITE
                    (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "READ_WRITE dependence in interleaving."
                                     "\n");
                  return true;
                }
            }

          continue;
        }

      if (dist > 0 && DDR_REVERSED_P (ddr))
        {
          /* If DDR_REVERSED_P the order of the data-refs in DDR was
             reversed (to make distance vector positive), and the actual
             distance is negative.  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "dependence distance negative.\n");
          /* Record a negative dependence distance to later limit the
             amount of stmt copying / unrolling we can perform.
             Only need to handle read-after-write dependence.  */
          if (DR_IS_READ (drb)
              && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
                  || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
            STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
          continue;
        }

      if (abs (dist) >= 2
          && abs (dist) < *max_vf)
        {
          /* The dependence distance requires reduction of the maximal
             vectorization factor.  */
          *max_vf = abs (dist);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "adjusting maximal vectorization factor to %i\n",
                             *max_vf);
        }

      if (abs (dist) >= *max_vf)
        {
          /* Dependence distance does not create dependence, as far as
             vectorization is concerned, in this case.  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "dependence distance >= VF.\n");
          continue;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized, possible dependence "
                           "between data-refs ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_NOTE, " and ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
          dump_printf (MSG_NOTE, "\n");
        }

      return true;
    }

  return false;
}
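
/* Illustrative example: with a vectorization factor of 4, a dependence
   distance of 2 (e.g. a[i] = a[i-2] + 1) forces *MAX_VF down to 2, whereas
   a distance of 4 or more is harmless because each vector iteration only
   covers *MAX_VF scalar iterations.  */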

/* Function vect_analyze_data_ref_dependences.

   Examine all the data references in the loop, and make sure there do not
   exist any data dependences between them.  Set *MAX_VF according to
   the maximum vectorization factor the data dependences allow.  */

bool
vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
{
  unsigned int i;
  struct data_dependence_relation *ddr;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_data_ref_dependences ===\n");

  LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
  if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
                                &LOOP_VINFO_DDRS (loop_vinfo),
                                LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
    return false;

  FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
    if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
      return false;

  return true;
}

/* Function vect_slp_analyze_data_ref_dependence.

   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB.  When versioning for alias may check a
   dependence at run-time, return FALSE.  */

static bool
vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
{
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);

  /* We need to check dependences of statements marked as unvectorizable
     as well, they still can prohibit vectorization.  */

  /* Independent data accesses.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return false;

  if (dra == drb)
    return false;

  /* Read-read is OK.  */
  if (DR_IS_READ (dra) && DR_IS_READ (drb))
    return false;

  /* If dra and drb are part of the same interleaving chain consider
     them independent.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
      && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
          == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
    return false;

  /* Unknown data dependence.  */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "can't determine dependence between ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
          dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
    }
  else if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "determined dependence between ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
      dump_printf (MSG_NOTE, " and ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
      dump_printf (MSG_NOTE, "\n");
    }

  /* We do not vectorize basic blocks with write-write dependencies.  */
  if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
    return true;

  /* If we have a read-write dependence check that the load is before the store.
     When we vectorize basic blocks, vector load can be only before
     corresponding scalar load, and vector store can be only after its
     corresponding scalar store.  So the order of the accesses is preserved in
     case the load is before the store.  */
  gimple earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
  if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
    {
      /* That only holds for load-store pairs taking part in vectorization.  */
      if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra)))
          && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb))))
        return false;
    }

  return true;
}
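
/* Illustrative example: in a basic block with the scalar sequence
     x = *p;   *p = y;
   the load is the earlier statement, so the read-write dependence is
   acceptable for SLP; if the store came first the pair would be rejected,
   because the vector load may only be placed at the first scalar load and
   the vector store only at the last scalar store.  */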

/* Function vect_slp_analyze_data_ref_dependences.

   Examine all the data references in the basic-block, and make sure there
   do not exist any data dependences between them.  */

bool
vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
{
  struct data_dependence_relation *ddr;
  unsigned int i;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_slp_analyze_data_ref_dependences ===\n");

  if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
                                &BB_VINFO_DDRS (bb_vinfo),
                                vNULL, true))
    return false;

  FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr)
    if (vect_slp_analyze_data_ref_dependence (ddr))
      return false;

  return true;
}

/* Function vect_compute_data_ref_alignment

   Compute the misalignment of the data reference DR.

   Output:
   1. If during the misalignment computation it is found that the data reference
      cannot be vectorized then false is returned.
   2. DR_MISALIGNMENT (DR) is defined.

   FOR NOW: No analysis is actually performed. Misalignment is calculated
   only for trivial cases. TODO.  */

static bool
vect_compute_data_ref_alignment (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  tree ref = DR_REF (dr);
  tree vectype;
  tree base, base_addr;
  bool base_aligned;
  tree misalign;
  tree aligned_to, alignment;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_compute_data_ref_alignment:\n");

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Initialize misalignment to unknown.  */
  SET_DR_MISALIGNMENT (dr, -1);

  /* Strided loads perform only component accesses, misalignment information
     is irrelevant for them.  */
  if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
    return true;

  misalign = DR_INIT (dr);
  aligned_to = DR_ALIGNED_TO (dr);
  base_addr = DR_BASE_ADDRESS (dr);
  vectype = STMT_VINFO_VECTYPE (stmt_info);

  /* In case the dataref is in an inner-loop of the loop that is being
     vectorized (LOOP), we use the base and misalignment information
     relative to the outer-loop (LOOP).  This is ok only if the misalignment
     stays the same throughout the execution of the inner-loop, which is why
     we have to check that the stride of the dataref in the inner-loop evenly
     divides by the vector size.  */
  if (loop && nested_in_vect_loop_p (loop, stmt))
    {
      tree step = DR_STEP (dr);
      HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);

      if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "inner step divides the vector-size.\n");
          misalign = STMT_VINFO_DR_INIT (stmt_info);
          aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
          base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "inner step doesn't divide the vector-size.\n");
          misalign = NULL_TREE;
        }
    }

  /* Similarly, if we're doing basic-block vectorization, we can only use
     base and misalignment information relative to an innermost loop if the
     misalignment stays the same throughout the execution of the loop.
     As above, this is the case if the stride of the dataref evenly divides
     by the vector size.  */
  if (!loop)
    {
      tree step = DR_STEP (dr);
      HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);

      if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "SLP: step doesn't divide the vector-size.\n");
          misalign = NULL_TREE;
        }
    }

  base = build_fold_indirect_ref (base_addr);
  alignment = ssize_int (TYPE_ALIGN (vectype)/BITS_PER_UNIT);

  if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0)
      || !misalign)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Unknown alignment for access: ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, base);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return true;
    }

  if (((TREE_CODE (base) == VAR_DECL || TREE_CODE (base) == RESULT_DECL)
       && tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)),
                                alignment) >= 0)
      || (TREE_CODE (base_addr) == SSA_NAME
          && tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE (
                                                 TREE_TYPE (base_addr)))),
                                   alignment) >= 0)
      || (get_pointer_alignment (base_addr) >= TYPE_ALIGN (vectype)))
    base_aligned = true;
  else
    base_aligned = false;

  if (!base_aligned)
    {
      /* Do not change the alignment of global variables here if
         flag_section_anchors is enabled as we already generated
         RTL for other functions.  Most global variables should
         have been aligned during the IPA increase_alignment pass.  */
      if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
          || (TREE_STATIC (base) && flag_section_anchors))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "can't force alignment of ref: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
              dump_printf (MSG_NOTE, "\n");
            }
          return true;
        }

      /* Force the alignment of the decl.
         NOTE: This is the only change to the code we make during
         the analysis phase, before deciding to vectorize the loop.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
          dump_printf (MSG_NOTE, "\n");
        }

      ((dataref_aux *)dr->aux)->base_decl = base;
      ((dataref_aux *)dr->aux)->base_misaligned = true;
    }

  /* If this is a backward running DR then first access in the larger
     vectype actually is N-1 elements before the address in the DR.
     Adjust misalign accordingly.  */
  if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
    {
      tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
      /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
         otherwise we wouldn't be here.  */
      offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
      /* PLUS because DR_STEP was negative.  */
      misalign = size_binop (PLUS_EXPR, misalign, offset);
    }

  /* Modulo alignment.  */
  misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment);

  if (!tree_fits_uhwi_p (misalign))
    {
      /* Negative or overflowed misalignment value.  */
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unexpected misalign value\n");
      return false;
    }

  SET_DR_MISALIGNMENT (dr, tree_to_uhwi (misalign));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                       "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
    }

  return true;
}
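
/* Worked example (illustrative, hypothetical numbers): for a V4SI access
   whose DR_BASE_ADDRESS is known to be at least 16-byte aligned and whose
   constant offset DR_INIT is 20 bytes, alignment is 16 and
   misalign = 20 mod 16, so DR_MISALIGNMENT is set to 4.  */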

/* Function vect_compute_data_refs_alignment

   Compute the misalignment of data references in the loop.
   Return FALSE if a data reference is found that cannot be vectorized.  */

static bool
vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
                                  bb_vec_info bb_vinfo)
{
  vec<data_reference_p> datarefs;
  struct data_reference *dr;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
        && !vect_compute_data_ref_alignment (dr))
      {
        if (bb_vinfo)
          {
            /* Mark unsupported statement as unvectorizable.  */
            STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
            continue;
          }
        else
          return false;
      }

  return true;
}

/* Function vect_update_misalignment_for_peel

   DR - the data reference whose misalignment is to be adjusted.
   DR_PEEL - the data reference whose misalignment is being made
             zero in the vector loop by the peel.
   NPEEL - the number of iterations in the peel loop if the misalignment
           of DR_PEEL is known at compile time.  */

static void
vect_update_misalignment_for_peel (struct data_reference *dr,
                                   struct data_reference *dr_peel, int npeel)
{
  unsigned int i;
  vec<dr_p> same_align_drs;
  struct data_reference *current_dr;
  int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
  int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
  stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
  stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));

  /* For interleaved data accesses the step in the loop must be multiplied by
     the size of the interleaving group.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
  if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
    dr_peel_size *= GROUP_SIZE (peel_stmt_info);

  /* It can be assumed that the data refs with the same alignment as dr_peel
     are aligned in the vector loop.  */
  same_align_drs
    = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
  FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
    {
      if (current_dr != dr)
        continue;
      gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
                  DR_MISALIGNMENT (dr_peel) / dr_peel_size);
      SET_DR_MISALIGNMENT (dr, 0);
      return;
    }

  if (known_alignment_for_access_p (dr)
      && known_alignment_for_access_p (dr_peel))
    {
      bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
      int misal = DR_MISALIGNMENT (dr);
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      misal += negative ? -npeel * dr_size : npeel * dr_size;
      misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
      SET_DR_MISALIGNMENT (dr, misal);
      return;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
  SET_DR_MISALIGNMENT (dr, -1);
}
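
/* Worked example (illustrative, assuming a 16-byte vector type): if DR is a
   forward int access with misalignment 12 and NPEEL = 2, the peel adds
   2 * 4 bytes, so the new misalignment is (12 + 8) & 15 = 4.  */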

/* Function vect_verify_datarefs_alignment

   Return TRUE if all data references in the loop can be
   handled with respect to alignment.  */

bool
vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  vec<data_reference_p> datarefs;
  struct data_reference *dr;
  enum dr_alignment_support supportable_dr_alignment;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      gimple stmt = DR_STMT (dr);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (!STMT_VINFO_RELEVANT_P (stmt_info))
        continue;

      /* For interleaving, only the alignment of the first access matters.
         Skip statements marked as not vectorizable.  */
      if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
           && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
          || !STMT_VINFO_VECTORIZABLE (stmt_info))
        continue;

      /* Strided loads perform only component accesses, alignment is
         irrelevant for them.  */
      if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
        continue;

      supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
      if (!supportable_dr_alignment)
        {
          if (dump_enabled_p ())
            {
              if (DR_IS_READ (dr))
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: unsupported unaligned load.");
              else
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: unsupported unaligned "
                                 "store.");

              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 DR_REF (dr));
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return false;
        }
      if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Vectorizing an unaligned access.\n");
    }

  return true;
}

/* Given a memory reference EXP return whether its alignment is less
   than its size.  */

static bool
not_size_aligned (tree exp)
{
  if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
    return true;

  return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
          > get_object_alignment (exp));
}

/* Function vector_alignment_reachable_p

   Return true if vector alignment for DR is reachable by peeling
   a few loop iterations.  Return false otherwise.  */

static bool
vector_alignment_reachable_p (struct data_reference *dr)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* For interleaved access we peel only if number of iterations in
         the prolog loop ({VF - misalignment}), is a multiple of the
         number of the interleaved accesses.  */
      int elem_size, mis_in_elements;
      int nelements = TYPE_VECTOR_SUBPARTS (vectype);

      /* FORNOW: handle only known alignment.  */
      if (!known_alignment_for_access_p (dr))
        return false;

      elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
      mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;

      if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
        return false;
    }

  /* If misalignment is known at the compile time then allow peeling
     only if natural alignment is reachable through peeling.  */
  if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
    {
      HOST_WIDE_INT elmsize =
                int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
          dump_printf (MSG_NOTE,
                       ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
        }
      if (DR_MISALIGNMENT (dr) % elmsize)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "data size does not divide the misalignment.\n");
          return false;
        }
    }

  if (!known_alignment_for_access_p (dr))
    {
      tree type = TREE_TYPE (DR_REF (dr));
      bool is_packed = not_size_aligned (DR_REF (dr));
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unknown misalignment, is_packed = %d\n",is_packed);
      if ((TYPE_USER_ALIGN (type) && !is_packed)
          || targetm.vectorize.vector_alignment_reachable (type, is_packed))
        return true;
      else
        return false;
    }

  return true;
}
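
/* Worked example (illustrative): for an interleaved group of GROUP_SIZE = 2
   int accesses in V4SI vectors with a known misalignment of one element,
   the prologue loop would need nelements - mis_in_elements = 3 iterations,
   which is not a multiple of the group size, so peeling cannot reach
   alignment in that case.  */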

/* Calculate the cost of the memory access represented by DR.  */

static void
vect_get_data_access_cost (struct data_reference *dr,
                           unsigned int *inside_cost,
                           unsigned int *outside_cost,
                           stmt_vector_for_cost *body_cost_vec)
{
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int ncopies = vf / nunits;

  if (DR_IS_READ (dr))
    vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
                        NULL, body_cost_vec, false);
  else
    vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_get_data_access_cost: inside_cost = %d, "
                     "outside_cost = %d.\n", *inside_cost, *outside_cost);
}

/* Insert DR into peeling hash table with NPEEL as key.  */

static void
vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
                          int npeel)
{
  struct _vect_peel_info elem, *slot;
  _vect_peel_info **new_slot;
  bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);

  elem.npeel = npeel;
  slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find (&elem);
  if (slot)
    slot->count++;
  else
    {
      slot = XNEW (struct _vect_peel_info);
      slot->npeel = npeel;
      slot->dr = dr;
      slot->count = 1;
      new_slot
        = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find_slot (slot, INSERT);
      *new_slot = slot;
    }

  if (!supportable_dr_alignment
      && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    slot->count += VECT_MAX_COST;
}

/* Traverse peeling hash table to find peeling option that aligns maximum
   number of data accesses.  */

int
vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
                                     _vect_peel_extended_info *max)
{
  vect_peel_info elem = *slot;

  if (elem->count > max->peel_info.count
      || (elem->count == max->peel_info.count
          && max->peel_info.npeel > elem->npeel))
    {
      max->peel_info.npeel = elem->npeel;
      max->peel_info.count = elem->count;
      max->peel_info.dr = elem->dr;
    }

  return 1;
}

/* Traverse peeling hash table and calculate cost for each peeling option.
   Find the one with the lowest cost.  */

int
vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
                                   _vect_peel_extended_info *min)
{
  vect_peel_info elem = *slot;
  int save_misalignment, dummy;
  unsigned int inside_cost = 0, outside_cost = 0, i;
  gimple stmt = DR_STMT (elem->dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct data_reference *dr;
  stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
  int single_iter_cost;

  prologue_cost_vec.create (2);
  body_cost_vec.create (2);
  epilogue_cost_vec.create (2);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      stmt = DR_STMT (dr);
      stmt_info = vinfo_for_stmt (stmt);
      /* For interleaving, only the alignment of the first access
         matters.  */
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
          && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
        continue;

      save_misalignment = DR_MISALIGNMENT (dr);
      vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
      vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
                                 &body_cost_vec);
      SET_DR_MISALIGNMENT (dr, save_misalignment);
    }

  single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
  outside_cost += vect_get_known_peeling_cost (loop_vinfo, elem->npeel,
                                               &dummy, single_iter_cost,
                                               &prologue_cost_vec,
                                               &epilogue_cost_vec);

  /* Prologue and epilogue costs are added to the target model later.
     These costs depend only on the scalar iteration cost, the
     number of peeling iterations finally chosen, and the number of
     misaligned statements.  So discard the information found here.  */
  prologue_cost_vec.release ();
  epilogue_cost_vec.release ();

  if (inside_cost < min->inside_cost
      || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
    {
      min->inside_cost = inside_cost;
      min->outside_cost = outside_cost;
      min->body_cost_vec.release ();
      min->body_cost_vec = body_cost_vec;
      min->peel_info.dr = elem->dr;
      min->peel_info.npeel = elem->npeel;
    }
  else
    body_cost_vec.release ();

  return 1;
}

/* Choose best peeling option by traversing peeling hash table and either
   choosing an option with the lowest cost (if cost model is enabled) or the
   option that aligns as many accesses as possible.  */

static struct data_reference *
vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
                                       unsigned int *npeel,
                                       stmt_vector_for_cost *body_cost_vec)
{
   struct _vect_peel_extended_info res;

   res.peel_info.dr = NULL;
   res.body_cost_vec = stmt_vector_for_cost ();

   if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
     {
       res.inside_cost = INT_MAX;
       res.outside_cost = INT_MAX;
       LOOP_VINFO_PEELING_HTAB (loop_vinfo)
           ->traverse <_vect_peel_extended_info *,
                       vect_peeling_hash_get_lowest_cost> (&res);
     }
   else
     {
       res.peel_info.count = 0;
       LOOP_VINFO_PEELING_HTAB (loop_vinfo)
           ->traverse <_vect_peel_extended_info *,
                       vect_peeling_hash_get_most_frequent> (&res);
     }

   *npeel = res.peel_info.npeel;
   *body_cost_vec = res.body_cost_vec;
   return res.peel_info.dr;
}

/* Function vect_enhance_data_refs_alignment

   This pass will use loop versioning and loop peeling in order to enhance
   the alignment of data references in the loop.

   FOR NOW: we assume that whatever versioning/peeling takes place, only the
   original loop is to be vectorized.  Any other loops that are created by
   the transformations performed in this pass - are not supposed to be
   vectorized.  This restriction will be relaxed.

   This pass will require a cost model to guide it whether to apply peeling
   or versioning or a combination of the two.  For example, the scheme that
   intel uses when given a loop with several memory accesses, is as follows:
   choose one memory access ('p') which alignment you want to force by doing
   peeling.  Then, either (1) generate a loop in which 'p' is aligned and all
   other accesses are not necessarily aligned, or (2) use loop versioning to
   generate one loop in which all accesses are aligned, and another loop in
   which only 'p' is necessarily aligned.

   ("Automatic Intra-Register Vectorization for the Intel Architecture",
   Aart J.C. Bik, Milind Girkar, Paul M. Grey and Ximmin Tian, International
   Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)

   Devising a cost model is the most critical aspect of this work.  It will
   guide us on which access to peel for, whether to use loop versioning, how
   many versions to create, etc.  The cost model will probably consist of
   generic considerations as well as target specific considerations (on
   powerpc for example, misaligned stores are more painful than misaligned
   loads).

   Here are the general steps involved in alignment enhancements:

     -- original loop, before alignment analysis:
        for (i=0; i<N; i++){
          x = q[i];                     # DR_MISALIGNMENT(q) = unknown
          p[i] = y;                     # DR_MISALIGNMENT(p) = unknown
        }

     -- After vect_compute_data_refs_alignment:
        for (i=0; i<N; i++){
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = unknown
        }

     -- Possibility 1: we do loop versioning:
     if (p is aligned) {
        for (i=0; i<N; i++){    # loop 1A
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = 0
        }
     }
     else {
        for (i=0; i<N; i++){    # loop 1B
          x = q[i];                     # DR_MISALIGNMENT(q) = 3
          p[i] = y;                     # DR_MISALIGNMENT(p) = unaligned
        }
     }

     -- Possibility 2: we do loop peeling:
     for (i = 0; i < 3; i++){   # (scalar loop, not to be vectorized).
        x = q[i];
        p[i] = y;
     }
     for (i = 3; i < N; i++){   # loop 2A
        x = q[i];                       # DR_MISALIGNMENT(q) = 0
        p[i] = y;                       # DR_MISALIGNMENT(p) = unknown
     }

     -- Possibility 3: combination of loop peeling and versioning:
     for (i = 0; i < 3; i++){   # (scalar loop, not to be vectorized).
        x = q[i];
        p[i] = y;
     }
     if (p is aligned) {
        for (i = 3; i<N; i++){  # loop 3A
          x = q[i];                     # DR_MISALIGNMENT(q) = 0
          p[i] = y;                     # DR_MISALIGNMENT(p) = 0
        }
     }
     else {
        for (i = 3; i<N; i++){  # loop 3B
          x = q[i];                     # DR_MISALIGNMENT(q) = 0
          p[i] = y;                     # DR_MISALIGNMENT(p) = unaligned
        }
     }

     These loops are later passed to loop_transform to be vectorized.  The
     vectorizer will use the alignment information to guide the transformation
     (whether to generate regular loads/stores, or with special handling for
     misalignment).  */
bool
vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
  vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  enum dr_alignment_support supportable_dr_alignment;
  struct data_reference *dr0 = NULL, *first_store = NULL;
  struct data_reference *dr;
  unsigned int i, j;
  bool do_peeling = false;
  bool do_versioning = false;
  bool stat;
  gimple stmt;
  stmt_vec_info stmt_info;
  unsigned int npeel = 0;
  bool all_misalignments_unknown = true;
  unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned possible_npeel_number = 1;
  tree vectype;
  unsigned int nelements, mis, same_align_drs_max = 0;
  stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost ();

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_enhance_data_refs_alignment ===\n");

  /* While cost model enhancements are expected in the future, the high level
     view of the code at this time is as follows:

     A) If there is a misaligned access then see if peeling to align
        this access can make all data references satisfy
        vect_supportable_dr_alignment.  If so, update data structures
        as needed and return true.

     B) If peeling wasn't possible and there is a data reference with an
        unknown misalignment that does not satisfy vect_supportable_dr_alignment
        then see if loop versioning checks can be used to make all data
        references satisfy vect_supportable_dr_alignment.  If so, update
        data structures as needed and return true.

     C) If neither peeling nor versioning were successful then return false if
        any data reference does not satisfy vect_supportable_dr_alignment.

     D) Return true (all data references satisfy vect_supportable_dr_alignment).

     Note, Possibility 3 above (which is peeling and versioning together) is not
     being done at this time.  */

  /* (1) Peeling to force alignment.  */

  /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
     Considerations:
     + How many accesses will become aligned due to the peeling
     - How many accesses will become unaligned due to the peeling,
       and the cost of misaligned accesses.
     - The cost of peeling (the extra runtime checks, the increase
       in code size).  */

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    {
      stmt = DR_STMT (dr);
      stmt_info = vinfo_for_stmt (stmt);

      if (!STMT_VINFO_RELEVANT_P (stmt_info))
        continue;

      /* For interleaving, only the alignment of the first access
         matters.  */
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
          && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
        continue;

      /* For invariant accesses there is nothing to enhance.  */
      if (integer_zerop (DR_STEP (dr)))
        continue;

      /* Strided loads perform only component accesses, alignment is
         irrelevant for them.  */
      if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
        continue;

      supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
      do_peeling = vector_alignment_reachable_p (dr);
      if (do_peeling)
        {
          if (known_alignment_for_access_p (dr))
            {
              unsigned int npeel_tmp;
              bool negative = tree_int_cst_compare (DR_STEP (dr),
                                                    size_zero_node) < 0;

              /* Save info about DR in the hash table.  */
              if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
                LOOP_VINFO_PEELING_HTAB (loop_vinfo)
                  = new hash_table<peel_info_hasher> (1);

              vectype = STMT_VINFO_VECTYPE (stmt_info);
              nelements = TYPE_VECTOR_SUBPARTS (vectype);
              mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
                                                  TREE_TYPE (DR_REF (dr))));
              npeel_tmp = (negative
                           ? (mis - nelements) : (nelements - mis))
                  & (nelements - 1);

              /* For multiple types, it is possible that the bigger type access
                 will have more than one peeling option.  E.g., a loop with two
                 types: one of size (vector size / 4), and the other one of
                 size (vector size / 8).  Vectorization factor will 8.  If both
                 access are misaligned by 3, the first one needs one scalar
                 iteration to be aligned, and the second one needs 5.  But the
                 the first one will be aligned also by peeling 5 scalar
                 iterations, and in that case both accesses will be aligned.
                 Hence, except for the immediate peeling amount, we also want
                 to try to add full vector size, while we don't exceed
                 vectorization factor.
                 We do this automtically for cost model, since we calculate cost
                 for every peeling option.  */
              if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
                possible_npeel_number = vf / nelements;

              /* Handle the aligned case. We may decide to align some other
                 access, making DR unaligned.  */
              if (DR_MISALIGNMENT (dr) == 0)
                {
                  npeel_tmp = 0;
                  if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
                    possible_npeel_number++;
                }

              for (j = 0; j < possible_npeel_number; j++)
                {
                  gcc_assert (npeel_tmp <= vf);
                  vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
                  npeel_tmp += nelements;
                }

              all_misalignments_unknown = false;
              /* Data-ref that was chosen for the case that all the
                 misalignments are unknown is not relevant anymore, since we
                 have a data-ref with known alignment.  */
              dr0 = NULL;
            }
          else
            {
              /* If we don't know any misalignment values, we prefer
                 peeling for data-ref that has the maximum number of data-refs
                 with the same alignment, unless the target prefers to align
                 stores over load.  */
              if (all_misalignments_unknown)
                {
                  unsigned same_align_drs
                    = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
                  if (!dr0
                      || same_align_drs_max < same_align_drs)
                    {
                      same_align_drs_max = same_align_drs;
                      dr0 = dr;
                    }
                  /* For data-refs with the same number of related
                     accesses prefer the one where the misalign
                     computation will be invariant in the outermost loop.  */
                  else if (same_align_drs_max == same_align_drs)
                    {
                      struct loop *ivloop0, *ivloop;
                      ivloop0 = outermost_invariant_loop_for_expr
                          (loop, DR_BASE_ADDRESS (dr0));
                      ivloop = outermost_invariant_loop_for_expr
                          (loop, DR_BASE_ADDRESS (dr));
                      if ((ivloop && !ivloop0)
                          || (ivloop && ivloop0
                              && flow_loop_nested_p (ivloop, ivloop0)))
                        dr0 = dr;
                    }

                  if (!first_store && DR_IS_WRITE (dr))
                    first_store = dr;
                }

              /* If there are both known and unknown misaligned accesses in the
                 loop, we choose peeling amount according to the known
                 accesses.  */
              if (!supportable_dr_alignment)
                {
                  dr0 = dr;
                  if (!first_store && DR_IS_WRITE (dr))
                    first_store = dr;
                }
            }
        }
      else
        {
          if (!aligned_access_p (dr))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "vector alignment may not be reachable\n");
              break;
            }
        }
    }

  /* Check if we can possibly peel the loop.  */
  if (!vect_can_advance_ivs_p (loop_vinfo)
      || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
    do_peeling = false;

  if (do_peeling && all_misalignments_unknown
      && vect_supportable_dr_alignment (dr0, false))
    {
      /* Check if the target requires to prefer stores over loads, i.e., if
         misaligned stores are more expensive than misaligned loads (taking
         drs with same alignment into account).  */
      if (first_store && DR_IS_READ (dr0))
        {
          unsigned int load_inside_cost = 0, load_outside_cost = 0;
          unsigned int store_inside_cost = 0, store_outside_cost = 0;
          unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
          unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
          stmt_vector_for_cost dummy;
          dummy.create (2);

          vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost,
                                     &dummy);
          vect_get_data_access_cost (first_store, &store_inside_cost,
                                     &store_outside_cost, &dummy);

          dummy.release ();

          /* Calculate the penalty for leaving FIRST_STORE unaligned (by
             aligning the load DR0).  */
          load_inside_penalty = store_inside_cost;
          load_outside_penalty = store_outside_cost;
          for (i = 0;
               STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
                           DR_STMT (first_store))).iterate (i, &dr);
               i++)
            if (DR_IS_READ (dr))
              {
                load_inside_penalty += load_inside_cost;
                load_outside_penalty += load_outside_cost;
              }
            else
              {
                load_inside_penalty += store_inside_cost;
                load_outside_penalty += store_outside_cost;
              }

          /* Calculate the penalty for leaving DR0 unaligned (by
             aligning the FIRST_STORE).  */
          store_inside_penalty = load_inside_cost;
          store_outside_penalty = load_outside_cost;
          for (i = 0;
               STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
                           DR_STMT (dr0))).iterate (i, &dr);
               i++)
            if (DR_IS_READ (dr))
              {
                store_inside_penalty += load_inside_cost;
                store_outside_penalty += load_outside_cost;
              }
            else
              {
                store_inside_penalty += store_inside_cost;
                store_outside_penalty += store_outside_cost;
              }

          if (load_inside_penalty > store_inside_penalty
              || (load_inside_penalty == store_inside_penalty
                  && load_outside_penalty > store_outside_penalty))
            dr0 = first_store;
        }

      /* In case there are only loads with different unknown misalignments, use
         peeling only if it may help to align other accesses in the loop.  */
      if (!first_store
          && !STMT_VINFO_SAME_ALIGN_REFS (
                  vinfo_for_stmt (DR_STMT (dr0))).length ()
          && vect_supportable_dr_alignment (dr0, false)
              != dr_unaligned_supported)
        do_peeling = false;
    }

  if (do_peeling && !dr0)
    {
      /* Peeling is possible, but there is no data access that is not supported
         unless aligned.  So we try to choose the best possible peeling.  */

      /* We should get here only if there are drs with known misalignment.  */
      gcc_assert (!all_misalignments_unknown);

      /* Choose the best peeling from the hash table.  */
      dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel,
                                                   &body_cost_vec);
      if (!dr0 || !npeel)
        do_peeling = false;
    }

  if (do_peeling)
    {
      stmt = DR_STMT (dr0);
      stmt_info = vinfo_for_stmt (stmt);
      vectype = STMT_VINFO_VECTYPE (stmt_info);
      nelements = TYPE_VECTOR_SUBPARTS (vectype);

      if (known_alignment_for_access_p (dr0))
        {
          bool negative = tree_int_cst_compare (DR_STEP (dr0),
                                                size_zero_node) < 0;
          if (!npeel)
            {
              /* Since it's known at compile time, compute the number of
                 iterations in the peeled loop (the peeling factor) for use in
                 updating DR_MISALIGNMENT values.  The peeling factor is the
                 vectorization factor minus the misalignment as an element
                 count.  */
              mis = DR_MISALIGNMENT (dr0);
              mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
              npeel = ((negative ? mis - nelements : nelements - mis)
                       & (nelements - 1));
            }

          /* For interleaved data access every iteration accesses all the
             members of the group, therefore we divide the number of iterations
             by the group size.  */
          stmt_info = vinfo_for_stmt (DR_STMT (dr0));
          if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
            npeel /= GROUP_SIZE (stmt_info);

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Try peeling by %d\n", npeel);
        }

      /* Ensure that all data refs can be vectorized after the peel.  */
      FOR_EACH_VEC_ELT (datarefs, i, dr)
        {
          int save_misalignment;

          if (dr == dr0)
            continue;

          stmt = DR_STMT (dr);
          stmt_info = vinfo_for_stmt (stmt);
          /* For interleaving, only the alignment of the first access
             matters.  */
          if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
              && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
            continue;

          /* Strided loads perform only component accesses, alignment is
             irrelevant for them.  */
          if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
            continue;

          save_misalignment = DR_MISALIGNMENT (dr);
          vect_update_misalignment_for_peel (dr, dr0, npeel);
          supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
          SET_DR_MISALIGNMENT (dr, save_misalignment);

          if (!supportable_dr_alignment)
            {
              do_peeling = false;
              break;
            }
        }

      if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
        {
          stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
          if (!stat)
            do_peeling = false;
          else
            {
              body_cost_vec.release ();
              return stat;
            }
        }

      if (do_peeling)
        {
          unsigned max_allowed_peel
            = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
          if (max_allowed_peel != (unsigned)-1)
            {
              unsigned max_peel = npeel;
              if (max_peel == 0)
                {
                  gimple dr_stmt = DR_STMT (dr0);
                  stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt);
                  tree vtype = STMT_VINFO_VECTYPE (vinfo);
                  max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1;
                }
              if (max_peel > max_allowed_peel)
                {
                  do_peeling = false;
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                        "Disable peeling, max peels reached: %d\n", max_peel);
                }
            }
        }

      if (do_peeling)
        {
          stmt_info_for_cost *si;
          void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

          /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
             If the misalignment of DR_i is identical to that of dr0 then set
             DR_MISALIGNMENT (DR_i) to zero.  If the misalignment of DR_i and
             dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
             by the peeling factor times the element size of DR_i (MOD the
             vectorization factor times the size).  Otherwise, the
             misalignment of DR_i must be set to unknown.  */
          FOR_EACH_VEC_ELT (datarefs, i, dr)
            if (dr != dr0)
              vect_update_misalignment_for_peel (dr, dr0, npeel);

          LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
          if (npeel)
            LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
          else
            LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
              = DR_MISALIGNMENT (dr0);
          SET_DR_MISALIGNMENT (dr0, 0);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Alignment of access forced using peeling.\n");
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Peeling for alignment will be applied.\n");
            }
          /* We've delayed passing the inside-loop peeling costs to the
             target cost model until we were sure peeling would happen.
             Do so now.  */
          if (body_cost_vec.exists ())
            {
              FOR_EACH_VEC_ELT (body_cost_vec, i, si)
                {
                  struct _stmt_vec_info *stmt_info
                    = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
                  (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                                        si->misalign, vect_body);
                }
              body_cost_vec.release ();
            }

          stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
          gcc_assert (stat);
          return stat;
        }
    }

  body_cost_vec.release ();

  /* (2) Versioning to force alignment.  */

  /* Try versioning if:
     1) optimize loop for speed
     2) there is at least one unsupported misaligned data ref with an unknown
        misalignment, and
     3) all misaligned data refs with a known misalignment are supported, and
     4) the number of runtime alignment checks is within reason.  */

  do_versioning =
        optimize_loop_nest_for_speed_p (loop)
        && (!loop->inner); /* FORNOW */

  if (do_versioning)
    {
      FOR_EACH_VEC_ELT (datarefs, i, dr)
        {
          stmt = DR_STMT (dr);
          stmt_info = vinfo_for_stmt (stmt);

          /* For interleaving, only the alignment of the first access
             matters.  */
          if (aligned_access_p (dr)
              || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
                  && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
            continue;

          /* Strided loads perform only component accesses, alignment is
             irrelevant for them.  */
          if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
            continue;

          supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);

          if (!supportable_dr_alignment)
            {
              int mask;

              if (known_alignment_for_access_p (dr)
                  || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
                     >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
                {
                  do_versioning = false;
                  break;
                }

              stmt = DR_STMT (dr);
              vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
              gcc_assert (vectype);

              /* The rightmost bits of an aligned address must be zeros.
                 Construct the mask needed for this test.  For example,
                 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
                 mask must be 15 = 0xf. */
              mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;

              /* FORNOW: use the same mask to test all potentially unaligned
                 references in the loop.  The vectorizer currently supports
                 a single vector size, see the reference to
                 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
                 vectorization factor is computed.  */
              gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
                          || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
              LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
              LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
                      DR_STMT (dr));
            }
        }

      /* Versioning requires at least one misaligned data reference.  */
      if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
        do_versioning = false;
      else if (!do_versioning)
        LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
    }

  if (do_versioning)
    {
      vec<gimple> may_misalign_stmts
        = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);

      /* It can now be assumed that the data references in the statements
         in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
         of the loop being vectorized.  */
      FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
        {
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
          dr = STMT_VINFO_DATA_REF (stmt_info);
          SET_DR_MISALIGNMENT (dr, 0);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Alignment of access forced using versioning.\n");
        }

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Versioning for alignment will be applied.\n");

      /* Peeling and versioning can't be done together at this time.  */
      gcc_assert (! (do_peeling && do_versioning));

      stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
      gcc_assert (stat);
      return stat;
    }

  /* This point is reached if neither peeling nor versioning is being done.  */
  gcc_assert (! (do_peeling || do_versioning));

  stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
  return stat;
}
1882 /* Function vect_find_same_alignment_drs.
1884 Update group and alignment relations according to the chosen
1885 vectorization factor. */
static void
vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
			      loop_vec_info loop_vinfo)
{
  unsigned int i;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  struct data_reference *dra = DDR_A (ddr);
  struct data_reference *drb = DDR_B (ddr);
  stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
  stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
  int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
  int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
  lambda_vector dist_v;
  unsigned int loop_depth;
1903 if (DDR_ARE_DEPENDENT (ddr
) == chrec_known
)
1909 if (DDR_ARE_DEPENDENT (ddr
) == chrec_dont_know
)
1912 /* Loop-based vectorization and known data dependence. */
1913 if (DDR_NUM_DIST_VECTS (ddr
) == 0)
1916 /* Data-dependence analysis reports a distance vector of zero
1917 for data-references that overlap only in the first iteration
	 but have steps of different sign (see PR45764).
1919 So as a sanity check require equal DR_STEP. */
1920 if (!operand_equal_p (DR_STEP (dra
), DR_STEP (drb
), 0))
1923 loop_depth
= index_in_loop_nest (loop
->num
, DDR_LOOP_NEST (ddr
));
1924 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr
), i
, dist_v
)
1926 int dist
= dist_v
[loop_depth
];
1928 if (dump_enabled_p ())
1929 dump_printf_loc (MSG_NOTE
, vect_location
,
1930 "dependence distance = %d.\n", dist
);
1932 /* Same loop iteration. */
1934 || (dist
% vectorization_factor
== 0 && dra_size
== drb_size
))
1936 /* Two references with distance zero have the same alignment. */
1937 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a
).safe_push (drb
);
1938 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b
).safe_push (dra
);
1939 if (dump_enabled_p ())
1941 dump_printf_loc (MSG_NOTE
, vect_location
,
1942 "accesses have the same alignment.\n");
1943 dump_printf (MSG_NOTE
,
1944 "dependence distance modulo vf == 0 between ");
1945 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, DR_REF (dra
));
1946 dump_printf (MSG_NOTE
, " and ");
1947 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, DR_REF (drb
));
1948 dump_printf (MSG_NOTE
, "\n");
1955 /* Function vect_analyze_data_refs_alignment
1957 Analyze the alignment of the data-references in the loop.
1958 Return FALSE if a data reference is found that cannot be vectorized. */
1961 vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo
,
1962 bb_vec_info bb_vinfo
)
1964 if (dump_enabled_p ())
1965 dump_printf_loc (MSG_NOTE
, vect_location
,
1966 "=== vect_analyze_data_refs_alignment ===\n");
1968 /* Mark groups of data references with same alignment using
1969 data dependence information. */
1972 vec
<ddr_p
> ddrs
= LOOP_VINFO_DDRS (loop_vinfo
);
1973 struct data_dependence_relation
*ddr
;
1976 FOR_EACH_VEC_ELT (ddrs
, i
, ddr
)
1977 vect_find_same_alignment_drs (ddr
, loop_vinfo
);
1980 if (!vect_compute_data_refs_alignment (loop_vinfo
, bb_vinfo
))
1982 if (dump_enabled_p ())
1983 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1984 "not vectorized: can't calculate alignment "
1993 /* Analyze groups of accesses: check that DR belongs to a group of
1994 accesses of legal size, step, etc. Detect gaps, single element
1995 interleaving, and other special cases. Set grouped access info.
1996 Collect groups of strided stores for further use in SLP analysis. */
static bool
vect_analyze_group_access (struct data_reference *dr)
{
  tree step = DR_STEP (dr);
  tree scalar_type = TREE_TYPE (DR_REF (dr));
  HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
  HOST_WIDE_INT groupsize, last_accessed_element = 1;
  bool slp_impossible = false;
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
     size of the interleaving group (including gaps).  */
  groupsize = absu_hwi (dr_step) / type_size;
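  /* For example (illustrative): for int accesses a[4*i] and a[4*i+2] the
     scalar step is 16 bytes and the element size is 4 bytes, so GROUPSIZE
     is 4 even though only two of the four slots are actually accessed;
     the remaining slots are the gaps of the group.  */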
  /* A non-consecutive access is possible only if it is part of an interleaving group.  */
2021 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)))
      /* Check if this DR is part of an interleaving group, and is a single
	 element of the group that is accessed in the loop.  */
2026 /* Gaps are supported only for loads. STEP must be a multiple of the type
2027 size. The size of the group must be a power of 2. */
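      /* For example (illustrative): a lone load from a[2*i] has a step of
	 8 bytes and a type size of 4 bytes, giving a group size of 2, which
	 is a power of two, so it is accepted as single element interleaving
	 with a gap of one element.  */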
2029 && (dr_step
% type_size
) == 0
2031 && exact_log2 (groupsize
) != -1)
2033 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) = stmt
;
2034 GROUP_SIZE (vinfo_for_stmt (stmt
)) = groupsize
;
2035 if (dump_enabled_p ())
2037 dump_printf_loc (MSG_NOTE
, vect_location
,
2038 "Detected single element interleaving ");
2039 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, DR_REF (dr
));
2040 dump_printf (MSG_NOTE
, " step ");
2041 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, step
);
2042 dump_printf (MSG_NOTE
, "\n");
2047 if (dump_enabled_p ())
2048 dump_printf_loc (MSG_NOTE
, vect_location
,
2049 "Data access with gaps requires scalar "
2053 if (dump_enabled_p ())
2054 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2055 "Peeling for outer loop is not"
2060 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo
) = true;
2066 if (dump_enabled_p ())
2068 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2069 "not consecutive access ");
2070 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
2071 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
2076 /* Mark the statement as unvectorizable. */
2077 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr
))) = false;
2084 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) == stmt
)
2086 /* First stmt in the interleaving chain. Check the chain. */
2087 gimple next
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt
));
2088 struct data_reference
*data_ref
= dr
;
2089 unsigned int count
= 1;
2090 tree prev_init
= DR_INIT (data_ref
);
2092 HOST_WIDE_INT diff
, gaps
= 0;
2093 unsigned HOST_WIDE_INT count_in_bytes
;
2097 /* Skip same data-refs. In case that two or more stmts share
2098 data-ref (supported only for loads), we vectorize only the first
	     stmt, and the rest get their vectorized loads from the first one.  */
2101 if (!tree_int_cst_compare (DR_INIT (data_ref
),
2102 DR_INIT (STMT_VINFO_DATA_REF (
2103 vinfo_for_stmt (next
)))))
2105 if (DR_IS_WRITE (data_ref
))
2107 if (dump_enabled_p ())
2108 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2109 "Two store stmts share the same dr.\n");
2113 /* For load use the same data-ref load. */
2114 GROUP_SAME_DR_STMT (vinfo_for_stmt (next
)) = prev
;
2117 next
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next
));
2122 data_ref
= STMT_VINFO_DATA_REF (vinfo_for_stmt (next
));
2124 /* All group members have the same STEP by construction. */
2125 gcc_checking_assert (operand_equal_p (DR_STEP (data_ref
), step
, 0));
2127 /* Check that the distance between two accesses is equal to the type
2128 size. Otherwise, we have gaps. */
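	  /* For example (illustrative): for int loads from a[3*i] and
	     a[3*i+2], DR_INIT differs by 8 bytes, so DIFF is 2 and there
	     is a gap of one element between the two group members.  */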
2129 diff
= (TREE_INT_CST_LOW (DR_INIT (data_ref
))
2130 - TREE_INT_CST_LOW (prev_init
)) / type_size
;
2133 /* FORNOW: SLP of accesses with gaps is not supported. */
2134 slp_impossible
= true;
2135 if (DR_IS_WRITE (data_ref
))
2137 if (dump_enabled_p ())
2138 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2139 "interleaved store with gaps\n");
2146 last_accessed_element
+= diff
;
2148 /* Store the gap from the previous member of the group. If there is no
2149 gap in the access, GROUP_GAP is always 1. */
2150 GROUP_GAP (vinfo_for_stmt (next
)) = diff
;
2152 prev_init
= DR_INIT (data_ref
);
2153 next
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next
));
2154 /* Count the number of data-refs in the chain. */
2158 /* COUNT is the number of accesses found, we multiply it by the size of
2159 the type to get COUNT_IN_BYTES. */
2160 count_in_bytes
= type_size
* count
;
2162 /* Check that the size of the interleaving (including gaps) is not
2163 greater than STEP. */
2165 && absu_hwi (dr_step
) < count_in_bytes
+ gaps
* type_size
)
2167 if (dump_enabled_p ())
2169 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2170 "interleaving size is greater than step for ");
2171 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
2173 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
2178 /* Check that the size of the interleaving is equal to STEP for stores,
2179 i.e., that there are no gaps. */
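      /* For example (illustrative): four int stores per iteration with a
	 step of 16 bytes give COUNT_IN_BYTES == 16 == |STEP|; any smaller
	 count means the group has a gap, which is not supported for
	 stores.  */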
2181 && absu_hwi (dr_step
) != count_in_bytes
)
2183 if (DR_IS_READ (dr
))
2185 slp_impossible
= true;
2186 /* There is a gap after the last load in the group. This gap is a
2187 difference between the groupsize and the number of elements.
2188 When there is no gap, this difference should be 0. */
2189 GROUP_GAP (vinfo_for_stmt (stmt
)) = groupsize
- count
;
2193 if (dump_enabled_p ())
2194 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2195 "interleaved store with gaps\n");
2200 /* Check that STEP is a multiple of type size. */
2202 && (dr_step
% type_size
) != 0)
2204 if (dump_enabled_p ())
2206 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2207 "step is not a multiple of type size: step ");
2208 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, step
);
2209 dump_printf (MSG_MISSED_OPTIMIZATION
, " size ");
2210 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
2211 TYPE_SIZE_UNIT (scalar_type
));
2212 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
2220 GROUP_SIZE (vinfo_for_stmt (stmt
)) = groupsize
;
2221 if (dump_enabled_p ())
2222 dump_printf_loc (MSG_NOTE
, vect_location
,
2223 "Detected interleaving of size %d\n", (int)groupsize
);
2225 /* SLP: create an SLP data structure for every interleaving group of
2226 stores for further analysis in vect_analyse_slp. */
2227 if (DR_IS_WRITE (dr
) && !slp_impossible
)
2230 LOOP_VINFO_GROUPED_STORES (loop_vinfo
).safe_push (stmt
);
2232 BB_VINFO_GROUPED_STORES (bb_vinfo
).safe_push (stmt
);
2235 /* There is a gap in the end of the group. */
2236 if (groupsize
- last_accessed_element
> 0 && loop_vinfo
)
2238 if (dump_enabled_p ())
2239 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2240 "Data access with gaps requires scalar "
2244 if (dump_enabled_p ())
2245 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2246 "Peeling for outer loop is not supported\n");
2250 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo
) = true;
2258 /* Analyze the access pattern of the data-reference DR.
2259 In case of non-consecutive accesses call vect_analyze_group_access() to
2260 analyze groups of accesses. */
static bool
vect_analyze_data_ref_access (struct data_reference *dr)
{
  tree step = DR_STEP (dr);
  tree scalar_type = TREE_TYPE (DR_REF (dr));
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);
2275 if (loop_vinfo
&& !step
)
2277 if (dump_enabled_p ())
2278 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2279 "bad data-ref access in loop\n");
2283 /* Allow invariant loads in not nested loops. */
2284 if (loop_vinfo
&& integer_zerop (step
))
2286 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) = NULL
;
2287 if (nested_in_vect_loop_p (loop
, stmt
))
2289 if (dump_enabled_p ())
2290 dump_printf_loc (MSG_NOTE
, vect_location
,
2291 "zero step in inner loop of nest\n");
2294 return DR_IS_READ (dr
);
2297 if (loop
&& nested_in_vect_loop_p (loop
, stmt
))
2299 /* Interleaved accesses are not yet supported within outer-loop
2300 vectorization for references in the inner-loop. */
2301 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) = NULL
;
2303 /* For the rest of the analysis we use the outer-loop step. */
2304 step
= STMT_VINFO_DR_STEP (stmt_info
);
2305 if (integer_zerop (step
))
2307 if (dump_enabled_p ())
2308 dump_printf_loc (MSG_NOTE
, vect_location
,
2309 "zero step in outer loop.\n");
2310 if (DR_IS_READ (dr
))
2318 if (TREE_CODE (step
) == INTEGER_CST
)
2320 HOST_WIDE_INT dr_step
= TREE_INT_CST_LOW (step
);
2321 if (!tree_int_cst_compare (step
, TYPE_SIZE_UNIT (scalar_type
))
2323 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type
), -dr_step
)))
2325 /* Mark that it is not interleaving. */
2326 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) = NULL
;
2331 if (loop
&& nested_in_vect_loop_p (loop
, stmt
))
2333 if (dump_enabled_p ())
2334 dump_printf_loc (MSG_NOTE
, vect_location
,
2335 "grouped access in outer loop.\n");
2339 /* Assume this is a DR handled by non-constant strided load case. */
2340 if (TREE_CODE (step
) != INTEGER_CST
)
2341 return STMT_VINFO_STRIDE_LOAD_P (stmt_info
);
2343 /* Not consecutive access - check if it's a part of interleaving group. */
2344 return vect_analyze_group_access (dr
);
2349 /* A helper function used in the comparator function to sort data
2350 references. T1 and T2 are two data references to be compared.
2351 The function returns -1, 0, or 1. */
2354 compare_tree (tree t1
, tree t2
)
2357 enum tree_code code
;
2368 if (TREE_CODE (t1
) != TREE_CODE (t2
))
2369 return TREE_CODE (t1
) < TREE_CODE (t2
) ? -1 : 1;
2371 code
= TREE_CODE (t1
);
2374 /* For const values, we can just use hash values for comparisons. */
2382 hashval_t h1
= iterative_hash_expr (t1
, 0);
2383 hashval_t h2
= iterative_hash_expr (t2
, 0);
2385 return h1
< h2
? -1 : 1;
2390 cmp
= compare_tree (SSA_NAME_VAR (t1
), SSA_NAME_VAR (t2
));
2394 if (SSA_NAME_VERSION (t1
) != SSA_NAME_VERSION (t2
))
2395 return SSA_NAME_VERSION (t1
) < SSA_NAME_VERSION (t2
) ? -1 : 1;
2399 tclass
= TREE_CODE_CLASS (code
);
2401 /* For var-decl, we could compare their UIDs. */
2402 if (tclass
== tcc_declaration
)
2404 if (DECL_UID (t1
) != DECL_UID (t2
))
2405 return DECL_UID (t1
) < DECL_UID (t2
) ? -1 : 1;
2409 /* For expressions with operands, compare their operands recursively. */
2410 for (i
= TREE_OPERAND_LENGTH (t1
) - 1; i
>= 0; --i
)
2412 cmp
= compare_tree (TREE_OPERAND (t1
, i
), TREE_OPERAND (t2
, i
));
2422 /* Compare two data-references DRA and DRB to group them into chunks
2423 suitable for grouping. */
2426 dr_group_sort_cmp (const void *dra_
, const void *drb_
)
2428 data_reference_p dra
= *(data_reference_p
*)const_cast<void *>(dra_
);
2429 data_reference_p drb
= *(data_reference_p
*)const_cast<void *>(drb_
);
2432 /* Stabilize sort. */
2436 /* Ordering of DRs according to base. */
2437 if (!operand_equal_p (DR_BASE_ADDRESS (dra
), DR_BASE_ADDRESS (drb
), 0))
2439 cmp
= compare_tree (DR_BASE_ADDRESS (dra
), DR_BASE_ADDRESS (drb
));
2444 /* And according to DR_OFFSET. */
2445 if (!dr_equal_offsets_p (dra
, drb
))
2447 cmp
= compare_tree (DR_OFFSET (dra
), DR_OFFSET (drb
));
2452 /* Put reads before writes. */
2453 if (DR_IS_READ (dra
) != DR_IS_READ (drb
))
2454 return DR_IS_READ (dra
) ? -1 : 1;
2456 /* Then sort after access size. */
2457 if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra
))),
2458 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb
))), 0))
2460 cmp
= compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra
))),
2461 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb
))));
2466 /* And after step. */
2467 if (!operand_equal_p (DR_STEP (dra
), DR_STEP (drb
), 0))
2469 cmp
= compare_tree (DR_STEP (dra
), DR_STEP (drb
));
2474 /* Then sort after DR_INIT. In case of identical DRs sort after stmt UID. */
2475 cmp
= tree_int_cst_compare (DR_INIT (dra
), DR_INIT (drb
));
2477 return gimple_uid (DR_STMT (dra
)) < gimple_uid (DR_STMT (drb
)) ? -1 : 1;
2481 /* Function vect_analyze_data_ref_accesses.
2483 Analyze the access pattern of all the data references in the loop.
2485 FORNOW: the only access pattern that is considered vectorizable is a
2486 simple step 1 (consecutive) access.
2488 FORNOW: handle only arrays and pointer accesses. */
2491 vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo
, bb_vec_info bb_vinfo
)
2494 vec
<data_reference_p
> datarefs
;
2495 struct data_reference
*dr
;
2497 if (dump_enabled_p ())
2498 dump_printf_loc (MSG_NOTE
, vect_location
,
2499 "=== vect_analyze_data_ref_accesses ===\n");
2502 datarefs
= LOOP_VINFO_DATAREFS (loop_vinfo
);
2504 datarefs
= BB_VINFO_DATAREFS (bb_vinfo
);
2506 if (datarefs
.is_empty ())
2509 /* Sort the array of datarefs to make building the interleaving chains
     linear.  Don't modify the original vector's order; it is needed for
2511 determining what dependencies are reversed. */
2512 vec
<data_reference_p
> datarefs_copy
= datarefs
.copy ();
2513 datarefs_copy
.qsort (dr_group_sort_cmp
);
2515 /* Build the interleaving chains. */
2516 for (i
= 0; i
< datarefs_copy
.length () - 1;)
2518 data_reference_p dra
= datarefs_copy
[i
];
2519 stmt_vec_info stmtinfo_a
= vinfo_for_stmt (DR_STMT (dra
));
2520 stmt_vec_info lastinfo
= NULL
;
2521 for (i
= i
+ 1; i
< datarefs_copy
.length (); ++i
)
2523 data_reference_p drb
= datarefs_copy
[i
];
2524 stmt_vec_info stmtinfo_b
= vinfo_for_stmt (DR_STMT (drb
));
	  /* ??? Imperfect sorting (non-compatible types, non-modulo
	     accesses, same accesses) can lead to a group being artificially
	     split here, as we don't just skip over those.  If it really
	     matters we can push those to a worklist and re-iterate
	     over them.  Then we can just skip ahead to the next DR here.  */
2532 /* Check that the data-refs have same first location (except init)
2533 and they are both either store or load (not load and store). */
2534 if (DR_IS_READ (dra
) != DR_IS_READ (drb
)
2535 || !operand_equal_p (DR_BASE_ADDRESS (dra
),
2536 DR_BASE_ADDRESS (drb
), 0)
2537 || !dr_equal_offsets_p (dra
, drb
))
2540 /* Check that the data-refs have the same constant size and step. */
2541 tree sza
= TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra
)));
2542 tree szb
= TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb
)));
2543 if (!tree_fits_uhwi_p (sza
)
2544 || !tree_fits_uhwi_p (szb
)
2545 || !tree_int_cst_equal (sza
, szb
)
2546 || !tree_fits_shwi_p (DR_STEP (dra
))
2547 || !tree_fits_shwi_p (DR_STEP (drb
))
2548 || !tree_int_cst_equal (DR_STEP (dra
), DR_STEP (drb
)))
2551 /* Do not place the same access in the interleaving chain twice. */
2552 if (tree_int_cst_compare (DR_INIT (dra
), DR_INIT (drb
)) == 0)
2555 /* Check the types are compatible.
2556 ??? We don't distinguish this during sorting. */
2557 if (!types_compatible_p (TREE_TYPE (DR_REF (dra
)),
2558 TREE_TYPE (DR_REF (drb
))))
2561 /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
2562 HOST_WIDE_INT init_a
= TREE_INT_CST_LOW (DR_INIT (dra
));
2563 HOST_WIDE_INT init_b
= TREE_INT_CST_LOW (DR_INIT (drb
));
2564 gcc_assert (init_a
< init_b
);
2566 /* If init_b == init_a + the size of the type * k, we have an
2567 interleaving, and DRA is accessed before DRB. */
2568 HOST_WIDE_INT type_size_a
= tree_to_uhwi (sza
);
2569 if ((init_b
- init_a
) % type_size_a
!= 0)
2572 /* The step (if not zero) is greater than the difference between
	     data-refs' inits.  This splits groups into suitable sizes.  */
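	  /* For example (illustrative): with int accesses at byte offsets
	     0 and 16 and a step of 16, INIT_B - INIT_A equals the step,
	     so the second access belongs to the next iteration's group
	     and must start a new chain rather than extend this one.  */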
2574 HOST_WIDE_INT step
= tree_to_shwi (DR_STEP (dra
));
2575 if (step
!= 0 && step
<= (init_b
- init_a
))
2578 if (dump_enabled_p ())
2580 dump_printf_loc (MSG_NOTE
, vect_location
,
2581 "Detected interleaving ");
2582 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, DR_REF (dra
));
2583 dump_printf (MSG_NOTE
, " and ");
2584 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, DR_REF (drb
));
2585 dump_printf (MSG_NOTE
, "\n");
2588 /* Link the found element into the group list. */
2589 if (!GROUP_FIRST_ELEMENT (stmtinfo_a
))
2591 GROUP_FIRST_ELEMENT (stmtinfo_a
) = DR_STMT (dra
);
2592 lastinfo
= stmtinfo_a
;
2594 GROUP_FIRST_ELEMENT (stmtinfo_b
) = DR_STMT (dra
);
2595 GROUP_NEXT_ELEMENT (lastinfo
) = DR_STMT (drb
);
2596 lastinfo
= stmtinfo_b
;
2600 FOR_EACH_VEC_ELT (datarefs_copy
, i
, dr
)
2601 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr
)))
2602 && !vect_analyze_data_ref_access (dr
))
2604 if (dump_enabled_p ())
2605 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2606 "not vectorized: complicated access pattern.\n");
2610 /* Mark the statement as not vectorizable. */
2611 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr
))) = false;
2616 datarefs_copy
.release ();
2621 datarefs_copy
.release ();
/* Operator == between two dr_with_seg_len objects.

   This equality operator is used to make sure two data refs
   are the same one so that we will consider combining the
   aliasing checks of those two pairs of data dependent data
   refs.  */

static bool
operator == (const dr_with_seg_len &d1,
	     const dr_with_seg_len &d2)
{
  return operand_equal_p (DR_BASE_ADDRESS (d1.dr),
			  DR_BASE_ADDRESS (d2.dr), 0)
	 && compare_tree (d1.offset, d2.offset) == 0
	 && compare_tree (d1.seg_len, d2.seg_len) == 0;
}
2643 /* Function comp_dr_with_seg_len_pair.
2645 Comparison function for sorting objects of dr_with_seg_len_pair_t
2646 so that we can combine aliasing checks in one scan. */
2649 comp_dr_with_seg_len_pair (const void *p1_
, const void *p2_
)
2651 const dr_with_seg_len_pair_t
* p1
= (const dr_with_seg_len_pair_t
*) p1_
;
2652 const dr_with_seg_len_pair_t
* p2
= (const dr_with_seg_len_pair_t
*) p2_
;
2654 const dr_with_seg_len
&p11
= p1
->first
,
  /* For DR pairs (a, b) and (c, d), we only consider merging the alias checks
     if a and c have the same base address and step, and b and d have the same
     address and step.  Therefore, if either a&c or b&d don't have the same
     address and step, we don't care about the order of those two pairs after
     sorting.  */
2665 if ((comp_res
= compare_tree (DR_BASE_ADDRESS (p11
.dr
),
2666 DR_BASE_ADDRESS (p21
.dr
))) != 0)
2668 if ((comp_res
= compare_tree (DR_BASE_ADDRESS (p12
.dr
),
2669 DR_BASE_ADDRESS (p22
.dr
))) != 0)
2671 if ((comp_res
= compare_tree (DR_STEP (p11
.dr
), DR_STEP (p21
.dr
))) != 0)
2673 if ((comp_res
= compare_tree (DR_STEP (p12
.dr
), DR_STEP (p22
.dr
))) != 0)
2675 if ((comp_res
= compare_tree (p11
.offset
, p21
.offset
)) != 0)
2677 if ((comp_res
= compare_tree (p12
.offset
, p22
.offset
)) != 0)
2683 template <class T
> static void
2691 /* Function vect_vfa_segment_size.
   Create an expression that computes the size of the segment
   that will be accessed for a data reference.  The function takes into
   account that realignment loads may access one more vector.

   Input:
     DR: The data reference.
     LENGTH_FACTOR: segment length to consider.

   Return an expression whose value is the size of the segment which will
   be accessed by DR.  */
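/* For example (illustrative): for an int access with DR_STEP of 4 bytes
   and LENGTH_FACTOR equal to the vectorization factor VF, the segment
   length is 4 * VF bytes; one extra vector size is added when the access
   uses the explicit realignment scheme, which may read one vector past
   the segment.  */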
static tree
vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
{
  tree segment_length;

  if (integer_zerop (DR_STEP (dr)))
    segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
  else
    segment_length = size_binop (MULT_EXPR,
				 fold_convert (sizetype, DR_STEP (dr)),
				 fold_convert (sizetype, length_factor));

  if (vect_supportable_dr_alignment (dr, false)
      == dr_explicit_realign_optimized)
    {
      tree vector_size = TYPE_SIZE_UNIT
			   (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));

      segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
    }
  return segment_length;
}
2727 /* Function vect_prune_runtime_alias_test_list.
2729 Prune a list of ddrs to be tested at run-time by versioning for alias.
2730 Merge several alias checks into one if possible.
   Return FALSE if the resulting list of ddrs is longer than allowed by
   PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE.  */
2735 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo
)
2737 vec
<ddr_p
> may_alias_ddrs
=
2738 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo
);
2739 vec
<dr_with_seg_len_pair_t
>& comp_alias_ddrs
=
2740 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo
);
2741 int vect_factor
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
2742 tree scalar_loop_iters
= LOOP_VINFO_NITERS (loop_vinfo
);
2748 if (dump_enabled_p ())
2749 dump_printf_loc (MSG_NOTE
, vect_location
,
2750 "=== vect_prune_runtime_alias_test_list ===\n");
2752 if (may_alias_ddrs
.is_empty ())
2755 /* Basically, for each pair of dependent data refs store_ptr_0
2756 and load_ptr_0, we create an expression:
2758 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2759 || (load_ptr_0 + load_segment_length_0) <= store_ptr_0))
2761 for aliasing checks. However, in some cases we can decrease
2762 the number of checks by combining two checks into one. For
2763 example, suppose we have another pair of data refs store_ptr_0
2764 and load_ptr_1, and if the following condition is satisfied:
2766 load_ptr_0 < load_ptr_1 &&
2767 load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0
2769 (this condition means, in each iteration of vectorized loop,
2770 the accessed memory of store_ptr_0 cannot be between the memory
2771 of load_ptr_0 and load_ptr_1.)
     we can then use only the following expression to finish the
     aliasing checks between store_ptr_0 & load_ptr_0 and
2775 store_ptr_0 & load_ptr_1:
2777 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2778 || (load_ptr_1 + load_segment_length_1 <= store_ptr_0))
2780 Note that we only consider that load_ptr_0 and load_ptr_1 have the
2781 same basic address. */
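/* As a concrete (illustrative) example: if load_ptr_1 == load_ptr_0 + 8,
   load_segment_length_0 == 16 and store_segment_length_0 == 32, then
   8 - 16 < 32 holds, so the two checks against store_ptr_0 collapse into
   the single combined check shown above.  */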
2783 comp_alias_ddrs
.create (may_alias_ddrs
.length ());
2785 /* First, we collect all data ref pairs for aliasing checks. */
2786 FOR_EACH_VEC_ELT (may_alias_ddrs
, i
, ddr
)
2788 struct data_reference
*dr_a
, *dr_b
;
2789 gimple dr_group_first_a
, dr_group_first_b
;
2790 tree segment_length_a
, segment_length_b
;
2791 gimple stmt_a
, stmt_b
;
2794 stmt_a
= DR_STMT (DDR_A (ddr
));
2795 dr_group_first_a
= GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a
));
2796 if (dr_group_first_a
)
2798 stmt_a
= dr_group_first_a
;
2799 dr_a
= STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a
));
2803 stmt_b
= DR_STMT (DDR_B (ddr
));
2804 dr_group_first_b
= GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b
));
2805 if (dr_group_first_b
)
2807 stmt_b
= dr_group_first_b
;
2808 dr_b
= STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b
));
2811 if (!operand_equal_p (DR_STEP (dr_a
), DR_STEP (dr_b
), 0))
2812 length_factor
= scalar_loop_iters
;
2814 length_factor
= size_int (vect_factor
);
2815 segment_length_a
= vect_vfa_segment_size (dr_a
, length_factor
);
2816 segment_length_b
= vect_vfa_segment_size (dr_b
, length_factor
);
2818 dr_with_seg_len_pair_t dr_with_seg_len_pair
2819 (dr_with_seg_len (dr_a
, segment_length_a
),
2820 dr_with_seg_len (dr_b
, segment_length_b
));
2822 if (compare_tree (DR_BASE_ADDRESS (dr_a
), DR_BASE_ADDRESS (dr_b
)) > 0)
2823 swap (dr_with_seg_len_pair
.first
, dr_with_seg_len_pair
.second
);
2825 comp_alias_ddrs
.safe_push (dr_with_seg_len_pair
);
2828 /* Second, we sort the collected data ref pairs so that we can scan
2829 them once to combine all possible aliasing checks. */
2830 comp_alias_ddrs
.qsort (comp_dr_with_seg_len_pair
);
2832 /* Third, we scan the sorted dr pairs and check if we can combine
2833 alias checks of two neighbouring dr pairs. */
2834 for (size_t i
= 1; i
< comp_alias_ddrs
.length (); ++i
)
2836 /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */
2837 dr_with_seg_len
*dr_a1
= &comp_alias_ddrs
[i
-1].first
,
2838 *dr_b1
= &comp_alias_ddrs
[i
-1].second
,
2839 *dr_a2
= &comp_alias_ddrs
[i
].first
,
2840 *dr_b2
= &comp_alias_ddrs
[i
].second
;
2842 /* Remove duplicate data ref pairs. */
2843 if (*dr_a1
== *dr_a2
&& *dr_b1
== *dr_b2
)
2845 if (dump_enabled_p ())
2847 dump_printf_loc (MSG_NOTE
, vect_location
,
2848 "found equal ranges ");
2849 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
2850 DR_REF (dr_a1
->dr
));
2851 dump_printf (MSG_NOTE
, ", ");
2852 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
2853 DR_REF (dr_b1
->dr
));
2854 dump_printf (MSG_NOTE
, " and ");
2855 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
2856 DR_REF (dr_a2
->dr
));
2857 dump_printf (MSG_NOTE
, ", ");
2858 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
2859 DR_REF (dr_b2
->dr
));
2860 dump_printf (MSG_NOTE
, "\n");
2863 comp_alias_ddrs
.ordered_remove (i
--);
2867 if (*dr_a1
== *dr_a2
|| *dr_b1
== *dr_b2
)
2869 /* We consider the case that DR_B1 and DR_B2 are same memrefs,
2870 and DR_A1 and DR_A2 are two consecutive memrefs. */
2871 if (*dr_a1
== *dr_a2
)
2873 swap (dr_a1
, dr_b1
);
2874 swap (dr_a2
, dr_b2
);
2877 if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1
->dr
),
2878 DR_BASE_ADDRESS (dr_a2
->dr
),
2880 || !tree_fits_shwi_p (dr_a1
->offset
)
2881 || !tree_fits_shwi_p (dr_a2
->offset
))
2884 HOST_WIDE_INT diff
= (tree_to_shwi (dr_a2
->offset
)
2885 - tree_to_shwi (dr_a1
->offset
));
2888 /* Now we check if the following condition is satisfied:
2890 DIFF - SEGMENT_LENGTH_A < SEGMENT_LENGTH_B
2892 where DIFF = DR_A2->OFFSET - DR_A1->OFFSET. However,
2893 SEGMENT_LENGTH_A or SEGMENT_LENGTH_B may not be constant so we
2894 have to make a best estimation. We can get the minimum value
2895 of SEGMENT_LENGTH_B as a constant, represented by MIN_SEG_LEN_B,
	     then either of the following two conditions can guarantee
	     the one above:

	     1: DIFF <= MIN_SEG_LEN_B
	     2: DIFF - SEGMENT_LENGTH_A < MIN_SEG_LEN_B  */
2904 HOST_WIDE_INT min_seg_len_b
= (tree_fits_shwi_p (dr_b1
->seg_len
)
2905 ? tree_to_shwi (dr_b1
->seg_len
)
2908 if (diff
<= min_seg_len_b
2909 || (tree_fits_shwi_p (dr_a1
->seg_len
)
2910 && diff
- tree_to_shwi (dr_a1
->seg_len
) < min_seg_len_b
))
2912 if (dump_enabled_p ())
2914 dump_printf_loc (MSG_NOTE
, vect_location
,
2915 "merging ranges for ");
2916 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
2917 DR_REF (dr_a1
->dr
));
2918 dump_printf (MSG_NOTE
, ", ");
2919 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
2920 DR_REF (dr_b1
->dr
));
2921 dump_printf (MSG_NOTE
, " and ");
2922 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
2923 DR_REF (dr_a2
->dr
));
2924 dump_printf (MSG_NOTE
, ", ");
2925 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
2926 DR_REF (dr_b2
->dr
));
2927 dump_printf (MSG_NOTE
, "\n");
2930 dr_a1
->seg_len
= size_binop (PLUS_EXPR
,
2931 dr_a2
->seg_len
, size_int (diff
));
2932 comp_alias_ddrs
.ordered_remove (i
--);
2937 dump_printf_loc (MSG_NOTE
, vect_location
,
2938 "improved number of alias checks from %d to %d\n",
2939 may_alias_ddrs
.length (), comp_alias_ddrs
.length ());
2940 if ((int) comp_alias_ddrs
.length () >
2941 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS
))
2947 /* Check whether a non-affine read in stmt is suitable for gather load
2948 and if so, return a builtin decl for that operation. */
2951 vect_check_gather (gimple stmt
, loop_vec_info loop_vinfo
, tree
*basep
,
2952 tree
*offp
, int *scalep
)
2954 HOST_WIDE_INT scale
= 1, pbitpos
, pbitsize
;
2955 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
2956 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2957 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
2958 tree offtype
= NULL_TREE
;
2959 tree decl
, base
, off
;
2960 enum machine_mode pmode
;
2961 int punsignedp
, pvolatilep
;
2964 /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
2965 see if we can use the def stmt of the address. */
2966 if (is_gimple_call (stmt
)
2967 && gimple_call_internal_p (stmt
)
2968 && (gimple_call_internal_fn (stmt
) == IFN_MASK_LOAD
2969 || gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
)
2970 && TREE_CODE (base
) == MEM_REF
2971 && TREE_CODE (TREE_OPERAND (base
, 0)) == SSA_NAME
2972 && integer_zerop (TREE_OPERAND (base
, 1))
2973 && !expr_invariant_in_loop_p (loop
, TREE_OPERAND (base
, 0)))
2975 gimple def_stmt
= SSA_NAME_DEF_STMT (TREE_OPERAND (base
, 0));
2976 if (is_gimple_assign (def_stmt
)
2977 && gimple_assign_rhs_code (def_stmt
) == ADDR_EXPR
)
2978 base
= TREE_OPERAND (gimple_assign_rhs1 (def_stmt
), 0);
2981 /* The gather builtins need address of the form
2982 loop_invariant + vector * {1, 2, 4, 8}
2984 loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
2985 Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
2986 of loop invariants/SSA_NAMEs defined in the loop, with casts,
2987 multiplications and additions in it. To get a vector, we need
2988 a single SSA_NAME that will be defined in the loop and will
2989 contain everything that is not loop invariant and that can be
     vectorized.  The following code attempts to find such a preexisting
2991 SSA_NAME OFF and put the loop invariants into a tree BASE
2992 that can be gimplified before the loop. */
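/* For example (illustrative): for a read from p->a[k[i]] where p is loop
   invariant and k[i] varies, BASE typically ends up as the invariant
   address of p->a (plus any constant byte offset) while OFF is the
   SSA_NAME holding k[i], which is then scaled by the element size via
   SCALE for the gather builtin.  */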
2993 base
= get_inner_reference (base
, &pbitsize
, &pbitpos
, &off
,
2994 &pmode
, &punsignedp
, &pvolatilep
, false);
2995 gcc_assert (base
!= NULL_TREE
&& (pbitpos
% BITS_PER_UNIT
) == 0);
2997 if (TREE_CODE (base
) == MEM_REF
)
2999 if (!integer_zerop (TREE_OPERAND (base
, 1)))
3001 if (off
== NULL_TREE
)
3003 offset_int moff
= mem_ref_offset (base
);
3004 off
= wide_int_to_tree (sizetype
, moff
);
3007 off
= size_binop (PLUS_EXPR
, off
,
3008 fold_convert (sizetype
, TREE_OPERAND (base
, 1)));
3010 base
= TREE_OPERAND (base
, 0);
3013 base
= build_fold_addr_expr (base
);
3015 if (off
== NULL_TREE
)
3016 off
= size_zero_node
;
  /* If base is not loop invariant: if off is 0, then we start with just
     the constant offset in the loop invariant BASE and continue with base
     as OFF; otherwise give up.
     We could handle that case by gimplifying the addition of base + off
     into some SSA_NAME and using that as off, but for now punt.  */
3023 if (!expr_invariant_in_loop_p (loop
, base
))
3025 if (!integer_zerop (off
))
3028 base
= size_int (pbitpos
/ BITS_PER_UNIT
);
3030 /* Otherwise put base + constant offset into the loop invariant BASE
3031 and continue with OFF. */
3034 base
= fold_convert (sizetype
, base
);
3035 base
= size_binop (PLUS_EXPR
, base
, size_int (pbitpos
/ BITS_PER_UNIT
));
3038 /* OFF at this point may be either a SSA_NAME or some tree expression
3039 from get_inner_reference. Try to peel off loop invariants from it
3040 into BASE as long as possible. */
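  /* For example (illustrative): if OFF is inv + n * 4 with INV loop
     invariant, INV is folded into BASE (multiplied by the current SCALE)
     and the walk continues with n * 4; the MULT_EXPR then sets SCALE to 4
     and leaves OFF as the loop-variant SSA_NAME n.  */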
3042 while (offtype
== NULL_TREE
)
3044 enum tree_code code
;
3045 tree op0
, op1
, add
= NULL_TREE
;
3047 if (TREE_CODE (off
) == SSA_NAME
)
3049 gimple def_stmt
= SSA_NAME_DEF_STMT (off
);
3051 if (expr_invariant_in_loop_p (loop
, off
))
3054 if (gimple_code (def_stmt
) != GIMPLE_ASSIGN
)
3057 op0
= gimple_assign_rhs1 (def_stmt
);
3058 code
= gimple_assign_rhs_code (def_stmt
);
3059 op1
= gimple_assign_rhs2 (def_stmt
);
3063 if (get_gimple_rhs_class (TREE_CODE (off
)) == GIMPLE_TERNARY_RHS
)
3065 code
= TREE_CODE (off
);
3066 extract_ops_from_tree (off
, &code
, &op0
, &op1
);
3070 case POINTER_PLUS_EXPR
:
3072 if (expr_invariant_in_loop_p (loop
, op0
))
3077 add
= fold_convert (sizetype
, add
);
3079 add
= size_binop (MULT_EXPR
, add
, size_int (scale
));
3080 base
= size_binop (PLUS_EXPR
, base
, add
);
3083 if (expr_invariant_in_loop_p (loop
, op1
))
3091 if (expr_invariant_in_loop_p (loop
, op1
))
3093 add
= fold_convert (sizetype
, op1
);
3094 add
= size_binop (MINUS_EXPR
, size_zero_node
, add
);
3100 if (scale
== 1 && tree_fits_shwi_p (op1
))
3102 scale
= tree_to_shwi (op1
);
3111 if (!POINTER_TYPE_P (TREE_TYPE (op0
))
3112 && !INTEGRAL_TYPE_P (TREE_TYPE (op0
)))
3114 if (TYPE_PRECISION (TREE_TYPE (op0
))
3115 == TYPE_PRECISION (TREE_TYPE (off
)))
3120 if (TYPE_PRECISION (TREE_TYPE (op0
))
3121 < TYPE_PRECISION (TREE_TYPE (off
)))
3124 offtype
= TREE_TYPE (off
);
3135 /* If at the end OFF still isn't a SSA_NAME or isn't
3136 defined in the loop, punt. */
3137 if (TREE_CODE (off
) != SSA_NAME
3138 || expr_invariant_in_loop_p (loop
, off
))
3141 if (offtype
== NULL_TREE
)
3142 offtype
= TREE_TYPE (off
);
3144 decl
= targetm
.vectorize
.builtin_gather (STMT_VINFO_VECTYPE (stmt_info
),
3146 if (decl
== NULL_TREE
)
3158 /* Function vect_analyze_data_refs.
3160 Find all the data references in the loop or basic block.
3162 The general structure of the analysis of data refs in the vectorizer is as
3164 1- vect_analyze_data_refs(loop/bb): call
3165 compute_data_dependences_for_loop/bb to find and analyze all data-refs
3166 in the loop/bb and their dependences.
3167 2- vect_analyze_dependences(): apply dependence testing using ddrs.
3168 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
3169 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
3174 vect_analyze_data_refs (loop_vec_info loop_vinfo
,
3175 bb_vec_info bb_vinfo
,
3176 int *min_vf
, unsigned *n_stmts
)
3178 struct loop
*loop
= NULL
;
3179 basic_block bb
= NULL
;
3181 vec
<data_reference_p
> datarefs
;
3182 struct data_reference
*dr
;
3185 if (dump_enabled_p ())
3186 dump_printf_loc (MSG_NOTE
, vect_location
,
3187 "=== vect_analyze_data_refs ===\n");
3191 basic_block
*bbs
= LOOP_VINFO_BBS (loop_vinfo
);
3193 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3194 datarefs
= LOOP_VINFO_DATAREFS (loop_vinfo
);
3195 if (!find_loop_nest (loop
, &LOOP_VINFO_LOOP_NEST (loop_vinfo
)))
3197 if (dump_enabled_p ())
3198 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3199 "not vectorized: loop contains function calls"
3200 " or data references that cannot be analyzed\n");
3204 for (i
= 0; i
< loop
->num_nodes
; i
++)
3206 gimple_stmt_iterator gsi
;
3208 for (gsi
= gsi_start_bb (bbs
[i
]); !gsi_end_p (gsi
); gsi_next (&gsi
))
3210 gimple stmt
= gsi_stmt (gsi
);
3211 if (is_gimple_debug (stmt
))
3214 if (!find_data_references_in_stmt (loop
, stmt
, &datarefs
))
3216 if (is_gimple_call (stmt
) && loop
->safelen
)
3218 tree fndecl
= gimple_call_fndecl (stmt
), op
;
3219 if (fndecl
!= NULL_TREE
)
3221 struct cgraph_node
*node
= cgraph_get_node (fndecl
);
3222 if (node
!= NULL
&& node
->simd_clones
!= NULL
)
3224 unsigned int j
, n
= gimple_call_num_args (stmt
);
3225 for (j
= 0; j
< n
; j
++)
3227 op
= gimple_call_arg (stmt
, j
);
3229 || (REFERENCE_CLASS_P (op
)
3230 && get_base_address (op
)))
3233 op
= gimple_call_lhs (stmt
);
3234 /* Ignore #pragma omp declare simd functions
3235 if they don't have data references in the
3236 call stmt itself. */
3240 || (REFERENCE_CLASS_P (op
)
3241 && get_base_address (op
)))))
3246 LOOP_VINFO_DATAREFS (loop_vinfo
) = datarefs
;
3247 if (dump_enabled_p ())
3248 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3249 "not vectorized: loop contains function "
3250 "calls or data references that cannot "
3257 LOOP_VINFO_DATAREFS (loop_vinfo
) = datarefs
;
3261 gimple_stmt_iterator gsi
;
3263 bb
= BB_VINFO_BB (bb_vinfo
);
3264 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
3266 gimple stmt
= gsi_stmt (gsi
);
3267 if (is_gimple_debug (stmt
))
3270 if (!find_data_references_in_stmt (NULL
, stmt
,
3271 &BB_VINFO_DATAREFS (bb_vinfo
)))
3273 /* Mark the rest of the basic-block as unvectorizable. */
3274 for (; !gsi_end_p (gsi
); gsi_next (&gsi
))
3276 stmt
= gsi_stmt (gsi
);
3277 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt
)) = false;
3283 datarefs
= BB_VINFO_DATAREFS (bb_vinfo
);
3286 /* Go through the data-refs, check that the analysis succeeded. Update
3287 pointer from stmt_vec_info struct to DR and vectype. */
3289 FOR_EACH_VEC_ELT (datarefs
, i
, dr
)
3292 stmt_vec_info stmt_info
;
3293 tree base
, offset
, init
;
3294 bool gather
= false;
3295 bool simd_lane_access
= false;
3299 if (!dr
|| !DR_REF (dr
))
3301 if (dump_enabled_p ())
3302 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3303 "not vectorized: unhandled data-ref\n");
3307 stmt
= DR_STMT (dr
);
3308 stmt_info
= vinfo_for_stmt (stmt
);
3310 /* Discard clobbers from the dataref vector. We will remove
3311 clobber stmts during vectorization. */
3312 if (gimple_clobber_p (stmt
))
3315 if (i
== datarefs
.length () - 1)
3320 datarefs
.ordered_remove (i
);
3325 /* Check that analysis of the data-ref succeeded. */
3326 if (!DR_BASE_ADDRESS (dr
) || !DR_OFFSET (dr
) || !DR_INIT (dr
)
3331 && !TREE_THIS_VOLATILE (DR_REF (dr
))
3332 && targetm
.vectorize
.builtin_gather
!= NULL
;
3333 bool maybe_simd_lane_access
3334 = loop_vinfo
&& loop
->simduid
;
3336 /* If target supports vector gather loads, or if this might be
3337 a SIMD lane access, see if they can't be used. */
3339 && (maybe_gather
|| maybe_simd_lane_access
)
3340 && !nested_in_vect_loop_p (loop
, stmt
))
3342 struct data_reference
*newdr
3343 = create_data_ref (NULL
, loop_containing_stmt (stmt
),
3344 DR_REF (dr
), stmt
, true);
3345 gcc_assert (newdr
!= NULL
&& DR_REF (newdr
));
3346 if (DR_BASE_ADDRESS (newdr
)
3347 && DR_OFFSET (newdr
)
3350 && integer_zerop (DR_STEP (newdr
)))
3352 if (maybe_simd_lane_access
)
3354 tree off
= DR_OFFSET (newdr
);
3356 if (TREE_CODE (DR_INIT (newdr
)) == INTEGER_CST
3357 && TREE_CODE (off
) == MULT_EXPR
3358 && tree_fits_uhwi_p (TREE_OPERAND (off
, 1)))
3360 tree step
= TREE_OPERAND (off
, 1);
3361 off
= TREE_OPERAND (off
, 0);
3363 if (CONVERT_EXPR_P (off
)
3364 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off
,
3366 < TYPE_PRECISION (TREE_TYPE (off
)))
3367 off
= TREE_OPERAND (off
, 0);
3368 if (TREE_CODE (off
) == SSA_NAME
)
3370 gimple def
= SSA_NAME_DEF_STMT (off
);
3371 tree reft
= TREE_TYPE (DR_REF (newdr
));
3372 if (is_gimple_call (def
)
3373 && gimple_call_internal_p (def
)
3374 && (gimple_call_internal_fn (def
)
3375 == IFN_GOMP_SIMD_LANE
))
3377 tree arg
= gimple_call_arg (def
, 0);
3378 gcc_assert (TREE_CODE (arg
) == SSA_NAME
);
3379 arg
= SSA_NAME_VAR (arg
);
3380 if (arg
== loop
->simduid
3382 && tree_int_cst_equal
3383 (TYPE_SIZE_UNIT (reft
),
3386 DR_OFFSET (newdr
) = ssize_int (0);
3387 DR_STEP (newdr
) = step
;
3388 DR_ALIGNED_TO (newdr
)
3389 = size_int (BIGGEST_ALIGNMENT
);
3391 simd_lane_access
= true;
3397 if (!simd_lane_access
&& maybe_gather
)
3403 if (!gather
&& !simd_lane_access
)
3404 free_data_ref (newdr
);
3407 if (!gather
&& !simd_lane_access
)
3409 if (dump_enabled_p ())
3411 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3412 "not vectorized: data ref analysis "
3414 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3415 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3425 if (TREE_CODE (DR_BASE_ADDRESS (dr
)) == INTEGER_CST
)
3427 if (dump_enabled_p ())
3428 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3429 "not vectorized: base addr of dr is a "
3435 if (gather
|| simd_lane_access
)
3440 if (TREE_THIS_VOLATILE (DR_REF (dr
)))
3442 if (dump_enabled_p ())
3444 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3445 "not vectorized: volatile type ");
3446 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3447 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3456 if (stmt_can_throw_internal (stmt
))
3458 if (dump_enabled_p ())
3460 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3461 "not vectorized: statement can throw an "
3463 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3464 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3470 if (gather
|| simd_lane_access
)
3475 if (TREE_CODE (DR_REF (dr
)) == COMPONENT_REF
3476 && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr
), 1)))
3478 if (dump_enabled_p ())
3480 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3481 "not vectorized: statement is bitfield "
3483 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3484 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3490 if (gather
|| simd_lane_access
)
3495 base
= unshare_expr (DR_BASE_ADDRESS (dr
));
3496 offset
= unshare_expr (DR_OFFSET (dr
));
3497 init
= unshare_expr (DR_INIT (dr
));
3499 if (is_gimple_call (stmt
)
3500 && (!gimple_call_internal_p (stmt
)
3501 || (gimple_call_internal_fn (stmt
) != IFN_MASK_LOAD
3502 && gimple_call_internal_fn (stmt
) != IFN_MASK_STORE
)))
3504 if (dump_enabled_p ())
3506 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3507 "not vectorized: dr in a call ");
3508 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3509 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3515 if (gather
|| simd_lane_access
)
3520 /* Update DR field in stmt_vec_info struct. */
      /* If the dataref is in an inner-loop of the loop that is considered
	 for vectorization, we also want to analyze the access relative to
3524 the outer-loop (DR contains information only relative to the
3525 inner-most enclosing loop). We do that by building a reference to the
3526 first location accessed by the inner-loop, and analyze it relative to
3528 if (loop
&& nested_in_vect_loop_p (loop
, stmt
))
3530 tree outer_step
, outer_base
, outer_init
;
3531 HOST_WIDE_INT pbitsize
, pbitpos
;
3533 enum machine_mode pmode
;
3534 int punsignedp
, pvolatilep
;
3535 affine_iv base_iv
, offset_iv
;
3538 /* Build a reference to the first location accessed by the
3539 inner-loop: *(BASE+INIT). (The first location is actually
3540 BASE+INIT+OFFSET, but we add OFFSET separately later). */
3541 tree inner_base
= build_fold_indirect_ref
3542 (fold_build_pointer_plus (base
, init
));
3544 if (dump_enabled_p ())
3546 dump_printf_loc (MSG_NOTE
, vect_location
,
3547 "analyze in outer-loop: ");
3548 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, inner_base
);
3549 dump_printf (MSG_NOTE
, "\n");
3552 outer_base
= get_inner_reference (inner_base
, &pbitsize
, &pbitpos
,
3553 &poffset
, &pmode
, &punsignedp
, &pvolatilep
, false);
3554 gcc_assert (outer_base
!= NULL_TREE
);
3556 if (pbitpos
% BITS_PER_UNIT
!= 0)
3558 if (dump_enabled_p ())
3559 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3560 "failed: bit offset alignment.\n");
3564 outer_base
= build_fold_addr_expr (outer_base
);
3565 if (!simple_iv (loop
, loop_containing_stmt (stmt
), outer_base
,
3568 if (dump_enabled_p ())
3569 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3570 "failed: evolution of base is not affine.\n");
3577 poffset
= fold_build2 (PLUS_EXPR
, TREE_TYPE (offset
), offset
,
3585 offset_iv
.base
= ssize_int (0);
3586 offset_iv
.step
= ssize_int (0);
3588 else if (!simple_iv (loop
, loop_containing_stmt (stmt
), poffset
,
3591 if (dump_enabled_p ())
3592 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3593 "evolution of offset is not affine.\n");
3597 outer_init
= ssize_int (pbitpos
/ BITS_PER_UNIT
);
3598 split_constant_offset (base_iv
.base
, &base_iv
.base
, &dinit
);
3599 outer_init
= size_binop (PLUS_EXPR
, outer_init
, dinit
);
3600 split_constant_offset (offset_iv
.base
, &offset_iv
.base
, &dinit
);
3601 outer_init
= size_binop (PLUS_EXPR
, outer_init
, dinit
);
3603 outer_step
= size_binop (PLUS_EXPR
,
3604 fold_convert (ssizetype
, base_iv
.step
),
3605 fold_convert (ssizetype
, offset_iv
.step
));
3607 STMT_VINFO_DR_STEP (stmt_info
) = outer_step
;
3608 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
3609 STMT_VINFO_DR_BASE_ADDRESS (stmt_info
) = base_iv
.base
;
3610 STMT_VINFO_DR_INIT (stmt_info
) = outer_init
;
3611 STMT_VINFO_DR_OFFSET (stmt_info
) =
3612 fold_convert (ssizetype
, offset_iv
.base
);
3613 STMT_VINFO_DR_ALIGNED_TO (stmt_info
) =
3614 size_int (highest_pow2_factor (offset_iv
.base
));
3616 if (dump_enabled_p ())
3618 dump_printf_loc (MSG_NOTE
, vect_location
,
3619 "\touter base_address: ");
3620 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3621 STMT_VINFO_DR_BASE_ADDRESS (stmt_info
));
3622 dump_printf (MSG_NOTE
, "\n\touter offset from base address: ");
3623 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3624 STMT_VINFO_DR_OFFSET (stmt_info
));
3625 dump_printf (MSG_NOTE
,
3626 "\n\touter constant offset from base address: ");
3627 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3628 STMT_VINFO_DR_INIT (stmt_info
));
3629 dump_printf (MSG_NOTE
, "\n\touter step: ");
3630 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3631 STMT_VINFO_DR_STEP (stmt_info
));
3632 dump_printf (MSG_NOTE
, "\n\touter aligned to: ");
3633 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3634 STMT_VINFO_DR_ALIGNED_TO (stmt_info
));
3635 dump_printf (MSG_NOTE
, "\n");
3639 if (STMT_VINFO_DATA_REF (stmt_info
))
3641 if (dump_enabled_p ())
3643 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3644 "not vectorized: more than one data ref "
3646 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3647 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3653 if (gather
|| simd_lane_access
)
3658 STMT_VINFO_DATA_REF (stmt_info
) = dr
;
3659 if (simd_lane_access
)
3661 STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
) = true;
3662 free_data_ref (datarefs
[i
]);
3666 /* Set vectype for STMT. */
3667 scalar_type
= TREE_TYPE (DR_REF (dr
));
3668 STMT_VINFO_VECTYPE (stmt_info
)
3669 = get_vectype_for_scalar_type (scalar_type
);
3670 if (!STMT_VINFO_VECTYPE (stmt_info
))
3672 if (dump_enabled_p ())
3674 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3675 "not vectorized: no vectype for stmt: ");
3676 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3677 dump_printf (MSG_MISSED_OPTIMIZATION
, " scalar_type: ");
3678 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_DETAILS
,
3680 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3686 if (gather
|| simd_lane_access
)
3688 STMT_VINFO_DATA_REF (stmt_info
) = NULL
;
3696 if (dump_enabled_p ())
3698 dump_printf_loc (MSG_NOTE
, vect_location
,
3699 "got vectype for stmt: ");
3700 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
3701 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3702 STMT_VINFO_VECTYPE (stmt_info
));
3703 dump_printf (MSG_NOTE
, "\n");
3707 /* Adjust the minimal vectorization factor according to the
3709 vf
= TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info
));
3717 gather
= 0 != vect_check_gather (stmt
, loop_vinfo
, NULL
, &off
, NULL
);
3719 && get_vectype_for_scalar_type (TREE_TYPE (off
)) == NULL_TREE
)
3723 STMT_VINFO_DATA_REF (stmt_info
) = NULL
;
3725 if (dump_enabled_p ())
3727 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3728 "not vectorized: not suitable for gather "
3730 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3731 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3737 STMT_VINFO_GATHER_P (stmt_info
) = true;
3740 && TREE_CODE (DR_STEP (dr
)) != INTEGER_CST
)
3742 if (nested_in_vect_loop_p (loop
, stmt
)
3743 || !DR_IS_READ (dr
))
3745 if (dump_enabled_p ())
3747 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3748 "not vectorized: not suitable for strided "
3750 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
3751 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3755 STMT_VINFO_STRIDE_LOAD_P (stmt_info
) = true;
3759 /* If we stopped analysis at the first dataref we could not analyze
3760 when trying to vectorize a basic-block mark the rest of the datarefs
3761 as not vectorizable and truncate the vector of datarefs. That
3762 avoids spending useless time in analyzing their dependence. */
3763 if (i
!= datarefs
.length ())
3765 gcc_assert (bb_vinfo
!= NULL
);
3766 for (unsigned j
= i
; j
< datarefs
.length (); ++j
)
3768 data_reference_p dr
= datarefs
[j
];
3769 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr
))) = false;
3772 datarefs
.truncate (i
);
3779 /* Function vect_get_new_vect_var.
3781 Returns a name for a new variable. The current naming scheme appends the
3782 prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
3783 the name of vectorizer generated variables, and appends that to NAME if
3787 vect_get_new_vect_var (tree type
, enum vect_var_kind var_kind
, const char *name
)
3794 case vect_simple_var
:
3797 case vect_scalar_var
:
3800 case vect_pointer_var
:
3809 char* tmp
= concat (prefix
, "_", name
, NULL
);
3810 new_vect_var
= create_tmp_reg (type
, tmp
);
3814 new_vect_var
= create_tmp_reg (type
, prefix
);
3816 return new_vect_var
;
3820 /* Function vect_create_addr_base_for_vector_ref.
3822 Create an expression that computes the address of the first memory location
3823 that will be accessed for a data reference.
3826 STMT: The statement containing the data reference.
3827 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
   OFFSET: Optional.  If supplied, it is added to the initial address.
3829 LOOP: Specify relative to which loop-nest should the address be computed.
3830 For example, when the dataref is in an inner-loop nested in an
3831 outer-loop that is now being vectorized, LOOP can be either the
3832 outer-loop, or the inner-loop. The first memory location accessed
3833 by the following dataref ('in' points to short):
3840 if LOOP=i_loop: &in (relative to i_loop)
3841 if LOOP=j_loop: &in+i*2B (relative to j_loop)
3844 1. Return an SSA_NAME whose value is the address of the memory location of
3845 the first vector of the data reference.
3846 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
3847 these statement(s) which define the returned SSA_NAME.
3849 FORNOW: We are only handling array accesses with step 1. */
3852 vect_create_addr_base_for_vector_ref (gimple stmt
,
3853 gimple_seq
*new_stmt_list
,
3857 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3858 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
3860 const char *base_name
;
3863 gimple_seq seq
= NULL
;
3867 tree step
= TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr
)));
3868 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3870 if (loop_vinfo
&& loop
&& loop
!= (gimple_bb (stmt
))->loop_father
)
3872 struct loop
*outer_loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3874 gcc_assert (nested_in_vect_loop_p (outer_loop
, stmt
));
3876 data_ref_base
= unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info
));
3877 base_offset
= unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info
));
3878 init
= unshare_expr (STMT_VINFO_DR_INIT (stmt_info
));
3882 data_ref_base
= unshare_expr (DR_BASE_ADDRESS (dr
));
3883 base_offset
= unshare_expr (DR_OFFSET (dr
));
3884 init
= unshare_expr (DR_INIT (dr
));
3888 base_name
= get_name (data_ref_base
);
3891 base_offset
= ssize_int (0);
3892 init
= ssize_int (0);
3893 base_name
= get_name (DR_REF (dr
));
3896 /* Create base_offset */
3897 base_offset
= size_binop (PLUS_EXPR
,
3898 fold_convert (sizetype
, base_offset
),
3899 fold_convert (sizetype
, init
));
3903 offset
= fold_build2 (MULT_EXPR
, sizetype
,
3904 fold_convert (sizetype
, offset
), step
);
3905 base_offset
= fold_build2 (PLUS_EXPR
, sizetype
,
3906 base_offset
, offset
);
3909 /* base + base_offset */
3911 addr_base
= fold_build_pointer_plus (data_ref_base
, base_offset
);
3914 addr_base
= build1 (ADDR_EXPR
,
3915 build_pointer_type (TREE_TYPE (DR_REF (dr
))),
3916 unshare_expr (DR_REF (dr
)));
3919 vect_ptr_type
= build_pointer_type (STMT_VINFO_VECTYPE (stmt_info
));
3920 addr_base
= fold_convert (vect_ptr_type
, addr_base
);
3921 dest
= vect_get_new_vect_var (vect_ptr_type
, vect_pointer_var
, base_name
);
3922 addr_base
= force_gimple_operand (addr_base
, &seq
, false, dest
);
3923 gimple_seq_add_seq (new_stmt_list
, seq
);
3925 if (DR_PTR_INFO (dr
)
3926 && TREE_CODE (addr_base
) == SSA_NAME
)
3928 duplicate_ssa_name_ptr_info (addr_base
, DR_PTR_INFO (dr
));
3930 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base
));
3933 if (dump_enabled_p ())
3935 dump_printf_loc (MSG_NOTE
, vect_location
, "created ");
3936 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, addr_base
);
3937 dump_printf (MSG_NOTE
, "\n");
3944 /* Function vect_create_data_ref_ptr.
3946 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
3947 location accessed in the loop by STMT, along with the def-use update
3948 chain to appropriately advance the pointer through the loop iterations.
3949 Also set aliasing information for the pointer. This pointer is used by
3950 the callers to this function to create a memory reference expression for
3951 vector load/store access.
3954 1. STMT: a stmt that references memory. Expected to be of the form
3955 GIMPLE_ASSIGN <name, data-ref> or
3956 GIMPLE_ASSIGN <data-ref, name>.
3957 2. AGGR_TYPE: the type of the reference, which should be either a vector
3959 3. AT_LOOP: the loop where the vector memref is to be created.
3960 4. OFFSET (optional): an offset to be added to the initial address accessed
3961 by the data-ref in STMT.
3962 5. BSI: location where the new stmts are to be placed if there is no loop
3963 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
3964 pointing to the initial address.
3967 1. Declare a new ptr to vector_type, and have it point to the base of the
      data reference (initial address accessed by the data reference).
3969 For example, for vector of type V8HI, the following code is generated:
3972 ap = (v8hi *)initial_address;
3974 if OFFSET is not supplied:
3975 initial_address = &a[init];
3976 if OFFSET is supplied:
3977 initial_address = &a[init + OFFSET];
3979 Return the initial_address in INITIAL_ADDRESS.
3981 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
3982 update the pointer in each iteration of the loop.
3984 Return the increment stmt that updates the pointer in PTR_INCR.
3986 3. Set INV_P to true if the access pattern of the data reference in the
3987 vectorized loop is invariant. Set it to false otherwise.
3989 4. Return the pointer. */
3992 vect_create_data_ref_ptr (gimple stmt
, tree aggr_type
, struct loop
*at_loop
,
3993 tree offset
, tree
*initial_address
,
3994 gimple_stmt_iterator
*gsi
, gimple
*ptr_incr
,
3995 bool only_init
, bool *inv_p
)
3997 const char *base_name
;
3998 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3999 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4000 struct loop
*loop
= NULL
;
4001 bool nested_in_vect_loop
= false;
4002 struct loop
*containing_loop
= NULL
;
4007 gimple_seq new_stmt_list
= NULL
;
4011 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
4013 gimple_stmt_iterator incr_gsi
;
4015 tree indx_before_incr
, indx_after_incr
;
4018 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4020 gcc_assert (TREE_CODE (aggr_type
) == ARRAY_TYPE
4021 || TREE_CODE (aggr_type
) == VECTOR_TYPE
);
4025 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
4026 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
4027 containing_loop
= (gimple_bb (stmt
))->loop_father
;
4028 pe
= loop_preheader_edge (loop
);
4032 gcc_assert (bb_vinfo
);
4037 /* Check the step (evolution) of the load in LOOP, and record
4038 whether it's invariant. */
4039 if (nested_in_vect_loop
)
4040 step
= STMT_VINFO_DR_STEP (stmt_info
);
4042 step
= DR_STEP (STMT_VINFO_DATA_REF (stmt_info
));
4044 if (integer_zerop (step
))
4049 /* Create an expression for the first address accessed by this load
4051 base_name
= get_name (DR_BASE_ADDRESS (dr
));
4053 if (dump_enabled_p ())
4055 tree dr_base_type
= TREE_TYPE (DR_BASE_OBJECT (dr
));
4056 dump_printf_loc (MSG_NOTE
, vect_location
,
4057 "create %s-pointer variable to type: ",
4058 get_tree_code_name (TREE_CODE (aggr_type
)));
4059 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, aggr_type
);
4060 if (TREE_CODE (dr_base_type
) == ARRAY_TYPE
)
4061 dump_printf (MSG_NOTE
, " vectorizing an array ref: ");
4062 else if (TREE_CODE (dr_base_type
) == VECTOR_TYPE
)
4063 dump_printf (MSG_NOTE
, " vectorizing a vector ref: ");
4064 else if (TREE_CODE (dr_base_type
) == RECORD_TYPE
)
4065 dump_printf (MSG_NOTE
, " vectorizing a record based array ref: ");
4067 dump_printf (MSG_NOTE
, " vectorizing a pointer ref: ");
4068 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, DR_BASE_OBJECT (dr
));
4069 dump_printf (MSG_NOTE
, "\n");
4072 /* (1) Create the new aggregate-pointer variable.
4073 Vector and array types inherit the alias set of their component
4074 type by default so we need to use a ref-all pointer if the data
4075 reference does not conflict with the created aggregated data
4076 reference because it is not addressable. */
4077 bool need_ref_all
= false;
4078 if (!alias_sets_conflict_p (get_alias_set (aggr_type
),
4079 get_alias_set (DR_REF (dr
))))
4080 need_ref_all
= true;
4081 /* Likewise for any of the data references in the stmt group. */
4082 else if (STMT_VINFO_GROUP_SIZE (stmt_info
) > 1)
4084 gimple orig_stmt
= STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info
);
4087 stmt_vec_info sinfo
= vinfo_for_stmt (orig_stmt
);
4088 struct data_reference
*sdr
= STMT_VINFO_DATA_REF (sinfo
);
4089 if (!alias_sets_conflict_p (get_alias_set (aggr_type
),
4090 get_alias_set (DR_REF (sdr
))))
4092 need_ref_all
= true;
4095 orig_stmt
= STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo
);
4099 aggr_ptr_type
= build_pointer_type_for_mode (aggr_type
, ptr_mode
,
4101 aggr_ptr
= vect_get_new_vect_var (aggr_ptr_type
, vect_pointer_var
, base_name
);
4104 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
4105 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
4106 def-use update cycles for the pointer: one relative to the outer-loop
4107 (LOOP), which is what steps (3) and (4) below do. The other is relative
4108 to the inner-loop (which is the inner-most loop containing the dataref),
4109 and this is done by step (5) below.
4111 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
4112 inner-most loop, and so steps (3),(4) work the same, and step (5) is
4113 redundant. Steps (3),(4) create the following:
4116 LOOP: vp1 = phi(vp0,vp2)
4122 If there is an inner-loop nested in loop, then step (5) will also be
4123 applied, and an additional update in the inner-loop will be created:
4126 LOOP: vp1 = phi(vp0,vp2)
4128 inner: vp3 = phi(vp1,vp4)
4129 vp4 = vp3 + inner_step
4135 /* (2) Calculate the initial address of the aggregate-pointer, and set
4136 the aggregate-pointer to point to it before the loop. */
4138 /* Create: (&(base[init_val+offset]) in the loop preheader. */
4140 new_temp
= vect_create_addr_base_for_vector_ref (stmt
, &new_stmt_list
,
4146 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, new_stmt_list
);
4147 gcc_assert (!new_bb
);
4150 gsi_insert_seq_before (gsi
, new_stmt_list
, GSI_SAME_STMT
);
4153 *initial_address
= new_temp
;
4155 /* Create: p = (aggr_type *) initial_base */
4156 if (TREE_CODE (new_temp
) != SSA_NAME
4157 || !useless_type_conversion_p (aggr_ptr_type
, TREE_TYPE (new_temp
)))
4159 vec_stmt
= gimple_build_assign (aggr_ptr
,
4160 fold_convert (aggr_ptr_type
, new_temp
));
4161 aggr_ptr_init
= make_ssa_name (aggr_ptr
, vec_stmt
);
4162 /* Copy the points-to information if it exists. */
4163 if (DR_PTR_INFO (dr
))
4164 duplicate_ssa_name_ptr_info (aggr_ptr_init
, DR_PTR_INFO (dr
));
4165 gimple_assign_set_lhs (vec_stmt
, aggr_ptr_init
);
4168 new_bb
= gsi_insert_on_edge_immediate (pe
, vec_stmt
);
4169 gcc_assert (!new_bb
);
4172 gsi_insert_before (gsi
, vec_stmt
, GSI_SAME_STMT
);
4175 aggr_ptr_init
= new_temp
;
4177 /* (3) Handle the updating of the aggregate-pointer inside the loop.
4178 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
4179 inner-loop nested in LOOP (during outer-loop vectorization). */
4181 /* No update in loop is required. */
4182 if (only_init
&& (!loop_vinfo
|| at_loop
== loop
))
4183 aptr
= aggr_ptr_init
;
4186 /* The step of the aggregate pointer is the type size. */
4187 tree iv_step
= TYPE_SIZE_UNIT (aggr_type
);
4188 /* One exception to the above is when the scalar step of the load in
4189 LOOP is zero. In this case the step here is also zero. */
4191 iv_step
= size_zero_node
;
4192 else if (tree_int_cst_sgn (step
) == -1)
4193 iv_step
= fold_build1 (NEGATE_EXPR
, TREE_TYPE (iv_step
), iv_step
);
4195 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
4197 create_iv (aggr_ptr_init
,
4198 fold_convert (aggr_ptr_type
, iv_step
),
4199 aggr_ptr
, loop
, &incr_gsi
, insert_after
,
4200 &indx_before_incr
, &indx_after_incr
);
4201 incr
= gsi_stmt (incr_gsi
);
4202 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
, NULL
));
4204 /* Copy the points-to information if it exists. */
4205 if (DR_PTR_INFO (dr
))
4207 duplicate_ssa_name_ptr_info (indx_before_incr
, DR_PTR_INFO (dr
));
4208 duplicate_ssa_name_ptr_info (indx_after_incr
, DR_PTR_INFO (dr
));
4213 aptr
= indx_before_incr
;
4216 if (!nested_in_vect_loop
|| only_init
)
4220 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
4221 nested in LOOP, if it exists. */
4223 gcc_assert (nested_in_vect_loop
);
4226 standard_iv_increment_position (containing_loop
, &incr_gsi
,
4228 create_iv (aptr
, fold_convert (aggr_ptr_type
, DR_STEP (dr
)), aggr_ptr
,
4229 containing_loop
, &incr_gsi
, insert_after
, &indx_before_incr
,
4231 incr
= gsi_stmt (incr_gsi
);
4232 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
, NULL
));
4234 /* Copy the points-to information if it exists. */
4235 if (DR_PTR_INFO (dr
))
4237 duplicate_ssa_name_ptr_info (indx_before_incr
, DR_PTR_INFO (dr
));
4238 duplicate_ssa_name_ptr_info (indx_after_incr
, DR_PTR_INFO (dr
));
4243 return indx_before_incr
;
4250 /* Function bump_vector_ptr
4252 Increment a pointer (to a vector type) by vector-size. If requested,
4253 i.e. if PTR-INCR is given, then also connect the new increment stmt
4254 to the existing def-use update-chain of the pointer, by modifying
4255 the PTR_INCR as illustrated below:
4257 The pointer def-use update-chain before this function:
4258 DATAREF_PTR = phi (p_0, p_2)
4260 PTR_INCR: p_2 = DATAREF_PTR + step
4262 The pointer def-use update-chain after this function:
4263 DATAREF_PTR = phi (p_0, p_2)
4265 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
4267 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
4270 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
4272 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
4273 the loop. The increment amount across iterations is expected
4275 BSI - location where the new update stmt is to be placed.
4276 STMT - the original scalar memory-access stmt that is being vectorized.
4277 BUMP - optional. The offset by which to bump the pointer. If not given,
4278 the offset is assumed to be vector_size.
   Output: Return NEW_DATAREF_PTR as illustrated above.  */

tree
bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
                 gimple stmt, tree bump)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree update = TYPE_SIZE_UNIT (vectype);
  gimple incr_stmt;
  ssa_op_iter iter;
  use_operand_p use_p;
  tree new_dataref_ptr;

  if (bump)
    update = bump;

  new_dataref_ptr = copy_ssa_name (dataref_ptr, NULL);
  incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, new_dataref_ptr,
                                            dataref_ptr, update);
  vect_finish_stmt_generation (stmt, incr_stmt, gsi);

  /* Copy the points-to information if it exists. */
  if (DR_PTR_INFO (dr))
    {
      duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
      mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
    }

  if (!ptr_incr)
    return new_dataref_ptr;

  /* Update the vector-pointer's cross-iteration increment. */
  FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
    {
      tree use = USE_FROM_PTR (use_p);

      if (use == dataref_ptr)
        SET_USE (use_p, new_dataref_ptr);
      else
        gcc_assert (tree_int_cst_compare (use, update) == 0);
    }

  return new_dataref_ptr;
}
/* Function vect_create_destination_var.

   Create a new temporary of type VECTYPE.  */

tree
vect_create_destination_var (tree scalar_dest, tree vectype)
{
  tree vec_dest;
  const char *name;
  char *new_name;
  tree type;
  enum vect_var_kind kind;

  kind = vectype ? vect_simple_var : vect_scalar_var;
  type = vectype ? vectype : TREE_TYPE (scalar_dest);

  gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);

  name = get_name (scalar_dest);
  if (name)
    asprintf (&new_name, "%s_%u", name, SSA_NAME_VERSION (scalar_dest));
  else
    asprintf (&new_name, "_%u", SSA_NAME_VERSION (scalar_dest));
  vec_dest = vect_get_new_vect_var (type, kind, new_name);
  free (new_name);

  return vec_dest;
}
4359 /* Function vect_grouped_store_supported.
4361 Returns TRUE if interleave high and interleave low permutations
4362 are supported, and FALSE otherwise. */
4365 vect_grouped_store_supported (tree vectype
, unsigned HOST_WIDE_INT count
)
4367 enum machine_mode mode
= TYPE_MODE (vectype
);
4369 /* vect_permute_store_chain requires the group size to be equal to 3 or
4370 be a power of two. */
4371 if (count
!= 3 && exact_log2 (count
) == -1)
4373 if (dump_enabled_p ())
4374 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4375 "the size of the group of accesses"
4376 " is not a power of 2 or not eqaul to 3\n");
4380 /* Check that the permutation is supported. */
4381 if (VECTOR_MODE_P (mode
))
4383 unsigned int i
, nelt
= GET_MODE_NUNITS (mode
);
4384 unsigned char *sel
= XALLOCAVEC (unsigned char, nelt
);
4388 unsigned int j0
= 0, j1
= 0, j2
= 0;
4391 for (j
= 0; j
< 3; j
++)
4393 int nelt0
= ((3 - j
) * nelt
) % 3;
4394 int nelt1
= ((3 - j
) * nelt
+ 1) % 3;
4395 int nelt2
= ((3 - j
) * nelt
+ 2) % 3;
4396 for (i
= 0; i
< nelt
; i
++)
4398 if (3 * i
+ nelt0
< nelt
)
4399 sel
[3 * i
+ nelt0
] = j0
++;
4400 if (3 * i
+ nelt1
< nelt
)
4401 sel
[3 * i
+ nelt1
] = nelt
+ j1
++;
4402 if (3 * i
+ nelt2
< nelt
)
4403 sel
[3 * i
+ nelt2
] = 0;
4405 if (!can_vec_perm_p (mode
, false, sel
))
4407 if (dump_enabled_p ())
4408 dump_printf (MSG_MISSED_OPTIMIZATION
,
4409 "permutaion op not supported by target.\n");
4413 for (i
= 0; i
< nelt
; i
++)
4415 if (3 * i
+ nelt0
< nelt
)
4416 sel
[3 * i
+ nelt0
] = 3 * i
+ nelt0
;
4417 if (3 * i
+ nelt1
< nelt
)
4418 sel
[3 * i
+ nelt1
] = 3 * i
+ nelt1
;
4419 if (3 * i
+ nelt2
< nelt
)
4420 sel
[3 * i
+ nelt2
] = nelt
+ j2
++;
4422 if (!can_vec_perm_p (mode
, false, sel
))
4424 if (dump_enabled_p ())
4425 dump_printf (MSG_MISSED_OPTIMIZATION
,
4426 "permutaion op not supported by target.\n");
4434 /* If length is not equal to 3 then only power of 2 is supported. */
4435 gcc_assert (exact_log2 (count
) != -1);
4437 for (i
= 0; i
< nelt
/ 2; i
++)
4440 sel
[i
* 2 + 1] = i
+ nelt
;
4442 if (can_vec_perm_p (mode
, false, sel
))
4444 for (i
= 0; i
< nelt
; i
++)
4446 if (can_vec_perm_p (mode
, false, sel
))
4452 if (dump_enabled_p ())
4453 dump_printf (MSG_MISSED_OPTIMIZATION
,
4454 "permutaion op not supported by target.\n");
/* Return TRUE if vec_store_lanes is available for COUNT vectors of
   type VECTYPE.  */

bool
vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
  return vect_lanes_optab_supported_p ("vec_store_lanes",
                                       vec_store_lanes_optab,
                                       vectype, count);
}
4471 /* Function vect_permute_store_chain.
4473 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
4474 a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
4475 the data correctly for the stores. Return the final references for stores
4478 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
4479 The input is 4 vectors each containing 8 elements. We assign a number to
4480 each element, the input sequence is:
4482 1st vec: 0 1 2 3 4 5 6 7
4483 2nd vec: 8 9 10 11 12 13 14 15
4484 3rd vec: 16 17 18 19 20 21 22 23
4485 4th vec: 24 25 26 27 28 29 30 31
4487 The output sequence should be:
4489 1st vec: 0 8 16 24 1 9 17 25
4490 2nd vec: 2 10 18 26 3 11 19 27
4491 3rd vec: 4 12 20 28 5 13 21 29
4492 4th vec: 6 14 22 30 7 15 23 31
4494 i.e., we interleave the contents of the four vectors in their order.
4496 We use interleave_high/low instructions to create such output. The input of
4497 each interleave_high/low operation is two vectors:
4500 the even elements of the result vector are obtained left-to-right from the
4501 high/low elements of the first vector. The odd elements of the result are
4502 obtained left-to-right from the high/low elements of the second vector.
4503 The output of interleave_high will be: 0 4 1 5
4504 and of interleave_low: 2 6 3 7
4507 The permutation is done in log LENGTH stages. In each stage interleave_high
4508 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
4509 where the first argument is taken from the first half of DR_CHAIN and the
4510 second argument from its second half.
4513 I1: interleave_high (1st vec, 3rd vec)
4514 I2: interleave_low (1st vec, 3rd vec)
4515 I3: interleave_high (2nd vec, 4th vec)
4516 I4: interleave_low (2nd vec, 4th vec)
4518 The output for the first stage is:
4520 I1: 0 16 1 17 2 18 3 19
4521 I2: 4 20 5 21 6 22 7 23
4522 I3: 8 24 9 25 10 26 11 27
4523 I4: 12 28 13 29 14 30 15 31
4525 The output of the second stage, i.e. the final result is:
4527 I1: 0 8 16 24 1 9 17 25
4528 I2: 2 10 18 26 3 11 19 27
4529 I3: 4 12 20 28 5 13 21 29
4530 I4: 6 14 22 30 7 15 23 31. */
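/* Illustrative sketch only: for NELT == 8 the power-of-2 path below computes
   the two interleave selectors roughly as

     unsigned char sel_high[8], sel_low[8];
     for (unsigned int i = 0; i < 8 / 2; i++)
       {
         sel_high[i * 2] = i;                 // {0, 8, 1, 9, 2, 10, 3, 11}
         sel_high[i * 2 + 1] = i + 8;
         sel_low[i * 2] = i + 8 / 2;          // {4, 12, 5, 13, 6, 14, 7, 15}
         sel_low[i * 2 + 1] = i + 8 / 2 + 8;
       }

   so VEC_PERM_EXPR <vect1, vect2, sel_high> interleaves elements 0..3 of the
   two inputs (the I1/I3 stmts above) and sel_low interleaves elements 4..7
   (I2/I4).  The array names are made up for this sketch.  */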
4533 vect_permute_store_chain (vec
<tree
> dr_chain
,
4534 unsigned int length
,
4536 gimple_stmt_iterator
*gsi
,
4537 vec
<tree
> *result_chain
)
4539 tree vect1
, vect2
, high
, low
;
4541 tree vectype
= STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt
));
4542 tree perm_mask_low
, perm_mask_high
;
4544 tree perm3_mask_low
, perm3_mask_high
;
4545 unsigned int i
, n
, log_length
= exact_log2 (length
);
4546 unsigned int j
, nelt
= TYPE_VECTOR_SUBPARTS (vectype
);
4547 unsigned char *sel
= XALLOCAVEC (unsigned char, nelt
);
4549 result_chain
->quick_grow (length
);
4550 memcpy (result_chain
->address (), dr_chain
.address (),
4551 length
* sizeof (tree
));
4555 unsigned int j0
= 0, j1
= 0, j2
= 0;
4557 for (j
= 0; j
< 3; j
++)
4559 int nelt0
= ((3 - j
) * nelt
) % 3;
4560 int nelt1
= ((3 - j
) * nelt
+ 1) % 3;
4561 int nelt2
= ((3 - j
) * nelt
+ 2) % 3;
4563 for (i
= 0; i
< nelt
; i
++)
4565 if (3 * i
+ nelt0
< nelt
)
4566 sel
[3 * i
+ nelt0
] = j0
++;
4567 if (3 * i
+ nelt1
< nelt
)
4568 sel
[3 * i
+ nelt1
] = nelt
+ j1
++;
4569 if (3 * i
+ nelt2
< nelt
)
4570 sel
[3 * i
+ nelt2
] = 0;
4572 perm3_mask_low
= vect_gen_perm_mask (vectype
, sel
);
4573 gcc_assert (perm3_mask_low
!= NULL
);
4575 for (i
= 0; i
< nelt
; i
++)
4577 if (3 * i
+ nelt0
< nelt
)
4578 sel
[3 * i
+ nelt0
] = 3 * i
+ nelt0
;
4579 if (3 * i
+ nelt1
< nelt
)
4580 sel
[3 * i
+ nelt1
] = 3 * i
+ nelt1
;
4581 if (3 * i
+ nelt2
< nelt
)
4582 sel
[3 * i
+ nelt2
] = nelt
+ j2
++;
4584 perm3_mask_high
= vect_gen_perm_mask (vectype
, sel
);
4585 gcc_assert (perm3_mask_high
!= NULL
);
4587 vect1
= dr_chain
[0];
4588 vect2
= dr_chain
[1];
4590 /* Create interleaving stmt:
4591 low = VEC_PERM_EXPR <vect1, vect2,
4592 {j, nelt, *, j + 1, nelt + j + 1, *,
4593 j + 2, nelt + j + 2, *, ...}> */
4594 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_shuffle3_low");
4595 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
4598 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
4601 vect2
= dr_chain
[2];
4602 /* Create interleaving stmt:
4603 low = VEC_PERM_EXPR <vect1, vect2,
4604 {0, 1, nelt + j, 3, 4, nelt + j + 1,
4605 6, 7, nelt + j + 2, ...}> */
4606 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_shuffle3_high");
4607 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
4610 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
4611 (*result_chain
)[j
] = data_ref
;
4616 /* If length is not equal to 3 then only power of 2 is supported. */
4617 gcc_assert (exact_log2 (length
) != -1);
4619 for (i
= 0, n
= nelt
/ 2; i
< n
; i
++)
4622 sel
[i
* 2 + 1] = i
+ nelt
;
4624 perm_mask_high
= vect_gen_perm_mask (vectype
, sel
);
4625 gcc_assert (perm_mask_high
!= NULL
);
4627 for (i
= 0; i
< nelt
; i
++)
4629 perm_mask_low
= vect_gen_perm_mask (vectype
, sel
);
4630 gcc_assert (perm_mask_low
!= NULL
);
4632 for (i
= 0, n
= log_length
; i
< n
; i
++)
4634 for (j
= 0; j
< length
/2; j
++)
4636 vect1
= dr_chain
[j
];
4637 vect2
= dr_chain
[j
+length
/2];
4639 /* Create interleaving stmt:
4640 high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1,
4642 high
= make_temp_ssa_name (vectype
, NULL
, "vect_inter_high");
4644 = gimple_build_assign_with_ops (VEC_PERM_EXPR
, high
,
4645 vect1
, vect2
, perm_mask_high
);
4646 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
4647 (*result_chain
)[2*j
] = high
;
4649 /* Create interleaving stmt:
4650 low = VEC_PERM_EXPR <vect1, vect2,
4651 {nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1,
4653 low
= make_temp_ssa_name (vectype
, NULL
, "vect_inter_low");
4655 = gimple_build_assign_with_ops (VEC_PERM_EXPR
, low
,
4656 vect1
, vect2
, perm_mask_low
);
4657 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
4658 (*result_chain
)[2*j
+1] = low
;
4660 memcpy (dr_chain
.address (), result_chain
->address (),
4661 length
* sizeof (tree
));
4666 /* Function vect_setup_realignment
4668 This function is called when vectorizing an unaligned load using
4669 the dr_explicit_realign[_optimized] scheme.
4670 This function generates the following code at the loop prolog:
4673 x msq_init = *(floor(p)); # prolog load
4674 realignment_token = call target_builtin;
4676 x msq = phi (msq_init, ---)
4678 The stmts marked with x are generated only for the case of
4679 dr_explicit_realign_optimized.
4681 The code above sets up a new (vector) pointer, pointing to the first
4682 location accessed by STMT, and a "floor-aligned" load using that pointer.
4683 It also generates code to compute the "realignment-token" (if the relevant
4684 target hook was defined), and creates a phi-node at the loop-header bb
4685 whose arguments are the result of the prolog-load (created by this
4686 function) and the result of a load that takes place in the loop (to be
4687 created by the caller to this function).
4689 For the case of dr_explicit_realign_optimized:
4690 The caller to this function uses the phi-result (msq) to create the
4691 realignment code inside the loop, and sets up the missing phi argument,
4694 msq = phi (msq_init, lsq)
4695 lsq = *(floor(p')); # load in loop
4696 result = realign_load (msq, lsq, realignment_token);
4698 For the case of dr_explicit_realign:
4700 msq = *(floor(p)); # load in loop
4702 lsq = *(floor(p')); # load in loop
4703 result = realign_load (msq, lsq, realignment_token);
4706 STMT - (scalar) load stmt to be vectorized. This load accesses
4707 a memory location that may be unaligned.
4708 BSI - place where new code is to be inserted.
4709 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
4713 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
4714 target hook, if defined.
4715 Return value - the result of the loop-header phi node. */
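/* Illustrative sketch only (plain C, not the GIMPLE that is emitted): the
   explicit realignment scheme replaces one potentially unaligned vector
   load from address P by two aligned loads plus a merge,

     vec_t *pf = (vec_t *) ((uintptr_t) p & -(uintptr_t) sizeof (vec_t));
     vec_t msq = pf[0];                          // "*(floor(p))"
     vec_t lsq = pf[1];                          // next aligned vector
     vec_t va  = realign_load (msq, lsq, rt);    // rt: realignment token

   When the misalignment is loop-invariant, MSQ and RT are computed once in
   the preheader and only LSQ is reloaded per iteration (the "optimized"
   variant this function sets up); otherwise everything is computed inside
   the loop.  The names vec_t, pf, va and rt are made up for the sketch.  */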
4718 vect_setup_realignment (gimple stmt
, gimple_stmt_iterator
*gsi
,
4719 tree
*realignment_token
,
4720 enum dr_alignment_support alignment_support_scheme
,
4722 struct loop
**at_loop
)
4724 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4725 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
4726 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4727 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
4728 struct loop
*loop
= NULL
;
4730 tree scalar_dest
= gimple_assign_lhs (stmt
);
4737 tree msq_init
= NULL_TREE
;
4740 tree msq
= NULL_TREE
;
4741 gimple_seq stmts
= NULL
;
4743 bool compute_in_loop
= false;
4744 bool nested_in_vect_loop
= false;
4745 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
4746 struct loop
*loop_for_initial_load
= NULL
;
4750 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
4751 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
4754 gcc_assert (alignment_support_scheme
== dr_explicit_realign
4755 || alignment_support_scheme
== dr_explicit_realign_optimized
);
4757 /* We need to generate three things:
4758 1. the misalignment computation
4759 2. the extra vector load (for the optimized realignment scheme).
4760 3. the phi node for the two vectors from which the realignment is
4761 done (for the optimized realignment scheme). */
4763 /* 1. Determine where to generate the misalignment computation.
4765 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
4766 calculation will be generated by this function, outside the loop (in the
4767 preheader). Otherwise, INIT_ADDR had already been computed for us by the
4768 caller, inside the loop.
4770 Background: If the misalignment remains fixed throughout the iterations of
4771 the loop, then both realignment schemes are applicable, and also the
4772 misalignment computation can be done outside LOOP. This is because we are
4773 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
4774 are a multiple of VS (the Vector Size), and therefore the misalignment in
4775 different vectorized LOOP iterations is always the same.
4776 The problem arises only if the memory access is in an inner-loop nested
4777 inside LOOP, which is now being vectorized using outer-loop vectorization.
4778 This is the only case when the misalignment of the memory access may not
4779 remain fixed throughout the iterations of the inner-loop (as explained in
4780 detail in vect_supportable_dr_alignment). In this case, not only is the
4781 optimized realignment scheme not applicable, but also the misalignment
4782 computation (and generation of the realignment token that is passed to
4783 REALIGN_LOAD) have to be done inside the loop.
4785 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
4786 or not, which in turn determines if the misalignment is computed inside
4787 the inner-loop, or outside LOOP. */
4789 if (init_addr
!= NULL_TREE
|| !loop_vinfo
)
4791 compute_in_loop
= true;
4792 gcc_assert (alignment_support_scheme
== dr_explicit_realign
);
4796 /* 2. Determine where to generate the extra vector load.
4798 For the optimized realignment scheme, instead of generating two vector
4799 loads in each iteration, we generate a single extra vector load in the
4800 preheader of the loop, and in each iteration reuse the result of the
4801 vector load from the previous iteration. In case the memory access is in
4802 an inner-loop nested inside LOOP, which is now being vectorized using
4803 outer-loop vectorization, we need to determine whether this initial vector
4804 load should be generated at the preheader of the inner-loop, or can be
4805 generated at the preheader of LOOP. If the memory access has no evolution
4806 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
4807 to be generated inside LOOP (in the preheader of the inner-loop). */
4809 if (nested_in_vect_loop
)
4811 tree outerloop_step
= STMT_VINFO_DR_STEP (stmt_info
);
4812 bool invariant_in_outerloop
=
4813 (tree_int_cst_compare (outerloop_step
, size_zero_node
) == 0);
4814 loop_for_initial_load
= (invariant_in_outerloop
? loop
: loop
->inner
);
4817 loop_for_initial_load
= loop
;
4819 *at_loop
= loop_for_initial_load
;
4821 if (loop_for_initial_load
)
4822 pe
= loop_preheader_edge (loop_for_initial_load
);
4824 /* 3. For the case of the optimized realignment, create the first vector
4825 load at the loop preheader. */
4827 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
4829 /* Create msq_init = *(floor(p1)) in the loop preheader */
4831 gcc_assert (!compute_in_loop
);
4832 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
4833 ptr
= vect_create_data_ref_ptr (stmt
, vectype
, loop_for_initial_load
,
4834 NULL_TREE
, &init_addr
, NULL
, &inc
,
4836 new_temp
= copy_ssa_name (ptr
, NULL
);
4837 new_stmt
= gimple_build_assign_with_ops
4838 (BIT_AND_EXPR
, new_temp
, ptr
,
4839 build_int_cst (TREE_TYPE (ptr
),
4840 -(HOST_WIDE_INT
)TYPE_ALIGN_UNIT (vectype
)));
4841 new_bb
= gsi_insert_on_edge_immediate (pe
, new_stmt
);
4842 gcc_assert (!new_bb
);
4844 = build2 (MEM_REF
, TREE_TYPE (vec_dest
), new_temp
,
4845 build_int_cst (reference_alias_ptr_type (DR_REF (dr
)), 0));
4846 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
4847 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4848 gimple_assign_set_lhs (new_stmt
, new_temp
);
4851 new_bb
= gsi_insert_on_edge_immediate (pe
, new_stmt
);
4852 gcc_assert (!new_bb
);
4855 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
4857 msq_init
= gimple_assign_lhs (new_stmt
);
4860 /* 4. Create realignment token using a target builtin, if available.
4861 It is done either inside the containing loop, or before LOOP (as
4862 determined above). */
4864 if (targetm
.vectorize
.builtin_mask_for_load
)
4868 /* Compute INIT_ADDR - the initial address accessed by this memref. */
4871 /* Generate the INIT_ADDR computation outside LOOP. */
4872 init_addr
= vect_create_addr_base_for_vector_ref (stmt
, &stmts
,
4876 pe
= loop_preheader_edge (loop
);
4877 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, stmts
);
4878 gcc_assert (!new_bb
);
4881 gsi_insert_seq_before (gsi
, stmts
, GSI_SAME_STMT
);
4884 builtin_decl
= targetm
.vectorize
.builtin_mask_for_load ();
4885 new_stmt
= gimple_build_call (builtin_decl
, 1, init_addr
);
4887 vect_create_destination_var (scalar_dest
,
4888 gimple_call_return_type (new_stmt
));
4889 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4890 gimple_call_set_lhs (new_stmt
, new_temp
);
4892 if (compute_in_loop
)
4893 gsi_insert_before (gsi
, new_stmt
, GSI_SAME_STMT
);
4896 /* Generate the misalignment computation outside LOOP. */
4897 pe
= loop_preheader_edge (loop
);
4898 new_bb
= gsi_insert_on_edge_immediate (pe
, new_stmt
);
4899 gcc_assert (!new_bb
);
4902 *realignment_token
= gimple_call_lhs (new_stmt
);
4904 /* The result of the CALL_EXPR to this builtin is determined from
4905 the value of the parameter and no global variables are touched
4906 which makes the builtin a "const" function. Requiring the
4907 builtin to have the "const" attribute makes it unnecessary
4908 to call mark_call_clobbered. */
4909 gcc_assert (TREE_READONLY (builtin_decl
));
4912 if (alignment_support_scheme
== dr_explicit_realign
)
4915 gcc_assert (!compute_in_loop
);
4916 gcc_assert (alignment_support_scheme
== dr_explicit_realign_optimized
);
4919 /* 5. Create msq = phi <msq_init, lsq> in loop */
4921 pe
= loop_preheader_edge (containing_loop
);
4922 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
4923 msq
= make_ssa_name (vec_dest
, NULL
);
4924 phi_stmt
= create_phi_node (msq
, containing_loop
->header
);
4925 add_phi_arg (phi_stmt
, msq_init
, pe
, UNKNOWN_LOCATION
);
4931 /* Function vect_grouped_load_supported.
4933 Returns TRUE if even and odd permutations are supported,
4934 and FALSE otherwise. */
4937 vect_grouped_load_supported (tree vectype
, unsigned HOST_WIDE_INT count
)
4939 enum machine_mode mode
= TYPE_MODE (vectype
);
4941 /* vect_permute_load_chain requires the group size to be equal to 3 or
4942 be a power of two. */
4943 if (count
!= 3 && exact_log2 (count
) == -1)
4945 if (dump_enabled_p ())
4946 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4947 "the size of the group of accesses"
4948 " is not a power of 2 or not equal to 3\n");
4952 /* Check that the permutation is supported. */
4953 if (VECTOR_MODE_P (mode
))
4955 unsigned int i
, j
, nelt
= GET_MODE_NUNITS (mode
);
4956 unsigned char *sel
= XALLOCAVEC (unsigned char, nelt
);
4961 for (k
= 0; k
< 3; k
++)
4963 for (i
= 0; i
< nelt
; i
++)
4964 if (3 * i
+ k
< 2 * nelt
)
4968 if (!can_vec_perm_p (mode
, false, sel
))
4970 if (dump_enabled_p ())
4971 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4972 "shuffle of 3 loads is not supported by"
4976 for (i
= 0, j
= 0; i
< nelt
; i
++)
4977 if (3 * i
+ k
< 2 * nelt
)
4980 sel
[i
] = nelt
+ ((nelt
+ k
) % 3) + 3 * (j
++);
4981 if (!can_vec_perm_p (mode
, false, sel
))
4983 if (dump_enabled_p ())
4984 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4985 "shuffle of 3 loads is not supported by"
4994 /* If length is not equal to 3 then only power of 2 is supported. */
4995 gcc_assert (exact_log2 (count
) != -1);
4996 for (i
= 0; i
< nelt
; i
++)
4998 if (can_vec_perm_p (mode
, false, sel
))
5000 for (i
= 0; i
< nelt
; i
++)
5002 if (can_vec_perm_p (mode
, false, sel
))
5008 if (dump_enabled_p ())
5009 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5010 "extract even/odd not supported by target\n");
/* Return TRUE if vec_load_lanes is available for COUNT vectors of
   type VECTYPE.  */

bool
vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
{
  return vect_lanes_optab_supported_p ("vec_load_lanes",
                                       vec_load_lanes_optab,
                                       vectype, count);
}
5025 /* Function vect_permute_load_chain.
5027 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
5028 a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
5029 the input data correctly. Return the final references for loads in
5032 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
5033 The input is 4 vectors each containing 8 elements. We assign a number to each
5034 element, the input sequence is:
5036 1st vec: 0 1 2 3 4 5 6 7
5037 2nd vec: 8 9 10 11 12 13 14 15
5038 3rd vec: 16 17 18 19 20 21 22 23
5039 4th vec: 24 25 26 27 28 29 30 31
5041 The output sequence should be:
5043 1st vec: 0 4 8 12 16 20 24 28
5044 2nd vec: 1 5 9 13 17 21 25 29
5045 3rd vec: 2 6 10 14 18 22 26 30
5046 4th vec: 3 7 11 15 19 23 27 31
5048 i.e., the first output vector should contain the first elements of each
5049 interleaving group, etc.
5051 We use extract_even/odd instructions to create such output. The input of
5052 each extract_even/odd operation is two vectors
5056 and the output is the vector of extracted even/odd elements. The output of
5057 extract_even will be: 0 2 4 6
5058 and of extract_odd: 1 3 5 7
5061 The permutation is done in log LENGTH stages. In each stage extract_even
5062 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
5063 their order. In our example,
5065 E1: extract_even (1st vec, 2nd vec)
5066 E2: extract_odd (1st vec, 2nd vec)
5067 E3: extract_even (3rd vec, 4th vec)
5068 E4: extract_odd (3rd vec, 4th vec)
5070 The output for the first stage will be:
5072 E1: 0 2 4 6 8 10 12 14
5073 E2: 1 3 5 7 9 11 13 15
5074 E3: 16 18 20 22 24 26 28 30
5075 E4: 17 19 21 23 25 27 29 31
5077 In order to proceed and create the correct sequence for the next stage (or
5078 for the correct output, if the second stage is the last one, as in our
5079 example), we first put the output of extract_even operation and then the
5080 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
5081 The input for the second stage is:
5083 1st vec (E1): 0 2 4 6 8 10 12 14
5084 2nd vec (E3): 16 18 20 22 24 26 28 30
5085 3rd vec (E2): 1 3 5 7 9 11 13 15
5086 4th vec (E4): 17 19 21 23 25 27 29 31
5088 The output of the second stage:
5090 E1: 0 4 8 12 16 20 24 28
5091 E2: 2 6 10 14 18 22 26 30
5092 E3: 1 5 9 13 17 21 25 29
5093 E4: 3 7 11 15 19 23 27 31
5095 And RESULT_CHAIN after reordering:
5097 1st vec (E1): 0 4 8 12 16 20 24 28
5098 2nd vec (E3): 1 5 9 13 17 21 25 29
5099 3rd vec (E2): 2 6 10 14 18 22 26 30
5100 4th vec (E4): 3 7 11 15 19 23 27 31. */
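/* Illustrative sketch only: for NELT == 8 the power-of-2 path below computes
   the extract-even/odd selectors roughly as

     unsigned char sel_even[8], sel_odd[8];
     for (unsigned int i = 0; i < 8; i++)
       {
         sel_even[i] = i * 2;        // {0, 2, 4, 6, 8, 10, 12, 14}
         sel_odd[i]  = i * 2 + 1;    // {1, 3, 5, 7, 9, 11, 13, 15}
       }

   so VEC_PERM_EXPR <first_vect, second_vect, sel_even> implements the
   extract_even stmts (E1/E3) of the example above and sel_odd implements
   extract_odd (E2/E4).  The array names are made up for this sketch.  */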
5103 vect_permute_load_chain (vec
<tree
> dr_chain
,
5104 unsigned int length
,
5106 gimple_stmt_iterator
*gsi
,
5107 vec
<tree
> *result_chain
)
5109 tree data_ref
, first_vect
, second_vect
;
5110 tree perm_mask_even
, perm_mask_odd
;
5111 tree perm3_mask_low
, perm3_mask_high
;
5113 tree vectype
= STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt
));
5114 unsigned int i
, j
, log_length
= exact_log2 (length
);
5115 unsigned nelt
= TYPE_VECTOR_SUBPARTS (vectype
);
5116 unsigned char *sel
= XALLOCAVEC (unsigned char, nelt
);
5118 result_chain
->quick_grow (length
);
5119 memcpy (result_chain
->address (), dr_chain
.address (),
5120 length
* sizeof (tree
));
5126 for (k
= 0; k
< 3; k
++)
5128 for (i
= 0; i
< nelt
; i
++)
5129 if (3 * i
+ k
< 2 * nelt
)
5133 perm3_mask_low
= vect_gen_perm_mask (vectype
, sel
);
5134 gcc_assert (perm3_mask_low
!= NULL
);
5136 for (i
= 0, j
= 0; i
< nelt
; i
++)
5137 if (3 * i
+ k
< 2 * nelt
)
5140 sel
[i
] = nelt
+ ((nelt
+ k
) % 3) + 3 * (j
++);
5142 perm3_mask_high
= vect_gen_perm_mask (vectype
, sel
);
5143 gcc_assert (perm3_mask_high
!= NULL
);
5145 first_vect
= dr_chain
[0];
5146 second_vect
= dr_chain
[1];
5148 /* Create interleaving stmt (low part of):
5149 low = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
5151 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_suffle3_low");
5152 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5153 first_vect
, second_vect
,
5155 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5157 /* Create interleaving stmt (high part of):
5158 high = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
5160 first_vect
= data_ref
;
5161 second_vect
= dr_chain
[2];
5162 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_suffle3_high");
5163 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5164 first_vect
, second_vect
,
5166 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5167 (*result_chain
)[k
] = data_ref
;
5172 /* If length is not equal to 3 then only power of 2 is supported. */
5173 gcc_assert (exact_log2 (length
) != -1);
5175 for (i
= 0; i
< nelt
; ++i
)
5177 perm_mask_even
= vect_gen_perm_mask (vectype
, sel
);
5178 gcc_assert (perm_mask_even
!= NULL
);
5180 for (i
= 0; i
< nelt
; ++i
)
5182 perm_mask_odd
= vect_gen_perm_mask (vectype
, sel
);
5183 gcc_assert (perm_mask_odd
!= NULL
);
5185 for (i
= 0; i
< log_length
; i
++)
5187 for (j
= 0; j
< length
; j
+= 2)
5189 first_vect
= dr_chain
[j
];
5190 second_vect
= dr_chain
[j
+1];
5192 /* data_ref = permute_even (first_data_ref, second_data_ref); */
5193 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_perm_even");
5194 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5195 first_vect
, second_vect
,
5197 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5198 (*result_chain
)[j
/2] = data_ref
;
5200 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
5201 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_perm_odd");
5202 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5203 first_vect
, second_vect
,
5205 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5206 (*result_chain
)[j
/2+length
/2] = data_ref
;
5208 memcpy (dr_chain
.address (), result_chain
->address (),
5209 length
* sizeof (tree
));
5214 /* Function vect_shift_permute_load_chain.
5216 Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate
5217 sequence of stmts to reorder the input data accordingly.
5218 Return the final references for loads in RESULT_CHAIN.
5219 Return true if successful, false otherwise.
5221 E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
5222 The input is 3 vectors each containing 8 elements. We assign a
5223 number to each element, the input sequence is:
5225 1st vec: 0 1 2 3 4 5 6 7
5226 2nd vec: 8 9 10 11 12 13 14 15
5227 3rd vec: 16 17 18 19 20 21 22 23
5229 The output sequence should be:
5231 1st vec: 0 3 6 9 12 15 18 21
5232 2nd vec: 1 4 7 10 13 16 19 22
5233 3rd vec: 2 5 8 11 14 17 20 23
5235 We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
5237 First we shuffle all 3 vectors to get correct elements order:
5239 1st vec: ( 0 3 6) ( 1 4 7) ( 2 5)
5240 2nd vec: ( 8 11 14) ( 9 12 15) (10 13)
5241 3rd vec: (16 19 22) (17 20 23) (18 21)
5243 Next we unite and shift vector 3 times:
5246 shift right by 6 the concatenation of:
5247 "1st vec" and "2nd vec"
5248 ( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13)
5249 "2nd vec" and "3rd vec"
5250 ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
5251 "3rd vec" and "1st vec"
5252 (16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5)
5255 So that now new vectors are:
5257 1st vec: ( 2 5) ( 8 11 14) ( 9 12 15)
5258 2nd vec: (10 13) (16 19 22) (17 20 23)
5259 3rd vec: (18 21) ( 0 3 6) ( 1 4 7)
5262 shift right by 5 the concatenation of:
5263 "1st vec" and "3rd vec"
5264 ( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7)
5265 "2nd vec" and "1st vec"
5266 (10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15)
5267 "3rd vec" and "2nd vec"
5268 (18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23)
5271 So that now new vectors are:
5273 1st vec: ( 9 12 15) (18 21) ( 0 3 6)
5274 2nd vec: (17 20 23) ( 2 5) ( 8 11 14)
5275 3rd vec: ( 1 4 7) (10 13) (16 19 22) READY
5278 shift right by 5 the concatenation of:
5279 "1st vec" and "1st vec"
5280 ( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6)
5281 shift right by 3 the concatenation of:
5282 "2nd vec" and "2nd vec"
5283 (17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14)
5286 So that now all vectors are READY:
5287 1st vec: ( 0 3 6) ( 9 12 15) (18 21)
5288 2nd vec: ( 2 5) ( 8 11 14) (17 20 23)
5289 3rd vec: ( 1 4 7) (10 13) (16 19 22)
5291 This algorithm is faster than one in vect_permute_load_chain if:
5292 1. "shift of a concatination" is faster than general permutation.
5294 2. The TARGET machine can't execute vector instructions in parallel.
5295 This is because each step of the algorithm depends on previous.
5296 The algorithm in vect_permute_load_chain is much more parallel.
5298 The algorithm is applicable only for LOAD CHAIN LENGTH less than VF.  */
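/* Illustrative sketch only, for the LENGTH == 2 path below with NELT == 8
   and two interleaved fields x and y:

     input:    in0 = { x0 y0 x1 y1 x2 y2 x3 y3 }
               in1 = { x4 y4 x5 y5 x6 y6 x7 y7 }

     shuffle:  v0 = VEC_PERM_EXPR <in0, in0, {0 2 4 6 1 3 5 7}>
                  = { x0 x1 x2 x3 y0 y1 y2 y3 }
               v1 = VEC_PERM_EXPR <in1, in1, {1 3 5 7 0 2 4 6}>
                  = { y4 y5 y6 y7 x4 x5 x6 x7 }
     shift:    VEC_PERM_EXPR <v0, v1, {4 5 6 7 8 9 10 11}>
                  = { y0 y1 y2 y3 y4 y5 y6 y7 }
     select:   VEC_PERM_EXPR <v0, v1, {0 1 2 3 12 13 14 15}>
                  = { x0 x1 x2 x3 x4 x5 x6 x7 }

   i.e. one vector per field from two shuffles, one shift of a concatenation
   and one select.  The names in0/in1/v0/v1 and x/y are made up for this
   sketch; the selector values match the masks generated below for vector
   length 8.  */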
5302 vect_shift_permute_load_chain (vec
<tree
> dr_chain
,
5303 unsigned int length
,
5305 gimple_stmt_iterator
*gsi
,
5306 vec
<tree
> *result_chain
)
5308 tree vect
[3], vect_shift
[3], data_ref
, first_vect
, second_vect
;
5309 tree perm2_mask1
, perm2_mask2
, perm3_mask
;
5310 tree select_mask
, shift1_mask
, shift2_mask
, shift3_mask
, shift4_mask
;
5313 tree vectype
= STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt
));
5315 unsigned nelt
= TYPE_VECTOR_SUBPARTS (vectype
);
5316 unsigned char *sel
= XALLOCAVEC (unsigned char, nelt
);
5317 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5318 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5320 result_chain
->quick_grow (length
);
5321 memcpy (result_chain
->address (), dr_chain
.address (),
5322 length
* sizeof (tree
));
5324 if (length
== 2 && LOOP_VINFO_VECT_FACTOR (loop_vinfo
) > 4)
5326 for (i
= 0; i
< nelt
/ 2; ++i
)
5328 for (i
= 0; i
< nelt
/ 2; ++i
)
5329 sel
[nelt
/ 2 + i
] = i
* 2 + 1;
5330 if (!can_vec_perm_p (TYPE_MODE (vectype
), false, sel
))
5332 if (dump_enabled_p ())
5333 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5334 "shuffle of 2 fields structure is not \
5335 supported by target\n");
5338 perm2_mask1
= vect_gen_perm_mask (vectype
, sel
);
5339 gcc_assert (perm2_mask1
!= NULL
);
5341 for (i
= 0; i
< nelt
/ 2; ++i
)
5343 for (i
= 0; i
< nelt
/ 2; ++i
)
5344 sel
[nelt
/ 2 + i
] = i
* 2;
5345 if (!can_vec_perm_p (TYPE_MODE (vectype
), false, sel
))
5347 if (dump_enabled_p ())
5348 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5349 "shuffle of 2 fields structure is not \
5350 supported by target\n");
5353 perm2_mask2
= vect_gen_perm_mask (vectype
, sel
);
5354 gcc_assert (perm2_mask2
!= NULL
);
5356 /* Generating permutation constant to shift all elements.
5357 For vector length 8 it is {4 5 6 7 8 9 10 11}. */
5358 for (i
= 0; i
< nelt
; i
++)
5359 sel
[i
] = nelt
/ 2 + i
;
5360 if (!can_vec_perm_p (TYPE_MODE (vectype
), false, sel
))
5362 if (dump_enabled_p ())
5363 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5364 "shift permutation is not supported by target\n");
5367 shift1_mask
= vect_gen_perm_mask (vectype
, sel
);
5368 gcc_assert (shift1_mask
!= NULL
);
5370 /* Generating permutation constant to select vector from 2.
5371 For vector length 8 it is {0 1 2 3 12 13 14 15}. */
5372 for (i
= 0; i
< nelt
/ 2; i
++)
5374 for (i
= nelt
/ 2; i
< nelt
; i
++)
5376 if (!can_vec_perm_p (TYPE_MODE (vectype
), false, sel
))
5378 if (dump_enabled_p ())
5379 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5380 "select is not supported by target\n");
5383 select_mask
= vect_gen_perm_mask (vectype
, sel
);
5384 gcc_assert (select_mask
!= NULL
);
5386 first_vect
= dr_chain
[0];
5387 second_vect
= dr_chain
[1];
5389 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_shuffle2");
5390 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5391 first_vect
, first_vect
,
5393 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5396 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_shuffle2");
5397 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5398 second_vect
, second_vect
,
5400 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5403 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_shift");
5404 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5407 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5408 (*result_chain
)[1] = data_ref
;
5410 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_select");
5411 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5414 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5415 (*result_chain
)[0] = data_ref
;
5419 if (length
== 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo
) > 2)
5421 unsigned int k
= 0, l
= 0;
5423 /* Generating permutation constant to get all elements in right order.
5424 For vector length 8 it is {0 3 6 1 4 7 2 5}. */
5425 for (i
= 0; i
< nelt
; i
++)
5427 if (3 * k
+ (l
% 3) >= nelt
)
5430 l
+= (3 - (nelt
% 3));
5432 sel
[i
] = 3 * k
+ (l
% 3);
5435 if (!can_vec_perm_p (TYPE_MODE (vectype
), false, sel
))
5437 if (dump_enabled_p ())
5438 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5439 "shuffle of 3 fields structure is not \
5440 supported by target\n");
5443 perm3_mask
= vect_gen_perm_mask (vectype
, sel
);
5444 gcc_assert (perm3_mask
!= NULL
);
5446 /* Generating permutation constant to shift all elements.
5447 For vector length 8 it is {6 7 8 9 10 11 12 13}. */
5448 for (i
= 0; i
< nelt
; i
++)
5449 sel
[i
] = 2 * (nelt
/ 3) + (nelt
% 3) + i
;
5450 if (!can_vec_perm_p (TYPE_MODE (vectype
), false, sel
))
5452 if (dump_enabled_p ())
5453 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5454 "shift permutation is not supported by target\n");
5457 shift1_mask
= vect_gen_perm_mask (vectype
, sel
);
5458 gcc_assert (shift1_mask
!= NULL
);
5460 /* Generating permutation constant to shift all elements.
5461 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5462 for (i
= 0; i
< nelt
; i
++)
5463 sel
[i
] = 2 * (nelt
/ 3) + 1 + i
;
5464 if (!can_vec_perm_p (TYPE_MODE (vectype
), false, sel
))
5466 if (dump_enabled_p ())
5467 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5468 "shift permutation is not supported by target\n");
5471 shift2_mask
= vect_gen_perm_mask (vectype
, sel
);
5472 gcc_assert (shift2_mask
!= NULL
);
5474 /* Generating permutation constant to shift all elements.
5475 For vector length 8 it is {3 4 5 6 7 8 9 10}. */
5476 for (i
= 0; i
< nelt
; i
++)
5477 sel
[i
] = (nelt
/ 3) + (nelt
% 3) / 2 + i
;
5478 if (!can_vec_perm_p (TYPE_MODE (vectype
), false, sel
))
5480 if (dump_enabled_p ())
5481 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5482 "shift permutation is not supported by target\n");
5485 shift3_mask
= vect_gen_perm_mask (vectype
, sel
);
5486 gcc_assert (shift3_mask
!= NULL
);
5488 /* Generating permutation constant to shift all elements.
5489 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5490 for (i
= 0; i
< nelt
; i
++)
5491 sel
[i
] = 2 * (nelt
/ 3) + (nelt
% 3) / 2 + i
;
5492 if (!can_vec_perm_p (TYPE_MODE (vectype
), false, sel
))
5494 if (dump_enabled_p ())
5495 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5496 "shift permutation is not supported by target\n");
5499 shift4_mask
= vect_gen_perm_mask (vectype
, sel
);
5500 gcc_assert (shift4_mask
!= NULL
);
5502 for (k
= 0; k
< 3; k
++)
5504 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_suffle3");
5505 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5506 dr_chain
[k
], dr_chain
[k
],
5508 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5512 for (k
= 0; k
< 3; k
++)
5514 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_shift1");
5515 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5519 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5520 vect_shift
[k
] = data_ref
;
5523 for (k
= 0; k
< 3; k
++)
5525 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_shift2");
5526 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5527 vect_shift
[(4 - k
) % 3],
5528 vect_shift
[(3 - k
) % 3],
5530 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5534 (*result_chain
)[3 - (nelt
% 3)] = vect
[2];
5536 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_shift3");
5537 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5540 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5541 (*result_chain
)[nelt
% 3] = data_ref
;
5543 data_ref
= make_temp_ssa_name (vectype
, NULL
, "vect_shift4");
5544 perm_stmt
= gimple_build_assign_with_ops (VEC_PERM_EXPR
, data_ref
,
5547 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5548 (*result_chain
)[0] = data_ref
;
/* Function vect_transform_grouped_load.

   Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
   to perform their permutation and ascribe the result vectorized statements to
   the scalar statements.  */

void
vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size,
                             gimple_stmt_iterator *gsi)
{
  enum machine_mode mode;
  vec<tree> result_chain = vNULL;

  /* DR_CHAIN contains input data-refs that are a part of the interleaving.
     RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
     vectors, that are ready for vector computation.  */
  result_chain.create (size);

  /* If the reassociation width for the vector type is 2 or greater, the
     target machine can execute 2 or more vector instructions in parallel.
     Otherwise try to get a chain for the load group using
     vect_shift_permute_load_chain.  */
  mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
  if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
      || !vect_shift_permute_load_chain (dr_chain, size, stmt,
                                         gsi, &result_chain))
    vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
  vect_record_grouped_load_vectors (stmt, result_chain);
  result_chain.release ();
}
5585 /* RESULT_CHAIN contains the output of a group of grouped loads that were
5586 generated as part of the vectorization of STMT. Assign the statement
5587 for each vector to the associated scalar statement. */
5590 vect_record_grouped_load_vectors (gimple stmt
, vec
<tree
> result_chain
)
5592 gimple first_stmt
= GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
));
5593 gimple next_stmt
, new_stmt
;
5594 unsigned int i
, gap_count
;
5597 /* Put a permuted data-ref in the VECTORIZED_STMT field.
5598 Since we scan the chain starting from its first node, their order
5599 corresponds to the order of data-refs in RESULT_CHAIN.
5600 next_stmt
= first_stmt
;
5602 FOR_EACH_VEC_ELT (result_chain
, i
, tmp_data_ref
)
5607 /* Skip the gaps. Loads created for the gaps will be removed by dead
5608 code elimination pass later. No need to check for the first stmt in
5609 the group, since it always exists.
5610 GROUP_GAP is the number of steps in elements from the previous
5611 access (if there is no gap GROUP_GAP is 1). We skip loads that
5612 correspond to the gaps. */
5613 if (next_stmt
!= first_stmt
5614 && gap_count
< GROUP_GAP (vinfo_for_stmt (next_stmt
)))
5622 new_stmt
= SSA_NAME_DEF_STMT (tmp_data_ref
);
5623 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
5624 copies, and we put the new vector statement in the first available
5626 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt
)))
5627 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt
)) = new_stmt
;
5630 if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt
)))
5633 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt
));
5635 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt
));
5638 prev_stmt
= rel_stmt
;
5640 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt
));
5643 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt
)) =
5648 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
5650 /* If NEXT_STMT accesses the same DR as the previous statement,
5651 put the same TMP_DATA_REF as its vectorized statement; otherwise
5652 get the next data-ref from RESULT_CHAIN. */
5653 if (!next_stmt
|| !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt
)))
5659 /* Function vect_force_dr_alignment_p.
5661 Returns whether the alignment of a DECL can be forced to be aligned
5662 on ALIGNMENT bit boundary. */
5665 vect_can_force_dr_alignment_p (const_tree decl
, unsigned int alignment
)
5667 if (TREE_CODE (decl
) != VAR_DECL
)
5670 /* With -fno-toplevel-reorder we may have already output the constant. */
5671 if (TREE_ASM_WRITTEN (decl
))
5674 /* Constant pool entries may be shared and not properly merged by LTO. */
5675 if (DECL_IN_CONSTANT_POOL (decl
))
5678 if (TREE_PUBLIC (decl
) || DECL_EXTERNAL (decl
))
5682 /* We cannot change alignment of symbols that may bind to symbols
5683 in other translation unit that may contain a definition with lower
5685 if (!decl_binds_to_current_def_p (decl
))
5688 /* When compiling partition, be sure the symbol is not output by other
5690 snode
= symtab_get_node (decl
);
5692 && (snode
->in_other_partition
5693 || symtab_get_symbol_partitioning_class (snode
) == SYMBOL_DUPLICATE
))
5697 /* Do not override the alignment as specified by the ABI when the used
5698 attribute is set. */
5699 if (DECL_PRESERVE_P (decl
))
5702 /* Do not override explicit alignment set by the user when an explicit
5703 section name is also used. This is a common idiom used by many
5704 software projects. */
5705 if (TREE_STATIC (decl
)
5706 && DECL_SECTION_NAME (decl
) != NULL
5707 && !symtab_get_node (decl
)->implicit_section
)
5710 /* If symbol is an alias, we need to check that target is OK. */
5711 if (TREE_STATIC (decl
))
5713 tree target
= symtab_alias_ultimate_target (symtab_get_node (decl
))->decl
;
5716 if (DECL_PRESERVE_P (target
))
5722 if (TREE_STATIC (decl
))
5723 return (alignment
<= MAX_OFILE_ALIGNMENT
);
5725 return (alignment
<= MAX_STACK_ALIGNMENT
);
5729 /* Return whether the data reference DR is supported with respect to its
5731 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
5732 it is aligned, i.e., check if it is possible to vectorize it with different
5735 enum dr_alignment_support
5736 vect_supportable_dr_alignment (struct data_reference
*dr
,
5737 bool check_aligned_accesses
)
5739 gimple stmt
= DR_STMT (dr
);
5740 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5741 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
5742 enum machine_mode mode
= TYPE_MODE (vectype
);
5743 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5744 struct loop
*vect_loop
= NULL
;
5745 bool nested_in_vect_loop
= false;
5747 if (aligned_access_p (dr
) && !check_aligned_accesses
)
5750 /* For now assume all conditional loads/stores support unaligned
5751 access without any special code. */
5752 if (is_gimple_call (stmt
)
5753 && gimple_call_internal_p (stmt
)
5754 && (gimple_call_internal_fn (stmt
) == IFN_MASK_LOAD
5755 || gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
))
5756 return dr_unaligned_supported
;
5760 vect_loop
= LOOP_VINFO_LOOP (loop_vinfo
);
5761 nested_in_vect_loop
= nested_in_vect_loop_p (vect_loop
, stmt
);
5764 /* Possibly unaligned access. */
5766 /* We can choose between using the implicit realignment scheme (generating
5767 a misaligned_move stmt) and the explicit realignment scheme (generating
5768 aligned loads with a REALIGN_LOAD). There are two variants to the
5769 explicit realignment scheme: optimized, and unoptimized.
5770 We can optimize the realignment only if the step between consecutive
5771 vector loads is equal to the vector size. Since the vector memory
5772 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
5773 is guaranteed that the misalignment amount remains the same throughout the
5774 execution of the vectorized loop. Therefore, we can create the
5775 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
5776 at the loop preheader.
5778 However, in the case of outer-loop vectorization, when vectorizing a
5779 memory access in the inner-loop nested within the LOOP that is now being
5780 vectorized, while it is guaranteed that the misalignment of the
5781 vectorized memory access will remain the same in different outer-loop
5782 iterations, it is *not* guaranteed that it will remain the same throughout
5783 the execution of the inner-loop. This is because the inner-loop advances
5784 with the original scalar step (and not in steps of VS). If the inner-loop
5785 step happens to be a multiple of VS, then the misalignment remains fixed
5786 and we can use the optimized realignment scheme. For example:
5792 When vectorizing the i-loop in the above example, the step between
5793 consecutive vector loads is 1, and so the misalignment does not remain
5794 fixed across the execution of the inner-loop, and the realignment cannot
5795 be optimized (as illustrated in the following pseudo vectorized loop):
5797 for (i=0; i<N; i+=4)
5798 for (j=0; j<M; j++){
5799 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
5800 // when j is {0,1,2,3,4,5,6,7,...} respectively.
5801 // (assuming that we start from an aligned address).
5804 We therefore have to use the unoptimized realignment scheme:
5806 for (i=0; i<N; i+=4)
5807 for (j=k; j<M; j+=4)
5808 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
5809 // that the misalignment of the initial address is
5812 The loop can then be vectorized as follows:
5814 for (k=0; k<4; k++){
5815 rt = get_realignment_token (&vp[k]);
5816 for (i=0; i<N; i+=4){
5818 for (j=k; j<M; j+=4){
5820 va = REALIGN_LOAD <v1,v2,rt>;
5827 if (DR_IS_READ (dr
))
5829 bool is_packed
= false;
5830 tree type
= (TREE_TYPE (DR_REF (dr
)));
5832 if (optab_handler (vec_realign_load_optab
, mode
) != CODE_FOR_nothing
5833 && (!targetm
.vectorize
.builtin_mask_for_load
5834 || targetm
.vectorize
.builtin_mask_for_load ()))
5836 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
5837 if ((nested_in_vect_loop
5838 && (TREE_INT_CST_LOW (DR_STEP (dr
))
5839 != GET_MODE_SIZE (TYPE_MODE (vectype
))))
5841 return dr_explicit_realign
;
5843 return dr_explicit_realign_optimized
;
5845 if (!known_alignment_for_access_p (dr
))
5846 is_packed
= not_size_aligned (DR_REF (dr
));
5848 if ((TYPE_USER_ALIGN (type
) && !is_packed
)
5849 || targetm
.vectorize
.
5850 support_vector_misalignment (mode
, type
,
5851 DR_MISALIGNMENT (dr
), is_packed
))
5852 /* Can't software pipeline the loads, but can at least do them. */
5853 return dr_unaligned_supported
;
5857 bool is_packed
= false;
5858 tree type
= (TREE_TYPE (DR_REF (dr
)));
5860 if (!known_alignment_for_access_p (dr
))
5861 is_packed
= not_size_aligned (DR_REF (dr
));
5863 if ((TYPE_USER_ALIGN (type
) && !is_packed
)
5864 || targetm
.vectorize
.
5865 support_vector_misalignment (mode
, type
,
5866 DR_MISALIGNMENT (dr
), is_packed
))
5867 return dr_unaligned_supported
;
5871 return dr_unaligned_unsupported
;