1 /* Data References Analysis and Manipulation Utilities for Vectorization.
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "tree-pretty-print.h"
32 #include "gimple-pretty-print.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "tree-chrec.h"
37 #include "tree-scalar-evolution.h"
38 #include "tree-vectorizer.h"
39 #include "diagnostic-core.h"
40 #include "toplev.h"
41
42 /* Need to include rtl.h, expr.h, etc. for optabs. */
43 #include "expr.h"
44 #include "optabs.h"
45
46 /* Return the smallest scalar part of STMT.
47 This is used to determine the vectype of the stmt. We generally set the
48 vectype according to the type of the result (lhs). For stmts whose
49 result-type is different than the type of the arguments (e.g., demotion,
50 promotion), vectype will be reset appropriately (later). Note that we have
51 to visit the smallest datatype in this function, because that determines the
52 VF. If the smallest datatype in the loop is present only as the rhs of a
53 promotion operation - we'd miss it.
54 Such a case, where a variable of this datatype does not appear in the lhs
55 anywhere in the loop, can only occur if it's an invariant: e.g.:
56 'int_x = (int) short_inv', which we'd expect to have been optimized away by
57 invariant motion. However, we cannot rely on invariant motion to always take
58 invariants out of the loop, and so in the case of promotion we also have to
59 check the rhs.
60 LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
61 types. */
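/* Illustration only (a hypothetical stmt, not from any testcase): assuming a
   4-byte int and a 2-byte short, for the promotion 'int_x = (int) short_y'
   the lhs occupies 4 bytes and the rhs 2 bytes, so the function returns the
   short type and sets *LHS_SIZE_UNIT to 4 and *RHS_SIZE_UNIT to 2; the VF is
   then derived from the smaller type.  */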
62
63 tree
64 vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
65 HOST_WIDE_INT *rhs_size_unit)
66 {
67 tree scalar_type = gimple_expr_type (stmt);
68 HOST_WIDE_INT lhs, rhs;
69
70 lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
71
72 if (is_gimple_assign (stmt)
73 && (gimple_assign_cast_p (stmt)
74 || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
75 || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
76 {
77 tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
78
79 rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
80 if (rhs < lhs)
81 scalar_type = rhs_type;
82 }
83
84 *lhs_size_unit = lhs;
85 *rhs_size_unit = rhs;
86 return scalar_type;
87 }
88
89
90 /* Find the place of the data-ref in STMT in the interleaving chain that starts
91 from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */
92
93 int
94 vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
95 {
96 gimple next_stmt = first_stmt;
97 int result = 0;
98
99 if (first_stmt != DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)))
100 return -1;
101
102 while (next_stmt && next_stmt != stmt)
103 {
104 result++;
105 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
106 }
107
108 if (next_stmt)
109 return result;
110 else
111 return -1;
112 }
113
114
115 /* Function vect_insert_into_interleaving_chain.
116
117 Insert DRA into the interleaving chain of DRB according to DRA's INIT. */
118
119 static void
120 vect_insert_into_interleaving_chain (struct data_reference *dra,
121 struct data_reference *drb)
122 {
123 gimple prev, next;
124 tree next_init;
125 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
126 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
127
128 prev = DR_GROUP_FIRST_DR (stmtinfo_b);
129 next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
130 while (next)
131 {
132 next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
133 if (tree_int_cst_compare (next_init, DR_INIT (dra)) > 0)
134 {
135 /* Insert here. */
136 DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = DR_STMT (dra);
137 DR_GROUP_NEXT_DR (stmtinfo_a) = next;
138 return;
139 }
140 prev = next;
141 next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
142 }
143
144 /* We got to the end of the list. Insert here. */
145 DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = DR_STMT (dra);
146 DR_GROUP_NEXT_DR (stmtinfo_a) = NULL;
147 }
148
149
150 /* Function vect_update_interleaving_chain.
151
152 For two data-refs DRA and DRB that are a part of a chain interleaved data
153 accesses, update the interleaving chain. DRB's INIT is smaller than DRA's.
154
155 There are four possible cases:
156 1. New stmts - both DRA and DRB are not a part of any chain:
157 FIRST_DR = DRB
158 NEXT_DR (DRB) = DRA
159 2. DRB is a part of a chain and DRA is not:
160 no need to update FIRST_DR
161 no need to insert DRB
162 insert DRA according to init
163 3. DRA is a part of a chain and DRB is not:
164 if (init of FIRST_DR > init of DRB)
165 FIRST_DR = DRB
166 NEXT(FIRST_DR) = previous FIRST_DR
167 else
168 insert DRB according to its init
169 4. both DRA and DRB are in some interleaving chains:
170 choose the chain with the smallest init of FIRST_DR
171 insert the nodes of the second chain into the first one. */
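/* A hypothetical illustration of case 2 (assuming 4-byte elements): if a[4i]
   and a[4i+1] are already chained with DR_INITs 0 and 4, and a[4i+2]
   (DR_INIT 8) is not yet part of any chain, the new ref keeps the existing
   FIRST_DR and is linked in after a[4i+1], since the chain is kept sorted by
   increasing DR_INIT.  */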
172
173 static void
174 vect_update_interleaving_chain (struct data_reference *drb,
175 struct data_reference *dra)
176 {
177 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
178 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
179 tree next_init, init_dra_chain, init_drb_chain;
180 gimple first_a, first_b;
181 tree node_init;
182 gimple node, prev, next, first_stmt;
183
184 /* 1. New stmts - both DRA and DRB are not a part of any chain. */
185 if (!DR_GROUP_FIRST_DR (stmtinfo_a) && !DR_GROUP_FIRST_DR (stmtinfo_b))
186 {
187 DR_GROUP_FIRST_DR (stmtinfo_a) = DR_STMT (drb);
188 DR_GROUP_FIRST_DR (stmtinfo_b) = DR_STMT (drb);
189 DR_GROUP_NEXT_DR (stmtinfo_b) = DR_STMT (dra);
190 return;
191 }
192
193 /* 2. DRB is a part of a chain and DRA is not. */
194 if (!DR_GROUP_FIRST_DR (stmtinfo_a) && DR_GROUP_FIRST_DR (stmtinfo_b))
195 {
196 DR_GROUP_FIRST_DR (stmtinfo_a) = DR_GROUP_FIRST_DR (stmtinfo_b);
197 /* Insert DRA into the chain of DRB. */
198 vect_insert_into_interleaving_chain (dra, drb);
199 return;
200 }
201
202 /* 3. DRA is a part of a chain and DRB is not. */
203 if (DR_GROUP_FIRST_DR (stmtinfo_a) && !DR_GROUP_FIRST_DR (stmtinfo_b))
204 {
205 gimple old_first_stmt = DR_GROUP_FIRST_DR (stmtinfo_a);
206 tree init_old = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (
207 old_first_stmt)));
208 gimple tmp;
209
210 if (tree_int_cst_compare (init_old, DR_INIT (drb)) > 0)
211 {
212 /* DRB's init is smaller than the init of the stmt previously marked
213 as the first stmt of the interleaving chain of DRA. Therefore, we
214 update FIRST_STMT and put DRB in the head of the list. */
215 DR_GROUP_FIRST_DR (stmtinfo_b) = DR_STMT (drb);
216 DR_GROUP_NEXT_DR (stmtinfo_b) = old_first_stmt;
217
218 /* Update all the stmts in the list to point to the new FIRST_STMT. */
219 tmp = old_first_stmt;
220 while (tmp)
221 {
222 DR_GROUP_FIRST_DR (vinfo_for_stmt (tmp)) = DR_STMT (drb);
223 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (tmp));
224 }
225 }
226 else
227 {
228 /* Insert DRB in the list of DRA. */
229 vect_insert_into_interleaving_chain (drb, dra);
230 DR_GROUP_FIRST_DR (stmtinfo_b) = DR_GROUP_FIRST_DR (stmtinfo_a);
231 }
232 return;
233 }
234
235 /* 4. both DRA and DRB are in some interleaving chains. */
236 first_a = DR_GROUP_FIRST_DR (stmtinfo_a);
237 first_b = DR_GROUP_FIRST_DR (stmtinfo_b);
238 if (first_a == first_b)
239 return;
240 init_dra_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_a)));
241 init_drb_chain = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_b)));
242
243 if (tree_int_cst_compare (init_dra_chain, init_drb_chain) > 0)
244 {
245 /* Insert the nodes of DRA chain into the DRB chain.
246 After inserting a node, continue from this node of the DRB chain (don't
 247          start from the beginning).  */
248 node = DR_GROUP_FIRST_DR (stmtinfo_a);
249 prev = DR_GROUP_FIRST_DR (stmtinfo_b);
250 first_stmt = first_b;
251 }
252 else
253 {
254 /* Insert the nodes of DRB chain into the DRA chain.
255 After inserting a node, continue from this node of the DRA chain (don't
 256          start from the beginning).  */
257 node = DR_GROUP_FIRST_DR (stmtinfo_b);
258 prev = DR_GROUP_FIRST_DR (stmtinfo_a);
259 first_stmt = first_a;
260 }
261
262 while (node)
263 {
264 node_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (node)));
265 next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
266 while (next)
267 {
268 next_init = DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
269 if (tree_int_cst_compare (next_init, node_init) > 0)
270 {
271 /* Insert here. */
272 DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = node;
273 DR_GROUP_NEXT_DR (vinfo_for_stmt (node)) = next;
274 prev = node;
275 break;
276 }
277 prev = next;
278 next = DR_GROUP_NEXT_DR (vinfo_for_stmt (prev));
279 }
280 if (!next)
281 {
282 /* We got to the end of the list. Insert here. */
283 DR_GROUP_NEXT_DR (vinfo_for_stmt (prev)) = node;
284 DR_GROUP_NEXT_DR (vinfo_for_stmt (node)) = NULL;
285 prev = node;
286 }
287 DR_GROUP_FIRST_DR (vinfo_for_stmt (node)) = first_stmt;
288 node = DR_GROUP_NEXT_DR (vinfo_for_stmt (node));
289 }
290 }
291
292
293 /* Function vect_equal_offsets.
294
295 Check if OFFSET1 and OFFSET2 are identical expressions. */
296
297 static bool
298 vect_equal_offsets (tree offset1, tree offset2)
299 {
300 bool res;
301
302 STRIP_NOPS (offset1);
303 STRIP_NOPS (offset2);
304
305 if (offset1 == offset2)
306 return true;
307
308 if (TREE_CODE (offset1) != TREE_CODE (offset2)
309 || (!BINARY_CLASS_P (offset1) && !UNARY_CLASS_P (offset1)))
310 return false;
311
312 res = vect_equal_offsets (TREE_OPERAND (offset1, 0),
313 TREE_OPERAND (offset2, 0));
314
315 if (!res || !BINARY_CLASS_P (offset1))
316 return res;
317
318 res = vect_equal_offsets (TREE_OPERAND (offset1, 1),
319 TREE_OPERAND (offset2, 1));
320
321 return res;
322 }
323
324
325 /* Check dependence between DRA and DRB for basic block vectorization. */
326
327 static bool
328 vect_drs_dependent_in_basic_block (struct data_reference *dra,
329 struct data_reference *drb)
330 {
331 HOST_WIDE_INT type_size_a, type_size_b, init_a, init_b;
332 gimple earlier_stmt;
333
334 /* We only call this function for pairs of loads and stores, but we verify
335 it here. */
336 if (DR_IS_READ (dra) == DR_IS_READ (drb))
337 {
338 if (DR_IS_READ (dra))
339 return false;
340 else
341 return true;
342 }
343
 344   /* Check that the data-refs have the same bases and offsets.  If not, we can't
345 determine if they are dependent. */
346 if ((DR_BASE_ADDRESS (dra) != DR_BASE_ADDRESS (drb)
347 && (TREE_CODE (DR_BASE_ADDRESS (dra)) != ADDR_EXPR
348 || TREE_CODE (DR_BASE_ADDRESS (drb)) != ADDR_EXPR
349 || TREE_OPERAND (DR_BASE_ADDRESS (dra), 0)
350 != TREE_OPERAND (DR_BASE_ADDRESS (drb),0)))
351 || !vect_equal_offsets (DR_OFFSET (dra), DR_OFFSET (drb)))
352 return true;
353
354 /* Check the types. */
355 type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
356 type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
357
358 if (type_size_a != type_size_b
359 || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
360 TREE_TYPE (DR_REF (drb))))
361 return true;
362
363 init_a = TREE_INT_CST_LOW (DR_INIT (dra));
364 init_b = TREE_INT_CST_LOW (DR_INIT (drb));
365
366 /* Two different locations - no dependence. */
367 if (init_a != init_b)
368 return false;
369
370 /* We have a read-write dependence. Check that the load is before the store.
371 When we vectorize basic blocks, vector load can be only before
372 corresponding scalar load, and vector store can be only after its
 373      corresponding scalar store.  So the order of the accesses is preserved in
374 case the load is before the store. */
375 earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
376 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
377 return false;
378
379 return true;
380 }
381
382
383 /* Function vect_check_interleaving.
384
385 Check if DRA and DRB are a part of interleaving. In case they are, insert
386 DRA and DRB in an interleaving chain. */
387
388 static bool
389 vect_check_interleaving (struct data_reference *dra,
390 struct data_reference *drb)
391 {
392 HOST_WIDE_INT type_size_a, type_size_b, diff_mod_size, step, init_a, init_b;
393
 394   /* Check that the data-refs have the same first location (except init) and
 395      that they are both either loads or stores (not one load and one store). */
396 if ((DR_BASE_ADDRESS (dra) != DR_BASE_ADDRESS (drb)
397 && (TREE_CODE (DR_BASE_ADDRESS (dra)) != ADDR_EXPR
398 || TREE_CODE (DR_BASE_ADDRESS (drb)) != ADDR_EXPR
399 || TREE_OPERAND (DR_BASE_ADDRESS (dra), 0)
400 != TREE_OPERAND (DR_BASE_ADDRESS (drb),0)))
401 || !vect_equal_offsets (DR_OFFSET (dra), DR_OFFSET (drb))
402 || !tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb))
403 || DR_IS_READ (dra) != DR_IS_READ (drb))
404 return false;
405
406 /* Check:
407 1. data-refs are of the same type
408 2. their steps are equal
409 3. the step (if greater than zero) is greater than the difference between
410 data-refs' inits. */
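  /* Hypothetical illustration: for a[2i] and a[2i+1] with 4-byte elements,
     both refs have DR_STEP 8 and DR_INITs 0 and 4; the init difference (4) is
     a multiple of the element size and does not exceed the step, so the two
     refs are recorded below as one interleaving group.  */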
411 type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))));
412 type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
413
414 if (type_size_a != type_size_b
415 || tree_int_cst_compare (DR_STEP (dra), DR_STEP (drb))
416 || !types_compatible_p (TREE_TYPE (DR_REF (dra)),
417 TREE_TYPE (DR_REF (drb))))
418 return false;
419
420 init_a = TREE_INT_CST_LOW (DR_INIT (dra));
421 init_b = TREE_INT_CST_LOW (DR_INIT (drb));
422 step = TREE_INT_CST_LOW (DR_STEP (dra));
423
424 if (init_a > init_b)
425 {
426 /* If init_a == init_b + the size of the type * k, we have an interleaving,
427 and DRB is accessed before DRA. */
428 diff_mod_size = (init_a - init_b) % type_size_a;
429
430 if (step && (init_a - init_b) > step)
431 return false;
432
433 if (diff_mod_size == 0)
434 {
435 vect_update_interleaving_chain (drb, dra);
436 if (vect_print_dump_info (REPORT_DR_DETAILS))
437 {
438 fprintf (vect_dump, "Detected interleaving ");
439 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
440 fprintf (vect_dump, " and ");
441 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
442 }
443 return true;
444 }
445 }
446 else
447 {
448 /* If init_b == init_a + the size of the type * k, we have an
449 interleaving, and DRA is accessed before DRB. */
450 diff_mod_size = (init_b - init_a) % type_size_a;
451
452 if (step && (init_b - init_a) > step)
453 return false;
454
455 if (diff_mod_size == 0)
456 {
457 vect_update_interleaving_chain (dra, drb);
458 if (vect_print_dump_info (REPORT_DR_DETAILS))
459 {
460 fprintf (vect_dump, "Detected interleaving ");
461 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
462 fprintf (vect_dump, " and ");
463 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
464 }
465 return true;
466 }
467 }
468
469 return false;
470 }
471
 472 /* Check if the data references pointed to by DR_I and DR_J are the same or
 473    belong to the same interleaving group.  Return FALSE if the drs are
 474    different, otherwise return TRUE. */
475
476 static bool
477 vect_same_range_drs (data_reference_p dr_i, data_reference_p dr_j)
478 {
479 gimple stmt_i = DR_STMT (dr_i);
480 gimple stmt_j = DR_STMT (dr_j);
481
482 if (operand_equal_p (DR_REF (dr_i), DR_REF (dr_j), 0)
483 || (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_i))
484 && DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_j))
485 && (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_i))
486 == DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_j)))))
487 return true;
488 else
489 return false;
490 }
491
492 /* If address ranges represented by DDR_I and DDR_J are equal,
493 return TRUE, otherwise return FALSE. */
494
495 static bool
496 vect_vfa_range_equal (ddr_p ddr_i, ddr_p ddr_j)
497 {
498 if ((vect_same_range_drs (DDR_A (ddr_i), DDR_A (ddr_j))
499 && vect_same_range_drs (DDR_B (ddr_i), DDR_B (ddr_j)))
500 || (vect_same_range_drs (DDR_A (ddr_i), DDR_B (ddr_j))
501 && vect_same_range_drs (DDR_B (ddr_i), DDR_A (ddr_j))))
502 return true;
503 else
504 return false;
505 }
506
507 /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
508 tested at run-time. Return TRUE if DDR was successfully inserted.
 509    Return FALSE if versioning is not supported. */
510
511 static bool
512 vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
513 {
514 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
515
516 if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
517 return false;
518
519 if (vect_print_dump_info (REPORT_DR_DETAILS))
520 {
521 fprintf (vect_dump, "mark for run-time aliasing test between ");
522 print_generic_expr (vect_dump, DR_REF (DDR_A (ddr)), TDF_SLIM);
523 fprintf (vect_dump, " and ");
524 print_generic_expr (vect_dump, DR_REF (DDR_B (ddr)), TDF_SLIM);
525 }
526
527 if (optimize_loop_nest_for_size_p (loop))
528 {
529 if (vect_print_dump_info (REPORT_DR_DETAILS))
530 fprintf (vect_dump, "versioning not supported when optimizing for size.");
531 return false;
532 }
533
534 /* FORNOW: We don't support versioning with outer-loop vectorization. */
535 if (loop->inner)
536 {
537 if (vect_print_dump_info (REPORT_DR_DETAILS))
538 fprintf (vect_dump, "versioning not yet supported for outer-loops.");
539 return false;
540 }
541
542 VEC_safe_push (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), ddr);
543 return true;
544 }
545
546
547 /* Function vect_analyze_data_ref_dependence.
548
549 Return TRUE if there (might) exist a dependence between a memory-reference
 550    DRA and a memory-reference DRB.  When the dependence can instead be checked
 551    at run-time by versioning for alias, return FALSE.  Adjust *MAX_VF
 552    according to the data dependence. */
553
554 static bool
555 vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
556 loop_vec_info loop_vinfo, int *max_vf,
557 bool *data_dependence_in_bb)
558 {
559 unsigned int i;
560 struct loop *loop = NULL;
561 struct data_reference *dra = DDR_A (ddr);
562 struct data_reference *drb = DDR_B (ddr);
563 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
564 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
565 lambda_vector dist_v;
566 unsigned int loop_depth;
567
568 /* Don't bother to analyze statements marked as unvectorizable. */
569 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
570 || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
571 return false;
572
573 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
574 {
575 /* Independent data accesses. */
576 vect_check_interleaving (dra, drb);
577 return false;
578 }
579
580 if (loop_vinfo)
581 loop = LOOP_VINFO_LOOP (loop_vinfo);
582
583 if ((DR_IS_READ (dra) && DR_IS_READ (drb) && loop_vinfo) || dra == drb)
584 return false;
585
586 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
587 {
588 if (loop_vinfo)
589 {
590 if (vect_print_dump_info (REPORT_DR_DETAILS))
591 {
592 fprintf (vect_dump, "versioning for alias required: "
593 "can't determine dependence between ");
594 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
595 fprintf (vect_dump, " and ");
596 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
597 }
598
599 /* Add to list of ddrs that need to be tested at run-time. */
600 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
601 }
602
 603       /* When vectorizing a basic block an unknown dependence can still mean
604 strided access. */
605 if (vect_check_interleaving (dra, drb))
606 return false;
607
608 if (vect_print_dump_info (REPORT_DR_DETAILS))
609 {
610 fprintf (vect_dump, "can't determine dependence between ");
611 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
612 fprintf (vect_dump, " and ");
613 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
614 }
615
616 /* We do not vectorize basic blocks with write-write dependencies. */
617 if (!DR_IS_READ (dra) && !DR_IS_READ (drb))
618 return true;
619
620 /* We deal with read-write dependencies in basic blocks later (by
621 verifying that all the loads in the basic block are before all the
622 stores). */
623 *data_dependence_in_bb = true;
624 return false;
625 }
626
627 /* Versioning for alias is not yet supported for basic block SLP, and
 628      dependence distance is inapplicable; hence, in case of a known data
 629      dependence, basic block vectorization is impossible for now. */
630 if (!loop_vinfo)
631 {
632 if (dra != drb && vect_check_interleaving (dra, drb))
633 return false;
634
635 if (vect_print_dump_info (REPORT_DR_DETAILS))
636 {
637 fprintf (vect_dump, "determined dependence between ");
638 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
639 fprintf (vect_dump, " and ");
640 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
641 }
642
 643       /* Do not vectorize basic blocks with write-write dependences. */
644 if (!DR_IS_READ (dra) && !DR_IS_READ (drb))
645 return true;
646
647 /* Check if this dependence is allowed in basic block vectorization. */
648 return vect_drs_dependent_in_basic_block (dra, drb);
649 }
650
651 /* Loop-based vectorization and known data dependence. */
652 if (DDR_NUM_DIST_VECTS (ddr) == 0)
653 {
654 if (vect_print_dump_info (REPORT_DR_DETAILS))
655 {
656 fprintf (vect_dump, "versioning for alias required: bad dist vector for ");
657 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
658 fprintf (vect_dump, " and ");
659 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
660 }
661 /* Add to list of ddrs that need to be tested at run-time. */
662 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
663 }
664
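  /* Hypothetical illustration: for a loop containing 'a[i+3] = a[i] + 1' the
     dependence distance is 3, so (assuming the incoming *MAX_VF is larger)
     *MAX_VF is clamped to 3 below; a vectorization factor of at most 3 still
     preserves the scalar order of the conflicting accesses.  */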
665 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
666 FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
667 {
668 int dist = dist_v[loop_depth];
669
670 if (vect_print_dump_info (REPORT_DR_DETAILS))
671 fprintf (vect_dump, "dependence distance = %d.", dist);
672
673 if (dist == 0)
674 {
675 if (vect_print_dump_info (REPORT_DR_DETAILS))
676 {
677 fprintf (vect_dump, "dependence distance == 0 between ");
678 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
679 fprintf (vect_dump, " and ");
680 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
681 }
682
683 /* For interleaving, mark that there is a read-write dependency if
 684              necessary.  We checked earlier that one of the data-refs is a store. */
685 if (DR_IS_READ (dra))
686 DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a) = true;
687 else
688 {
689 if (DR_IS_READ (drb))
690 DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b) = true;
691 }
692
693 continue;
694 }
695
696 if (dist > 0 && DDR_REVERSED_P (ddr))
697 {
 698           /* If DDR_REVERSED_P is set, the order of the data-refs in DDR was
699 reversed (to make distance vector positive), and the actual
700 distance is negative. */
701 if (vect_print_dump_info (REPORT_DR_DETAILS))
702 fprintf (vect_dump, "dependence distance negative.");
703 continue;
704 }
705
706 if (abs (dist) >= 2
707 && abs (dist) < *max_vf)
708 {
709 /* The dependence distance requires reduction of the maximal
710 vectorization factor. */
711 *max_vf = abs (dist);
712 if (vect_print_dump_info (REPORT_DR_DETAILS))
713 fprintf (vect_dump, "adjusting maximal vectorization factor to %i",
714 *max_vf);
715 }
716
717 if (abs (dist) >= *max_vf)
718 {
719 /* Dependence distance does not create dependence, as far as
720 vectorization is concerned, in this case. */
721 if (vect_print_dump_info (REPORT_DR_DETAILS))
722 fprintf (vect_dump, "dependence distance >= VF.");
723 continue;
724 }
725
726 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
727 {
728 fprintf (vect_dump, "not vectorized, possible dependence "
729 "between data-refs ");
730 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
731 fprintf (vect_dump, " and ");
732 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
733 }
734
735 return true;
736 }
737
738 return false;
739 }
740
741 /* Function vect_analyze_data_ref_dependences.
742
743 Examine all the data references in the loop, and make sure there do not
744 exist any data dependences between them. Set *MAX_VF according to
745 the maximum vectorization factor the data dependences allow. */
746
747 bool
748 vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
749 bb_vec_info bb_vinfo, int *max_vf,
750 bool *data_dependence_in_bb)
751 {
752 unsigned int i;
753 VEC (ddr_p, heap) *ddrs = NULL;
754 struct data_dependence_relation *ddr;
755
756 if (vect_print_dump_info (REPORT_DETAILS))
757 fprintf (vect_dump, "=== vect_analyze_dependences ===");
758
759 if (loop_vinfo)
760 ddrs = LOOP_VINFO_DDRS (loop_vinfo);
761 else
762 ddrs = BB_VINFO_DDRS (bb_vinfo);
763
764 FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
765 if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf,
766 data_dependence_in_bb))
767 return false;
768
769 return true;
770 }
771
772
773 /* Function vect_compute_data_ref_alignment
774
775 Compute the misalignment of the data reference DR.
776
777 Output:
778 1. If during the misalignment computation it is found that the data reference
779 cannot be vectorized then false is returned.
780 2. DR_MISALIGNMENT (DR) is defined.
781
782 FOR NOW: No analysis is actually performed. Misalignment is calculated
783 only for trivial cases. TODO. */
784
785 static bool
786 vect_compute_data_ref_alignment (struct data_reference *dr)
787 {
788 gimple stmt = DR_STMT (dr);
789 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
790 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
791 struct loop *loop = NULL;
792 tree ref = DR_REF (dr);
793 tree vectype;
794 tree base, base_addr;
795 bool base_aligned;
796 tree misalign;
797 tree aligned_to, alignment;
798
799 if (vect_print_dump_info (REPORT_DETAILS))
800 fprintf (vect_dump, "vect_compute_data_ref_alignment:");
801
802 if (loop_vinfo)
803 loop = LOOP_VINFO_LOOP (loop_vinfo);
804
805 /* Initialize misalignment to unknown. */
806 SET_DR_MISALIGNMENT (dr, -1);
807
808 misalign = DR_INIT (dr);
809 aligned_to = DR_ALIGNED_TO (dr);
810 base_addr = DR_BASE_ADDRESS (dr);
811 vectype = STMT_VINFO_VECTYPE (stmt_info);
812
813 /* In case the dataref is in an inner-loop of the loop that is being
814 vectorized (LOOP), we use the base and misalignment information
815 relative to the outer-loop (LOOP). This is ok only if the misalignment
816 stays the same throughout the execution of the inner-loop, which is why
 817      we have to check that the stride of the dataref in the inner-loop is a
 818      multiple of the vector size. */
819 if (loop && nested_in_vect_loop_p (loop, stmt))
820 {
821 tree step = DR_STEP (dr);
822 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
823
824 if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
825 {
826 if (vect_print_dump_info (REPORT_ALIGNMENT))
827 fprintf (vect_dump, "inner step divides the vector-size.");
828 misalign = STMT_VINFO_DR_INIT (stmt_info);
829 aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
830 base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
831 }
832 else
833 {
834 if (vect_print_dump_info (REPORT_ALIGNMENT))
835 fprintf (vect_dump, "inner step doesn't divide the vector-size.");
836 misalign = NULL_TREE;
837 }
838 }
839
840 base = build_fold_indirect_ref (base_addr);
841 alignment = ssize_int (TYPE_ALIGN (vectype)/BITS_PER_UNIT);
842
843 if ((aligned_to && tree_int_cst_compare (aligned_to, alignment) < 0)
844 || !misalign)
845 {
846 if (vect_print_dump_info (REPORT_ALIGNMENT))
847 {
848 fprintf (vect_dump, "Unknown alignment for access: ");
849 print_generic_expr (vect_dump, base, TDF_SLIM);
850 }
851 return true;
852 }
853
854 if ((DECL_P (base)
855 && tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base)),
856 alignment) >= 0)
857 || (TREE_CODE (base_addr) == SSA_NAME
858 && tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE (
859 TREE_TYPE (base_addr)))),
860 alignment) >= 0))
861 base_aligned = true;
862 else
863 base_aligned = false;
864
865 if (!base_aligned)
866 {
867 /* Do not change the alignment of global variables if
868 flag_section_anchors is enabled. */
869 if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype))
870 || (TREE_STATIC (base) && flag_section_anchors))
871 {
872 if (vect_print_dump_info (REPORT_DETAILS))
873 {
874 fprintf (vect_dump, "can't force alignment of ref: ");
875 print_generic_expr (vect_dump, ref, TDF_SLIM);
876 }
877 return true;
878 }
879
880 /* Force the alignment of the decl.
881 NOTE: This is the only change to the code we make during
882 the analysis phase, before deciding to vectorize the loop. */
883 if (vect_print_dump_info (REPORT_DETAILS))
884 {
885 fprintf (vect_dump, "force alignment of ");
886 print_generic_expr (vect_dump, ref, TDF_SLIM);
887 }
888
889 DECL_ALIGN (base) = TYPE_ALIGN (vectype);
890 DECL_USER_ALIGN (base) = 1;
891 }
892
893 /* At this point we assume that the base is aligned. */
894 gcc_assert (base_aligned
895 || (TREE_CODE (base) == VAR_DECL
896 && DECL_ALIGN (base) >= TYPE_ALIGN (vectype)));
897
898 /* Modulo alignment. */
899 misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment);
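  /* E.g. (hypothetical values): a DR_INIT of 20 bytes with a 16-byte vector
     alignment yields a misalignment of 20 % 16 = 4 bytes.  */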
900
901 if (!host_integerp (misalign, 1))
902 {
903 /* Negative or overflowed misalignment value. */
904 if (vect_print_dump_info (REPORT_DETAILS))
905 fprintf (vect_dump, "unexpected misalign value");
906 return false;
907 }
908
909 SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign));
910
911 if (vect_print_dump_info (REPORT_DETAILS))
912 {
913 fprintf (vect_dump, "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
914 print_generic_expr (vect_dump, ref, TDF_SLIM);
915 }
916
917 return true;
918 }
919
920
921 /* Function vect_compute_data_refs_alignment
922
923 Compute the misalignment of data references in the loop.
924 Return FALSE if a data reference is found that cannot be vectorized. */
925
926 static bool
927 vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
928 bb_vec_info bb_vinfo)
929 {
930 VEC (data_reference_p, heap) *datarefs;
931 struct data_reference *dr;
932 unsigned int i;
933
934 if (loop_vinfo)
935 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
936 else
937 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
938
939 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
940 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
941 && !vect_compute_data_ref_alignment (dr))
942 {
943 if (bb_vinfo)
944 {
945 /* Mark unsupported statement as unvectorizable. */
946 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
947 continue;
948 }
949 else
950 return false;
951 }
952
953 return true;
954 }
955
956
957 /* Function vect_update_misalignment_for_peel
958
959 DR - the data reference whose misalignment is to be adjusted.
960 DR_PEEL - the data reference whose misalignment is being made
961 zero in the vector loop by the peel.
962 NPEEL - the number of iterations in the peel loop if the misalignment
963 of DR_PEEL is known at compile time. */
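/* For instance (hypothetical values): with 4-byte elements, a 16-byte vector
   type, NPEEL = 3 and DR_MISALIGNMENT (DR) = 8, the known-alignment case
   below computes (8 + 3 * 4) % 16 = 4 as the new misalignment of DR.  */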
964
965 static void
966 vect_update_misalignment_for_peel (struct data_reference *dr,
967 struct data_reference *dr_peel, int npeel)
968 {
969 unsigned int i;
970 VEC(dr_p,heap) *same_align_drs;
971 struct data_reference *current_dr;
972 int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
973 int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
974 stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
975 stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
976
977 /* For interleaved data accesses the step in the loop must be multiplied by
978 the size of the interleaving group. */
979 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
980 dr_size *= DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_DR (stmt_info)));
981 if (STMT_VINFO_STRIDED_ACCESS (peel_stmt_info))
982 dr_peel_size *= DR_GROUP_SIZE (peel_stmt_info);
983
984 /* It can be assumed that the data refs with the same alignment as dr_peel
985 are aligned in the vector loop. */
986 same_align_drs
987 = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
988 FOR_EACH_VEC_ELT (dr_p, same_align_drs, i, current_dr)
989 {
990 if (current_dr != dr)
991 continue;
992 gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
993 DR_MISALIGNMENT (dr_peel) / dr_peel_size);
994 SET_DR_MISALIGNMENT (dr, 0);
995 return;
996 }
997
998 if (known_alignment_for_access_p (dr)
999 && known_alignment_for_access_p (dr_peel))
1000 {
1001 int misal = DR_MISALIGNMENT (dr);
1002 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1003 misal += npeel * dr_size;
1004 misal %= GET_MODE_SIZE (TYPE_MODE (vectype));
1005 SET_DR_MISALIGNMENT (dr, misal);
1006 return;
1007 }
1008
1009 if (vect_print_dump_info (REPORT_DETAILS))
1010 fprintf (vect_dump, "Setting misalignment to -1.");
1011 SET_DR_MISALIGNMENT (dr, -1);
1012 }
1013
1014
1015 /* Function vect_verify_datarefs_alignment
1016
1017 Return TRUE if all data references in the loop can be
1018 handled with respect to alignment. */
1019
1020 bool
1021 vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
1022 {
1023 VEC (data_reference_p, heap) *datarefs;
1024 struct data_reference *dr;
1025 enum dr_alignment_support supportable_dr_alignment;
1026 unsigned int i;
1027
1028 if (loop_vinfo)
1029 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1030 else
1031 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
1032
1033 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1034 {
1035 gimple stmt = DR_STMT (dr);
1036 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1037
1038 /* For interleaving, only the alignment of the first access matters.
1039 Skip statements marked as not vectorizable. */
1040 if ((STMT_VINFO_STRIDED_ACCESS (stmt_info)
1041 && DR_GROUP_FIRST_DR (stmt_info) != stmt)
1042 || !STMT_VINFO_VECTORIZABLE (stmt_info))
1043 continue;
1044
1045 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1046 if (!supportable_dr_alignment)
1047 {
1048 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1049 {
1050 if (DR_IS_READ (dr))
1051 fprintf (vect_dump,
1052 "not vectorized: unsupported unaligned load.");
1053 else
1054 fprintf (vect_dump,
1055 "not vectorized: unsupported unaligned store.");
1056
1057 print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
1058 }
1059 return false;
1060 }
1061 if (supportable_dr_alignment != dr_aligned
1062 && vect_print_dump_info (REPORT_ALIGNMENT))
1063 fprintf (vect_dump, "Vectorizing an unaligned access.");
1064 }
1065 return true;
1066 }
1067
1068
1069 /* Function vector_alignment_reachable_p
1070
1071 Return true if vector alignment for DR is reachable by peeling
1072 a few loop iterations. Return false otherwise. */
1073
1074 static bool
1075 vector_alignment_reachable_p (struct data_reference *dr)
1076 {
1077 gimple stmt = DR_STMT (dr);
1078 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1079 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1080
1081 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
1082 {
1083       /* For interleaved accesses we peel only if the number of iterations in
1084          the prolog loop (VF - misalignment) is a multiple of the
1085          number of interleaved accesses.  */
1086 int elem_size, mis_in_elements;
1087 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
1088
1089 /* FORNOW: handle only known alignment. */
1090 if (!known_alignment_for_access_p (dr))
1091 return false;
1092
1093 elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
1094 mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
1095
1096 if ((nelements - mis_in_elements) % DR_GROUP_SIZE (stmt_info))
1097 return false;
1098 }
1099
1100   /* If the misalignment is known at compile time then allow peeling
1101 only if natural alignment is reachable through peeling. */
1102 if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
1103 {
1104 HOST_WIDE_INT elmsize =
1105 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1106 if (vect_print_dump_info (REPORT_DETAILS))
1107 {
1108 fprintf (vect_dump, "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
1109 fprintf (vect_dump, ". misalignment = %d. ", DR_MISALIGNMENT (dr));
1110 }
1111 if (DR_MISALIGNMENT (dr) % elmsize)
1112 {
1113 if (vect_print_dump_info (REPORT_DETAILS))
1114 fprintf (vect_dump, "data size does not divide the misalignment.\n");
1115 return false;
1116 }
1117 }
1118
1119 if (!known_alignment_for_access_p (dr))
1120 {
1121 tree type = (TREE_TYPE (DR_REF (dr)));
1122 tree ba = DR_BASE_OBJECT (dr);
1123 bool is_packed = false;
1124
1125 if (ba)
1126 is_packed = contains_packed_reference (ba);
1127
1128 if (vect_print_dump_info (REPORT_DETAILS))
1129 fprintf (vect_dump, "Unknown misalignment, is_packed = %d",is_packed);
1130 if (targetm.vectorize.vector_alignment_reachable (type, is_packed))
1131 return true;
1132 else
1133 return false;
1134 }
1135
1136 return true;
1137 }
1138
1139
1140 /* Calculate the cost of the memory access represented by DR. */
1141
1142 static void
1143 vect_get_data_access_cost (struct data_reference *dr,
1144 unsigned int *inside_cost,
1145 unsigned int *outside_cost)
1146 {
1147 gimple stmt = DR_STMT (dr);
1148 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1149 int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
1150 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1151 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1152 int ncopies = vf / nunits;
1153 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1154
1155 if (!supportable_dr_alignment)
1156 *inside_cost = VECT_MAX_COST;
1157 else
1158 {
1159 if (DR_IS_READ (dr))
1160 vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost);
1161 else
1162 vect_get_store_cost (dr, ncopies, inside_cost);
1163 }
1164
1165 if (vect_print_dump_info (REPORT_COST))
1166 fprintf (vect_dump, "vect_get_data_access_cost: inside_cost = %d, "
1167 "outside_cost = %d.", *inside_cost, *outside_cost);
1168 }
1169
1170
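/* Hash function for the peeling hash table: an entry is keyed simply by the
   number of scalar iterations to peel (NPEEL).  */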
1171 static hashval_t
1172 vect_peeling_hash (const void *elem)
1173 {
1174 const struct _vect_peel_info *peel_info;
1175
1176 peel_info = (const struct _vect_peel_info *) elem;
1177 return (hashval_t) peel_info->npeel;
1178 }
1179
1180
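/* Equality function for the peeling hash table: two entries are equal iff
   they peel the same number of scalar iterations.  */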
1181 static int
1182 vect_peeling_hash_eq (const void *elem1, const void *elem2)
1183 {
1184 const struct _vect_peel_info *a, *b;
1185
1186 a = (const struct _vect_peel_info *) elem1;
1187 b = (const struct _vect_peel_info *) elem2;
1188 return (a->npeel == b->npeel);
1189 }
1190
1191
1192 /* Insert DR into peeling hash table with NPEEL as key. */
1193
1194 static void
1195 vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
1196 int npeel)
1197 {
1198 struct _vect_peel_info elem, *slot;
1199 void **new_slot;
1200 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1201
1202 elem.npeel = npeel;
1203 slot = (vect_peel_info) htab_find (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
1204 &elem);
1205 if (slot)
1206 slot->count++;
1207 else
1208 {
1209 slot = XNEW (struct _vect_peel_info);
1210 slot->npeel = npeel;
1211 slot->dr = dr;
1212 slot->count = 1;
1213 new_slot = htab_find_slot (LOOP_VINFO_PEELING_HTAB (loop_vinfo), slot,
1214 INSERT);
1215 *new_slot = slot;
1216 }
1217
1218 if (!supportable_dr_alignment && !flag_vect_cost_model)
1219 slot->count += VECT_MAX_COST;
1220 }
1221
1222
1223 /* Traverse the peeling hash table to find the peeling option that aligns the
1224    maximum number of data accesses.  */
1225
1226 static int
1227 vect_peeling_hash_get_most_frequent (void **slot, void *data)
1228 {
1229 vect_peel_info elem = (vect_peel_info) *slot;
1230 vect_peel_extended_info max = (vect_peel_extended_info) data;
1231
1232 if (elem->count > max->peel_info.count)
1233 {
1234 max->peel_info.npeel = elem->npeel;
1235 max->peel_info.count = elem->count;
1236 max->peel_info.dr = elem->dr;
1237 }
1238
1239 return 1;
1240 }
1241
1242
1243 /* Traverse the peeling hash table and calculate the cost for each peeling
1244    option.  Find the one with the lowest cost.  */
1245
1246 static int
1247 vect_peeling_hash_get_lowest_cost (void **slot, void *data)
1248 {
1249 vect_peel_info elem = (vect_peel_info) *slot;
1250 vect_peel_extended_info min = (vect_peel_extended_info) data;
1251 int save_misalignment, dummy;
1252 unsigned int inside_cost = 0, outside_cost = 0, i;
1253 gimple stmt = DR_STMT (elem->dr);
1254 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1255 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1256 VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1257 struct data_reference *dr;
1258
1259 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1260 {
1261 stmt = DR_STMT (dr);
1262 stmt_info = vinfo_for_stmt (stmt);
1263 /* For interleaving, only the alignment of the first access
1264 matters. */
1265 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
1266 && DR_GROUP_FIRST_DR (stmt_info) != stmt)
1267 continue;
1268
1269 save_misalignment = DR_MISALIGNMENT (dr);
1270 vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
1271 vect_get_data_access_cost (dr, &inside_cost, &outside_cost);
1272 SET_DR_MISALIGNMENT (dr, save_misalignment);
1273 }
1274
1275 outside_cost += vect_get_known_peeling_cost (loop_vinfo, elem->npeel, &dummy,
1276 vect_get_single_scalar_iteraion_cost (loop_vinfo));
1277
1278 if (inside_cost < min->inside_cost
1279 || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
1280 {
1281 min->inside_cost = inside_cost;
1282 min->outside_cost = outside_cost;
1283 min->peel_info.dr = elem->dr;
1284 min->peel_info.npeel = elem->npeel;
1285 }
1286
1287 return 1;
1288 }
1289
1290
1291 /* Choose best peeling option by traversing peeling hash table and either
1292 choosing an option with the lowest cost (if cost model is enabled) or the
1293 option that aligns as many accesses as possible. */
1294
1295 static struct data_reference *
1296 vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
1297 unsigned int *npeel)
1298 {
1299 struct _vect_peel_extended_info res;
1300
1301 res.peel_info.dr = NULL;
1302
1303 if (flag_vect_cost_model)
1304 {
1305 res.inside_cost = INT_MAX;
1306 res.outside_cost = INT_MAX;
1307 htab_traverse (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
1308 vect_peeling_hash_get_lowest_cost, &res);
1309 }
1310 else
1311 {
1312 res.peel_info.count = 0;
1313 htab_traverse (LOOP_VINFO_PEELING_HTAB (loop_vinfo),
1314 vect_peeling_hash_get_most_frequent, &res);
1315 }
1316
1317 *npeel = res.peel_info.npeel;
1318 return res.peel_info.dr;
1319 }
1320
1321
1322 /* Function vect_enhance_data_refs_alignment
1323
1324 This pass will use loop versioning and loop peeling in order to enhance
1325 the alignment of data references in the loop.
1326
1327 FOR NOW: we assume that whatever versioning/peeling takes place, only the
1328    original loop is to be vectorized; any other loops that are created by
1329    the transformations performed in this pass are not supposed to be
1330    vectorized.  This restriction will be relaxed.
1331
1332 This pass will require a cost model to guide it whether to apply peeling
1333    or versioning or a combination of the two.  For example, the scheme that
1334    Intel uses when given a loop with several memory accesses is as follows:
1335    choose one memory access ('p') whose alignment you want to force by doing
1336 peeling. Then, either (1) generate a loop in which 'p' is aligned and all
1337 other accesses are not necessarily aligned, or (2) use loop versioning to
1338 generate one loop in which all accesses are aligned, and another loop in
1339 which only 'p' is necessarily aligned.
1340
1341 ("Automatic Intra-Register Vectorization for the Intel Architecture",
1342    Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
1343 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1344
1345 Devising a cost model is the most critical aspect of this work. It will
1346 guide us on which access to peel for, whether to use loop versioning, how
1347 many versions to create, etc. The cost model will probably consist of
1348 generic considerations as well as target specific considerations (on
1349 powerpc for example, misaligned stores are more painful than misaligned
1350 loads).
1351
1352 Here are the general steps involved in alignment enhancements:
1353
1354 -- original loop, before alignment analysis:
1355 for (i=0; i<N; i++){
1356 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1357 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1358 }
1359
1360 -- After vect_compute_data_refs_alignment:
1361 for (i=0; i<N; i++){
1362 x = q[i]; # DR_MISALIGNMENT(q) = 3
1363 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1364 }
1365
1366 -- Possibility 1: we do loop versioning:
1367 if (p is aligned) {
1368 for (i=0; i<N; i++){ # loop 1A
1369 x = q[i]; # DR_MISALIGNMENT(q) = 3
1370 p[i] = y; # DR_MISALIGNMENT(p) = 0
1371 }
1372 }
1373 else {
1374 for (i=0; i<N; i++){ # loop 1B
1375 x = q[i]; # DR_MISALIGNMENT(q) = 3
1376 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1377 }
1378 }
1379
1380 -- Possibility 2: we do loop peeling:
1381 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1382 x = q[i];
1383 p[i] = y;
1384 }
1385 for (i = 3; i < N; i++){ # loop 2A
1386 x = q[i]; # DR_MISALIGNMENT(q) = 0
1387 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1388 }
1389
1390 -- Possibility 3: combination of loop peeling and versioning:
1391 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1392 x = q[i];
1393 p[i] = y;
1394 }
1395 if (p is aligned) {
1396 for (i = 3; i<N; i++){ # loop 3A
1397 x = q[i]; # DR_MISALIGNMENT(q) = 0
1398 p[i] = y; # DR_MISALIGNMENT(p) = 0
1399 }
1400 }
1401 else {
1402 for (i = 3; i<N; i++){ # loop 3B
1403 x = q[i]; # DR_MISALIGNMENT(q) = 0
1404 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1405 }
1406 }
1407
1408 These loops are later passed to loop_transform to be vectorized. The
1409 vectorizer will use the alignment information to guide the transformation
1410 (whether to generate regular loads/stores, or with special handling for
1411 misalignment). */
1412
1413 bool
1414 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
1415 {
1416 VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1417 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1418 enum dr_alignment_support supportable_dr_alignment;
1419 struct data_reference *dr0 = NULL, *first_store = NULL;
1420 struct data_reference *dr;
1421 unsigned int i, j;
1422 bool do_peeling = false;
1423 bool do_versioning = false;
1424 bool stat;
1425 gimple stmt;
1426 stmt_vec_info stmt_info;
1427 int vect_versioning_for_alias_required;
1428 unsigned int npeel = 0;
1429 bool all_misalignments_unknown = true;
1430 unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1431 unsigned possible_npeel_number = 1;
1432 tree vectype;
1433 unsigned int nelements, mis, same_align_drs_max = 0;
1434
1435 if (vect_print_dump_info (REPORT_DETAILS))
1436 fprintf (vect_dump, "=== vect_enhance_data_refs_alignment ===");
1437
1438 /* While cost model enhancements are expected in the future, the high level
1439 view of the code at this time is as follows:
1440
1441 A) If there is a misaligned access then see if peeling to align
1442 this access can make all data references satisfy
1443 vect_supportable_dr_alignment. If so, update data structures
1444 as needed and return true.
1445
1446 B) If peeling wasn't possible and there is a data reference with an
1447 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1448 then see if loop versioning checks can be used to make all data
1449 references satisfy vect_supportable_dr_alignment. If so, update
1450 data structures as needed and return true.
1451
1452 C) If neither peeling nor versioning were successful then return false if
1453 any data reference does not satisfy vect_supportable_dr_alignment.
1454
1455 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1456
1457 Note, Possibility 3 above (which is peeling and versioning together) is not
1458 being done at this time. */
1459
1460 /* (1) Peeling to force alignment. */
1461
1462 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1463 Considerations:
1464 + How many accesses will become aligned due to the peeling
1465 - How many accesses will become unaligned due to the peeling,
1466 and the cost of misaligned accesses.
1467 - The cost of peeling (the extra runtime checks, the increase
1468 in code size). */
1469
1470 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1471 {
1472 stmt = DR_STMT (dr);
1473 stmt_info = vinfo_for_stmt (stmt);
1474
1475 /* For interleaving, only the alignment of the first access
1476 matters. */
1477 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
1478 && DR_GROUP_FIRST_DR (stmt_info) != stmt)
1479 continue;
1480
1481 supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1482 do_peeling = vector_alignment_reachable_p (dr);
1483 if (do_peeling)
1484 {
1485 if (known_alignment_for_access_p (dr))
1486 {
1487 unsigned int npeel_tmp;
1488
1489 /* Save info about DR in the hash table. */
1490 if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
1491 LOOP_VINFO_PEELING_HTAB (loop_vinfo) =
1492 htab_create (1, vect_peeling_hash,
1493 vect_peeling_hash_eq, free);
1494
1495 vectype = STMT_VINFO_VECTYPE (stmt_info);
1496 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1497 mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
1498 TREE_TYPE (DR_REF (dr))));
1499 npeel_tmp = (nelements - mis) % vf;
1500
1501 /* For multiple types, it is possible that the bigger type access
1502                  will have more than one peeling option.  E.g., a loop with two
1503                  types: one of size (vector size / 4), and the other one of
1504                  size (vector size / 8).  The vectorization factor will be 8.
1505                  If both accesses are misaligned by 3, the first one needs one
1506                  scalar iteration to be aligned, and the second one needs 5.
1507                  But the first one will also be aligned by peeling 5 scalar
1508                  iterations, and in that case both accesses will be aligned.
1509                  Hence, except for the immediate peeling amount, we also want
1510                  to try adding a full vector size, as long as we don't exceed
1511                  the vectorization factor.
1512                  We do this automatically for the cost model, since we
1513                  calculate the cost for every peeling option. */
1514 if (!flag_vect_cost_model)
1515                 possible_npeel_number = vf / nelements;
1516
1517 /* Handle the aligned case. We may decide to align some other
1518 access, making DR unaligned. */
1519 if (DR_MISALIGNMENT (dr) == 0)
1520 {
1521 npeel_tmp = 0;
1522 if (!flag_vect_cost_model)
1523 possible_npeel_number++;
1524 }
1525
1526 for (j = 0; j < possible_npeel_number; j++)
1527 {
1528 gcc_assert (npeel_tmp <= vf);
1529 vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
1530 npeel_tmp += nelements;
1531 }
1532
1533 all_misalignments_unknown = false;
1534               /* The data-ref that was chosen for the case in which all the
1535 misalignments are unknown is not relevant anymore, since we
1536 have a data-ref with known alignment. */
1537 dr0 = NULL;
1538 }
1539 else
1540 {
1541 /* If we don't know all the misalignment values, we prefer
1542                  peeling for the data-ref that has the maximum number of data-refs
1543                  with the same alignment, unless the target prefers to align
1544                  stores over loads. */
1545 if (all_misalignments_unknown)
1546 {
1547 if (same_align_drs_max < VEC_length (dr_p,
1548 STMT_VINFO_SAME_ALIGN_REFS (stmt_info))
1549 || !dr0)
1550 {
1551 same_align_drs_max = VEC_length (dr_p,
1552 STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
1553 dr0 = dr;
1554 }
1555
1556 if (!first_store && !DR_IS_READ (dr))
1557 first_store = dr;
1558 }
1559
1560 /* If there are both known and unknown misaligned accesses in the
1561 loop, we choose peeling amount according to the known
1562 accesses. */
1563
1564
1565 if (!supportable_dr_alignment)
1566 {
1567 dr0 = dr;
1568 if (!first_store && !DR_IS_READ (dr))
1569 first_store = dr;
1570 }
1571 }
1572 }
1573 else
1574 {
1575 if (!aligned_access_p (dr))
1576 {
1577 if (vect_print_dump_info (REPORT_DETAILS))
1578 fprintf (vect_dump, "vector alignment may not be reachable");
1579
1580 break;
1581 }
1582 }
1583 }
1584
1585 vect_versioning_for_alias_required
1586 = LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo);
1587
1588 /* Temporarily, if versioning for alias is required, we disable peeling
1589 until we support peeling and versioning. Often peeling for alignment
1590 will require peeling for loop-bound, which in turn requires that we
1591 know how to adjust the loop ivs after the loop. */
1592 if (vect_versioning_for_alias_required
1593 || !vect_can_advance_ivs_p (loop_vinfo)
1594 || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1595 do_peeling = false;
1596
1597 if (do_peeling && all_misalignments_unknown
1598 && vect_supportable_dr_alignment (dr0, false))
1599 {
1600
1601       /* Check whether the target prefers to align stores over loads, i.e., if
1602 misaligned stores are more expensive than misaligned loads (taking
1603 drs with same alignment into account). */
1604 if (first_store && DR_IS_READ (dr0))
1605 {
1606 unsigned int load_inside_cost = 0, load_outside_cost = 0;
1607 unsigned int store_inside_cost = 0, store_outside_cost = 0;
1608 unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
1609 unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
1610
1611 vect_get_data_access_cost (dr0, &load_inside_cost,
1612 &load_outside_cost);
1613 vect_get_data_access_cost (first_store, &store_inside_cost,
1614 &store_outside_cost);
1615
1616 /* Calculate the penalty for leaving FIRST_STORE unaligned (by
1617 aligning the load DR0). */
1618 load_inside_penalty = store_inside_cost;
1619 load_outside_penalty = store_outside_cost;
1620 for (i = 0; VEC_iterate (dr_p, STMT_VINFO_SAME_ALIGN_REFS
1621 (vinfo_for_stmt (DR_STMT (first_store))),
1622 i, dr);
1623 i++)
1624 if (DR_IS_READ (dr))
1625 {
1626 load_inside_penalty += load_inside_cost;
1627 load_outside_penalty += load_outside_cost;
1628 }
1629 else
1630 {
1631 load_inside_penalty += store_inside_cost;
1632 load_outside_penalty += store_outside_cost;
1633 }
1634
1635 /* Calculate the penalty for leaving DR0 unaligned (by
1636 aligning the FIRST_STORE). */
1637 store_inside_penalty = load_inside_cost;
1638 store_outside_penalty = load_outside_cost;
1639 for (i = 0; VEC_iterate (dr_p, STMT_VINFO_SAME_ALIGN_REFS
1640 (vinfo_for_stmt (DR_STMT (dr0))),
1641 i, dr);
1642 i++)
1643 if (DR_IS_READ (dr))
1644 {
1645 store_inside_penalty += load_inside_cost;
1646 store_outside_penalty += load_outside_cost;
1647 }
1648 else
1649 {
1650 store_inside_penalty += store_inside_cost;
1651 store_outside_penalty += store_outside_cost;
1652 }
1653
1654 if (load_inside_penalty > store_inside_penalty
1655 || (load_inside_penalty == store_inside_penalty
1656 && load_outside_penalty > store_outside_penalty))
1657 dr0 = first_store;
1658 }
1659
1660 /* In case there are only loads with different unknown misalignments, use
1661 peeling only if it may help to align other accesses in the loop. */
1662 if (!first_store && !VEC_length (dr_p, STMT_VINFO_SAME_ALIGN_REFS
1663 (vinfo_for_stmt (DR_STMT (dr0))))
1664 && vect_supportable_dr_alignment (dr0, false)
1665 != dr_unaligned_supported)
1666 do_peeling = false;
1667 }
1668
1669 if (do_peeling && !dr0)
1670 {
1671       /* Peeling is possible, but no data access strictly requires alignment in
1672          order to be supported.  So we try to choose the best possible peeling. */
1673
1674 /* We should get here only if there are drs with known misalignment. */
1675 gcc_assert (!all_misalignments_unknown);
1676
1677 /* Choose the best peeling from the hash table. */
1678 dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel);
1679 if (!dr0 || !npeel)
1680 do_peeling = false;
1681 }
1682
1683 if (do_peeling)
1684 {
1685 stmt = DR_STMT (dr0);
1686 stmt_info = vinfo_for_stmt (stmt);
1687 vectype = STMT_VINFO_VECTYPE (stmt_info);
1688 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1689
1690 if (known_alignment_for_access_p (dr0))
1691 {
1692 if (!npeel)
1693 {
1694 /* Since it's known at compile time, compute the number of
1695 iterations in the peeled loop (the peeling factor) for use in
1696 updating DR_MISALIGNMENT values. The peeling factor is the
1697 vectorization factor minus the misalignment as an element
1698 count. */
1699 mis = DR_MISALIGNMENT (dr0);
1700 mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
1701 npeel = nelements - mis;
1702 }
1703
1704 /* For interleaved data access every iteration accesses all the
1705 members of the group, therefore we divide the number of iterations
1706 by the group size. */
1707 stmt_info = vinfo_for_stmt (DR_STMT (dr0));
1708 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
1709 npeel /= DR_GROUP_SIZE (stmt_info);
1710
1711 if (vect_print_dump_info (REPORT_DETAILS))
1712 fprintf (vect_dump, "Try peeling by %d", npeel);
1713 }
1714
1715 /* Ensure that all data refs can be vectorized after the peel. */
1716 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1717 {
1718 int save_misalignment;
1719
1720 if (dr == dr0)
1721 continue;
1722
1723 stmt = DR_STMT (dr);
1724 stmt_info = vinfo_for_stmt (stmt);
1725 /* For interleaving, only the alignment of the first access
1726 matters. */
1727 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
1728 && DR_GROUP_FIRST_DR (stmt_info) != stmt)
1729 continue;
1730
1731 save_misalignment = DR_MISALIGNMENT (dr);
1732 vect_update_misalignment_for_peel (dr, dr0, npeel);
1733 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1734 SET_DR_MISALIGNMENT (dr, save_misalignment);
1735
1736 if (!supportable_dr_alignment)
1737 {
1738 do_peeling = false;
1739 break;
1740 }
1741 }
1742
1743 if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
1744 {
1745 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1746 if (!stat)
1747 do_peeling = false;
1748 else
1749 return stat;
1750 }
1751
1752 if (do_peeling)
1753 {
1754 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
1755 If the misalignment of DR_i is identical to that of dr0 then set
1756 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
1757 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
1758 by the peeling factor times the element size of DR_i (MOD the
1759 vectorization factor times the size). Otherwise, the
1760 misalignment of DR_i must be set to unknown. */
1761 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1762 if (dr != dr0)
1763 vect_update_misalignment_for_peel (dr, dr0, npeel);
1764
1765 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
1766 if (npeel)
1767 LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
1768 else
1769 LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) = DR_MISALIGNMENT (dr0);
1770 SET_DR_MISALIGNMENT (dr0, 0);
1771 if (vect_print_dump_info (REPORT_ALIGNMENT))
1772 fprintf (vect_dump, "Alignment of access forced using peeling.");
1773
1774 if (vect_print_dump_info (REPORT_DETAILS))
1775 fprintf (vect_dump, "Peeling for alignment will be applied.");
1776
1777 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1778 gcc_assert (stat);
1779 return stat;
1780 }
1781 }
1782
1783
1784 /* (2) Versioning to force alignment. */
1785
1786 /* Try versioning if:
1787 1) flag_tree_vect_loop_version is TRUE
1788 2) optimize loop for speed
1789 3) there is at least one unsupported misaligned data ref with an unknown
1790 misalignment, and
1791 4) all misaligned data refs with a known misalignment are supported, and
1792 5) the number of runtime alignment checks is within reason. */
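   /* Illustrative sketch of the versioned code that this enables (assumed
      shape; the actual GIMPLE is emitted later by the loop-versioning code):

        if (((addr_1 | addr_2 | ...) & mask) == 0)
          vectorized_loop;     (all tested refs proven aligned at run time)
        else
          scalar_loop;         (fall back to the original loop)  */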
1793
1794 do_versioning =
1795 flag_tree_vect_loop_version
1796 && optimize_loop_nest_for_speed_p (loop)
1797 && (!loop->inner); /* FORNOW */
1798
1799 if (do_versioning)
1800 {
1801 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
1802 {
1803 stmt = DR_STMT (dr);
1804 stmt_info = vinfo_for_stmt (stmt);
1805
1806 /* For interleaving, only the alignment of the first access
1807 matters. */
1808 if (aligned_access_p (dr)
1809 || (STMT_VINFO_STRIDED_ACCESS (stmt_info)
1810 && DR_GROUP_FIRST_DR (stmt_info) != stmt))
1811 continue;
1812
1813 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1814
1815 if (!supportable_dr_alignment)
1816 {
1817 gimple stmt;
1818 int mask;
1819 tree vectype;
1820
1821 if (known_alignment_for_access_p (dr)
1822 || VEC_length (gimple,
1823 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))
1824 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
1825 {
1826 do_versioning = false;
1827 break;
1828 }
1829
1830 stmt = DR_STMT (dr);
1831 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1832 gcc_assert (vectype);
1833
1834 /* The rightmost bits of an aligned address must be zeros.
1835 Construct the mask needed for this test. For example,
1836 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
1837 mask must be 15 = 0xf. */
1838 mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
1839
1840 /* FORNOW: use the same mask to test all potentially unaligned
1841 references in the loop. The vectorizer currently supports
1842 a single vector size, see the reference to
1843 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
1844 vectorization factor is computed. */
1845 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
1846 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
1847 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
1848 VEC_safe_push (gimple, heap,
1849 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo),
1850 DR_STMT (dr));
1851 }
1852 }
1853
1854 /* Versioning requires at least one misaligned data reference. */
1855 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
1856 do_versioning = false;
1857 else if (!do_versioning)
1858 VEC_truncate (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo), 0);
1859 }
1860
1861 if (do_versioning)
1862 {
1863 VEC(gimple,heap) *may_misalign_stmts
1864 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
1865 gimple stmt;
1866
1867 /* It can now be assumed that the data references in the statements
1868 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
1869 of the loop being vectorized. */
1870 FOR_EACH_VEC_ELT (gimple, may_misalign_stmts, i, stmt)
1871 {
1872 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1873 dr = STMT_VINFO_DATA_REF (stmt_info);
1874 SET_DR_MISALIGNMENT (dr, 0);
1875 if (vect_print_dump_info (REPORT_ALIGNMENT))
1876 fprintf (vect_dump, "Alignment of access forced using versioning.");
1877 }
1878
1879 if (vect_print_dump_info (REPORT_DETAILS))
1880 fprintf (vect_dump, "Versioning for alignment will be applied.");
1881
1882 /* Peeling and versioning can't be done together at this time. */
1883 gcc_assert (! (do_peeling && do_versioning));
1884
1885 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1886 gcc_assert (stat);
1887 return stat;
1888 }
1889
1890 /* This point is reached if neither peeling nor versioning is being done. */
1891 gcc_assert (! (do_peeling || do_versioning));
1892
1893 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1894 return stat;
1895 }
1896
1897
1898 /* Function vect_find_same_alignment_drs.
1899
1900 Update group and alignment relations according to the chosen
1901 vectorization factor. */
1902
1903 static void
1904 vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
1905 loop_vec_info loop_vinfo)
1906 {
1907 unsigned int i;
1908 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1909 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1910 struct data_reference *dra = DDR_A (ddr);
1911 struct data_reference *drb = DDR_B (ddr);
1912 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
1913 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
1914 int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
1915 int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
1916 lambda_vector dist_v;
1917 unsigned int loop_depth;
1918
1919 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1920 return;
1921
1922 if (dra == drb)
1923 return;
1924
1925 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1926 return;
1927
1928 /* Loop-based vectorization and known data dependence. */
1929 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1930 return;
1931
1932 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
1933 FOR_EACH_VEC_ELT (lambda_vector, DDR_DIST_VECTS (ddr), i, dist_v)
1934 {
1935 int dist = dist_v[loop_depth];
1936
1937 if (vect_print_dump_info (REPORT_DR_DETAILS))
1938 fprintf (vect_dump, "dependence distance = %d.", dist);
1939
1940 /* Same loop iteration. */
1941 if (dist == 0
1942 || (dist % vectorization_factor == 0 && dra_size == drb_size))
1943 {
1944 /* Two references with distance zero have the same alignment. */
1945 VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a), drb);
1946 VEC_safe_push (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b), dra);
1947 if (vect_print_dump_info (REPORT_ALIGNMENT))
1948 fprintf (vect_dump, "accesses have the same alignment.");
1949 if (vect_print_dump_info (REPORT_DR_DETAILS))
1950 {
1951 fprintf (vect_dump, "dependence distance modulo vf == 0 between ");
1952 print_generic_expr (vect_dump, DR_REF (dra), TDF_SLIM);
1953 fprintf (vect_dump, " and ");
1954 print_generic_expr (vect_dump, DR_REF (drb), TDF_SLIM);
1955 }
1956 }
1957 }
1958 }
1959
1960
1961 /* Function vect_analyze_data_refs_alignment
1962
1963 Analyze the alignment of the data-references in the loop.
1964 Return FALSE if a data reference is found that cannot be vectorized. */
1965
1966 bool
1967 vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
1968 bb_vec_info bb_vinfo)
1969 {
1970 if (vect_print_dump_info (REPORT_DETAILS))
1971 fprintf (vect_dump, "=== vect_analyze_data_refs_alignment ===");
1972
1973 /* Mark groups of data references with same alignment using
1974 data dependence information. */
1975 if (loop_vinfo)
1976 {
1977 VEC (ddr_p, heap) *ddrs = LOOP_VINFO_DDRS (loop_vinfo);
1978 struct data_dependence_relation *ddr;
1979 unsigned int i;
1980
1981 FOR_EACH_VEC_ELT (ddr_p, ddrs, i, ddr)
1982 vect_find_same_alignment_drs (ddr, loop_vinfo);
1983 }
1984
1985 if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
1986 {
1987 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
1988 fprintf (vect_dump,
1989 "not vectorized: can't calculate alignment for data ref.");
1990 return false;
1991 }
1992
1993 return true;
1994 }
1995
1996
1997 /* Analyze groups of strided accesses: check that DR belongs to a group of
1998 strided accesses of legal size, step, etc. Detect gaps, single element
1999 interleaving, and other special cases. Set strided access info.
2000 Collect groups of strided stores for further use in SLP analysis. */
2001
2002 static bool
2003 vect_analyze_group_access (struct data_reference *dr)
2004 {
2005 tree step = DR_STEP (dr);
2006 tree scalar_type = TREE_TYPE (DR_REF (dr));
2007 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2008 gimple stmt = DR_STMT (dr);
2009 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2010 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2011 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2012 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2013 HOST_WIDE_INT stride;
2014 bool slp_impossible = false;
2015
2016 /* For interleaving, STRIDE is STEP counted in elements, i.e., the size of the
2017 interleaving group (including gaps). */
2018 stride = dr_step / type_size;
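  /* Illustrative example (assumed values): for loads of 4-byte ints with
     DR_STEP == 16 bytes, STRIDE == 16 / 4 == 4, i.e. an interleaving group
     of four elements (gaps included) per scalar iteration.  */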
2019
2020 /* A non-consecutive access is possible only if it is part of an interleaving group. */
2021 if (!DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)))
2022 {
2023 /* Check whether this DR is part of an interleaving group, and is a single
2024 element of the group that is accessed in the loop. */
2025
2026 /* Gaps are supported only for loads. STEP must be a multiple of the type
2027 size. The size of the group must be a power of 2. */
2028 if (DR_IS_READ (dr)
2029 && (dr_step % type_size) == 0
2030 && stride > 0
2031 && exact_log2 (stride) != -1)
2032 {
2033 DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) = stmt;
2034 DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
2035 if (vect_print_dump_info (REPORT_DR_DETAILS))
2036 {
2037 fprintf (vect_dump, "Detected single element interleaving ");
2038 print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
2039 fprintf (vect_dump, " step ");
2040 print_generic_expr (vect_dump, step, TDF_SLIM);
2041 }
2042 return true;
2043 }
2044
2045 if (vect_print_dump_info (REPORT_DETAILS))
2046 {
2047 fprintf (vect_dump, "not consecutive access ");
2048 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2049 }
2050
2051 if (bb_vinfo)
2052 {
2053 /* Mark the statement as unvectorizable. */
2054 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2055 return true;
2056 }
2057
2058 return false;
2059 }
2060
2061 if (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) == stmt)
2062 {
2063 /* First stmt in the interleaving chain. Check the chain. */
2064 gimple next = DR_GROUP_NEXT_DR (vinfo_for_stmt (stmt));
2065 struct data_reference *data_ref = dr;
2066 unsigned int count = 1;
2067 tree next_step;
2068 tree prev_init = DR_INIT (data_ref);
2069 gimple prev = stmt;
2070 HOST_WIDE_INT diff, count_in_bytes, gaps = 0;
2071
2072 while (next)
2073 {
2074 /* Skip identical data-refs. When two or more stmts share a data-ref
2075 (supported only for loads), we vectorize only the first stmt, and
2076 the rest get their vectorized loads from the first one. */
2077 if (!tree_int_cst_compare (DR_INIT (data_ref),
2078 DR_INIT (STMT_VINFO_DATA_REF (
2079 vinfo_for_stmt (next)))))
2080 {
2081 if (!DR_IS_READ (data_ref))
2082 {
2083 if (vect_print_dump_info (REPORT_DETAILS))
2084 fprintf (vect_dump, "Two store stmts share the same dr.");
2085 return false;
2086 }
2087
2088 /* Check that there are no load-store dependences for these loads,
2089 to prevent a load-store-load sequence to the same location. */
2090 if (DR_GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next))
2091 || DR_GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev)))
2092 {
2093 if (vect_print_dump_info (REPORT_DETAILS))
2094 fprintf (vect_dump,
2095 "READ_WRITE dependence in interleaving.");
2096 return false;
2097 }
2098
2099 /* For a load, reuse the same vectorized load. */
2100 DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
2101
2102 prev = next;
2103 next = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
2104 continue;
2105 }
2106 prev = next;
2107
2108 /* Check that all the accesses have the same STEP. */
2109 next_step = DR_STEP (STMT_VINFO_DATA_REF (vinfo_for_stmt (next)));
2110 if (tree_int_cst_compare (step, next_step))
2111 {
2112 if (vect_print_dump_info (REPORT_DETAILS))
2113 fprintf (vect_dump, "not consecutive access in interleaving");
2114 return false;
2115 }
2116
2117 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
2118 /* Check that the distance between two accesses is equal to the type
2119 size. Otherwise, we have gaps. */
2120 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2121 - TREE_INT_CST_LOW (prev_init)) / type_size;
2122 if (diff != 1)
2123 {
2124 /* FORNOW: SLP of accesses with gaps is not supported. */
2125 slp_impossible = true;
2126 if (!DR_IS_READ (data_ref))
2127 {
2128 if (vect_print_dump_info (REPORT_DETAILS))
2129 fprintf (vect_dump, "interleaved store with gaps");
2130 return false;
2131 }
2132
2133 gaps += diff - 1;
2134 }
2135
2136 /* Store the gap from the previous member of the group. If there is no
2137 gap in the access, DR_GROUP_GAP is always 1. */
2138 DR_GROUP_GAP (vinfo_for_stmt (next)) = diff;
2139
2140 prev_init = DR_INIT (data_ref);
2141 next = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
2142 /* Count the number of data-refs in the chain. */
2143 count++;
2144 }
2145
2146 /* COUNT is the number of accesses found; we multiply it by the size of
2147 the type to get COUNT_IN_BYTES. */
2148 count_in_bytes = type_size * count;
2149
2150 /* Check that the size of the interleaving (including gaps) is not
2151 greater than STEP. */
2152 if (dr_step && dr_step < count_in_bytes + gaps * type_size)
2153 {
2154 if (vect_print_dump_info (REPORT_DETAILS))
2155 {
2156 fprintf (vect_dump, "interleaving size is greater than step for ");
2157 print_generic_expr (vect_dump, DR_REF (dr), TDF_SLIM);
2158 }
2159 return false;
2160 }
2161
2162 /* Check that the size of the interleaving is equal to STEP for stores,
2163 i.e., that there are no gaps. */
2164 if (dr_step && dr_step != count_in_bytes)
2165 {
2166 if (DR_IS_READ (dr))
2167 {
2168 slp_impossible = true;
2169 /* There is a gap after the last load in the group. This gap is a
2170 difference between the stride and the number of elements. When
2171 there is no gap, this difference should be 0. */
2172 DR_GROUP_GAP (vinfo_for_stmt (stmt)) = stride - count;
2173 }
2174 else
2175 {
2176 if (vect_print_dump_info (REPORT_DETAILS))
2177 fprintf (vect_dump, "interleaved store with gaps");
2178 return false;
2179 }
2180 }
2181
2182 /* Check that STEP is a multiple of type size. */
2183 if (dr_step && (dr_step % type_size) != 0)
2184 {
2185 if (vect_print_dump_info (REPORT_DETAILS))
2186 {
2187 fprintf (vect_dump, "step is not a multiple of type size: step ");
2188 print_generic_expr (vect_dump, step, TDF_SLIM);
2189 fprintf (vect_dump, " size ");
2190 print_generic_expr (vect_dump, TYPE_SIZE_UNIT (scalar_type),
2191 TDF_SLIM);
2192 }
2193 return false;
2194 }
2195
2196 /* FORNOW: we handle only interleaving that is a power of 2.
2197 We don't fail here if it may still be possible to vectorize the
2198 group using SLP. If not, the size of the group will be checked in
2199 vect_analyze_operations, and the vectorization will fail. */
2200 if (exact_log2 (stride) == -1)
2201 {
2202 if (vect_print_dump_info (REPORT_DETAILS))
2203 fprintf (vect_dump, "interleaving is not a power of 2");
2204
2205 if (slp_impossible)
2206 return false;
2207 }
2208
2209 if (stride == 0)
2210 stride = count;
2211
2212 DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = stride;
2213 if (vect_print_dump_info (REPORT_DETAILS))
2214 fprintf (vect_dump, "Detected interleaving of size %d", (int)stride);
2215
2216 /* SLP: create an SLP data structure for every interleaving group of
2217 stores for further analysis in vect_analyze_slp. */
2218 if (!DR_IS_READ (dr) && !slp_impossible)
2219 {
2220 if (loop_vinfo)
2221 VEC_safe_push (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo),
2222 stmt);
2223 if (bb_vinfo)
2224 VEC_safe_push (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo),
2225 stmt);
2226 }
2227 }
2228
2229 return true;
2230 }
2231
2232
2233 /* Analyze the access pattern of the data-reference DR.
2234 In case of non-consecutive accesses call vect_analyze_group_access() to
2235 analyze groups of strided accesses. */
2236
2237 static bool
2238 vect_analyze_data_ref_access (struct data_reference *dr)
2239 {
2240 tree step = DR_STEP (dr);
2241 tree scalar_type = TREE_TYPE (DR_REF (dr));
2242 gimple stmt = DR_STMT (dr);
2243 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2244 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2245 struct loop *loop = NULL;
2246 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2247
2248 if (loop_vinfo)
2249 loop = LOOP_VINFO_LOOP (loop_vinfo);
2250
2251 if (loop_vinfo && !step)
2252 {
2253 if (vect_print_dump_info (REPORT_DETAILS))
2254 fprintf (vect_dump, "bad data-ref access in loop");
2255 return false;
2256 }
2257
2258 /* Don't allow invariant accesses in loops. */
2259 if (loop_vinfo && dr_step == 0)
2260 return false;
2261
2262 if (loop && nested_in_vect_loop_p (loop, stmt))
2263 {
2264 /* Interleaved accesses are not yet supported within outer-loop
2265 vectorization for references in the inner-loop. */
2266 DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) = NULL;
2267
2268 /* For the rest of the analysis we use the outer-loop step. */
2269 step = STMT_VINFO_DR_STEP (stmt_info);
2270 dr_step = TREE_INT_CST_LOW (step);
2271
2272 if (dr_step == 0)
2273 {
2274 if (vect_print_dump_info (REPORT_ALIGNMENT))
2275 fprintf (vect_dump, "zero step in outer loop.");
2276 if (DR_IS_READ (dr))
2277 return true;
2278 else
2279 return false;
2280 }
2281 }
2282
2283 /* Consecutive? */
2284 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type)))
2285 {
2286 /* Mark that it is not interleaving. */
2287 DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) = NULL;
2288 return true;
2289 }
2290
2291 if (loop && nested_in_vect_loop_p (loop, stmt))
2292 {
2293 if (vect_print_dump_info (REPORT_ALIGNMENT))
2294 fprintf (vect_dump, "strided access in outer loop.");
2295 return false;
2296 }
2297
2298 /* Not a consecutive access - check if it's part of an interleaving group. */
2299 return vect_analyze_group_access (dr);
2300 }
2301
2302
2303 /* Function vect_analyze_data_ref_accesses.
2304
2305 Analyze the access pattern of all the data references in the loop.
2306
2307 FORNOW: the only access pattern that is considered vectorizable is a
2308 simple step 1 (consecutive) access.
2309
2310 FORNOW: handle only arrays and pointer accesses. */
2311
2312 bool
2313 vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
2314 {
2315 unsigned int i;
2316 VEC (data_reference_p, heap) *datarefs;
2317 struct data_reference *dr;
2318
2319 if (vect_print_dump_info (REPORT_DETAILS))
2320 fprintf (vect_dump, "=== vect_analyze_data_ref_accesses ===");
2321
2322 if (loop_vinfo)
2323 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2324 else
2325 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2326
2327 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
2328 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2329 && !vect_analyze_data_ref_access (dr))
2330 {
2331 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2332 fprintf (vect_dump, "not vectorized: complicated access pattern.");
2333
2334 if (bb_vinfo)
2335 {
2336 /* Mark the statement as not vectorizable. */
2337 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2338 continue;
2339 }
2340 else
2341 return false;
2342 }
2343
2344 return true;
2345 }
2346
2347 /* Function vect_prune_runtime_alias_test_list.
2348
2349 Prune a list of ddrs to be tested at run-time by versioning for alias.
2350 Return FALSE if the resulting list of ddrs is longer than allowed by
2351 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
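/* Note (assumed mapping, for illustration only): the limit above corresponds
   to the --param vect-max-version-for-alias-checks command-line parameter.  */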
2352
2353 bool
2354 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
2355 {
2356 VEC (ddr_p, heap) * ddrs =
2357 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2358 unsigned i, j;
2359
2360 if (vect_print_dump_info (REPORT_DETAILS))
2361 fprintf (vect_dump, "=== vect_prune_runtime_alias_test_list ===");
2362
2363 for (i = 0; i < VEC_length (ddr_p, ddrs); )
2364 {
2365 bool found;
2366 ddr_p ddr_i;
2367
2368 ddr_i = VEC_index (ddr_p, ddrs, i);
2369 found = false;
2370
2371 for (j = 0; j < i; j++)
2372 {
2373 ddr_p ddr_j = VEC_index (ddr_p, ddrs, j);
2374
2375 if (vect_vfa_range_equal (ddr_i, ddr_j))
2376 {
2377 if (vect_print_dump_info (REPORT_DR_DETAILS))
2378 {
2379 fprintf (vect_dump, "found equal ranges ");
2380 print_generic_expr (vect_dump, DR_REF (DDR_A (ddr_i)), TDF_SLIM);
2381 fprintf (vect_dump, ", ");
2382 print_generic_expr (vect_dump, DR_REF (DDR_B (ddr_i)), TDF_SLIM);
2383 fprintf (vect_dump, " and ");
2384 print_generic_expr (vect_dump, DR_REF (DDR_A (ddr_j)), TDF_SLIM);
2385 fprintf (vect_dump, ", ");
2386 print_generic_expr (vect_dump, DR_REF (DDR_B (ddr_j)), TDF_SLIM);
2387 }
2388 found = true;
2389 break;
2390 }
2391 }
2392
2393 if (found)
2394 {
2395 VEC_ordered_remove (ddr_p, ddrs, i);
2396 continue;
2397 }
2398 i++;
2399 }
2400
2401 if (VEC_length (ddr_p, ddrs) >
2402 (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
2403 {
2404 if (vect_print_dump_info (REPORT_DR_DETAILS))
2405 {
2406 fprintf (vect_dump,
2407 "disable versioning for alias - max number of generated "
2408 "checks exceeded.");
2409 }
2410
2411 VEC_truncate (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo), 0);
2412
2413 return false;
2414 }
2415
2416 return true;
2417 }
2418
2419
2420 /* Function vect_analyze_data_refs.
2421
2422 Find all the data references in the loop or basic block.
2423
2424 The general structure of the analysis of data refs in the vectorizer is as
2425 follows:
2426 1- vect_analyze_data_refs(loop/bb): call
2427 compute_data_dependences_for_loop/bb to find and analyze all data-refs
2428 in the loop/bb and their dependences.
2429 2- vect_analyze_dependences(): apply dependence testing using ddrs.
2430 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
2431 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
2432
2433 */
2434
2435 bool
2436 vect_analyze_data_refs (loop_vec_info loop_vinfo,
2437 bb_vec_info bb_vinfo,
2438 int *min_vf)
2439 {
2440 struct loop *loop = NULL;
2441 basic_block bb = NULL;
2442 unsigned int i;
2443 VEC (data_reference_p, heap) *datarefs;
2444 struct data_reference *dr;
2445 tree scalar_type;
2446 bool res;
2447
2448 if (vect_print_dump_info (REPORT_DETAILS))
2449 fprintf (vect_dump, "=== vect_analyze_data_refs ===\n");
2450
2451 if (loop_vinfo)
2452 {
2453 loop = LOOP_VINFO_LOOP (loop_vinfo);
2454 res = compute_data_dependences_for_loop
2455 (loop, true, &LOOP_VINFO_DATAREFS (loop_vinfo),
2456 &LOOP_VINFO_DDRS (loop_vinfo));
2457
2458 if (!res)
2459 {
2460 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2461 fprintf (vect_dump, "not vectorized: loop contains function calls"
2462 " or data references that cannot be analyzed");
2463 return false;
2464 }
2465
2466 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2467 }
2468 else
2469 {
2470 bb = BB_VINFO_BB (bb_vinfo);
2471 res = compute_data_dependences_for_bb (bb, true,
2472 &BB_VINFO_DATAREFS (bb_vinfo),
2473 &BB_VINFO_DDRS (bb_vinfo));
2474 if (!res)
2475 {
2476 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2477 fprintf (vect_dump, "not vectorized: basic block contains function"
2478 " calls or data references that cannot be analyzed");
2479 return false;
2480 }
2481
2482 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2483 }
2484
2485 /* Go through the data-refs and check that the analysis succeeded. Update
2486 the pointer from the stmt_vec_info struct to the DR, and set the vectype. */
2487
2488 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
2489 {
2490 gimple stmt;
2491 stmt_vec_info stmt_info;
2492 tree base, offset, init;
2493 int vf;
2494
2495 if (!dr || !DR_REF (dr))
2496 {
2497 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2498 fprintf (vect_dump, "not vectorized: unhandled data-ref ");
2499 return false;
2500 }
2501
2502 stmt = DR_STMT (dr);
2503 stmt_info = vinfo_for_stmt (stmt);
2504
2505 /* Check that analysis of the data-ref succeeded. */
2506 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
2507 || !DR_STEP (dr))
2508 {
2509 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2510 {
2511 fprintf (vect_dump, "not vectorized: data ref analysis failed ");
2512 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2513 }
2514
2515 if (bb_vinfo)
2516 {
2517 /* Mark the statement as not vectorizable. */
2518 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2519 continue;
2520 }
2521 else
2522 return false;
2523 }
2524
2525 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
2526 {
2527 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2528 fprintf (vect_dump, "not vectorized: base addr of dr is a "
2529 "constant");
2530 if (bb_vinfo)
2531 {
2532 /* Mark the statement as not vectorizable. */
2533 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2534 continue;
2535 }
2536 else
2537 return false;
2538 }
2539
2540 base = unshare_expr (DR_BASE_ADDRESS (dr));
2541 offset = unshare_expr (DR_OFFSET (dr));
2542 init = unshare_expr (DR_INIT (dr));
2543
2544 /* Update DR field in stmt_vec_info struct. */
2545
2546 /* If the dataref is in an inner-loop of the loop that is considered for
2547 vectorization, we also want to analyze the access relative to
2548 the outer-loop (DR contains information only relative to the
2549 inner-most enclosing loop). We do that by building a reference to the
2550 first location accessed by the inner-loop, and analyze it relative to
2551 the outer-loop. */
2552 if (loop && nested_in_vect_loop_p (loop, stmt))
2553 {
2554 tree outer_step, outer_base, outer_init;
2555 HOST_WIDE_INT pbitsize, pbitpos;
2556 tree poffset;
2557 enum machine_mode pmode;
2558 int punsignedp, pvolatilep;
2559 affine_iv base_iv, offset_iv;
2560 tree dinit;
2561
2562 /* Build a reference to the first location accessed by the
2563 inner-loop: *(BASE+INIT). (The first location is actually
2564 BASE+INIT+OFFSET, but we add OFFSET separately later). */
2565 tree inner_base = build_fold_indirect_ref
2566 (fold_build2 (POINTER_PLUS_EXPR,
2567 TREE_TYPE (base), base,
2568 fold_convert (sizetype, init)));
2569
2570 if (vect_print_dump_info (REPORT_DETAILS))
2571 {
2572 fprintf (vect_dump, "analyze in outer-loop: ");
2573 print_generic_expr (vect_dump, inner_base, TDF_SLIM);
2574 }
2575
2576 outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
2577 &poffset, &pmode, &punsignedp, &pvolatilep, false);
2578 gcc_assert (outer_base != NULL_TREE);
2579
2580 if (pbitpos % BITS_PER_UNIT != 0)
2581 {
2582 if (vect_print_dump_info (REPORT_DETAILS))
2583 fprintf (vect_dump, "failed: bit offset alignment.\n");
2584 return false;
2585 }
2586
2587 outer_base = build_fold_addr_expr (outer_base);
2588 if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
2589 &base_iv, false))
2590 {
2591 if (vect_print_dump_info (REPORT_DETAILS))
2592 fprintf (vect_dump, "failed: evolution of base is not affine.\n");
2593 return false;
2594 }
2595
2596 if (offset)
2597 {
2598 if (poffset)
2599 poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
2600 poffset);
2601 else
2602 poffset = offset;
2603 }
2604
2605 if (!poffset)
2606 {
2607 offset_iv.base = ssize_int (0);
2608 offset_iv.step = ssize_int (0);
2609 }
2610 else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
2611 &offset_iv, false))
2612 {
2613 if (vect_print_dump_info (REPORT_DETAILS))
2614 fprintf (vect_dump, "evolution of offset is not affine.\n");
2615 return false;
2616 }
2617
2618 outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
2619 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
2620 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
2621 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
2622 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
2623
2624 outer_step = size_binop (PLUS_EXPR,
2625 fold_convert (ssizetype, base_iv.step),
2626 fold_convert (ssizetype, offset_iv.step));
2627
2628 STMT_VINFO_DR_STEP (stmt_info) = outer_step;
2629 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
2630 STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
2631 STMT_VINFO_DR_INIT (stmt_info) = outer_init;
2632 STMT_VINFO_DR_OFFSET (stmt_info) =
2633 fold_convert (ssizetype, offset_iv.base);
2634 STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
2635 size_int (highest_pow2_factor (offset_iv.base));
2636
2637 if (vect_print_dump_info (REPORT_DETAILS))
2638 {
2639 fprintf (vect_dump, "\touter base_address: ");
2640 print_generic_expr (vect_dump, STMT_VINFO_DR_BASE_ADDRESS (stmt_info), TDF_SLIM);
2641 fprintf (vect_dump, "\n\touter offset from base address: ");
2642 print_generic_expr (vect_dump, STMT_VINFO_DR_OFFSET (stmt_info), TDF_SLIM);
2643 fprintf (vect_dump, "\n\touter constant offset from base address: ");
2644 print_generic_expr (vect_dump, STMT_VINFO_DR_INIT (stmt_info), TDF_SLIM);
2645 fprintf (vect_dump, "\n\touter step: ");
2646 print_generic_expr (vect_dump, STMT_VINFO_DR_STEP (stmt_info), TDF_SLIM);
2647 fprintf (vect_dump, "\n\touter aligned to: ");
2648 print_generic_expr (vect_dump, STMT_VINFO_DR_ALIGNED_TO (stmt_info), TDF_SLIM);
2649 }
2650 }
2651
2652 if (STMT_VINFO_DATA_REF (stmt_info))
2653 {
2654 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2655 {
2656 fprintf (vect_dump,
2657 "not vectorized: more than one data ref in stmt: ");
2658 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2659 }
2660 return false;
2661 }
2662
2663 STMT_VINFO_DATA_REF (stmt_info) = dr;
2664
2665 /* Set vectype for STMT. */
2666 scalar_type = TREE_TYPE (DR_REF (dr));
2667 STMT_VINFO_VECTYPE (stmt_info) =
2668 get_vectype_for_scalar_type (scalar_type);
2669 if (!STMT_VINFO_VECTYPE (stmt_info))
2670 {
2671 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
2672 {
2673 fprintf (vect_dump,
2674 "not vectorized: no vectype for stmt: ");
2675 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
2676 fprintf (vect_dump, " scalar_type: ");
2677 print_generic_expr (vect_dump, scalar_type, TDF_DETAILS);
2678 }
2679
2680 if (bb_vinfo)
2681 {
2682 /* Mark the statement as not vectorizable. */
2683 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2684 continue;
2685 }
2686 else
2687 return false;
2688 }
2689
2690 /* Adjust the minimal vectorization factor according to the
2691 vector type. */
2692 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
2693 if (vf > *min_vf)
2694 *min_vf = vf;
2695 }
2696
2697 return true;
2698 }
2699
2700
2701 /* Function vect_get_new_vect_var.
2702
2703 Returns a name for a new variable. The current naming scheme prepends the
2704 prefix "vect_", "stmp_" or "vect_p" (depending on the value of VAR_KIND)
2705 to NAME when it is provided; otherwise the prefix alone is used for the
2706 vectorizer-generated variable. */
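/* Illustrative examples (hypothetical arguments, not from the original code):
     vect_get_new_vect_var (ptr_type, vect_pointer_var, "a")
   yields a temporary whose name begins with "vect_pa", while
     vect_get_new_vect_var (vectype, vect_simple_var, NULL)
   falls back to the bare "vect_" prefix.  */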
2707
2708 tree
2709 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
2710 {
2711 const char *prefix;
2712 tree new_vect_var;
2713
2714 switch (var_kind)
2715 {
2716 case vect_simple_var:
2717 prefix = "vect_";
2718 break;
2719 case vect_scalar_var:
2720 prefix = "stmp_";
2721 break;
2722 case vect_pointer_var:
2723 prefix = "vect_p";
2724 break;
2725 default:
2726 gcc_unreachable ();
2727 }
2728
2729 if (name)
2730 {
2731 char* tmp = concat (prefix, name, NULL);
2732 new_vect_var = create_tmp_var (type, tmp);
2733 free (tmp);
2734 }
2735 else
2736 new_vect_var = create_tmp_var (type, prefix);
2737
2738 /* Mark vector typed variable as a gimple register variable. */
2739 if (TREE_CODE (type) == VECTOR_TYPE)
2740 DECL_GIMPLE_REG_P (new_vect_var) = true;
2741
2742 return new_vect_var;
2743 }
2744
2745
2746 /* Function vect_create_addr_base_for_vector_ref.
2747
2748 Create an expression that computes the address of the first memory location
2749 that will be accessed for a data reference.
2750
2751 Input:
2752 STMT: The statement containing the data reference.
2753 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
2754 OFFSET: Optional. If supplied, it is added to the initial address.
2755 LOOP: Specify relative to which loop-nest should the address be computed.
2756 For example, when the dataref is in an inner-loop nested in an
2757 outer-loop that is now being vectorized, LOOP can be either the
2758 outer-loop, or the inner-loop. The first memory location accessed
2759 by the following dataref ('in' points to short):
2760
2761 for (i=0; i<N; i++)
2762 for (j=0; j<M; j++)
2763 s += in[i+j]
2764
2765 is as follows:
2766 if LOOP=i_loop: &in (relative to i_loop)
2767 if LOOP=j_loop: &in+i*2B (relative to j_loop)
2768
2769 Output:
2770 1. Return an SSA_NAME whose value is the address of the memory location of
2771 the first vector of the data reference.
2772 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
2773 these statement(s) which define the returned SSA_NAME.
2774
2775 FORNOW: We are only handling array accesses with step 1. */
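/* Illustrative caller sketch (assumed local names STMTS and ADDR, not part
   of the original code):

     gimple_seq stmts = NULL;
     tree addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
                                                       NULL_TREE, loop);
     if (stmts)
       gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

   i.e. the caller must emit any statements collected in STMTS before ADDR
   is used.  */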
2776
2777 tree
2778 vect_create_addr_base_for_vector_ref (gimple stmt,
2779 gimple_seq *new_stmt_list,
2780 tree offset,
2781 struct loop *loop)
2782 {
2783 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2784 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2785 tree data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
2786 tree base_name;
2787 tree data_ref_base_var;
2788 tree vec_stmt;
2789 tree addr_base, addr_expr;
2790 tree dest;
2791 gimple_seq seq = NULL;
2792 tree base_offset = unshare_expr (DR_OFFSET (dr));
2793 tree init = unshare_expr (DR_INIT (dr));
2794 tree vect_ptr_type;
2795 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2796 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2797 tree base;
2798
2799 if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
2800 {
2801 struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
2802
2803 gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
2804
2805 data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
2806 base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
2807 init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
2808 }
2809
2810 if (loop_vinfo)
2811 base_name = build_fold_indirect_ref (data_ref_base);
2812 else
2813 {
2814 base_offset = ssize_int (0);
2815 init = ssize_int (0);
2816 base_name = build_fold_indirect_ref (unshare_expr (DR_REF (dr)));
2817 }
2818
2819 data_ref_base_var = create_tmp_var (TREE_TYPE (data_ref_base), "batmp");
2820 add_referenced_var (data_ref_base_var);
2821 data_ref_base = force_gimple_operand (data_ref_base, &seq, true,
2822 data_ref_base_var);
2823 gimple_seq_add_seq (new_stmt_list, seq);
2824
2825 /* Create base_offset */
2826 base_offset = size_binop (PLUS_EXPR,
2827 fold_convert (sizetype, base_offset),
2828 fold_convert (sizetype, init));
2829 dest = create_tmp_var (sizetype, "base_off");
2830 add_referenced_var (dest);
2831 base_offset = force_gimple_operand (base_offset, &seq, true, dest);
2832 gimple_seq_add_seq (new_stmt_list, seq);
2833
2834 if (offset)
2835 {
2836 tree tmp = create_tmp_var (sizetype, "offset");
2837
2838 add_referenced_var (tmp);
2839 offset = fold_build2 (MULT_EXPR, sizetype,
2840 fold_convert (sizetype, offset), step);
2841 base_offset = fold_build2 (PLUS_EXPR, sizetype,
2842 base_offset, offset);
2843 base_offset = force_gimple_operand (base_offset, &seq, false, tmp);
2844 gimple_seq_add_seq (new_stmt_list, seq);
2845 }
2846
2847 /* base + base_offset */
2848 if (loop_vinfo)
2849 addr_base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (data_ref_base),
2850 data_ref_base, base_offset);
2851 else
2852 {
2853 addr_base = build1 (ADDR_EXPR,
2854 build_pointer_type (TREE_TYPE (DR_REF (dr))),
2855 unshare_expr (DR_REF (dr)));
2856 }
2857
2858 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
2859 base = get_base_address (DR_REF (dr));
2860 if (base
2861 && TREE_CODE (base) == MEM_REF)
2862 vect_ptr_type
2863 = build_qualified_type (vect_ptr_type,
2864 TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base, 0))));
2865
2866 vec_stmt = fold_convert (vect_ptr_type, addr_base);
2867 addr_expr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
2868 get_name (base_name));
2869 add_referenced_var (addr_expr);
2870 vec_stmt = force_gimple_operand (vec_stmt, &seq, false, addr_expr);
2871 gimple_seq_add_seq (new_stmt_list, seq);
2872
2873 if (DR_PTR_INFO (dr)
2874 && TREE_CODE (vec_stmt) == SSA_NAME)
2875 duplicate_ssa_name_ptr_info (vec_stmt, DR_PTR_INFO (dr));
2876
2877 if (vect_print_dump_info (REPORT_DETAILS))
2878 {
2879 fprintf (vect_dump, "created ");
2880 print_generic_expr (vect_dump, vec_stmt, TDF_SLIM);
2881 }
2882
2883 return vec_stmt;
2884 }
2885
2886
2887 /* Function vect_create_data_ref_ptr.
2888
2889 Create a new pointer to vector type (vp), that points to the first location
2890 accessed in the loop by STMT, along with the def-use update chain to
2891 appropriately advance the pointer through the loop iterations. Also set
2892 aliasing information for the pointer. This vector pointer is used by the
2893 callers to this function to create a memory reference expression for vector
2894 load/store access.
2895
2896 Input:
2897 1. STMT: a stmt that references memory. Expected to be of the form
2898 GIMPLE_ASSIGN <name, data-ref> or
2899 GIMPLE_ASSIGN <data-ref, name>.
2900 2. AT_LOOP: the loop where the vector memref is to be created.
2901 3. OFFSET (optional): an offset to be added to the initial address accessed
2902 by the data-ref in STMT.
2903 4. ONLY_INIT: indicate if vp is to be updated in the loop, or remain
2904 pointing to the initial address.
2905 5. TYPE: if not NULL indicates the required type of the data-ref.
2906
2907 Output:
2908 1. Declare a new ptr to vector_type, and have it point to the base of the
2909 data reference (initial address accessed by the data reference).
2910 For example, for vector of type V8HI, the following code is generated:
2911
2912 v8hi *vp;
2913 vp = (v8hi *)initial_address;
2914
2915 if OFFSET is not supplied:
2916 initial_address = &a[init];
2917 if OFFSET is supplied:
2918 initial_address = &a[init + OFFSET];
2919
2920 Return the initial_address in INITIAL_ADDRESS.
2921
2922 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
2923 update the pointer in each iteration of the loop.
2924
2925 Return the increment stmt that updates the pointer in PTR_INCR.
2926
2927 3. Set INV_P to true if the access pattern of the data reference in the
2928 vectorized loop is invariant. Set it to false otherwise.
2929
2930 4. Return the pointer. */
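/* Illustrative caller sketch (assumed local names, not part of the original
   code):

     tree init_addr;
     gimple ptr_incr;
     bool inv_p;
     tree dataref_ptr
       = vect_create_data_ref_ptr (stmt, loop, NULL_TREE, &init_addr,
                                   &ptr_incr, false, &inv_p);

   DATAREF_PTR is then dereferenced to build the vector load/store, and
   PTR_INCR can later be handed to bump_vector_ptr when several vector
   accesses are generated for one scalar access.  */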
2931
2932 tree
2933 vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
2934 tree offset, tree *initial_address, gimple *ptr_incr,
2935 bool only_init, bool *inv_p)
2936 {
2937 tree base_name;
2938 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2939 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2940 struct loop *loop = NULL;
2941 bool nested_in_vect_loop = false;
2942 struct loop *containing_loop = NULL;
2943 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2944 tree vect_ptr_type;
2945 tree vect_ptr;
2946 tree new_temp;
2947 gimple vec_stmt;
2948 gimple_seq new_stmt_list = NULL;
2949 edge pe = NULL;
2950 basic_block new_bb;
2951 tree vect_ptr_init;
2952 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2953 tree vptr;
2954 gimple_stmt_iterator incr_gsi;
2955 bool insert_after;
2956 tree indx_before_incr, indx_after_incr;
2957 gimple incr;
2958 tree step;
2959 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2960 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2961 tree base;
2962
2963 if (loop_vinfo)
2964 {
2965 loop = LOOP_VINFO_LOOP (loop_vinfo);
2966 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
2967 containing_loop = (gimple_bb (stmt))->loop_father;
2968 pe = loop_preheader_edge (loop);
2969 }
2970 else
2971 {
2972 gcc_assert (bb_vinfo);
2973 only_init = true;
2974 *ptr_incr = NULL;
2975 }
2976
2977 /* Check the step (evolution) of the load in LOOP, and record
2978 whether it's invariant. */
2979 if (nested_in_vect_loop)
2980 step = STMT_VINFO_DR_STEP (stmt_info);
2981 else
2982 step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
2983
2984 if (tree_int_cst_compare (step, size_zero_node) == 0)
2985 *inv_p = true;
2986 else
2987 *inv_p = false;
2988
2989 /* Create an expression for the first address accessed by this load
2990 in LOOP. */
2991 base_name = build_fold_indirect_ref (unshare_expr (DR_BASE_ADDRESS (dr)));
2992
2993 if (vect_print_dump_info (REPORT_DETAILS))
2994 {
2995 tree data_ref_base = base_name;
2996 fprintf (vect_dump, "create vector-pointer variable to type: ");
2997 print_generic_expr (vect_dump, vectype, TDF_SLIM);
2998 if (TREE_CODE (data_ref_base) == VAR_DECL
2999 || TREE_CODE (data_ref_base) == ARRAY_REF)
3000 fprintf (vect_dump, " vectorizing an array ref: ");
3001 else if (TREE_CODE (data_ref_base) == COMPONENT_REF)
3002 fprintf (vect_dump, " vectorizing a record based array ref: ");
3003 else if (TREE_CODE (data_ref_base) == SSA_NAME)
3004 fprintf (vect_dump, " vectorizing a pointer ref: ");
3005 print_generic_expr (vect_dump, base_name, TDF_SLIM);
3006 }
3007
3008 /** (1) Create the new vector-pointer variable: **/
3009 vect_ptr_type = build_pointer_type (vectype);
3010 base = get_base_address (DR_REF (dr));
3011 if (base
3012 && TREE_CODE (base) == MEM_REF)
3013 vect_ptr_type
3014 = build_qualified_type (vect_ptr_type,
3015 TYPE_QUALS (TREE_TYPE (TREE_OPERAND (base, 0))));
3016 vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
3017 get_name (base_name));
3018
3019 /* Vector types inherit the alias set of their component type by default so
3020 we need to use a ref-all pointer if the data reference does not conflict
3021 with the created vector data reference because it is not addressable. */
3022 if (!alias_sets_conflict_p (get_deref_alias_set (vect_ptr),
3023 get_alias_set (DR_REF (dr))))
3024 {
3025 vect_ptr_type
3026 = build_pointer_type_for_mode (vectype,
3027 TYPE_MODE (vect_ptr_type), true);
3028 vect_ptr = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
3029 get_name (base_name));
3030 }
3031
3032 /* Likewise for any of the data references in the stmt group. */
3033 else if (STMT_VINFO_DR_GROUP_SIZE (stmt_info) > 1)
3034 {
3035 gimple orig_stmt = STMT_VINFO_DR_GROUP_FIRST_DR (stmt_info);
3036 do
3037 {
3038 tree lhs = gimple_assign_lhs (orig_stmt);
3039 if (!alias_sets_conflict_p (get_deref_alias_set (vect_ptr),
3040 get_alias_set (lhs)))
3041 {
3042 vect_ptr_type
3043 = build_pointer_type_for_mode (vectype,
3044 TYPE_MODE (vect_ptr_type), true);
3045 vect_ptr
3046 = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var,
3047 get_name (base_name));
3048 break;
3049 }
3050
3051 orig_stmt = STMT_VINFO_DR_GROUP_NEXT_DR (vinfo_for_stmt (orig_stmt));
3052 }
3053 while (orig_stmt);
3054 }
3055
3056 add_referenced_var (vect_ptr);
3057
3058 /** Note: If the dataref is in an inner-loop nested in LOOP, and we are
3059 vectorizing LOOP (i.e. outer-loop vectorization), we need to create two
3060 def-use update cycles for the pointer: One relative to the outer-loop
3061 (LOOP), which is what steps (3) and (4) below do. The other is relative
3062 to the inner-loop (which is the inner-most loop containing the dataref),
3063 and this is done by step (5) below.
3064
3065 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
3066 inner-most loop, and so steps (3),(4) work the same, and step (5) is
3067 redundant. Steps (3),(4) create the following:
3068
3069 vp0 = &base_addr;
3070 LOOP: vp1 = phi(vp0,vp2)
3071 ...
3072 ...
3073 vp2 = vp1 + step
3074 goto LOOP
3075
3076 If there is an inner-loop nested in loop, then step (5) will also be
3077 applied, and an additional update in the inner-loop will be created:
3078
3079 vp0 = &base_addr;
3080 LOOP: vp1 = phi(vp0,vp2)
3081 ...
3082 inner: vp3 = phi(vp1,vp4)
3083 vp4 = vp3 + inner_step
3084 if () goto inner
3085 ...
3086 vp2 = vp1 + step
3087 if () goto LOOP */
3088
3089 /** (3) Calculate the initial address of the vector-pointer, and set
3090 the vector-pointer to point to it before the loop: **/
3091
3092 /* Create: (&(base[init_val+offset]) in the loop preheader. */
3093
3094 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
3095 offset, loop);
3096 if (new_stmt_list)
3097 {
3098 if (pe)
3099 {
3100 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
3101 gcc_assert (!new_bb);
3102 }
3103 else
3104 gsi_insert_seq_before (&gsi, new_stmt_list, GSI_SAME_STMT);
3105 }
3106
3107 *initial_address = new_temp;
3108
3109 /* Create: p = (vectype *) initial_base */
3110 if (TREE_CODE (new_temp) != SSA_NAME
3111 || !useless_type_conversion_p (vect_ptr_type, TREE_TYPE (new_temp)))
3112 {
3113 vec_stmt = gimple_build_assign (vect_ptr,
3114 fold_convert (vect_ptr_type, new_temp));
3115 vect_ptr_init = make_ssa_name (vect_ptr, vec_stmt);
3116 /* Copy the points-to information if it exists. */
3117 if (DR_PTR_INFO (dr))
3118 duplicate_ssa_name_ptr_info (vect_ptr_init, DR_PTR_INFO (dr));
3119 gimple_assign_set_lhs (vec_stmt, vect_ptr_init);
3120 if (pe)
3121 {
3122 new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
3123 gcc_assert (!new_bb);
3124 }
3125 else
3126 gsi_insert_before (&gsi, vec_stmt, GSI_SAME_STMT);
3127 }
3128 else
3129 vect_ptr_init = new_temp;
3130
3131 /** (4) Handle the updating of the vector-pointer inside the loop.
3132 This is needed when ONLY_INIT is false, and also when AT_LOOP
3133 is the inner-loop nested in LOOP (during outer-loop vectorization).
3134 **/
3135
3136 /* No update in loop is required. */
3137 if (only_init && (!loop_vinfo || at_loop == loop))
3138 vptr = vect_ptr_init;
3139 else
3140 {
3141 /* The step of the vector pointer is the Vector Size. */
3142 tree step = TYPE_SIZE_UNIT (vectype);
3143 /* One exception to the above is when the scalar step of the load in
3144 LOOP is zero. In this case the step here is also zero. */
3145 if (*inv_p)
3146 step = size_zero_node;
3147
3148 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
3149
3150 create_iv (vect_ptr_init,
3151 fold_convert (vect_ptr_type, step),
3152 vect_ptr, loop, &incr_gsi, insert_after,
3153 &indx_before_incr, &indx_after_incr);
3154 incr = gsi_stmt (incr_gsi);
3155 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
3156
3157 /* Copy the points-to information if it exists. */
3158 if (DR_PTR_INFO (dr))
3159 {
3160 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
3161 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
3162 }
3163 if (ptr_incr)
3164 *ptr_incr = incr;
3165
3166 vptr = indx_before_incr;
3167 }
3168
3169 if (!nested_in_vect_loop || only_init)
3170 return vptr;
3171
3172
3173 /** (5) Handle the updating of the vector-pointer inside the inner-loop
3174 nested in LOOP, if it exists: **/
3175
3176 gcc_assert (nested_in_vect_loop);
3177 if (!only_init)
3178 {
3179 standard_iv_increment_position (containing_loop, &incr_gsi,
3180 &insert_after);
3181 create_iv (vptr, fold_convert (vect_ptr_type, DR_STEP (dr)), vect_ptr,
3182 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
3183 &indx_after_incr);
3184 incr = gsi_stmt (incr_gsi);
3185 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
3186
3187 /* Copy the points-to information if it exists. */
3188 if (DR_PTR_INFO (dr))
3189 {
3190 duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
3191 duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
3192 }
3193 if (ptr_incr)
3194 *ptr_incr = incr;
3195
3196 return indx_before_incr;
3197 }
3198 else
3199 gcc_unreachable ();
3200 }
3201
3202
3203 /* Function bump_vector_ptr
3204
3205 Increment a pointer (to a vector type) by vector-size. If requested,
3206 i.e. if PTR-INCR is given, then also connect the new increment stmt
3207 to the existing def-use update-chain of the pointer, by modifying
3208 the PTR_INCR as illustrated below:
3209
3210 The pointer def-use update-chain before this function:
3211 DATAREF_PTR = phi (p_0, p_2)
3212 ....
3213 PTR_INCR: p_2 = DATAREF_PTR + step
3214
3215 The pointer def-use update-chain after this function:
3216 DATAREF_PTR = phi (p_0, p_2)
3217 ....
3218 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
3219 ....
3220 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
3221
3222 Input:
3223 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
3224 in the loop.
3225 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
3226 the loop. The increment amount across iterations is expected
3227 to be vector_size.
3228 GSI - location where the new update stmt is to be placed.
3229 STMT - the original scalar memory-access stmt that is being vectorized.
3230 BUMP - optional. The offset by which to bump the pointer. If not given,
3231 the offset is assumed to be vector_size.
3232
3233 Output: Return NEW_DATAREF_PTR as illustrated above.
3234
3235 */
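/* Illustrative usage sketch (assumed context, not part of the original code):
   when one scalar access expands to several consecutive vector accesses, the
   second and later ones advance the pointer explicitly, e.g.

     dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                    NULL_TREE);

   Passing NULL_TREE as BUMP selects the default vector-size increment.  */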
3236
3237 tree
3238 bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
3239 gimple stmt, tree bump)
3240 {
3241 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3242 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3243 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3244 tree ptr_var = SSA_NAME_VAR (dataref_ptr);
3245 tree update = TYPE_SIZE_UNIT (vectype);
3246 gimple incr_stmt;
3247 ssa_op_iter iter;
3248 use_operand_p use_p;
3249 tree new_dataref_ptr;
3250
3251 if (bump)
3252 update = bump;
3253
3254 incr_stmt = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, ptr_var,
3255 dataref_ptr, update);
3256 new_dataref_ptr = make_ssa_name (ptr_var, incr_stmt);
3257 gimple_assign_set_lhs (incr_stmt, new_dataref_ptr);
3258 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
3259
3260 /* Copy the points-to information if it exists. */
3261 if (DR_PTR_INFO (dr))
3262 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
3263
3264 if (!ptr_incr)
3265 return new_dataref_ptr;
3266
3267 /* Update the vector-pointer's cross-iteration increment. */
3268 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
3269 {
3270 tree use = USE_FROM_PTR (use_p);
3271
3272 if (use == dataref_ptr)
3273 SET_USE (use_p, new_dataref_ptr);
3274 else
3275 gcc_assert (tree_int_cst_compare (use, update) == 0);
3276 }
3277
3278 return new_dataref_ptr;
3279 }
3280
3281
3282 /* Function vect_create_destination_var.
3283
3284 Create a new temporary of type VECTYPE. */
3285
3286 tree
3287 vect_create_destination_var (tree scalar_dest, tree vectype)
3288 {
3289 tree vec_dest;
3290 const char *new_name;
3291 tree type;
3292 enum vect_var_kind kind;
3293
3294 kind = vectype ? vect_simple_var : vect_scalar_var;
3295 type = vectype ? vectype : TREE_TYPE (scalar_dest);
3296
3297 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
3298
3299 new_name = get_name (scalar_dest);
3300 if (!new_name)
3301 new_name = "var_";
3302 vec_dest = vect_get_new_vect_var (type, kind, new_name);
3303 add_referenced_var (vec_dest);
3304
3305 return vec_dest;
3306 }
3307
3308 /* Function vect_strided_store_supported.
3309
3310 Returns TRUE if INTERLEAVE_HIGH and INTERLEAVE_LOW operations are supported,
3311 and FALSE otherwise. */
3312
3313 bool
3314 vect_strided_store_supported (tree vectype)
3315 {
3316 optab interleave_high_optab, interleave_low_optab;
3317 enum machine_mode mode;
3318
3319 mode = TYPE_MODE (vectype);
3320
3321 /* Check that the operation is supported. */
3322 interleave_high_optab = optab_for_tree_code (VEC_INTERLEAVE_HIGH_EXPR,
3323 vectype, optab_default);
3324 interleave_low_optab = optab_for_tree_code (VEC_INTERLEAVE_LOW_EXPR,
3325 vectype, optab_default);
3326 if (!interleave_high_optab || !interleave_low_optab)
3327 {
3328 if (vect_print_dump_info (REPORT_DETAILS))
3329 fprintf (vect_dump, "no optab for interleave.");
3330 return false;
3331 }
3332
3333 if (optab_handler (interleave_high_optab, mode) == CODE_FOR_nothing
3334 || optab_handler (interleave_low_optab, mode) == CODE_FOR_nothing)
3335 {
3336 if (vect_print_dump_info (REPORT_DETAILS))
3337 fprintf (vect_dump, "interleave op not supported by target.");
3338 return false;
3339 }
3340
3341 return true;
3342 }
3343
3344
3345 /* Function vect_permute_store_chain.
3346
3347 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
3348 a power of 2, generate interleave_high/low stmts to reorder the data
3349 correctly for the stores. Return the final references for stores in
3350 RESULT_CHAIN.
3351
3352 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
3353 The input is 4 vectors each containing 8 elements. We assign a number to each
3354 element; the input sequence is:
3355
3356 1st vec: 0 1 2 3 4 5 6 7
3357 2nd vec: 8 9 10 11 12 13 14 15
3358 3rd vec: 16 17 18 19 20 21 22 23
3359 4th vec: 24 25 26 27 28 29 30 31
3360
3361 The output sequence should be:
3362
3363 1st vec: 0 8 16 24 1 9 17 25
3364 2nd vec: 2 10 18 26 3 11 19 27
3365 3rd vec: 4 12 20 28 5 13 21 29
3366 4th vec: 6 14 22 30 7 15 23 31
3367
3368 i.e., we interleave the contents of the four vectors in their order.
3369
3370 We use interleave_high/low instructions to create such output. The input of
3371 each interleave_high/low operation is two vectors:
3372 1st vec 2nd vec
3373 0 1 2 3 4 5 6 7
3374 the even elements of the result vector are obtained left-to-right from the
3375 high/low elements of the first vector. The odd elements of the result are
3376 obtained left-to-right from the high/low elements of the second vector.
3377 The output of interleave_high will be: 0 4 1 5
3378 and of interleave_low: 2 6 3 7
3379
3380
3381 The permutation is done in log LENGTH stages. In each stage interleave_high
3382 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
3383 where the first argument is taken from the first half of DR_CHAIN and the
3384 second argument from its second half.
3385 In our example,
3386
3387 I1: interleave_high (1st vec, 3rd vec)
3388 I2: interleave_low (1st vec, 3rd vec)
3389 I3: interleave_high (2nd vec, 4th vec)
3390 I4: interleave_low (2nd vec, 4th vec)
3391
3392 The output for the first stage is:
3393
3394 I1: 0 16 1 17 2 18 3 19
3395 I2: 4 20 5 21 6 22 7 23
3396 I3: 8 24 9 25 10 26 11 27
3397 I4: 12 28 13 29 14 30 15 31
3398
3399 The output of the second stage, i.e. the final result is:
3400
3401 I1: 0 8 16 24 1 9 17 25
3402 I2: 2 10 18 26 3 11 19 27
3403 I3: 4 12 20 28 5 13 21 29
3404 I4: 6 14 22 30 7 15 23 31. */
3405
3406 bool
3407 vect_permute_store_chain (VEC(tree,heap) *dr_chain,
3408 unsigned int length,
3409 gimple stmt,
3410 gimple_stmt_iterator *gsi,
3411 VEC(tree,heap) **result_chain)
3412 {
3413 tree perm_dest, vect1, vect2, high, low;
3414 gimple perm_stmt;
3415 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
3416 int i;
3417 unsigned int j;
3418 enum tree_code high_code, low_code;
3419
3420 /* Check that the operation is supported. */
3421 if (!vect_strided_store_supported (vectype))
3422 return false;
3423
3424 *result_chain = VEC_copy (tree, heap, dr_chain);
3425
3426 for (i = 0; i < exact_log2 (length); i++)
3427 {
3428 for (j = 0; j < length/2; j++)
3429 {
3430 vect1 = VEC_index (tree, dr_chain, j);
3431 vect2 = VEC_index (tree, dr_chain, j+length/2);
3432
3433 /* Create interleaving stmt:
3434 in the case of big endian:
3435 high = interleave_high (vect1, vect2)
3436 and in the case of little endian:
3437 high = interleave_low (vect1, vect2). */
3438 perm_dest = create_tmp_var (vectype, "vect_inter_high");
3439 DECL_GIMPLE_REG_P (perm_dest) = 1;
3440 add_referenced_var (perm_dest);
3441 if (BYTES_BIG_ENDIAN)
3442 {
3443 high_code = VEC_INTERLEAVE_HIGH_EXPR;
3444 low_code = VEC_INTERLEAVE_LOW_EXPR;
3445 }
3446 else
3447 {
3448 low_code = VEC_INTERLEAVE_HIGH_EXPR;
3449 high_code = VEC_INTERLEAVE_LOW_EXPR;
3450 }
3451 perm_stmt = gimple_build_assign_with_ops (high_code, perm_dest,
3452 vect1, vect2);
3453 high = make_ssa_name (perm_dest, perm_stmt);
3454 gimple_assign_set_lhs (perm_stmt, high);
3455 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3456 VEC_replace (tree, *result_chain, 2*j, high);
3457
3458 /* Create interleaving stmt:
3459 in the case of big endian:
3460 low = interleave_low (vect1, vect2)
3461 and in the case of little endian:
3462 low = interleave_high (vect1, vect2). */
3463 perm_dest = create_tmp_var (vectype, "vect_inter_low");
3464 DECL_GIMPLE_REG_P (perm_dest) = 1;
3465 add_referenced_var (perm_dest);
3466 perm_stmt = gimple_build_assign_with_ops (low_code, perm_dest,
3467 vect1, vect2);
3468 low = make_ssa_name (perm_dest, perm_stmt);
3469 gimple_assign_set_lhs (perm_stmt, low);
3470 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3471 VEC_replace (tree, *result_chain, 2*j+1, low);
3472 }
3473 dr_chain = VEC_copy (tree, heap, *result_chain);
3474 }
3475 return true;
3476 }
3477
3478 /* Function vect_setup_realignment
3479
3480 This function is called when vectorizing an unaligned load using
3481 the dr_explicit_realign[_optimized] scheme.
3482 It generates the following code in the loop prolog:
3483
3484 p = initial_addr;
3485 x msq_init = *(floor(p)); # prolog load
3486 realignment_token = call target_builtin;
3487 loop:
3488 x msq = phi (msq_init, ---)
3489
3490 The stmts marked with x are generated only for the case of
3491 dr_explicit_realign_optimized.
3492
3493 The code above sets up a new (vector) pointer, pointing to the first
3494 location accessed by STMT, and a "floor-aligned" load using that pointer.
3495 It also generates code to compute the "realignment-token" (if the relevant
3496 target hook was defined), and creates a phi-node at the loop-header bb
3497 whose arguments are the result of the prolog-load (created by this
3498 function) and the result of a load that takes place in the loop (to be
3499 created by the caller to this function).
3500
3501 For the case of dr_explicit_realign_optimized:
3502 The caller to this function uses the phi-result (msq) to create the
3503 realignment code inside the loop, and sets up the missing phi argument,
3504 as follows:
3505 loop:
3506 msq = phi (msq_init, lsq)
3507 lsq = *(floor(p')); # load in loop
3508 result = realign_load (msq, lsq, realignment_token);
3509
3510 For the case of dr_explicit_realign:
3511 loop:
3512 msq = *(floor(p)); # load in loop
3513 p' = p + (VS-1);
3514 lsq = *(floor(p')); # load in loop
3515 result = realign_load (msq, lsq, realignment_token);
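
   As a model only (an illustration, not code generated by this function),
   the realign_load above can be thought of as selecting a window of VS
   elements out of the concatenation of its two aligned input vectors:

     result[i] = concat (msq, lsq) [misalign + i], for i = 0 .. VS-1

   where misalign is the offset (in elements) of the first scalar access
   from the preceding aligned address, and realignment_token encodes that
   offset in whatever form the target's realign instruction expects.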
3516
3517 Input:
3518 STMT - (scalar) load stmt to be vectorized. This load accesses
3519 a memory location that may be unaligned.
3520 GSI - place where new code is to be inserted.
3521 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
3522 is used.
3523
3524 Output:
3525 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
3526 target hook, if defined.
3527 Return value - the result of the loop-header phi node. */
3528
3529 tree
3530 vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
3531 tree *realignment_token,
3532 enum dr_alignment_support alignment_support_scheme,
3533 tree init_addr,
3534 struct loop **at_loop)
3535 {
3536 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3537 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3538 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3539 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3540 struct loop *loop = NULL;
3541 edge pe = NULL;
3542 tree scalar_dest = gimple_assign_lhs (stmt);
3543 tree vec_dest;
3544 gimple inc;
3545 tree ptr;
3546 tree data_ref;
3547 gimple new_stmt;
3548 basic_block new_bb;
3549 tree msq_init = NULL_TREE;
3550 tree new_temp;
3551 gimple phi_stmt;
3552 tree msq = NULL_TREE;
3553 gimple_seq stmts = NULL;
3554 bool inv_p;
3555 bool compute_in_loop = false;
3556 bool nested_in_vect_loop = false;
3557 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3558 struct loop *loop_for_initial_load = NULL;
3559
3560 if (loop_vinfo)
3561 {
3562 loop = LOOP_VINFO_LOOP (loop_vinfo);
3563 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3564 }
3565
3566 gcc_assert (alignment_support_scheme == dr_explicit_realign
3567 || alignment_support_scheme == dr_explicit_realign_optimized);
3568
3569 /* We need to generate three things:
3570 1. the misalignment computation
3571 2. the extra vector load (for the optimized realignment scheme).
3572 3. the phi node for the two vectors from which the realignment is
3573 done (for the optimized realignment scheme).
3574 */
3575
3576 /* 1. Determine where to generate the misalignment computation.
3577
3578 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
3579 calculation will be generated by this function, outside the loop (in the
3580 preheader). Otherwise, INIT_ADDR has already been computed for us by the
3581 caller, inside the loop.
3582
3583 Background: If the misalignment remains fixed throughout the iterations of
3584 the loop, then both realignment schemes are applicable, and also the
3585 misalignment computation can be done outside LOOP. This is because we are
3586 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
3587 are a multiple of VS (the Vector Size), and therefore the misalignment in
3588 different vectorized LOOP iterations is always the same.
3589 The problem arises only if the memory access is in an inner-loop nested
3590 inside LOOP, which is now being vectorized using outer-loop vectorization.
3591 This is the only case when the misalignment of the memory access may not
3592 remain fixed throughout the iterations of the inner-loop (as explained in
3593 detail in vect_supportable_dr_alignment). In this case, not only is the
3594 optimized realignment scheme not applicable, but also the misalignment
3595 computation (and generation of the realignment token that is passed to
3596 REALIGN_LOAD) has to be done inside the loop.
3597
3598 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
3599 or not, which in turn determines if the misalignment is computed inside
3600 the inner-loop, or outside LOOP. */
3601
3602 if (init_addr != NULL_TREE || !loop_vinfo)
3603 {
3604 compute_in_loop = true;
3605 gcc_assert (alignment_support_scheme == dr_explicit_realign);
3606 }
3607
3608
3609 /* 2. Determine where to generate the extra vector load.
3610
3611 For the optimized realignment scheme, instead of generating two vector
3612 loads in each iteration, we generate a single extra vector load in the
3613 preheader of the loop, and in each iteration reuse the result of the
3614 vector load from the previous iteration. In case the memory access is in
3615 an inner-loop nested inside LOOP, which is now being vectorized using
3616 outer-loop vectorization, we need to determine whether this initial vector
3617 load should be generated at the preheader of the inner-loop, or can be
3618 generated at the preheader of LOOP. If the memory access has no evolution
3619 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
3620 to be generated inside LOOP (in the preheader of the inner-loop). */
3621
3622 if (nested_in_vect_loop)
3623 {
3624 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
3625 bool invariant_in_outerloop =
3626 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
3627 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
3628 }
3629 else
3630 loop_for_initial_load = loop;
3631 if (at_loop)
3632 *at_loop = loop_for_initial_load;
3633
3634 if (loop_for_initial_load)
3635 pe = loop_preheader_edge (loop_for_initial_load);
3636
3637 /* 3. For the case of the optimized realignment, create the first vector
3638 load at the loop preheader. */
3639
3640 if (alignment_support_scheme == dr_explicit_realign_optimized)
3641 {
3642 /* Create msq_init = *(floor(p)) in the loop preheader. */
3643
3644 gcc_assert (!compute_in_loop);
3645 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3646 ptr = vect_create_data_ref_ptr (stmt, loop_for_initial_load, NULL_TREE,
3647 &init_addr, &inc, true, &inv_p);
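      /* Compute floor (ptr): mask off the low bits of the address so that
         it is aligned to the vector type's alignment
         (ptr & -TYPE_ALIGN_UNIT (vectype)).  */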
3648 new_stmt = gimple_build_assign_with_ops
3649 (BIT_AND_EXPR, NULL_TREE, ptr,
3650 build_int_cst (TREE_TYPE (ptr),
3651 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3652 new_temp = make_ssa_name (SSA_NAME_VAR (ptr), new_stmt);
3653 gimple_assign_set_lhs (new_stmt, new_temp);
3654 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
3655 gcc_assert (!new_bb);
3656 data_ref
3657 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
3658 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
3659 new_stmt = gimple_build_assign (vec_dest, data_ref);
3660 new_temp = make_ssa_name (vec_dest, new_stmt);
3661 gimple_assign_set_lhs (new_stmt, new_temp);
3662 mark_symbols_for_renaming (new_stmt);
3663 if (pe)
3664 {
3665 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
3666 gcc_assert (!new_bb);
3667 }
3668 else
3669 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
3670
3671 msq_init = gimple_assign_lhs (new_stmt);
3672 }
3673
3674 /* 4. Create realignment token using a target builtin, if available.
3675 It is done either inside the containing loop, or before LOOP (as
3676 determined above). */
3677
3678 if (targetm.vectorize.builtin_mask_for_load)
3679 {
3680 tree builtin_decl;
3681
3682 /* Compute INIT_ADDR - the initial address accessed by this memref. */
3683 if (!init_addr)
3684 {
3685 /* Generate the INIT_ADDR computation outside LOOP. */
3686 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
3687 NULL_TREE, loop);
3688 if (loop)
3689 {
3690 pe = loop_preheader_edge (loop);
3691 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3692 gcc_assert (!new_bb);
3693 }
3694 else
3695 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
3696 }
3697
3698 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
3699 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
3700 vec_dest =
3701 vect_create_destination_var (scalar_dest,
3702 gimple_call_return_type (new_stmt));
3703 new_temp = make_ssa_name (vec_dest, new_stmt);
3704 gimple_call_set_lhs (new_stmt, new_temp);
3705
3706 if (compute_in_loop)
3707 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
3708 else
3709 {
3710 /* Generate the misalignment computation outside LOOP. */
3711 pe = loop_preheader_edge (loop);
3712 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
3713 gcc_assert (!new_bb);
3714 }
3715
3716 *realignment_token = gimple_call_lhs (new_stmt);
3717
3718 /* The result of the CALL_EXPR to this builtin is determined from
3719 the value of the parameter and no global variables are touched
3720 which makes the builtin a "const" function. Requiring the
3721 builtin to have the "const" attribute makes it unnecessary
3722 to call mark_call_clobbered. */
3723 gcc_assert (TREE_READONLY (builtin_decl));
3724 }
3725
3726 if (alignment_support_scheme == dr_explicit_realign)
3727 return msq;
3728
3729 gcc_assert (!compute_in_loop);
3730 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
3731
3732
3733 /* 5. Create msq = phi <msq_init, lsq> in loop. */
3734
3735 pe = loop_preheader_edge (containing_loop);
3736 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3737 msq = make_ssa_name (vec_dest, NULL);
3738 phi_stmt = create_phi_node (msq, containing_loop->header);
3739 SSA_NAME_DEF_STMT (msq) = phi_stmt;
3740 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
3741
3742 return msq;
3743 }
3744
3745
3746 /* Function vect_strided_load_supported.
3747
3748 Returns TRUE if EXTRACT_EVEN and EXTRACT_ODD operations are supported,
3749 and FALSE otherwise. */
3750
3751 bool
3752 vect_strided_load_supported (tree vectype)
3753 {
3754 optab perm_even_optab, perm_odd_optab;
3755 enum machine_mode mode;
3756
3757 mode = TYPE_MODE (vectype);
3758
3759 perm_even_optab = optab_for_tree_code (VEC_EXTRACT_EVEN_EXPR, vectype,
3760 optab_default);
3761 if (!perm_even_optab)
3762 {
3763 if (vect_print_dump_info (REPORT_DETAILS))
3764 fprintf (vect_dump, "no optab for perm_even.");
3765 return false;
3766 }
3767
3768 if (optab_handler (perm_even_optab, mode) == CODE_FOR_nothing)
3769 {
3770 if (vect_print_dump_info (REPORT_DETAILS))
3771 fprintf (vect_dump, "perm_even op not supported by target.");
3772 return false;
3773 }
3774
3775 perm_odd_optab = optab_for_tree_code (VEC_EXTRACT_ODD_EXPR, vectype,
3776 optab_default);
3777 if (!perm_odd_optab)
3778 {
3779 if (vect_print_dump_info (REPORT_DETAILS))
3780 fprintf (vect_dump, "no optab for perm_odd.");
3781 return false;
3782 }
3783
3784 if (optab_handler (perm_odd_optab, mode) == CODE_FOR_nothing)
3785 {
3786 if (vect_print_dump_info (REPORT_DETAILS))
3787 fprintf (vect_dump, "perm_odd op not supported by target.");
3788 return false;
3789 }
3790 return true;
3791 }
3792
3793
3794 /* Function vect_permute_load_chain.
3795
3796 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
3797 a power of 2, generate extract_even/odd stmts to reorder the input data
3798 correctly. Return the final references for loads in RESULT_CHAIN.
3799
3800 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
3801 The input is 4 vectors each containing 8 elements. We assign a number to each
3802 element; the input sequence is:
3803
3804 1st vec: 0 1 2 3 4 5 6 7
3805 2nd vec: 8 9 10 11 12 13 14 15
3806 3rd vec: 16 17 18 19 20 21 22 23
3807 4th vec: 24 25 26 27 28 29 30 31
3808
3809 The output sequence should be:
3810
3811 1st vec: 0 4 8 12 16 20 24 28
3812 2nd vec: 1 5 9 13 17 21 25 29
3813 3rd vec: 2 6 10 14 18 22 26 30
3814 4th vec: 3 7 11 15 19 23 27 31
3815
3816 i.e., the first output vector should contain the first elements of each
3817 interleaving group, etc.
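
   As an illustration only (a hypothetical scalar source, not code produced
   by this function), such a group of four interleaved loads typically
   originates from a loop like:

     for (i = 0; i < N; i++)
       {
         a[i] = in[4*i];
         b[i] = in[4*i + 1];
         c[i] = in[4*i + 2];
         d[i] = in[4*i + 3];
       }

   The vector loads of in form DR_CHAIN; after the permutation, each vector
   in RESULT_CHAIN holds only a-values, only b-values, only c-values or
   only d-values.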
3818
3819 We use extract_even/odd instructions to create such output. The input of each
3820 extract_even/odd operation is two vectors
3821 1st vec 2nd vec
3822 0 1 2 3 4 5 6 7
3823
3824 and the output is the vector of extracted even/odd elements. The output of
3825 extract_even will be: 0 2 4 6
3826 and of extract_odd: 1 3 5 7
3827
3828
3829 The permutation is done in log LENGTH stages. In each stage extract_even and
3830 extract_odd stmts are created for each pair of vectors in DR_CHAIN in their
3831 order. In our example,
3832
3833 E1: extract_even (1st vec, 2nd vec)
3834 E2: extract_odd (1st vec, 2nd vec)
3835 E3: extract_even (3rd vec, 4th vec)
3836 E4: extract_odd (3rd vec, 4th vec)
3837
3838 The output for the first stage will be:
3839
3840 E1: 0 2 4 6 8 10 12 14
3841 E2: 1 3 5 7 9 11 13 15
3842 E3: 16 18 20 22 24 26 28 30
3843 E4: 17 19 21 23 25 27 29 31
3844
3845 In order to proceed and create the correct sequence for the next stage (or
3846 for the correct output, if the second stage is the last one, as in our
3847 example), we first put the output of the extract_even operation and then the
3848 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
3849 The input for the second stage is:
3850
3851 1st vec (E1): 0 2 4 6 8 10 12 14
3852 2nd vec (E3): 16 18 20 22 24 26 28 30
3853 3rd vec (E2): 1 3 5 7 9 11 13 15
3854 4th vec (E4): 17 19 21 23 25 27 29 31
3855
3856 The output of the second stage:
3857
3858 E1: 0 4 8 12 16 20 24 28
3859 E2: 2 6 10 14 18 22 26 30
3860 E3: 1 5 9 13 17 21 25 29
3861 E4: 3 7 11 15 19 23 27 31
3862
3863 And RESULT_CHAIN after reordering:
3864
3865 1st vec (E1): 0 4 8 12 16 20 24 28
3866 2nd vec (E3): 1 5 9 13 17 21 25 29
3867 3rd vec (E2): 2 6 10 14 18 22 26 30
3868 4th vec (E4): 3 7 11 15 19 23 27 31. */
3869
3870 bool
3871 vect_permute_load_chain (VEC(tree,heap) *dr_chain,
3872 unsigned int length,
3873 gimple stmt,
3874 gimple_stmt_iterator *gsi,
3875 VEC(tree,heap) **result_chain)
3876 {
3877 tree perm_dest, data_ref, first_vect, second_vect;
3878 gimple perm_stmt;
3879 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
3880 int i;
3881 unsigned int j;
3882
3883 /* Check that the operation is supported. */
3884 if (!vect_strided_load_supported (vectype))
3885 return false;
3886
3887 *result_chain = VEC_copy (tree, heap, dr_chain);
3888 for (i = 0; i < exact_log2 (length); i++)
3889 {
3890 for (j = 0; j < length; j += 2)
3891 {
3892 first_vect = VEC_index (tree, dr_chain, j);
3893 second_vect = VEC_index (tree, dr_chain, j+1);
3894
3895 /* data_ref = permute_even (first_data_ref, second_data_ref); */
3896 perm_dest = create_tmp_var (vectype, "vect_perm_even");
3897 DECL_GIMPLE_REG_P (perm_dest) = 1;
3898 add_referenced_var (perm_dest);
3899
3900 perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_EVEN_EXPR,
3901 perm_dest, first_vect,
3902 second_vect);
3903
3904 data_ref = make_ssa_name (perm_dest, perm_stmt);
3905 gimple_assign_set_lhs (perm_stmt, data_ref);
3906 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3907 mark_symbols_for_renaming (perm_stmt);
3908
3909 VEC_replace (tree, *result_chain, j/2, data_ref);
3910
3911 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
3912 perm_dest = create_tmp_var (vectype, "vect_perm_odd");
3913 DECL_GIMPLE_REG_P (perm_dest) = 1;
3914 add_referenced_var (perm_dest);
3915
3916 perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_ODD_EXPR,
3917 perm_dest, first_vect,
3918 second_vect);
3919 data_ref = make_ssa_name (perm_dest, perm_stmt);
3920 gimple_assign_set_lhs (perm_stmt, data_ref);
3921 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3922 mark_symbols_for_renaming (perm_stmt);
3923
3924 VEC_replace (tree, *result_chain, j/2+length/2, data_ref);
3925 }
3926 dr_chain = VEC_copy (tree, heap, *result_chain);
3927 }
3928 return true;
3929 }
3930
3931
3932 /* Function vect_transform_strided_load.
3933
3934 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
3935 to perform their permutation and assign the resulting vectorized statements to
3936 the corresponding scalar statements.
3937 */
3938
3939 bool
3940 vect_transform_strided_load (gimple stmt, VEC(tree,heap) *dr_chain, int size,
3941 gimple_stmt_iterator *gsi)
3942 {
3943 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3944 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3945 gimple next_stmt, new_stmt;
3946 VEC(tree,heap) *result_chain = NULL;
3947 unsigned int i, gap_count;
3948 tree tmp_data_ref;
3949
3950 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
3951 RESULT_CHAIN is the output of vect_permute_load_chain; it contains permuted
3952 vectors that are ready for vector computation. */
3953 result_chain = VEC_alloc (tree, heap, size);
3954 /* Permute. */
3955 if (!vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain))
3956 return false;
3957
3958 /* Put a permuted data-ref in the VECTORIZED_STMT field.
3959 Since we scan the chain starting from its first node, their order
3960 corresponds to the order of data-refs in RESULT_CHAIN. */
3961 next_stmt = first_stmt;
3962 gap_count = 1;
3963 FOR_EACH_VEC_ELT (tree, result_chain, i, tmp_data_ref)
3964 {
3965 if (!next_stmt)
3966 break;
3967
3968 /* Skip the gaps. Loads created for the gaps will be removed by the dead
3969 code elimination pass later. No need to check for the first stmt in
3970 the group, since it always exists.
3971 DR_GROUP_GAP is the number of steps in elements from the previous
3972 access (if there is no gap DR_GROUP_GAP is 1). We skip loads that
3973 correspond to the gaps.
3974 */
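      /* For instance (illustration only): if the scalar group accesses
         elements 0 and 2 of each structure but not element 1, the stmt for
         element 2 has DR_GROUP_GAP 2, so the permuted vector corresponding
         to the missing element 1 is not assigned to any scalar stmt and the
         load that produced it is later eliminated as dead code.  */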
3975 if (next_stmt != first_stmt
3976 && gap_count < DR_GROUP_GAP (vinfo_for_stmt (next_stmt)))
3977 {
3978 gap_count++;
3979 continue;
3980 }
3981
3982 while (next_stmt)
3983 {
3984 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
3985 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
3986 copies, and we put the new vector statement in the first available
3987 RELATED_STMT. */
3988 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
3989 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
3990 else
3991 {
3992 if (!DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
3993 {
3994 gimple prev_stmt =
3995 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
3996 gimple rel_stmt =
3997 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
3998 while (rel_stmt)
3999 {
4000 prev_stmt = rel_stmt;
4001 rel_stmt =
4002 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
4003 }
4004
4005 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
4006 new_stmt;
4007 }
4008 }
4009
4010 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
4011 gap_count = 1;
4012 /* If NEXT_STMT accesses the same DR as the previous statement,
4013 put the same TMP_DATA_REF as its vectorized statement; otherwise
4014 get the next data-ref from RESULT_CHAIN. */
4015 if (!next_stmt || !DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
4016 break;
4017 }
4018 }
4019
4020 VEC_free (tree, heap, result_chain);
4021 return true;
4022 }
4023
4024 /* Function vect_can_force_dr_alignment_p.
4025
4026 Returns whether the alignment of DECL can be forced such that it is
4027 aligned on an ALIGNMENT-bit boundary.
4028
4029 bool
4030 vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
4031 {
4032 if (TREE_CODE (decl) != VAR_DECL)
4033 return false;
4034
4035 if (DECL_EXTERNAL (decl))
4036 return false;
4037
4038 if (TREE_ASM_WRITTEN (decl))
4039 return false;
4040
4041 if (TREE_STATIC (decl))
4042 return (alignment <= MAX_OFILE_ALIGNMENT);
4043 else
4044 return (alignment <= MAX_STACK_ALIGNMENT);
4045 }
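
/* A typical use of the predicate above (an illustrative sketch only; the
   real caller is the alignment computation code elsewhere in the
   vectorizer) is to force the alignment of the variable underlying a data
   reference before relying on aligned vector accesses:

     if (vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)))
       {
         DECL_ALIGN (base) = TYPE_ALIGN (vectype);
         DECL_USER_ALIGN (base) = 1;
       }  */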
4046
4047
4048 /* Return whether the data reference DR is supported with respect to its
4049 alignment.
4050 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even if
4051 it is aligned, i.e., check if it is possible to vectorize it with different
4052 alignment. */
4053
4054 enum dr_alignment_support
4055 vect_supportable_dr_alignment (struct data_reference *dr,
4056 bool check_aligned_accesses)
4057 {
4058 gimple stmt = DR_STMT (dr);
4059 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4060 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4061 enum machine_mode mode = TYPE_MODE (vectype);
4062 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4063 struct loop *vect_loop = NULL;
4064 bool nested_in_vect_loop = false;
4065
4066 if (aligned_access_p (dr) && !check_aligned_accesses)
4067 return dr_aligned;
4068
4069 if (loop_vinfo)
4070 {
4071 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
4072 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
4073 }
4074
4075 /* Possibly unaligned access. */
4076
4077 /* We can choose between using the implicit realignment scheme (generating
4078 a misaligned_move stmt) and the explicit realignment scheme (generating
4079 aligned loads with a REALIGN_LOAD). There are two variants to the explicit
4080 realignment scheme: optimized, and unoptimized.
4081 We can optimize the realignment only if the step between consecutive
4082 vector loads is equal to the vector size. Since the vector memory
4083 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
4084 is guaranteed that the misalignment amount remains the same throughout the
4085 execution of the vectorized loop. Therefore, we can create the
4086 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
4087 at the loop preheader.
4088
4089 However, in the case of outer-loop vectorization, when vectorizing a
4090 memory access in the inner-loop nested within the LOOP that is now being
4091 vectorized, while it is guaranteed that the misalignment of the
4092 vectorized memory access will remain the same in different outer-loop
4093 iterations, it is *not* guaranteed that it will remain the same throughout
4094 the execution of the inner-loop. This is because the inner-loop advances
4095 with the original scalar step (and not in steps of VS). If the inner-loop
4096 step happens to be a multiple of VS, then the misalignment remains fixed
4097 and we can use the optimized realignment scheme. For example:
4098
4099 for (i=0; i<N; i++)
4100 for (j=0; j<M; j++)
4101 s += a[i+j];
4102
4103 When vectorizing the i-loop in the above example, the step between
4104 consecutive vector loads is 1, and so the misalignment does not remain
4105 fixed across the execution of the inner-loop, and the realignment cannot
4106 be optimized (as illustrated in the following pseudo vectorized loop):
4107
4108 for (i=0; i<N; i+=4)
4109 for (j=0; j<M; j++){
4110 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
4111 // when j is {0,1,2,3,4,5,6,7,...} respectively.
4112 // (assuming that we start from an aligned address).
4113 }
4114
4115 We therefore have to use the unoptimized realignment scheme:
4116
4117 for (i=0; i<N; i+=4)
4118 for (j=k; j<M; j+=4)
4119 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
4120 // that the misalignment of the initial address is
4121 // 0).
4122
4123 The loop can then be vectorized as follows:
4124
4125 for (k=0; k<4; k++){
4126 rt = get_realignment_token (&vp[k]);
4127 for (i=0; i<N; i+=4){
4128 v1 = vp[i+k];
4129 for (j=k; j<M; j+=4){
4130 v2 = vp[i+j+VS-1];
4131 va = REALIGN_LOAD <v1,v2,rt>;
4132 vs += va;
4133 v1 = v2;
4134 }
4135 }
4136 } */
4137
4138 if (DR_IS_READ (dr))
4139 {
4140 bool is_packed = false;
4141 tree type = (TREE_TYPE (DR_REF (dr)));
4142
4143 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
4144 && (!targetm.vectorize.builtin_mask_for_load
4145 || targetm.vectorize.builtin_mask_for_load ()))
4146 {
4147 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4148 if ((nested_in_vect_loop
4149 && (TREE_INT_CST_LOW (DR_STEP (dr))
4150 != GET_MODE_SIZE (TYPE_MODE (vectype))))
4151 || !loop_vinfo)
4152 return dr_explicit_realign;
4153 else
4154 return dr_explicit_realign_optimized;
4155 }
4156 if (!known_alignment_for_access_p (dr))
4157 {
4158 tree ba = DR_BASE_OBJECT (dr);
4159
4160 if (ba)
4161 is_packed = contains_packed_reference (ba);
4162 }
4163
4164 if (targetm.vectorize.
4165 support_vector_misalignment (mode, type,
4166 DR_MISALIGNMENT (dr), is_packed))
4167 /* Can't software pipeline the loads, but can at least do them. */
4168 return dr_unaligned_supported;
4169 }
4170 else
4171 {
4172 bool is_packed = false;
4173 tree type = (TREE_TYPE (DR_REF (dr)));
4174
4175 if (!known_alignment_for_access_p (dr))
4176 {
4177 tree ba = DR_BASE_OBJECT (dr);
4178
4179 if (ba)
4180 is_packed = contains_packed_reference (ba);
4181 }
4182
4183 if (targetm.vectorize.
4184 support_vector_misalignment (mode, type,
4185 DR_MISALIGNMENT (dr), is_packed))
4186 return dr_unaligned_supported;
4187 }
4188
4189 /* Unsupported. */
4190 return dr_unaligned_unsupported;
4191 }
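
/* Sketch of how a caller might act on the value returned above (an
   illustration only; the real consumers are the load/store handling
   routines of the vectorizer):

     switch (vect_supportable_dr_alignment (dr, false))
       {
       case dr_aligned:
       case dr_unaligned_supported:
         // emit a regular (possibly misaligned) vector memory access
         break;
       case dr_explicit_realign:
       case dr_explicit_realign_optimized:
         // emit aligned loads combined via REALIGN_LOAD, after calling
         // vect_setup_realignment to create the prolog code
         break;
       case dr_unaligned_unsupported:
         gcc_unreachable ();  // rejected during the analysis phase
       }  */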