Report vectorization problems via a new opt_problem class
[gcc.git] / gcc / tree-vectorizer.c
1 /* Vectorizer
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Loop and basic block vectorizer.
22
23 This file contains drivers for the three vectorizers:
24 (1) loop vectorizer (inter-iteration parallelism),
25 (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
26 vectorizer)
27 (3) BB vectorizer (out-of-loops), aka SLP
28
29 The rest of the vectorizer's code is organized as follows:
30 - tree-vect-loop.c - loop specific parts such as reductions, etc. These are
31 used by drivers (1) and (2).
32 - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
33 drivers (1) and (2).
34 - tree-vect-slp.c - BB vectorization specific analysis and transformation,
35 used by drivers (2) and (3).
36 - tree-vect-stmts.c - statements analysis and transformation (used by all).
37 - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
38 manipulations (used by all).
39 - tree-vect-patterns.c - vectorizable code patterns detector (used by all)
40
41 Here's a poor attempt at illustrating that:
42
43 tree-vectorizer.c:
44 loop_vect() loop_aware_slp() slp_vect()
45 | / \ /
46 | / \ /
47 tree-vect-loop.c tree-vect-slp.c
48 | \ \ / / |
49 | \ \/ / |
50 | \ /\ / |
51 | \ / \ / |
52 tree-vect-stmts.c tree-vect-data-refs.c
53 \ /
54 tree-vect-patterns.c
55 */
56
57 #include "config.h"
58 #include "system.h"
59 #include "coretypes.h"
60 #include "backend.h"
61 #include "tree.h"
62 #include "gimple.h"
63 #include "predict.h"
64 #include "tree-pass.h"
65 #include "ssa.h"
66 #include "cgraph.h"
67 #include "fold-const.h"
68 #include "stor-layout.h"
69 #include "gimple-iterator.h"
70 #include "gimple-walk.h"
71 #include "tree-ssa-loop-manip.h"
72 #include "tree-ssa-loop-niter.h"
73 #include "tree-cfg.h"
74 #include "cfgloop.h"
75 #include "tree-vectorizer.h"
76 #include "tree-ssa-propagate.h"
77 #include "dbgcnt.h"
78 #include "tree-scalar-evolution.h"
79 #include "stringpool.h"
80 #include "attribs.h"
81 #include "gimple-pretty-print.h"
82 #include "opt-problem.h"
83
84
85 /* Loop or bb location, with hotness information. */
86 dump_user_location_t vect_location;
87
88 /* Dump a cost entry according to args to F. */
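/* For instance (purely illustrative; the pointer value and the statement
   shown are made up), a scalar load counted once in the loop body would be
   printed roughly as:

     0x4a5be60 _4 = *p_3 1 times scalar_load in body  */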
89
90 void
91 dump_stmt_cost (FILE *f, void *data, int count, enum vect_cost_for_stmt kind,
92 stmt_vec_info stmt_info, int misalign,
93 enum vect_cost_model_location where)
94 {
95 fprintf (f, "%p ", data);
96 if (stmt_info)
97 {
98 print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
99 fprintf (f, " ");
100 }
101 else
102 fprintf (f, "<unknown> ");
103 fprintf (f, "%d times ", count);
104 const char *ks = "unknown";
105 switch (kind)
106 {
107 case scalar_stmt:
108 ks = "scalar_stmt";
109 break;
110 case scalar_load:
111 ks = "scalar_load";
112 break;
113 case scalar_store:
114 ks = "scalar_store";
115 break;
116 case vector_stmt:
117 ks = "vector_stmt";
118 break;
119 case vector_load:
120 ks = "vector_load";
121 break;
122 case vector_gather_load:
123 ks = "vector_gather_load";
124 break;
125 case unaligned_load:
126 ks = "unaligned_load";
127 break;
128 case unaligned_store:
129 ks = "unaligned_store";
130 break;
131 case vector_store:
132 ks = "vector_store";
133 break;
134 case vector_scatter_store:
135 ks = "vector_scatter_store";
136 break;
137 case vec_to_scalar:
138 ks = "vec_to_scalar";
139 break;
140 case scalar_to_vec:
141 ks = "scalar_to_vec";
142 break;
143 case cond_branch_not_taken:
144 ks = "cond_branch_not_taken";
145 break;
146 case cond_branch_taken:
147 ks = "cond_branch_taken";
148 break;
149 case vec_perm:
150 ks = "vec_perm";
151 break;
152 case vec_promote_demote:
153 ks = "vec_promote_demote";
154 break;
155 case vec_construct:
156 ks = "vec_construct";
157 break;
158 }
159 fprintf (f, "%s ", ks);
160 if (kind == unaligned_load || kind == unaligned_store)
161 fprintf (f, "(misalign %d) ", misalign);
162 const char *ws = "unknown";
163 switch (where)
164 {
165 case vect_prologue:
166 ws = "prologue";
167 break;
168 case vect_body:
169 ws = "body";
170 break;
171 case vect_epilogue:
172 ws = "epilogue";
173 break;
174 }
175 fprintf (f, "in %s\n", ws);
176 }
177 \f
178 /* For mapping simduid to vectorization factor. */
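/* Entries are created below in try_vectorize_loop_1 when a loop with a
   simduid is successfully vectorized, and are consumed by
   adjust_simduid_builtins and shrink_simd_arrays.  */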
179
180 struct simduid_to_vf : free_ptr_hash<simduid_to_vf>
181 {
182 unsigned int simduid;
183 poly_uint64 vf;
184
185 /* hash_table support. */
186 static inline hashval_t hash (const simduid_to_vf *);
187 static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
188 };
189
190 inline hashval_t
191 simduid_to_vf::hash (const simduid_to_vf *p)
192 {
193 return p->simduid;
194 }
195
196 inline int
197 simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
198 {
199 return p1->simduid == p2->simduid;
200 }
201
202 /* This hash maps the OMP simd array to the corresponding simduid used
203 to index into it, like so:
204
205 _7 = GOMP_SIMD_LANE (simduid.0)
206 ...
207 ...
208 D.1737[_7] = stuff;
209
210
211 This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
212 simduid.0. */
213
214 struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
215 {
216 tree decl;
217 unsigned int simduid;
218
219 /* hash_table support. */
220 static inline hashval_t hash (const simd_array_to_simduid *);
221 static inline int equal (const simd_array_to_simduid *,
222 const simd_array_to_simduid *);
223 };
224
225 inline hashval_t
226 simd_array_to_simduid::hash (const simd_array_to_simduid *p)
227 {
228 return DECL_UID (p->decl);
229 }
230
231 inline int
232 simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
233 const simd_array_to_simduid *p2)
234 {
235 return p1->decl == p2->decl;
236 }
237
238 /* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LAST_LANE,
239 into their corresponding constants and remove
240 IFN_GOMP_SIMD_ORDERED_{START,END}. */
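/* For example (a sketch only; the SSA names and the factor of 8 are
   illustrative), once the loop using simduid.0 has been vectorized with a
   vectorization factor of 8, a call such as

     _5 = GOMP_SIMD_VF (simduid.0);

   is folded to the constant 8, GOMP_SIMD_LANE calls are folded to 0, and
   GOMP_SIMD_LAST_LANE calls are folded to their second argument.  */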
241
242 static void
243 adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
244 {
245 basic_block bb;
246
247 FOR_EACH_BB_FN (bb, cfun)
248 {
249 gimple_stmt_iterator i;
250
251 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
252 {
253 poly_uint64 vf = 1;
254 enum internal_fn ifn;
255 gimple *stmt = gsi_stmt (i);
256 tree t;
257 if (!is_gimple_call (stmt)
258 || !gimple_call_internal_p (stmt))
259 {
260 gsi_next (&i);
261 continue;
262 }
263 ifn = gimple_call_internal_fn (stmt);
264 switch (ifn)
265 {
266 case IFN_GOMP_SIMD_LANE:
267 case IFN_GOMP_SIMD_VF:
268 case IFN_GOMP_SIMD_LAST_LANE:
269 break;
270 case IFN_GOMP_SIMD_ORDERED_START:
271 case IFN_GOMP_SIMD_ORDERED_END:
272 if (integer_onep (gimple_call_arg (stmt, 0)))
273 {
274 enum built_in_function bcode
275 = (ifn == IFN_GOMP_SIMD_ORDERED_START
276 ? BUILT_IN_GOMP_ORDERED_START
277 : BUILT_IN_GOMP_ORDERED_END);
278 gimple *g
279 = gimple_build_call (builtin_decl_explicit (bcode), 0);
280 tree vdef = gimple_vdef (stmt);
281 gimple_set_vdef (g, vdef);
282 SSA_NAME_DEF_STMT (vdef) = g;
283 gimple_set_vuse (g, gimple_vuse (stmt));
284 gsi_replace (&i, g, true);
285 continue;
286 }
287 gsi_remove (&i, true);
288 unlink_stmt_vdef (stmt);
289 continue;
290 default:
291 gsi_next (&i);
292 continue;
293 }
294 tree arg = gimple_call_arg (stmt, 0);
295 gcc_assert (arg != NULL_TREE);
296 gcc_assert (TREE_CODE (arg) == SSA_NAME);
297 simduid_to_vf *p = NULL, data;
298 data.simduid = DECL_UID (SSA_NAME_VAR (arg));
299 /* Need to nullify the loop safelen field since its value is not
300 valid after the transformation. */
301 if (bb->loop_father && bb->loop_father->safelen > 0)
302 bb->loop_father->safelen = 0;
303 if (htab)
304 {
305 p = htab->find (&data);
306 if (p)
307 vf = p->vf;
308 }
309 switch (ifn)
310 {
311 case IFN_GOMP_SIMD_VF:
312 t = build_int_cst (unsigned_type_node, vf);
313 break;
314 case IFN_GOMP_SIMD_LANE:
315 t = build_int_cst (unsigned_type_node, 0);
316 break;
317 case IFN_GOMP_SIMD_LAST_LANE:
318 t = gimple_call_arg (stmt, 1);
319 break;
320 default:
321 gcc_unreachable ();
322 }
323 tree lhs = gimple_call_lhs (stmt);
324 if (lhs)
325 replace_uses_by (lhs, t);
326 release_defs (stmt);
327 gsi_remove (&i, true);
328 }
329 }
330 }
331
332 /* Helper structure for note_simd_array_uses. */
333
334 struct note_simd_array_uses_struct
335 {
336 hash_table<simd_array_to_simduid> **htab;
337 unsigned int simduid;
338 };
339
340 /* Callback for note_simd_array_uses, called through walk_gimple_op. */
341
342 static tree
343 note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
344 {
345 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
346 struct note_simd_array_uses_struct *ns
347 = (struct note_simd_array_uses_struct *) wi->info;
348
349 if (TYPE_P (*tp))
350 *walk_subtrees = 0;
351 else if (VAR_P (*tp)
352 && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
353 && DECL_CONTEXT (*tp) == current_function_decl)
354 {
355 simd_array_to_simduid data;
356 if (!*ns->htab)
357 *ns->htab = new hash_table<simd_array_to_simduid> (15);
358 data.decl = *tp;
359 data.simduid = ns->simduid;
360 simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
361 if (*slot == NULL)
362 {
363 simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
364 *p = data;
365 *slot = p;
366 }
367 else if ((*slot)->simduid != ns->simduid)
368 (*slot)->simduid = -1U;
369 *walk_subtrees = 0;
370 }
371 return NULL_TREE;
372 }
373
374 /* Find "omp simd array" temporaries and map them to corresponding
375 simduid. */
376
377 static void
378 note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
379 {
380 basic_block bb;
381 gimple_stmt_iterator gsi;
382 struct walk_stmt_info wi;
383 struct note_simd_array_uses_struct ns;
384
385 memset (&wi, 0, sizeof (wi));
386 wi.info = &ns;
387 ns.htab = htab;
388
389 FOR_EACH_BB_FN (bb, cfun)
390 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
391 {
392 gimple *stmt = gsi_stmt (gsi);
393 if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
394 continue;
395 switch (gimple_call_internal_fn (stmt))
396 {
397 case IFN_GOMP_SIMD_LANE:
398 case IFN_GOMP_SIMD_VF:
399 case IFN_GOMP_SIMD_LAST_LANE:
400 break;
401 default:
402 continue;
403 }
404 tree lhs = gimple_call_lhs (stmt);
405 if (lhs == NULL_TREE)
406 continue;
407 imm_use_iterator use_iter;
408 gimple *use_stmt;
409 ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
410 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
411 if (!is_gimple_debug (use_stmt))
412 walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
413 }
414 }
415
416 /* Shrink arrays with "omp simd array" attribute to the corresponding
417 vectorization factor. */
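/* For instance (an illustrative sketch), an "omp simd array" temporary
   declared as int D.1737[16] would be re-laid out as int D.1737[8] if the
   recorded vectorization factor for its simduid is 8.  */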
418
419 static void
420 shrink_simd_arrays
421 (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
422 hash_table<simduid_to_vf> *simduid_to_vf_htab)
423 {
424 for (hash_table<simd_array_to_simduid>::iterator iter
425 = simd_array_to_simduid_htab->begin ();
426 iter != simd_array_to_simduid_htab->end (); ++iter)
427 if ((*iter)->simduid != -1U)
428 {
429 tree decl = (*iter)->decl;
430 poly_uint64 vf = 1;
431 if (simduid_to_vf_htab)
432 {
433 simduid_to_vf *p = NULL, data;
434 data.simduid = (*iter)->simduid;
435 p = simduid_to_vf_htab->find (&data);
436 if (p)
437 vf = p->vf;
438 }
439 tree atype
440 = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
441 TREE_TYPE (decl) = atype;
442 relayout_decl (decl);
443 }
444
445 delete simd_array_to_simduid_htab;
446 }
447 \f
448 /* Initialize the vec_info with kind KIND_IN and target cost data
449 TARGET_COST_DATA_IN. */
450
451 vec_info::vec_info (vec_info::vec_kind kind_in, void *target_cost_data_in,
452 vec_info_shared *shared_)
453 : kind (kind_in),
454 shared (shared_),
455 target_cost_data (target_cost_data_in)
456 {
457 stmt_vec_infos.create (50);
458 }
459
460 vec_info::~vec_info ()
461 {
462 slp_instance instance;
463 unsigned int i;
464
465 FOR_EACH_VEC_ELT (slp_instances, i, instance)
466 vect_free_slp_instance (instance, true);
467
468 destroy_cost_data (target_cost_data);
469 free_stmt_vec_infos ();
470 }
471
472 vec_info_shared::vec_info_shared ()
473 : datarefs (vNULL),
474 datarefs_copy (vNULL),
475 ddrs (vNULL)
476 {
477 }
478
479 vec_info_shared::~vec_info_shared ()
480 {
481 free_data_refs (datarefs);
482 free_dependence_relations (ddrs);
483 datarefs_copy.release ();
484 }
485
486 void
487 vec_info_shared::save_datarefs ()
488 {
489 if (!flag_checking)
490 return;
491 datarefs_copy.reserve_exact (datarefs.length ());
492 for (unsigned i = 0; i < datarefs.length (); ++i)
493 datarefs_copy.quick_push (*datarefs[i]);
494 }
495
496 void
497 vec_info_shared::check_datarefs ()
498 {
499 if (!flag_checking)
500 return;
501 gcc_assert (datarefs.length () == datarefs_copy.length ());
502 for (unsigned i = 0; i < datarefs.length (); ++i)
503 if (memcmp (&datarefs_copy[i], datarefs[i], sizeof (data_reference)) != 0)
504 gcc_unreachable ();
505 }
506
507 /* Record that STMT belongs to the vectorizable region. Create and return
508 an associated stmt_vec_info. */
509
510 stmt_vec_info
511 vec_info::add_stmt (gimple *stmt)
512 {
513 stmt_vec_info res = new_stmt_vec_info (stmt);
514 set_vinfo_for_stmt (stmt, res);
515 return res;
516 }
517
518 /* If STMT has an associated stmt_vec_info, return that vec_info, otherwise
519 return null. It is safe to call this function on any statement, even if
520 it might not be part of the vectorizable region. */
521
522 stmt_vec_info
523 vec_info::lookup_stmt (gimple *stmt)
524 {
525 unsigned int uid = gimple_uid (stmt);
526 if (uid > 0 && uid - 1 < stmt_vec_infos.length ())
527 {
528 stmt_vec_info res = stmt_vec_infos[uid - 1];
529 if (res && res->stmt == stmt)
530 return res;
531 }
532 return NULL;
533 }
534
535 /* If NAME is an SSA_NAME and its definition has an associated stmt_vec_info,
536 return that stmt_vec_info, otherwise return null. It is safe to call
537 this on arbitrary operands. */
538
539 stmt_vec_info
540 vec_info::lookup_def (tree name)
541 {
542 if (TREE_CODE (name) == SSA_NAME
543 && !SSA_NAME_IS_DEFAULT_DEF (name))
544 return lookup_stmt (SSA_NAME_DEF_STMT (name));
545 return NULL;
546 }
547
548 /* See whether there is a single non-debug statement that uses LHS and
549 whether that statement has an associated stmt_vec_info. Return the
550 stmt_vec_info if so, otherwise return null. */
551
552 stmt_vec_info
553 vec_info::lookup_single_use (tree lhs)
554 {
555 use_operand_p dummy;
556 gimple *use_stmt;
557 if (single_imm_use (lhs, &dummy, &use_stmt))
558 return lookup_stmt (use_stmt);
559 return NULL;
560 }
561
562 /* Return vectorization information about DR. */
563
564 dr_vec_info *
565 vec_info::lookup_dr (data_reference *dr)
566 {
567 stmt_vec_info stmt_info = lookup_stmt (DR_STMT (dr));
568 /* DR_STMT should never refer to a stmt in a pattern replacement. */
569 gcc_checking_assert (!is_pattern_stmt_p (stmt_info));
570 return STMT_VINFO_DR_INFO (stmt_info->dr_aux.stmt);
571 }
572
573 /* Record that NEW_STMT_INFO now implements the same data reference
574 as OLD_STMT_INFO. */
575
576 void
577 vec_info::move_dr (stmt_vec_info new_stmt_info, stmt_vec_info old_stmt_info)
578 {
579 gcc_assert (!is_pattern_stmt_p (old_stmt_info));
580 STMT_VINFO_DR_INFO (old_stmt_info)->stmt = new_stmt_info;
581 new_stmt_info->dr_aux = old_stmt_info->dr_aux;
582 STMT_VINFO_DR_WRT_VEC_LOOP (new_stmt_info)
583 = STMT_VINFO_DR_WRT_VEC_LOOP (old_stmt_info);
584 STMT_VINFO_GATHER_SCATTER_P (new_stmt_info)
585 = STMT_VINFO_GATHER_SCATTER_P (old_stmt_info);
586 }
587
588 /* Permanently remove the statement described by STMT_INFO from the
589 function. */
590
591 void
592 vec_info::remove_stmt (stmt_vec_info stmt_info)
593 {
594 gcc_assert (!stmt_info->pattern_stmt_p);
595 set_vinfo_for_stmt (stmt_info->stmt, NULL);
596 gimple_stmt_iterator si = gsi_for_stmt (stmt_info->stmt);
597 unlink_stmt_vdef (stmt_info->stmt);
598 gsi_remove (&si, true);
599 release_defs (stmt_info->stmt);
600 free_stmt_vec_info (stmt_info);
601 }
602
603 /* Replace the statement at GSI by NEW_STMT, both the vectorization
604 information and the function itself. STMT_INFO describes the statement
605 at GSI. */
606
607 void
608 vec_info::replace_stmt (gimple_stmt_iterator *gsi, stmt_vec_info stmt_info,
609 gimple *new_stmt)
610 {
611 gimple *old_stmt = stmt_info->stmt;
612 gcc_assert (!stmt_info->pattern_stmt_p && old_stmt == gsi_stmt (*gsi));
613 set_vinfo_for_stmt (old_stmt, NULL);
614 set_vinfo_for_stmt (new_stmt, stmt_info);
615 stmt_info->stmt = new_stmt;
616 gsi_replace (gsi, new_stmt, true);
617 }
618
619 /* Create and initialize a new stmt_vec_info struct for STMT. */
620
621 stmt_vec_info
622 vec_info::new_stmt_vec_info (gimple *stmt)
623 {
624 stmt_vec_info res = XCNEW (struct _stmt_vec_info);
625 res->vinfo = this;
626 res->stmt = stmt;
627
628 STMT_VINFO_TYPE (res) = undef_vec_info_type;
629 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
630 STMT_VINFO_VECTORIZABLE (res) = true;
631 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
632 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;
633
634 if (gimple_code (stmt) == GIMPLE_PHI
635 && is_loop_header_bb_p (gimple_bb (stmt)))
636 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
637 else
638 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
639
640 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
641 STMT_SLP_TYPE (res) = loop_vect;
642
643 /* This is really "uninitialized" until vect_compute_data_ref_alignment. */
644 res->dr_aux.misalignment = DR_MISALIGNMENT_UNINITIALIZED;
645
646 return res;
647 }
648
649 /* Associate STMT with INFO. */
650
651 void
652 vec_info::set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info)
653 {
654 unsigned int uid = gimple_uid (stmt);
655 if (uid == 0)
656 {
657 gcc_checking_assert (info);
658 uid = stmt_vec_infos.length () + 1;
659 gimple_set_uid (stmt, uid);
660 stmt_vec_infos.safe_push (info);
661 }
662 else
663 {
664 gcc_checking_assert (info == NULL);
665 stmt_vec_infos[uid - 1] = info;
666 }
667 }
668
669 /* Free the contents of stmt_vec_infos. */
670
671 void
672 vec_info::free_stmt_vec_infos (void)
673 {
674 unsigned int i;
675 stmt_vec_info info;
676 FOR_EACH_VEC_ELT (stmt_vec_infos, i, info)
677 if (info != NULL)
678 free_stmt_vec_info (info);
679 stmt_vec_infos.release ();
680 }
681
682 /* Free STMT_INFO. */
683
684 void
685 vec_info::free_stmt_vec_info (stmt_vec_info stmt_info)
686 {
687 if (stmt_info->pattern_stmt_p)
688 {
689 gimple_set_bb (stmt_info->stmt, NULL);
690 tree lhs = gimple_get_lhs (stmt_info->stmt);
691 if (lhs && TREE_CODE (lhs) == SSA_NAME)
692 release_ssa_name (lhs);
693 }
694
695 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
696 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
697 free (stmt_info);
698 }
699
700 /* A helper function to free scev and LOOP niter information, as well as
701 clear loop constraint LOOP_C_FINITE. */
702
703 void
704 vect_free_loop_info_assumptions (struct loop *loop)
705 {
706 scev_reset_htab ();
707 /* We need to explicitly reset the upper bound information since it is
708 used even after free_numbers_of_iterations_estimates. */
709 loop->any_upper_bound = false;
710 loop->any_likely_upper_bound = false;
711 free_numbers_of_iterations_estimates (loop);
712 loop_constraint_clear (loop, LOOP_C_FINITE);
713 }
714
715 /* If LOOP has been versioned during ifcvt, return the internal call
716 guarding it. */
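/* Roughly (for exposition only), after if-conversion the versioned loop
   looks like

     if (LOOP_VECTORIZED (1, 2))
       <loop 1, if-converted copy>
     else
       <loop 2, scalar copy>

   and the IFN_LOOP_VECTORIZED call is found just before the GIMPLE_COND
   that ends the guarding block.  */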
717
718 static gimple *
719 vect_loop_vectorized_call (struct loop *loop)
720 {
721 basic_block bb = loop_preheader_edge (loop)->src;
722 gimple *g;
723 do
724 {
725 g = last_stmt (bb);
726 if (g)
727 break;
728 if (!single_pred_p (bb))
729 break;
730 bb = single_pred (bb);
731 }
732 while (1);
733 if (g && gimple_code (g) == GIMPLE_COND)
734 {
735 gimple_stmt_iterator gsi = gsi_for_stmt (g);
736 gsi_prev (&gsi);
737 if (!gsi_end_p (gsi))
738 {
739 g = gsi_stmt (gsi);
740 if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
741 && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
742 || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
743 return g;
744 }
745 }
746 return NULL;
747 }
748
749 /* If LOOP has been versioned during loop distribution, return the guarding
750 internal call. */
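/* Roughly (for exposition only), loop distribution versions the loop as

     if (LOOP_DIST_ALIAS (<orig loop num>, <runtime alias check>))
       <distributed copies>
     else
       <original loop>

   and this function walks up the dominator tree from LOOP looking for the
   IFN_LOOP_DIST_ALIAS call whose first argument matches
   LOOP->orig_loop_num.  */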
751
752 static gimple *
753 vect_loop_dist_alias_call (struct loop *loop)
754 {
755 basic_block bb;
756 basic_block entry;
757 struct loop *outer, *orig;
758 gimple_stmt_iterator gsi;
759 gimple *g;
760
761 if (loop->orig_loop_num == 0)
762 return NULL;
763
764 orig = get_loop (cfun, loop->orig_loop_num);
765 if (orig == NULL)
766 {
767 /* The original loop has somehow been destroyed. Clear the information. */
768 loop->orig_loop_num = 0;
769 return NULL;
770 }
771
772 if (loop != orig)
773 bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
774 else
775 bb = loop_preheader_edge (loop)->src;
776
777 outer = bb->loop_father;
778 entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);
779
780 /* Look upward in dominance tree. */
781 for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
782 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
783 {
784 g = last_stmt (bb);
785 if (g == NULL || gimple_code (g) != GIMPLE_COND)
786 continue;
787
788 gsi = gsi_for_stmt (g);
789 gsi_prev (&gsi);
790 if (gsi_end_p (gsi))
791 continue;
792
793 g = gsi_stmt (gsi);
794 /* The guarding internal function call must have the same distribution
795 alias id. */
796 if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
797 && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
798 return g;
799 }
800 return NULL;
801 }
802
803 /* Set the uids of all the statements in basic blocks inside the loop
804 represented by LOOP_VINFO. LOOP_VECTORIZED_CALL is the internal
805 call guarding the loop which has been if-converted. */
806 static void
807 set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
808 {
809 tree arg = gimple_call_arg (loop_vectorized_call, 1);
810 basic_block *bbs;
811 unsigned int i;
812 struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));
813
814 LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
815 gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
816 == loop_vectorized_call);
817 /* If we are going to vectorize the outer loop, prevent vectorization
818 of the inner loop in the scalar loop - either the scalar loop is
819 thrown away, so it would be wasted work, or it is used only for
820 a few iterations. */
821 if (scalar_loop->inner)
822 {
823 gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
824 if (g)
825 {
826 arg = gimple_call_arg (g, 0);
827 get_loop (cfun, tree_to_shwi (arg))->dont_vectorize = true;
828 fold_loop_internal_call (g, boolean_false_node);
829 }
830 }
831 bbs = get_loop_body (scalar_loop);
832 for (i = 0; i < scalar_loop->num_nodes; i++)
833 {
834 basic_block bb = bbs[i];
835 gimple_stmt_iterator gsi;
836 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
837 {
838 gimple *phi = gsi_stmt (gsi);
839 gimple_set_uid (phi, 0);
840 }
841 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
842 {
843 gimple *stmt = gsi_stmt (gsi);
844 gimple_set_uid (stmt, 0);
845 }
846 }
847 free (bbs);
848 }
849
850 /* Try to vectorize LOOP. */
851
852 static unsigned
853 try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
854 unsigned *num_vectorized_loops,
855 loop_p loop, loop_vec_info orig_loop_vinfo,
856 gimple *loop_vectorized_call,
857 gimple *loop_dist_alias_call)
858 {
859 unsigned ret = 0;
860 vec_info_shared shared;
861 vect_location = find_loop_location (loop);
862 if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION
863 && dump_enabled_p ())
864 dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
865 "\nAnalyzing loop at %s:%d\n",
866 LOCATION_FILE (vect_location.get_location_t ()),
867 LOCATION_LINE (vect_location.get_location_t ()));
868
869 /* Try to analyze the loop, retaining an opt_problem if dump_enabled_p. */
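/* (A note on the new interface, hedged: vect_analyze_loop is assumed to
   return an opt_loop_vec_info, a wrapper around the loop_vec_info pointer
   that can also carry an opt_problem describing why analysis failed; the
   opt_problem is only created when dumps are enabled, so the non-dumping
   path pays essentially nothing for it.)  */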
870 opt_loop_vec_info loop_vinfo
871 = vect_analyze_loop (loop, orig_loop_vinfo, &shared);
872 loop->aux = loop_vinfo;
873
874 if (!loop_vinfo)
875 if (dump_enabled_p ())
876 if (opt_problem *problem = loop_vinfo.get_problem ())
877 {
878 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
879 "couldn't vectorize loop\n");
880 problem->emit_and_clear ();
881 }
882
883 if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
884 {
885 /* Free existing information if the loop was analyzed with some
886 assumptions. */
887 if (loop_constraint_set_p (loop, LOOP_C_FINITE))
888 vect_free_loop_info_assumptions (loop);
889
890 /* If we applied if-conversion then try to vectorize the
891 BB of innermost loops.
892 ??? Ideally BB vectorization would learn to vectorize
893 control flow by applying if-conversion on the fly; the
894 following retains the if-converted loop body even when
895 only non-if-converted parts took part in BB vectorization. */
896 if (flag_tree_slp_vectorize != 0
897 && loop_vectorized_call
898 && ! loop->inner)
899 {
900 basic_block bb = loop->header;
901 bool has_mask_load_store = false;
902 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
903 !gsi_end_p (gsi); gsi_next (&gsi))
904 {
905 gimple *stmt = gsi_stmt (gsi);
906 if (is_gimple_call (stmt)
907 && gimple_call_internal_p (stmt)
908 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
909 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
910 {
911 has_mask_load_store = true;
912 break;
913 }
914 gimple_set_uid (stmt, -1);
915 gimple_set_visited (stmt, false);
916 }
917 if (! has_mask_load_store && vect_slp_bb (bb))
918 {
919 dump_printf_loc (MSG_NOTE, vect_location,
920 "basic block vectorized\n");
921 fold_loop_internal_call (loop_vectorized_call,
922 boolean_true_node);
923 loop_vectorized_call = NULL;
924 ret |= TODO_cleanup_cfg;
925 }
926 }
927 /* If outer loop vectorization fails for LOOP_VECTORIZED guarded
928 loop, don't vectorize its inner loop; we'll attempt to
929 vectorize LOOP_VECTORIZED guarded inner loop of the scalar
930 loop version. */
931 if (loop_vectorized_call && loop->inner)
932 loop->inner->dont_vectorize = true;
933 return ret;
934 }
935
936 if (!dbg_cnt (vect_loop))
937 {
938 /* Free existing information if the loop was analyzed with some
939 assumptions. */
940 if (loop_constraint_set_p (loop, LOOP_C_FINITE))
941 vect_free_loop_info_assumptions (loop);
942 return ret;
943 }
944
945 if (loop_vectorized_call)
946 set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);
947
948 unsigned HOST_WIDE_INT bytes;
949 if (current_vector_size.is_constant (&bytes))
950 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
951 "loop vectorized using %wu byte vectors\n", bytes);
952 else
953 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
954 "loop vectorized using variable length vectors\n");
955
956 loop_p new_loop = vect_transform_loop (loop_vinfo);
957 (*num_vectorized_loops)++;
958 /* Now that the loop has been vectorized, allow it to be unrolled
959 etc. */
960 loop->force_vectorize = false;
961
962 if (loop->simduid)
963 {
964 simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
965 if (!simduid_to_vf_htab)
966 simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
967 simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
968 simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
969 *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
970 = simduid_to_vf_data;
971 }
972
973 if (loop_vectorized_call)
974 {
975 fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
976 loop_vectorized_call = NULL;
977 ret |= TODO_cleanup_cfg;
978 }
979 if (loop_dist_alias_call)
980 {
981 tree value = gimple_call_arg (loop_dist_alias_call, 1);
982 fold_loop_internal_call (loop_dist_alias_call, value);
983 loop_dist_alias_call = NULL;
984 ret |= TODO_cleanup_cfg;
985 }
986
987 /* Epilogue of vectorized loop must be vectorized too. */
988 if (new_loop)
989 ret |= try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
990 new_loop, loop_vinfo, NULL, NULL);
991
992 return ret;
993 }
994
995 /* Try to vectorize LOOP. */
996
997 static unsigned
998 try_vectorize_loop (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
999 unsigned *num_vectorized_loops, loop_p loop)
1000 {
1001 if (!((flag_tree_loop_vectorize
1002 && optimize_loop_nest_for_speed_p (loop))
1003 || loop->force_vectorize))
1004 return 0;
1005
1006 return try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
1007 loop, NULL,
1008 vect_loop_vectorized_call (loop),
1009 vect_loop_dist_alias_call (loop));
1010 }
1011
1012
1013 /* Function vectorize_loops.
1014
1015 Entry point to loop vectorization phase. */
1016
1017 unsigned
1018 vectorize_loops (void)
1019 {
1020 unsigned int i;
1021 unsigned int num_vectorized_loops = 0;
1022 unsigned int vect_loops_num;
1023 struct loop *loop;
1024 hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
1025 hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
1026 bool any_ifcvt_loops = false;
1027 unsigned ret = 0;
1028
1029 vect_loops_num = number_of_loops (cfun);
1030
1031 /* Bail out if there are no loops. */
1032 if (vect_loops_num <= 1)
1033 return 0;
1034
1035 if (cfun->has_simduid_loops)
1036 note_simd_array_uses (&simd_array_to_simduid_htab);
1037
1038 /* ----------- Analyze loops. ----------- */
1039
1040 /* If some loop was duplicated, it gets a bigger number
1041 than all previously defined loops. This fact allows us to run
1042 only over the initial loops, skipping newly generated ones. */
1043 FOR_EACH_LOOP (loop, 0)
1044 if (loop->dont_vectorize)
1045 {
1046 any_ifcvt_loops = true;
1047 /* If-conversion sometimes versions both the outer loop
1048 (for the case when outer loop vectorization might be
1049 desirable) and the inner loop in the scalar version
1050 of the loop. So we have:
1051 if (LOOP_VECTORIZED (1, 3))
1052 {
1053 loop1
1054 loop2
1055 }
1056 else
1057 loop3 (copy of loop1)
1058 if (LOOP_VECTORIZED (4, 5))
1059 loop4 (copy of loop2)
1060 else
1061 loop5 (copy of loop4)
1062 If FOR_EACH_LOOP gives us loop3 first (which has
1063 dont_vectorize set), make sure to process loop1 before loop4,
1064 so that we can prevent vectorization of loop4 if loop1
1065 is successfully vectorized. */
1066 if (loop->inner)
1067 {
1068 gimple *loop_vectorized_call
1069 = vect_loop_vectorized_call (loop);
1070 if (loop_vectorized_call
1071 && vect_loop_vectorized_call (loop->inner))
1072 {
1073 tree arg = gimple_call_arg (loop_vectorized_call, 0);
1074 struct loop *vector_loop
1075 = get_loop (cfun, tree_to_shwi (arg));
1076 if (vector_loop && vector_loop != loop)
1077 {
1078 /* Make sure we don't vectorize it twice. */
1079 vector_loop->dont_vectorize = true;
1080 ret |= try_vectorize_loop (simduid_to_vf_htab,
1081 &num_vectorized_loops,
1082 vector_loop);
1083 }
1084 }
1085 }
1086 }
1087 else
1088 ret |= try_vectorize_loop (simduid_to_vf_htab, &num_vectorized_loops,
1089 loop);
1090
1091 vect_location = dump_user_location_t ();
1092
1093 statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
1094 if (dump_enabled_p ()
1095 || (num_vectorized_loops > 0 && dump_enabled_p ()))
1096 dump_printf_loc (MSG_NOTE, vect_location,
1097 "vectorized %u loops in function.\n",
1098 num_vectorized_loops);
1099
1100 /* ----------- Finalize. ----------- */
1101
1102 if (any_ifcvt_loops)
1103 for (i = 1; i < number_of_loops (cfun); i++)
1104 {
1105 loop = get_loop (cfun, i);
1106 if (loop && loop->dont_vectorize)
1107 {
1108 gimple *g = vect_loop_vectorized_call (loop);
1109 if (g)
1110 {
1111 fold_loop_internal_call (g, boolean_false_node);
1112 ret |= TODO_cleanup_cfg;
1113 g = NULL;
1114 }
1115 else
1116 g = vect_loop_dist_alias_call (loop);
1117
1118 if (g)
1119 {
1120 fold_loop_internal_call (g, boolean_false_node);
1121 ret |= TODO_cleanup_cfg;
1122 }
1123 }
1124 }
1125
1126 for (i = 1; i < number_of_loops (cfun); i++)
1127 {
1128 loop_vec_info loop_vinfo;
1129 bool has_mask_store;
1130
1131 loop = get_loop (cfun, i);
1132 if (!loop || !loop->aux)
1133 continue;
1134 loop_vinfo = (loop_vec_info) loop->aux;
1135 has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
1136 delete loop_vinfo;
1137 if (has_mask_store
1138 && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
1139 optimize_mask_stores (loop);
1140 loop->aux = NULL;
1141 }
1142
1143 /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins. */
1144 if (cfun->has_simduid_loops)
1145 adjust_simduid_builtins (simduid_to_vf_htab);
1146
1147 /* Shrink any "omp simd array" temporary arrays to the
1148 actual vectorization factors. */
1149 if (simd_array_to_simduid_htab)
1150 shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
1151 delete simduid_to_vf_htab;
1152 cfun->has_simduid_loops = false;
1153
1154 if (num_vectorized_loops > 0)
1155 {
1156 /* If we vectorized any loop, only virtual SSA form needs to be updated.
1157 ??? Also while we try hard to update loop-closed SSA form we fail
1158 to properly do this in some corner-cases (see PR56286). */
1159 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
1160 return TODO_cleanup_cfg;
1161 }
1162
1163 return ret;
1164 }
1165
1166
1167 /* Entry point to the simduid cleanup pass. */
1168
1169 namespace {
1170
1171 const pass_data pass_data_simduid_cleanup =
1172 {
1173 GIMPLE_PASS, /* type */
1174 "simduid", /* name */
1175 OPTGROUP_NONE, /* optinfo_flags */
1176 TV_NONE, /* tv_id */
1177 ( PROP_ssa | PROP_cfg ), /* properties_required */
1178 0, /* properties_provided */
1179 0, /* properties_destroyed */
1180 0, /* todo_flags_start */
1181 0, /* todo_flags_finish */
1182 };
1183
1184 class pass_simduid_cleanup : public gimple_opt_pass
1185 {
1186 public:
1187 pass_simduid_cleanup (gcc::context *ctxt)
1188 : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
1189 {}
1190
1191 /* opt_pass methods: */
1192 opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
1193 virtual bool gate (function *fun) { return fun->has_simduid_loops; }
1194 virtual unsigned int execute (function *);
1195
1196 }; // class pass_simduid_cleanup
1197
1198 unsigned int
1199 pass_simduid_cleanup::execute (function *fun)
1200 {
1201 hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
1202
1203 note_simd_array_uses (&simd_array_to_simduid_htab);
1204
1205 /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins. */
1206 adjust_simduid_builtins (NULL);
1207
1208 /* Shrink any "omp simd array" temporary arrays to the
1209 actual vectorization factors. */
1210 if (simd_array_to_simduid_htab)
1211 shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
1212 fun->has_simduid_loops = false;
1213 return 0;
1214 }
1215
1216 } // anon namespace
1217
1218 gimple_opt_pass *
1219 make_pass_simduid_cleanup (gcc::context *ctxt)
1220 {
1221 return new pass_simduid_cleanup (ctxt);
1222 }
1223
1224
1225 /* Entry point to basic block SLP phase. */
1226
1227 namespace {
1228
1229 const pass_data pass_data_slp_vectorize =
1230 {
1231 GIMPLE_PASS, /* type */
1232 "slp", /* name */
1233 OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
1234 TV_TREE_SLP_VECTORIZATION, /* tv_id */
1235 ( PROP_ssa | PROP_cfg ), /* properties_required */
1236 0, /* properties_provided */
1237 0, /* properties_destroyed */
1238 0, /* todo_flags_start */
1239 TODO_update_ssa, /* todo_flags_finish */
1240 };
1241
1242 class pass_slp_vectorize : public gimple_opt_pass
1243 {
1244 public:
1245 pass_slp_vectorize (gcc::context *ctxt)
1246 : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
1247 {}
1248
1249 /* opt_pass methods: */
1250 opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
1251 virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
1252 virtual unsigned int execute (function *);
1253
1254 }; // class pass_slp_vectorize
1255
1256 unsigned int
1257 pass_slp_vectorize::execute (function *fun)
1258 {
1259 basic_block bb;
1260
1261 bool in_loop_pipeline = scev_initialized_p ();
1262 if (!in_loop_pipeline)
1263 {
1264 loop_optimizer_init (LOOPS_NORMAL);
1265 scev_initialize ();
1266 }
1267
1268 /* Mark all stmts as not belonging to the current region and unvisited. */
1269 FOR_EACH_BB_FN (bb, fun)
1270 {
1271 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
1272 gsi_next (&gsi))
1273 {
1274 gimple *stmt = gsi_stmt (gsi);
1275 gimple_set_uid (stmt, -1);
1276 gimple_set_visited (stmt, false);
1277 }
1278 }
1279
1280 FOR_EACH_BB_FN (bb, fun)
1281 {
1282 if (vect_slp_bb (bb))
1283 dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
1284 }
1285
1286 if (!in_loop_pipeline)
1287 {
1288 scev_finalize ();
1289 loop_optimizer_finalize ();
1290 }
1291
1292 return 0;
1293 }
1294
1295 } // anon namespace
1296
1297 gimple_opt_pass *
1298 make_pass_slp_vectorize (gcc::context *ctxt)
1299 {
1300 return new pass_slp_vectorize (ctxt);
1301 }
1302
1303
1304 /* Increase alignment of global arrays to improve vectorization potential.
1305 TODO:
1306 - Consider also structs that have an array field.
1307 - Use ipa analysis to prune arrays that can't be vectorized?
1308 This should involve global alignment analysis and in the future also
1309 array padding. */
1310
1311 static unsigned get_vec_alignment_for_type (tree);
1312 static hash_map<tree, unsigned> *type_align_map;
1313
1314 /* Return the alignment of the vector type corresponding to the array's
1315 scalar element type, or 0 if no such vector type exists. */
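/* For instance (a sketch under the assumption of a target with 128-bit
   integer vectors), an array type like int[64] would yield the alignment
   of vector(4) int, i.e. 128 bits, while a smaller array such as int[2]
   would yield 0 because it cannot hold a full vector.  */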
1316 static unsigned
1317 get_vec_alignment_for_array_type (tree type)
1318 {
1319 gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
1320 poly_uint64 array_size, vector_size;
1321
1322 tree vectype = get_vectype_for_scalar_type (strip_array_types (type));
1323 if (!vectype
1324 || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
1325 || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
1326 || maybe_lt (array_size, vector_size))
1327 return 0;
1328
1329 return TYPE_ALIGN (vectype);
1330 }
1331
1332 /* Return alignment of the field having the maximum alignment of the vector
1333 type corresponding to its scalar type. For now, we only consider fields
1334 whose offset is a multiple of its vector alignment.
1335 Return 0 if no suitable field is found. */
1336 static unsigned
1337 get_vec_alignment_for_record_type (tree type)
1338 {
1339 gcc_assert (TREE_CODE (type) == RECORD_TYPE);
1340
1341 unsigned max_align = 0, alignment;
1342 HOST_WIDE_INT offset;
1343 tree offset_tree;
1344
1345 if (TYPE_PACKED (type))
1346 return 0;
1347
1348 unsigned *slot = type_align_map->get (type);
1349 if (slot)
1350 return *slot;
1351
1352 for (tree field = first_field (type);
1353 field != NULL_TREE;
1354 field = DECL_CHAIN (field))
1355 {
1356 /* Skip if not FIELD_DECL or if alignment is set by user. */
1357 if (TREE_CODE (field) != FIELD_DECL
1358 || DECL_USER_ALIGN (field)
1359 || DECL_ARTIFICIAL (field))
1360 continue;
1361
1362 /* We don't need to process the type further if offset is variable,
1363 since the offsets of remaining members will also be variable. */
1364 if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
1365 || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
1366 break;
1367
1368 /* Similarly stop processing the type if offset_tree
1369 does not fit in unsigned HOST_WIDE_INT. */
1370 offset_tree = bit_position (field);
1371 if (!tree_fits_uhwi_p (offset_tree))
1372 break;
1373
1374 offset = tree_to_uhwi (offset_tree);
1375 alignment = get_vec_alignment_for_type (TREE_TYPE (field));
1376
1377 /* Get the maximum alignment of vectorized field/array among those members
1378 whose offset is a multiple of the vector alignment. */
1379 if (alignment
1380 && (offset % alignment == 0)
1381 && (alignment > max_align))
1382 max_align = alignment;
1383 }
1384
1385 type_align_map->put (type, max_align);
1386 return max_align;
1387 }
1388
1389 /* Return alignment of vector type corresponding to decl's scalar type
1390 or 0 if it doesn't exist or the vector alignment is less than
1391 decl's alignment. */
1392 static unsigned
1393 get_vec_alignment_for_type (tree type)
1394 {
1395 if (type == NULL_TREE)
1396 return 0;
1397
1398 gcc_assert (TYPE_P (type));
1399
1400 static unsigned alignment = 0;
1401 switch (TREE_CODE (type))
1402 {
1403 case ARRAY_TYPE:
1404 alignment = get_vec_alignment_for_array_type (type);
1405 break;
1406 case RECORD_TYPE:
1407 alignment = get_vec_alignment_for_record_type (type);
1408 break;
1409 default:
1410 alignment = 0;
1411 break;
1412 }
1413
1414 return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
1415 }
1416
1417 /* Entry point to increase_alignment pass. */
1418 static unsigned int
1419 increase_alignment (void)
1420 {
1421 varpool_node *vnode;
1422
1423 vect_location = dump_user_location_t ();
1424 type_align_map = new hash_map<tree, unsigned>;
1425
1426 /* Increase the alignment of all global arrays for vectorization. */
1427 FOR_EACH_DEFINED_VARIABLE (vnode)
1428 {
1429 tree decl = vnode->decl;
1430 unsigned int alignment;
1431
1432 if ((decl_in_symtab_p (decl)
1433 && !symtab_node::get (decl)->can_increase_alignment_p ())
1434 || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
1435 continue;
1436
1437 alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
1438 if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
1439 {
1440 vnode->increase_alignment (alignment);
1441 dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl);
1442 }
1443 }
1444
1445 delete type_align_map;
1446 return 0;
1447 }
1448
1449
1450 namespace {
1451
1452 const pass_data pass_data_ipa_increase_alignment =
1453 {
1454 SIMPLE_IPA_PASS, /* type */
1455 "increase_alignment", /* name */
1456 OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
1457 TV_IPA_OPT, /* tv_id */
1458 0, /* properties_required */
1459 0, /* properties_provided */
1460 0, /* properties_destroyed */
1461 0, /* todo_flags_start */
1462 0, /* todo_flags_finish */
1463 };
1464
1465 class pass_ipa_increase_alignment : public simple_ipa_opt_pass
1466 {
1467 public:
1468 pass_ipa_increase_alignment (gcc::context *ctxt)
1469 : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
1470 {}
1471
1472 /* opt_pass methods: */
1473 virtual bool gate (function *)
1474 {
1475 return flag_section_anchors && flag_tree_loop_vectorize;
1476 }
1477
1478 virtual unsigned int execute (function *) { return increase_alignment (); }
1479
1480 }; // class pass_ipa_increase_alignment
1481
1482 } // anon namespace
1483
1484 simple_ipa_opt_pass *
1485 make_pass_ipa_increase_alignment (gcc::context *ctxt)
1486 {
1487 return new pass_ipa_increase_alignment (ctxt);
1488 }