/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2014 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "target.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"

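/* Overview example (illustrative, not taken from the original sources):
   basic block SLP packs groups of isomorphic scalar statements such as

       a[0] = b[0] + c[0];
       a[1] = b[1] + c[1];
       a[2] = b[2] + c[2];
       a[3] = b[3] + c[3];

   into vector operations, here a single vector addition and a single
   vector store, without requiring an enclosing loop.  */
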
/* Extract the location of the basic block in the source code.
   Return the location of the first statement in BB that has one,
   and UNKNOWN_LOCATION if no statement location is found.  */

source_location
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOCATION;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOCATION)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOCATION;
}


/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}


/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}


/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
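      /* The comparison embedded in a COND_EXPR contributes two operands
	 of its own, so reserve one extra child slot for it (see how
	 vect_get_and_check_slp_defs walks the condition's operands).  */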
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
	nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;

  return node;
}


/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}


/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}


/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
	return result;
      result++;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
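
/* For example (illustrative): in the interleaving chain built for the
   grouped loads of a[i], a[i+1], a[i+2] and a[i+3], FIRST_STMT is the
   load of a[i] and the routine above returns 0, 1, 2 and 3 for the
   four loads respectively.  */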


/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  Return -1 on a fatal error, 1 if
   the error could be corrected by swapping the operands of the operation,
   and 0 if everything is OK.  */

static int
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
			     gimple stmt, bool first,
			     vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      first_op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
	{
	  first_op_cond = true;
	  commutative = true;
	  number_of_oprnds++;
	}
      else
	commutative = commutative_tree_code (code);
    }
  else
    return -1;

  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
    {
again:
      if (first_op_cond)
	{
	  if (i == 0 || i == 1)
	    oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
				  swapped ? !i : i);
	  else
	    oprnd = gimple_op (stmt, first_op_idx + i - 1);
	}
      else
	oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
			       &def, &dt)
	  || (!def_stmt && dt != vect_constant_def))
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: can't find def for ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }

	  return -1;
	}

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
	 from the pattern.  Check that all the stmts of the node are in the
	 pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
	  && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	      || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
		  && gimple_code (def_stmt) != GIMPLE_PHI))
	  && vinfo_for_stmt (def_stmt)
	  && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
	  && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
	  && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
	{
	  pattern = true;
	  if (!first && !oprnd_info->first_pattern)
	    {
	      if (i == 0
		  && !swapped
		  && commutative)
		{
		  swapped = true;
		  goto again;
		}

	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: some of the stmts"
				   " are in a pattern, and others are not ");
		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}

	      return 1;
	    }

	  def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
	  dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

	  if (dt == vect_unknown_def_type)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Unsupported pattern.\n");
	      return -1;
	    }

	  switch (gimple_code (def_stmt))
	    {
	    case GIMPLE_PHI:
	      def = gimple_phi_result (def_stmt);
	      break;

	    case GIMPLE_ASSIGN:
	      def = gimple_assign_lhs (def_stmt);
	      break;

	    default:
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported defining stmt:\n");
	      return -1;
	    }
	}

      if (first)
	{
	  oprnd_info->first_dt = dt;
	  oprnd_info->first_pattern = pattern;
	  oprnd_info->first_op_type = TREE_TYPE (oprnd);
	}
      else
	{
	  /* Not first stmt of the group, check that the def-stmt/s match
	     the def-stmt/s of the first stmt.  Allow different definition
	     types for reduction chains: the first stmt must be a
	     vect_reduction_def (a phi node), and the rest
	     vect_internal_def.  */
	  if (((oprnd_info->first_dt != dt
		&& !(oprnd_info->first_dt == vect_reduction_def
		     && dt == vect_internal_def)
		&& !((oprnd_info->first_dt == vect_external_def
		      || oprnd_info->first_dt == vect_constant_def)
		     && (dt == vect_external_def
			 || dt == vect_constant_def)))
	       || !types_compatible_p (oprnd_info->first_op_type,
				       TREE_TYPE (oprnd))))
	    {
	      /* Try swapping operands if we got a mismatch.  */
	      if (i == 0
		  && !swapped
		  && commutative)
		{
		  swapped = true;
		  goto again;
		}

	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Build SLP failed: different types\n");

	      return 1;
	    }
	}

      /* Check the types of the definitions.  */
      switch (dt)
	{
	case vect_constant_def:
	case vect_external_def:
	case vect_reduction_def:
	  break;

	case vect_internal_def:
	  oprnd_info->def_stmts.quick_push (def_stmt);
	  break;

	default:
	  /* FORNOW: Not supported.  */
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: illegal type of def ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }

	  return -1;
	}
    }

  /* Swap operands.  */
  if (swapped)
    {
      if (first_op_cond)
	{
	  tree cond = gimple_assign_rhs1 (stmt);
	  swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
			     &TREE_OPERAND (cond, 1));
	  TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
	}
      else
	swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
			   gimple_assign_rhs2_ptr (stmt));
    }

  return 0;
}
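
/* Illustrative example: for the group { x0 = a0 + b0; x1 = b1 + a1; }
   where the A operands are internal defs and the B operands external,
   the second stmt mismatches the first on both operands.  Since
   PLUS_EXPR is commutative, the routine above retries with the operands
   of x1 read in swapped order (and finally swaps them in the IL)
   instead of failing the SLP build.  */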


/* Verify that the scalar stmts STMTS are isomorphic to each other, do
   not require data permutation, and are not of unsupported operation
   types.  Return true if so; otherwise return false and indicate in
   *MATCHES which stmts are not isomorphic to the first one.  If
   MATCHES[0] is false then the comparison could not be carried out or
   the stmts will never be vectorized by SLP.  */

static bool
vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
		       vec<gimple> stmts, unsigned int group_size,
		       unsigned nops, unsigned int *max_nunits,
		       unsigned int vectorization_factor, bool *matches)
{
  unsigned int i;
  gimple stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK, rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  enum machine_mode optab_op2_mode;
  enum machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  tree cond;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	  dump_printf (MSG_NOTE, "\n");
	}

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: unvectorizable statement ");
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  /* Fatal mismatch.  */
	  matches[0] = false;
	  return false;
	}

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: not GIMPLE_ASSIGN nor "
			       "GIMPLE_CALL ");
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  /* Fatal mismatch.  */
	  matches[0] = false;
	  return false;
	}

      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == COND_EXPR
	  && (cond = gimple_assign_rhs1 (stmt))
	  && !COMPARISON_CLASS_P (cond))
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: condition is not "
			       "comparison ");
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  /* Fatal mismatch.  */
	  matches[0] = false;
	  return false;
	}

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: unsupported data-type ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				 scalar_type);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  /* Fatal mismatch.  */
	  matches[0] = false;
	  return false;
	}

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
	{
	  *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
	  if (bb_vinfo)
	    vectorization_factor = *max_nunits;
	}

      if (is_gimple_call (stmt))
	{
	  rhs_code = CALL_EXPR;
	  if (gimple_call_internal_p (stmt)
	      || gimple_call_tail_p (stmt)
	      || gimple_call_noreturn_p (stmt)
	      || !gimple_call_nothrow_p (stmt)
	      || gimple_call_chain (stmt))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: unsupported call type ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      /* Fatal mismatch.  */
	      matches[0] = false;
	      return false;
	    }
	}
      else
	rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
	{
	  first_stmt_code = rhs_code;

	  /* Shift arguments should be equal in all the packed stmts for a
	     vector shift with scalar shift operand.  */
	  if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
	      || rhs_code == LROTATE_EXPR
	      || rhs_code == RROTATE_EXPR)
	    {
	      vec_mode = TYPE_MODE (vectype);

	      /* First see if we have a vector/vector shift.  */
	      optab = optab_for_tree_code (rhs_code, vectype,
					   optab_vector);

	      if (!optab
		  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
		{
		  /* No vector/vector shift, try for a vector/scalar shift.  */
		  optab = optab_for_tree_code (rhs_code, vectype,
					       optab_scalar);

		  if (!optab)
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
					 "Build SLP failed: no optab.\n");
		      /* Fatal mismatch.  */
		      matches[0] = false;
		      return false;
		    }
		  icode = (int) optab_handler (optab, vec_mode);
		  if (icode == CODE_FOR_nothing)
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
					 "Build SLP failed: "
					 "op not supported by target.\n");
		      /* Fatal mismatch.  */
		      matches[0] = false;
		      return false;
		    }
		  optab_op2_mode = insn_data[icode].operand[2].mode;
		  if (!VECTOR_MODE_P (optab_op2_mode))
		    {
		      need_same_oprnds = true;
		      first_op1 = gimple_assign_rhs2 (stmt);
		    }
		}
	    }
	  else if (rhs_code == WIDEN_LSHIFT_EXPR)
	    {
	      need_same_oprnds = true;
	      first_op1 = gimple_assign_rhs2 (stmt);
	    }
	}
      else
	{
	  if (first_stmt_code != rhs_code
	      && (first_stmt_code != IMAGPART_EXPR
		  || rhs_code != REALPART_EXPR)
	      && (first_stmt_code != REALPART_EXPR
		  || rhs_code != IMAGPART_EXPR)
	      && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
		   && (first_stmt_code == ARRAY_REF
		       || first_stmt_code == BIT_FIELD_REF
		       || first_stmt_code == INDIRECT_REF
		       || first_stmt_code == COMPONENT_REF
		       || first_stmt_code == MEM_REF)))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: different operation "
				   "in stmt ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      /* Mismatch.  */
	      continue;
	    }

	  if (need_same_oprnds
	      && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: different shift "
				   "arguments in ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      /* Mismatch.  */
	      continue;
	    }

	  if (rhs_code == CALL_EXPR)
	    {
	      gimple first_stmt = stmts[0];
	      if (gimple_call_num_args (stmt) != nops
		  || !operand_equal_p (gimple_call_fn (first_stmt),
				       gimple_call_fn (stmt), 0)
		  || gimple_call_fntype (first_stmt)
		     != gimple_call_fntype (stmt))
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "Build SLP failed: different calls in ");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					stmt, 0);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  /* Mismatch.  */
		  continue;
		}
	    }
	}

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
	{
	  if (REFERENCE_CLASS_P (lhs))
	    {
	      /* Store.  */
	      ;
	    }
	  else
	    {
	      /* Load.  */
	      unsigned unrolling_factor
		= least_common_multiple
		    (*max_nunits, group_size) / group_size;
	      /* FORNOW: Check that there is no gap between the loads
		 and no gap between the groups when we need to load
		 multiple groups at once.
		 ??? We should enhance this to only disallow gaps
		 inside vectors.  */
	      if ((unrolling_factor > 1
		   && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
		   && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
		  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
		      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "Build SLP failed: grouped "
				       "loads have gaps ");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					stmt, 0);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  /* Fatal mismatch.  */
		  matches[0] = false;
		  return false;
		}

	      /* Check that the size of interleaved loads group is not
		 greater than the SLP group size.  */
	      unsigned ncopies
		= vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
	      if (loop_vinfo
		  && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
		  && ((GROUP_SIZE (vinfo_for_stmt (stmt))
		       - GROUP_GAP (vinfo_for_stmt (stmt)))
		      > ncopies * group_size))
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "Build SLP failed: the number "
				       "of interleaved loads is greater than "
				       "the SLP group size ");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					stmt, 0);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  /* Fatal mismatch.  */
		  matches[0] = false;
		  return false;
		}

	      old_first_load = first_load;
	      first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
	      if (prev_first_load)
		{
		  /* Check that there are no loads from different interleaving
		     chains in the same node.  */
		  if (prev_first_load != first_load)
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					   vect_location,
					   "Build SLP failed: different "
					   "interleaving chains in one node ");
			  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					    stmt, 0);
			  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
			}
		      /* Mismatch.  */
		      continue;
		    }
		}
	      else
		prev_first_load = first_load;

	      /* In some cases a group of loads is just the same load
		 repeated N times.  Only analyze its cost once.  */
	      if (first_load == stmt && old_first_load != first_load)
		{
		  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
		  if (vect_supportable_dr_alignment (first_dr, false)
		      == dr_unaligned_unsupported)
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					   vect_location,
					   "Build SLP failed: unsupported "
					   "unaligned load ");
			  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					    stmt, 0);
			  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
			}
		      /* Fatal mismatch.  */
		      matches[0] = false;
		      return false;
		    }
		}
	    }
	} /* Grouped access.  */
      else
	{
	  if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
	    {
	      /* Not grouped load.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: not grouped load ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}

	      /* FORNOW: Not grouped loads are not supported.  */
	      /* Fatal mismatch.  */
	      matches[0] = false;
	      return false;
	    }

	  /* Not memory operation.  */
	  if (TREE_CODE_CLASS (rhs_code) != tcc_binary
	      && TREE_CODE_CLASS (rhs_code) != tcc_unary
	      && rhs_code != COND_EXPR
	      && rhs_code != CALL_EXPR)
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: operation");
		  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      /* Fatal mismatch.  */
	      matches[0] = false;
	      return false;
	    }

	  if (rhs_code == COND_EXPR)
	    {
	      tree cond_expr = gimple_assign_rhs1 (stmt);

	      if (i == 0)
		first_cond_code = TREE_CODE (cond_expr);
	      else if (first_cond_code != TREE_CODE (cond_expr))
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "Build SLP failed: different"
				       " operation");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					stmt, 0);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  /* Mismatch.  */
		  continue;
		}
	    }
	}

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;

  return true;
}
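
/* For example (illustrative), the pair { a[0] = x + y; a[1] = x - y; }
   is rejected by the checks above because the two stmts use different
   operations, whereas { a[0] = p[0] + q[0]; a[1] = p[1] + q[1]; }
   forms a vectorizable group.  */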

/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if the def-stmts are not isomorphic, require
   data permutation or are of unsupported types of operation.  Otherwise
   return TRUE.  On failure, MATCHES indicates which of the scalar stmts
   did not match the first one.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
		     slp_tree *node, unsigned int group_size,
		     unsigned int *max_nunits,
		     vec<slp_tree> *loads,
		     unsigned int vectorization_factor,
		     bool *matches, unsigned *npermutes, unsigned *tree_size,
		     unsigned max_tree_size)
{
  unsigned nops, i, this_npermutes = 0, this_tree_size = 0;
  gimple stmt;

  if (!matches)
    matches = XALLOCAVEC (bool, group_size);
  if (!npermutes)
    npermutes = &this_npermutes;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
	nops++;
    }
  else
    return false;

  if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
			      SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
			      max_nunits, vectorization_factor, matches))
    return false;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      switch (vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
					   stmt, (i == 0), &oprnds_info))
	{
	case 0:
	  break;
	case -1:
	  matches[0] = false;
	  vect_free_oprnd_info (oprnds_info);
	  return false;
	case 1:
	  matches[i] = false;
	  break;
	}
    }
  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      {
	vect_free_oprnd_info (oprnds_info);
	return false;
      }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
	continue;

      if (++this_tree_size > max_tree_size)
	{
	  vect_free_oprnd_info (oprnds_info);
	  return false;
	}

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
	{
	  vect_free_oprnd_info (oprnds_info);
	  return false;
	}

      bool *matches = XALLOCAVEC (bool, group_size);
      if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
			       group_size, max_nunits, loads,
			       vectorization_factor, matches,
			       npermutes, &this_tree_size, max_tree_size))
	{
	  oprnd_info->def_stmts = vNULL;
	  SLP_TREE_CHILDREN (*node).quick_push (child);
	  continue;
	}

      /* If the SLP build for operand zero failed and operand zero
	 and one can be commuted, try that for the scalar stmts
	 that failed the match.  */
      if (i == 0
	  /* A first scalar stmt mismatch signals a fatal mismatch.  */
	  && matches[0]
	  /* ??? For COND_EXPRs we can swap the comparison operands
	     as well as the arms under some constraints.  */
	  && nops == 2
	  && oprnds_info[1]->first_dt == vect_internal_def
	  && is_gimple_assign (stmt)
	  && commutative_tree_code (gimple_assign_rhs_code (stmt))
	  /* Do so only if the number of unsuccessful permutes was not
	     more than a cut-off, as re-trying the recursive match on
	     possibly each level of the tree would expose exponential
	     behavior.  */
	  && *npermutes < 4)
	{
	  /* Roll back.  */
	  *max_nunits = old_max_nunits;
	  loads->truncate (old_nloads);
	  /* Swap mismatched definition stmts.  */
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Re-trying with swapped operands of stmts ");
	  for (unsigned j = 0; j < group_size; ++j)
	    if (!matches[j])
	      {
		gimple tem = oprnds_info[0]->def_stmts[j];
		oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
		oprnds_info[1]->def_stmts[j] = tem;
		dump_printf (MSG_NOTE, "%d ", j);
	      }
	  dump_printf (MSG_NOTE, "\n");
	  /* And try again ...  */
	  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
				   group_size, max_nunits, loads,
				   vectorization_factor,
				   matches, npermutes, &this_tree_size,
				   max_tree_size))
	    {
	      oprnd_info->def_stmts = vNULL;
	      SLP_TREE_CHILDREN (*node).quick_push (child);
	      continue;
	    }

	  ++*npermutes;
	}

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  if (tree_size)
    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
  return true;
}

/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
}


/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}


/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
		  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}


/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
			  vec<unsigned> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}


/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */
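
/* For example (illustrative): with GROUP_SIZE == 4, a load node whose
   scalar stmts read a[1], a[0], a[3], a[2] carries the load
   permutation {1, 0, 3, 2}.  */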

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  sbitmap load_index;
  slp_tree node;
  gimple stmt, load, next_load, first_load;
  struct data_reference *dr;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
	if (node->load_permutation.exists ())
	  FOR_EACH_VEC_ELT (node->load_permutation, j, next)
	    dump_printf (MSG_NOTE, "%d ", next);
	else
	  for (k = 0; k < group_size; ++k)
	    dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ??? Can't we assert this? */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      slp_tree load;
      unsigned int lidx;

      /* Compare all the permutation sequences to the first one.  We know
	 that at least one load is permuted.  */
      node = SLP_INSTANCE_LOADS (slp_instn)[0];
      if (!node->load_permutation.exists ())
	return false;
      for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
	{
	  if (!load->load_permutation.exists ())
	    return false;
	  FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
	    if (lidx != node->load_permutation[j])
	      return false;
	}

      /* Check that the loads in the first sequence are different and there
	 are no gaps between them.  */
      load_index = sbitmap_alloc (group_size);
      bitmap_clear (load_index);
      FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
	{
	  if (bitmap_bit_p (load_index, lidx))
	    {
	      sbitmap_free (load_index);
	      return false;
	    }
	  bitmap_set_bit (load_index, lidx);
	}
      for (i = 0; i < group_size; i++)
	if (!bitmap_bit_p (load_index, i))
	  {
	    sbitmap_free (load_index);
	    return false;
	  }
      sbitmap_free (load_index);

      /* This permutation is valid for reduction.  Since the order of the
	 statements in the nodes is not important unless they are memory
	 accesses, we can rearrange the statements in all the nodes
	 according to the order of the loads.  */
      vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
				node->load_permutation);

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
	SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check that for every node in the instance the loads
	 form a subchain.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
	{
	  next_load = NULL;
	  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
	    {
	      if (j != 0 && next_load != load)
		return false;
	      next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
	    }
	}

      /* Check that the alignment of the first load in every subchain, i.e.,
	 the first statement in every load node, is supported.
	 ??? This belongs in alignment checking.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
	{
	  first_load = SLP_TREE_SCALAR_STMTS (node)[0];
	  if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
	    {
	      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
	      if (vect_supportable_dr_alignment (dr, false)
		  == dr_unaligned_unsupported)
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
				       vect_location,
				       "unsupported unaligned load ");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					first_load, 0);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  return false;
		}
	    }
	}

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
	SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* FORNOW: the only supported permutation is 0..0 1..1 ... of length equal
     to GROUP_SIZE, where each run of identical drs has length GROUP_SIZE
     as well (unless this is a reduction).  */
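  /* For instance (illustrative), with GROUP_SIZE == 2 and two load
     nodes, the per-node permutations {0, 0} and {1, 1} satisfy the
     shape checks below: each node repeats a single load, the nodes use
     distinct chain positions, and together they cover indexes 0 and 1.  */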
  if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
    return false;
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (!node->load_permutation.exists ())
      return false;

  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      unsigned int lidx = node->load_permutation[0];
      if (bitmap_bit_p (load_index, lidx))
	{
	  sbitmap_free (load_index);
	  return false;
	}
      bitmap_set_bit (load_index, lidx);
      FOR_EACH_VEC_ELT (node->load_permutation, j, k)
	if (k != lidx)
	  {
	    sbitmap_free (load_index);
	    return false;
	  }
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
	sbitmap_free (load_index);
	return false;
      }
  sbitmap_free (load_index);

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
	&& !vect_transform_slp_perm_load
	      (node, vNULL, NULL,
	       SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;
  return true;
}


/* Find the first load in the loop that belongs to INSTANCE.
   When loads are in several SLP nodes, there can be a case in which the first
   load does not appear in the first SLP node to be transformed, causing
   incorrect order of statements.  Since we generate all the loads together,
   they must be inserted before the first load of the SLP instance and not
   before the first load of the first node of the instance.  */

static gimple
vect_find_first_load_in_slp_instance (slp_instance instance)
{
  int i, j;
  slp_tree load_node;
  gimple first_load = NULL, load;

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node)
    FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
      first_load = get_earlier_stmt (load, first_load);

  return first_load;
}


/* Find the last store in SLP INSTANCE.  */

static gimple
vect_find_last_store_in_slp_instance (slp_instance instance)
{
  int i;
  slp_tree node;
  gimple last_store = NULL, store;

  node = SLP_INSTANCE_TREE (instance);
  for (i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &store); i++)
    last_store = get_later_stmt (store, last_store);

  return last_store;
}

/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
			 slp_instance instance, slp_tree node,
			 stmt_vector_for_cost *prologue_cost_vec,
			 unsigned ncopies_for_cost)
{
  stmt_vector_for_cost *body_cost_vec = &SLP_INSTANCE_BODY_COST_VEC (instance);

  unsigned i;
  slp_tree child;
  gimple stmt, s;
  stmt_vec_info stmt_info;
  tree lhs;
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
			     instance, child, prologue_cost_vec,
			     ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
	vect_model_store_cost (stmt_info, ncopies_for_cost, false,
			       vect_uninitialized_def,
			       node, prologue_cost_vec, body_cost_vec);
      else
	{
	  int i;
	  gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
	  vect_model_load_cost (stmt_info, ncopies_for_cost, false,
				node, prologue_cost_vec, body_cost_vec);
	  /* If the load is permuted record the cost for the permutation.
	     ??? Loads from multiple chains are let through here only
	     for a single special case involving complex numbers where
	     in the end no permutation is necessary.  */
	  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
	    if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
		 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
		&& vect_get_place_in_interleaving_chain
		     (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
	      {
		record_stmt_cost (body_cost_vec, group_size, vec_perm,
				  stmt_info, 0, vect_body);
		break;
	      }
	}
    }
  else
    record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
		      stmt_info, 0, vect_body);

  /* Scan operands and account for prologue cost of constants/externals.
     ??? This over-estimates cost for multiple uses and should be
     re-engineered.  */
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree def, op = gimple_op (stmt, i);
      gimple def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
	continue;
      if (vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo,
			      &def_stmt, &def, &dt)
	  && (dt == vect_constant_def || dt == vect_external_def))
	record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
			  stmt_info, 0, vect_prologue);
    }
}

/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
		       slp_instance instance, unsigned nunits)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
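  /* E.g. (illustrative) a group of 8 scalar stmts costed with 4-element
     vectors gives lcm (4, 8) / 4 == 2 vector stmts.  */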

  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  SLP_INSTANCE_BODY_COST_VEC (instance) = body_cost_vec;
  vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
			   instance, SLP_INSTANCE_TREE (instance),
			   &prologue_cost_vec, ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  Unlike the body costs, we know
     the final values now regardless of the loop vectorization factor.  */
  void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
		: BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
	= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
			    si->misalign, vect_prologue);
    }

  prologue_cost_vec.release ();
}

/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
			   gimple stmt, unsigned max_tree_size)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
	{
	  scalar_type = TREE_TYPE (DR_REF (dr));
	  vectype = get_vectype_for_scalar_type (scalar_type);
	}
      else
	{
	  gcc_assert (loop_vinfo);
	  vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
	}

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "Build SLP failed: unsupported data-type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
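  /* E.g. (illustrative) GROUP_SIZE == 2 with 4-element vectors gives
     lcm (4, 2) / 2 == 2: two copies of the group are needed to fill a
     vector, which in a loop means unrolling by a factor of two.  */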
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Build SLP failed: unrolling required in basic"
			 " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
	{
	  if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
	      && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
	    scalar_stmts.safe_push (
		  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
	  else
	    scalar_stmts.safe_push (next);
	  next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
	}
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
      for (i = 0; reductions.iterate (i, &next); i++)
	scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
			   &max_nunits, &loads,
			   vectorization_factor, NULL, NULL, NULL,
			   max_tree_size))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
	unrolling_factor = least_common_multiple (max_nunits, group_size)
			   / group_size;

      if (unrolling_factor != 1 && !loop_vinfo)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "Build SLP failed: unrolling required in basic"
			     " block SLP\n");
	  vect_free_slp_tree (node);
	  loads.release ();
	  return false;
	}

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_BODY_COST_VEC (new_instance) = vNULL;
      SLP_INSTANCE_LOADS (new_instance) = loads;
      SLP_INSTANCE_FIRST_LOAD_STMT (new_instance) = NULL;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
	{
	  vec<unsigned> load_permutation;
	  int j;
	  gimple load, first_stmt;
	  bool this_load_permuted = false;
	  load_permutation.create (group_size);
	  first_stmt = GROUP_FIRST_ELEMENT
	      (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
	  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
	    {
	      int load_place
		= vect_get_place_in_interleaving_chain (load, first_stmt);
	      gcc_assert (load_place != -1);
	      if (load_place != j)
		this_load_permuted = true;
	      load_permutation.safe_push (load_place);
	    }
	  if (!this_load_permuted)
	    {
	      load_permutation.release ();
	      continue;
	    }
	  SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
	  loads_permuted = true;
	}

      if (loads_permuted)
	{
	  if (!vect_supported_load_permutation_p (new_instance))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: unsupported load "
				   "permutation ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      vect_free_slp_instance (new_instance);
	      return false;
	    }

	  SLP_INSTANCE_FIRST_LOAD_STMT (new_instance)
	    = vect_find_first_load_in_slp_instance (new_instance);
	}

      /* Compute the costs of this SLP instance.  */
      vect_analyze_slp_cost (loop_vinfo, bb_vinfo,
			     new_instance, TYPE_VECTOR_SUBPARTS (vectype));

      if (loop_vinfo)
	LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
      else
	BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);

      if (dump_enabled_p ())
	vect_print_slp_tree (MSG_NOTE, node);

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();

  return false;
}


/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
		  unsigned max_tree_size)
{
  unsigned int i;
  vec<gimple> grouped_stores;
  vec<gimple> reductions = vNULL;
  vec<gimple> reduc_chains = vNULL;
  gimple first_element;
  bool ok = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");

  if (loop_vinfo)
    {
      grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
      reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
      reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
    }
  else
    grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
				   max_tree_size))
      ok = true;

  if (bb_vinfo && !ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Failed to SLP the basic block.\n");

      return false;
    }

  if (loop_vinfo
      && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
    {
      /* Find SLP sequences starting from reduction chains.  */
      FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
	if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
				       max_tree_size))
	  ok = true;
	else
	  return false;

      /* Don't try to vectorize SLP reductions if reduction chain was
	 detected.  */
      return ok;
    }

  /* Find SLP sequences starting from groups of reductions.  */
  if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
      && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0],
				    max_tree_size))
    ok = true;

  return true;
}


/* For each possible SLP instance decide whether to SLP it and calculate
   the overall unrolling factor needed to SLP the loop.  Return TRUE if
   decided to SLP at least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
		     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
	unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
	 call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
	 loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "Decided to SLP %d instances. Unrolling factor %d\n",
		     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
}


/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */

static void
vect_detect_hybrid_slp_stmts (slp_tree node)
{
  int i;
  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (node);
  gimple stmt = stmts[0];
  imm_use_iterator imm_iter;
  gimple use_stmt;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  slp_tree child;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = NULL;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
  basic_block bb = NULL;

  if (!node)
    return;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);
  else
    bb = BB_VINFO_BB (bb_vinfo);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (PURE_SLP_STMT (vinfo_for_stmt (stmt))
	&& TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
	if (gimple_bb (use_stmt)
	    && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
		|| bb == gimple_bb (use_stmt))
	    && (stmt_vinfo = vinfo_for_stmt (use_stmt))
	    && !STMT_SLP_TYPE (stmt_vinfo)
	    && (STMT_VINFO_RELEVANT (stmt_vinfo)
		|| VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_vinfo))
		|| (STMT_VINFO_IN_PATTERN_P (stmt_vinfo)
		    && STMT_VINFO_RELATED_STMT (stmt_vinfo)
		    && !STMT_SLP_TYPE (vinfo_for_stmt
					 (STMT_VINFO_RELATED_STMT
					    (stmt_vinfo)))))
	    && !(gimple_code (use_stmt) == GIMPLE_PHI
		 && STMT_VINFO_DEF_TYPE (stmt_vinfo)
		      == vect_reduction_def))
	  vect_mark_slp_stmts (node, hybrid, i);

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_detect_hybrid_slp_stmts (child);
}


/* Find stmts that must be both vectorized and SLPed.  */

void
vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
{
  unsigned int i;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
		     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance));
}


/* Create and initialize a new bb_vec_info struct for BB, as well as
   stmt_vec_info structs for all the stmts in it.  */

static bb_vec_info
new_bb_vec_info (basic_block bb)
{
  bb_vec_info res = NULL;
  gimple_stmt_iterator gsi;

  res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
  BB_VINFO_BB (res) = bb;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      gimple_set_uid (stmt, 0);
      set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
    }

  BB_VINFO_GROUPED_STORES (res).create (10);
  BB_VINFO_SLP_INSTANCES (res).create (2);
  BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);

  bb->aux = res;
  return res;
}


/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  basic_block bb;
  gimple_stmt_iterator si;
  unsigned i;

  if (!bb_vinfo)
    return;

  bb = BB_VINFO_BB (bb_vinfo);

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (stmt_info)
	/* Free stmt_vec_info.  */
	free_stmt_vec_info (stmt);
    }

  vect_destroy_datarefs (NULL, bb_vinfo);
  free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
  BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance);
  BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
  destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  free (bb_vinfo);
  bb->aux = NULL;
}


/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

static bool
vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
{
  bool dummy;
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return true;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (!vect_slp_analyze_node_operations (bb_vinfo, child))
      return false;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);
      gcc_assert (PURE_SLP_STMT (stmt_info));

      if (!vect_analyze_stmt (stmt, &dummy, node))
	return false;
    }

  return true;
}


/* Analyze statements in SLP instances of the basic block.  Return TRUE if the
   operations are supported.  */

static bool
vect_slp_analyze_operations (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i;

  for (i = 0; slp_instances.iterate (i, &instance); )
    {
      if (!vect_slp_analyze_node_operations (bb_vinfo,
					     SLP_INSTANCE_TREE (instance)))
	{
	  vect_free_slp_instance (instance);
2010 slp_instances.ordered_remove (i);
2011 }
2012 else
2013 i++;
2014 }
2015
2016 if (!slp_instances.length ())
2017 return false;
2018
2019 return true;
2020 }
2021
2022
2023 /* Compute the scalar cost of the SLP node NODE and its children
2024 and return it. Do not account for defs that are marked in LIFE,
2025 and update LIFE according to the uses of NODE. */
2026
2027 static unsigned
2028 vect_bb_slp_scalar_cost (basic_block bb,
2029 slp_tree node, vec<bool, va_heap> *life)
2030 {
2031 unsigned scalar_cost = 0;
2032 unsigned i;
2033 gimple stmt;
2034 slp_tree child;
2035
2036 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2037 {
2038 unsigned stmt_cost;
2039 ssa_op_iter op_iter;
2040 def_operand_p def_p;
2041 stmt_vec_info stmt_info;
2042
2043 if ((*life)[i])
2044 continue;
2045
2046 /* If there is a non-vectorized use of the defs then the scalar
2047 stmt is kept live, in which case we account neither it nor any
2048 required defs in the SLP children in the scalar cost. This
2049 makes the vectorization look more costly when compared to the
2050 scalar cost. */
2051 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2052 {
2053 imm_use_iterator use_iter;
2054 gimple use_stmt;
2055 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2056 if (!is_gimple_debug (use_stmt)
2057 && (gimple_code (use_stmt) == GIMPLE_PHI
2058 || gimple_bb (use_stmt) != bb
2059 || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
2060 {
2061 (*life)[i] = true;
2062 BREAK_FROM_IMM_USE_STMT (use_iter);
2063 }
2064 }
2065 if ((*life)[i])
2066 continue;
2067
2068 stmt_info = vinfo_for_stmt (stmt);
2069 if (STMT_VINFO_DATA_REF (stmt_info))
2070 {
2071 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2072 stmt_cost = vect_get_stmt_cost (scalar_load);
2073 else
2074 stmt_cost = vect_get_stmt_cost (scalar_store);
2075 }
2076 else
2077 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2078
2079 scalar_cost += stmt_cost;
2080 }
2081
2082 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2083 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2084
2085 return scalar_cost;
2086 }
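
/* For instance (hypothetical stmts), in a node containing

a_1 = b_2 + c_3; <-- a_1 used only by other vectorized stmts
d_4 = a_1 * e_5; <-- d_4 also used outside the basic block

the second def escapes the block, so its stmt is marked in LIFE and is not
counted, and only the first stmt adds vect_get_stmt_cost (scalar_stmt) to the
returned scalar cost. */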
2087
2088 /* Check if vectorization of the basic block is profitable. */
2089
2090 static bool
2091 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2092 {
2093 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2094 slp_instance instance;
2095 int i, j;
2096 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2097 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2098 void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
2099 stmt_vec_info stmt_info = NULL;
2100 stmt_vector_for_cost body_cost_vec;
2101 stmt_info_for_cost *ci;
2102
2103 /* Calculate vector costs. */
2104 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2105 {
2106 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2107
2108 FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
2109 {
2110 stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
2111 (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
2112 stmt_info, ci->misalign, vect_body);
2113 }
2114 }
2115
2116 /* Calculate scalar cost. */
2117 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2118 {
2119 auto_vec<bool, 20> life;
2120 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2121 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2122 SLP_INSTANCE_TREE (instance),
2123 &life);
2124 }
2125
2126 /* Complete the target-specific cost calculation. */
2127 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2128 &vec_inside_cost, &vec_epilogue_cost);
2129
2130 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2131
2132 if (dump_enabled_p ())
2133 {
2134 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2135 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2136 vec_inside_cost);
2137 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2138 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2139 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2140 }
2141
2142 /* Vectorization is profitable if its cost is less than the cost of the
2143 scalar version. */
2144 if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2145 return false;
2146
2147 return true;
2148 }
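
/* As an example of the test above: with vec_inside_cost == 8,
vec_prologue_cost == 2 and vec_epilogue_cost == 1, the total vector cost is
11, so the basic block is considered profitable only when the computed scalar
cost is 12 or more. */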
2149
2150 /* Check if the basic block can be vectorized. */
2151
2152 static bb_vec_info
2153 vect_slp_analyze_bb_1 (basic_block bb)
2154 {
2155 bb_vec_info bb_vinfo;
2156 vec<slp_instance> slp_instances;
2157 slp_instance instance;
2158 int i;
2159 int min_vf = 2;
2160 unsigned n_stmts = 0;
2161
2162 bb_vinfo = new_bb_vec_info (bb);
2163 if (!bb_vinfo)
2164 return NULL;
2165
2166 if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
2167 {
2168 if (dump_enabled_p ())
2169 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2170 "not vectorized: unhandled data-ref in basic "
2171 "block.\n");
2172
2173 destroy_bb_vec_info (bb_vinfo);
2174 return NULL;
2175 }
2176
2177 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2178 {
2179 if (dump_enabled_p ())
2180 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2181 "not vectorized: not enough data-refs in "
2182 "basic block.\n");
2183
2184 destroy_bb_vec_info (bb_vinfo);
2185 return NULL;
2186 }
2187
2188 if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2189 {
2190 if (dump_enabled_p ())
2191 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2192 "not vectorized: unhandled data access in "
2193 "basic block.\n");
2194
2195 destroy_bb_vec_info (bb_vinfo);
2196 return NULL;
2197 }
2198
2199 vect_pattern_recog (NULL, bb_vinfo);
2200
2201 if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2202 {
2203 if (dump_enabled_p ())
2204 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2205 "not vectorized: bad data alignment in basic "
2206 "block.\n");
2207
2208 destroy_bb_vec_info (bb_vinfo);
2209 return NULL;
2210 }
2211
2212 /* Check the SLP opportunities in the basic block, analyze and build SLP
2213 trees. */
2214 if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
2215 {
2216 if (dump_enabled_p ())
2217 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2218 "not vectorized: failed to find SLP opportunities "
2219 "in basic block.\n");
2220
2221 destroy_bb_vec_info (bb_vinfo);
2222 return NULL;
2223 }
2224
2225 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2226
2227 /* Mark all the statements that we want to vectorize as pure SLP and
2228 relevant. */
2229 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2230 {
2231 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2232 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2233 }
2234
2235 /* Mark all the statements that we do not want to vectorize. */
2236 for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
2237 !gsi_end_p (gsi); gsi_next (&gsi))
2238 {
2239 stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
2240 if (STMT_SLP_TYPE (vinfo) != pure_slp)
2241 STMT_VINFO_VECTORIZABLE (vinfo) = false;
2242 }
2243
2244 /* Analyze dependences. At this point all stmts not participating in
2245 vectorization have to be marked. Dependence analysis assumes
2246 that we either vectorize all SLP instances or none at all. */
2247 if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2248 {
2249 if (dump_enabled_p ())
2250 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2251 "not vectorized: unhandled data dependence "
2252 "in basic block.\n");
2253
2254 destroy_bb_vec_info (bb_vinfo);
2255 return NULL;
2256 }
2257
2258 if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2259 {
2260 if (dump_enabled_p ())
2261 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2262 "not vectorized: unsupported alignment in basic "
2263 "block.\n");
2264 destroy_bb_vec_info (bb_vinfo);
2265 return NULL;
2266 }
2267
2268 if (!vect_slp_analyze_operations (bb_vinfo))
2269 {
2270 if (dump_enabled_p ())
2271 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2272 "not vectorized: bad operation in basic block.\n");
2273
2274 destroy_bb_vec_info (bb_vinfo);
2275 return NULL;
2276 }
2277
2278 /* Cost model: check if the vectorization is worthwhile. */
2279 if (!unlimited_cost_model (NULL)
2280 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2281 {
2282 if (dump_enabled_p ())
2283 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2284 "not vectorized: vectorization is not "
2285 "profitable.\n");
2286
2287 destroy_bb_vec_info (bb_vinfo);
2288 return NULL;
2289 }
2290
2291 if (dump_enabled_p ())
2292 dump_printf_loc (MSG_NOTE, vect_location,
2293 "Basic block will be vectorized using SLP\n");
2294
2295 return bb_vinfo;
2296 }
2297
2298
2299 bb_vec_info
2300 vect_slp_analyze_bb (basic_block bb)
2301 {
2302 bb_vec_info bb_vinfo;
2303 int insns = 0;
2304 gimple_stmt_iterator gsi;
2305 unsigned int vector_sizes;
2306
2307 if (dump_enabled_p ())
2308 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2309
2310 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2311 {
2312 gimple stmt = gsi_stmt (gsi);
2313 if (!is_gimple_debug (stmt)
2314 && !gimple_nop_p (stmt)
2315 && gimple_code (stmt) != GIMPLE_LABEL)
2316 insns++;
2317 }
2318
2319 if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2320 {
2321 if (dump_enabled_p ())
2322 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2323 "not vectorized: too many instructions in "
2324 "basic block.\n");
2325
2326 return NULL;
2327 }
2328
2329 /* Autodetect the first vector size we try. */
2330 current_vector_size = 0;
2331 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2332
2333 while (1)
2334 {
2335 bb_vinfo = vect_slp_analyze_bb_1 (bb);
2336 if (bb_vinfo)
2337 return bb_vinfo;
2338
2339 destroy_bb_vec_info (bb_vinfo);
2340
2341 vector_sizes &= ~current_vector_size;
2342 if (vector_sizes == 0
2343 || current_vector_size == 0)
2344 return NULL;
2345
2346 /* Retry with the largest remaining vector size. */
2347 current_vector_size = 1 << floor_log2 (vector_sizes);
2348 if (dump_enabled_p ())
2349 dump_printf_loc (MSG_NOTE, vect_location,
2350 "***** Re-trying analysis with "
2351 "vector size %d\n", current_vector_size);
2352 }
2353 }
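
/* A sketch of the retry loop above for a hypothetical target whose
autovectorize_vector_sizes () returns 0x18 (16-byte and 8-byte vectors): if
analysis fails with the autodetected 16-byte size, then

vector_sizes &= ~current_vector_size; (0x18 & ~0x10 == 0x08)
current_vector_size = 1 << floor_log2 (vector_sizes); (== 8)

and vect_slp_analyze_bb_1 runs again with 8-byte vectors before giving up. */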
2354
2355
2356 /* SLP costs are calculated according to the SLP instance unrolling factor
2357 (i.e., the number of created vector stmts depends on the unrolling factor).
2358 However, the actual number of vector stmts for every SLP node depends on VF,
2359 which is set later in vect_analyze_loop_operations (). Hence, SLP costs
2360 should be updated. In this function we assume that the inside costs
2361 calculated in vect_model_xxx_cost are linear in ncopies. */
2362
2363 void
2364 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
2365 {
2366 unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2367 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2368 slp_instance instance;
2369 stmt_vector_for_cost body_cost_vec;
2370 stmt_info_for_cost *si;
2371 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2372
2373 if (dump_enabled_p ())
2374 dump_printf_loc (MSG_NOTE, vect_location,
2375 "=== vect_update_slp_costs_according_to_vf ===\n");
2376
2377 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2378 {
2379 /* We assume that costs are linear in ncopies. */
2380 int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
2381
2382 /* Record the instance's instructions in the target cost model.
2383 This was delayed until here because the count of instructions
2384 isn't known beforehand. */
2385 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2386
2387 FOR_EACH_VEC_ELT (body_cost_vec, j, si)
2388 (void) add_stmt_cost (data, si->count * ncopies, si->kind,
2389 vinfo_for_stmt (si->stmt), si->misalign,
2390 vect_body);
2391 }
2392 }
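
/* For example, if the final VF is 8 and an instance was analyzed with an
unrolling factor of 4, then ncopies == 8 / 4 == 2 above, and every recorded
body cost entry is charged twice (si->count * 2). */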
2393
2394
2395 /* For constant and loop-invariant defs of SLP_NODE this function returns
2396 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2397 OP_NUM determines whether we gather defs for operand 0 or operand 1 of the
2398 RHS of the scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to
2399 create. REDUC_INDEX is the index of the reduction operand in the statements,
2400 or -1 if there is no reduction. */
2401
2402 static void
2403 vect_get_constant_vectors (tree op, slp_tree slp_node,
2404 vec<tree> *vec_oprnds,
2405 unsigned int op_num, unsigned int number_of_vectors,
2406 int reduc_index)
2407 {
2408 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2409 gimple stmt = stmts[0];
2410 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2411 unsigned nunits;
2412 tree vec_cst;
2413 tree *elts;
2414 unsigned j, number_of_places_left_in_vector;
2415 tree vector_type;
2416 tree vop;
2417 int group_size = stmts.length ();
2418 unsigned int vec_num, i;
2419 unsigned number_of_copies = 1;
2420 vec<tree> voprnds;
2421 voprnds.create (number_of_vectors);
2422 bool constant_p, is_store;
2423 tree neutral_op = NULL;
2424 enum tree_code code = gimple_expr_code (stmt);
2425 gimple def_stmt;
2426 struct loop *loop;
2427 gimple_seq ctor_seq = NULL;
2428
2429 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2430 && reduc_index != -1)
2431 {
2432 op_num = reduc_index - 1;
2433 op = gimple_op (stmt, reduc_index);
2434 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2435 we need either neutral operands or the original operands. See
2436 get_initial_def_for_reduction() for details. */
2437 switch (code)
2438 {
2439 case WIDEN_SUM_EXPR:
2440 case DOT_PROD_EXPR:
2441 case PLUS_EXPR:
2442 case MINUS_EXPR:
2443 case BIT_IOR_EXPR:
2444 case BIT_XOR_EXPR:
2445 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2446 neutral_op = build_real (TREE_TYPE (op), dconst0);
2447 else
2448 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2449
2450 break;
2451
2452 case MULT_EXPR:
2453 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2454 neutral_op = build_real (TREE_TYPE (op), dconst1);
2455 else
2456 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2457
2458 break;
2459
2460 case BIT_AND_EXPR:
2461 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2462 break;
2463
2464 /* For MIN/MAX we don't have an easy neutral operand but
2465 the initial values can be used fine here. Only for
2466 a reduction chain do we have to force a neutral element. */
2467 case MAX_EXPR:
2468 case MIN_EXPR:
2469 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2470 neutral_op = NULL;
2471 else
2472 {
2473 def_stmt = SSA_NAME_DEF_STMT (op);
2474 loop = (gimple_bb (stmt))->loop_father;
2475 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2476 loop_preheader_edge (loop));
2477 }
2478 break;
2479
2480 default:
2481 neutral_op = NULL;
2482 }
2483 }
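/* E.g. for a PLUS_EXPR reduction on doubles the neutral element is 0.0, for
MULT_EXPR it is 1.0 and for BIT_AND_EXPR the all-ones value -1: padding the
extra vector slots with the neutral element leaves the final reduction result
unchanged. */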
2484
2485 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2486 {
2487 is_store = true;
2488 op = gimple_assign_rhs1 (stmt);
2489 }
2490 else
2491 is_store = false;
2492
2493 gcc_assert (op);
2494
2495 if (CONSTANT_CLASS_P (op))
2496 constant_p = true;
2497 else
2498 constant_p = false;
2499
2500 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2501 gcc_assert (vector_type);
2502 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2503
2504 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2505 created vectors. It is greater than 1 if unrolling is performed.
2506
2507 For example, we have two scalar operands, s1 and s2 (e.g., group of
2508 strided accesses of size two), while NUNITS is four (i.e., four scalars
2509 of this type can be packed in a vector). The output vector will contain
2510 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2511 will be 2).
2512
2513 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2514 containing the operands.
2515
2516 For example, NUNITS is four as before, and the group size is 8
2517 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2518 {s5, s6, s7, s8}. */
2519
2520 number_of_copies = least_common_multiple (nunits, group_size) / group_size;
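/* E.g. with NUNITS == 4: GROUP_SIZE == 2 gives
least_common_multiple (4, 2) / 2 == 2 copies (the {s1, s2, s1, s2} case
above), while GROUP_SIZE == 8 gives 8 / 8 == 1 copy spread over two
vectors. */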
2521
2522 number_of_places_left_in_vector = nunits;
2523 elts = XALLOCAVEC (tree, nunits);
2524 for (j = 0; j < number_of_copies; j++)
2525 {
2526 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2527 {
2528 if (is_store)
2529 op = gimple_assign_rhs1 (stmt);
2530 else
2531 {
2532 switch (code)
2533 {
2534 case COND_EXPR:
2535 if (op_num == 0 || op_num == 1)
2536 {
2537 tree cond = gimple_assign_rhs1 (stmt);
2538 op = TREE_OPERAND (cond, op_num);
2539 }
2540 else
2541 {
2542 if (op_num == 2)
2543 op = gimple_assign_rhs2 (stmt);
2544 else
2545 op = gimple_assign_rhs3 (stmt);
2546 }
2547 break;
2548
2549 case CALL_EXPR:
2550 op = gimple_call_arg (stmt, op_num);
2551 break;
2552
2553 case LSHIFT_EXPR:
2554 case RSHIFT_EXPR:
2555 case LROTATE_EXPR:
2556 case RROTATE_EXPR:
2557 op = gimple_op (stmt, op_num + 1);
2558 /* Unlike the other binary operators, shifts/rotates take
2559 an int shift count instead of an operand of the same
2560 type as the lhs, so make sure the scalar is the right
2561 type if we are dealing with vectors of
2562 long long/long/short/char. */
2563 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2564 op = fold_convert (TREE_TYPE (vector_type), op);
2565 break;
2566
2567 default:
2568 op = gimple_op (stmt, op_num + 1);
2569 break;
2570 }
2571 }
2572
2573 if (reduc_index != -1)
2574 {
2575 loop = (gimple_bb (stmt))->loop_father;
2576 def_stmt = SSA_NAME_DEF_STMT (op);
2577
2578 gcc_assert (loop);
2579
2580 /* Get the def before the loop. In a reduction chain we have
2581 only one initial value. */
2582 if ((j != (number_of_copies - 1)
2583 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2584 && i != 0))
2585 && neutral_op)
2586 op = neutral_op;
2587 else
2588 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2589 loop_preheader_edge (loop));
2590 }
2591
2592 /* Create 'vect_ = {op0,op1,...,opn}'. */
2593 number_of_places_left_in_vector--;
2594 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2595 {
2596 if (CONSTANT_CLASS_P (op))
2597 {
2598 op = fold_unary (VIEW_CONVERT_EXPR,
2599 TREE_TYPE (vector_type), op);
2600 gcc_assert (op && CONSTANT_CLASS_P (op));
2601 }
2602 else
2603 {
2604 tree new_temp
2605 = make_ssa_name (TREE_TYPE (vector_type), NULL);
2606 gimple init_stmt;
2607 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type),
2608 op);
2609 init_stmt
2610 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR,
2611 new_temp, op, NULL_TREE);
2612 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2613 op = new_temp;
2614 }
2615 }
2616 elts[number_of_places_left_in_vector] = op;
2617 if (!CONSTANT_CLASS_P (op))
2618 constant_p = false;
2619
2620 if (number_of_places_left_in_vector == 0)
2621 {
2622 number_of_places_left_in_vector = nunits;
2623
2624 if (constant_p)
2625 vec_cst = build_vector (vector_type, elts);
2626 else
2627 {
2628 vec<constructor_elt, va_gc> *v;
2629 unsigned k;
2630 vec_alloc (v, nunits);
2631 for (k = 0; k < nunits; ++k)
2632 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2633 vec_cst = build_constructor (vector_type, v);
2634 }
2635 voprnds.quick_push (vect_init_vector (stmt, vec_cst,
2636 vector_type, NULL));
2637 if (ctor_seq != NULL)
2638 {
2639 gimple init_stmt = SSA_NAME_DEF_STMT (voprnds.last ());
2640 gimple_stmt_iterator gsi = gsi_for_stmt (init_stmt);
2641 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2642 GSI_SAME_STMT);
2643 ctor_seq = NULL;
2644 }
2645 }
2646 }
2647 }
2648
2649 /* Since the vectors were created in reverse order, invert them here
2650 so that VEC_OPRNDS receives them in the original order. */
2651 vec_num = voprnds.length ();
2652 for (j = vec_num; j != 0; j--)
2653 {
2654 vop = voprnds[j - 1];
2655 vec_oprnds->quick_push (vop);
2656 }
2657
2658 voprnds.release ();
2659
2660 /* If VF is greater than the unrolling factor needed for the SLP
2661 group of stmts, the NUMBER_OF_VECTORS to be created is greater than
2662 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
2663 to replicate the vectors. */
2664 while (number_of_vectors > vec_oprnds->length ())
2665 {
2666 tree neutral_vec = NULL;
2667
2668 if (neutral_op)
2669 {
2670 if (!neutral_vec)
2671 neutral_vec = build_vector_from_val (vector_type, neutral_op);
2672
2673 vec_oprnds->quick_push (neutral_vec);
2674 }
2675 else
2676 {
2677 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2678 vec_oprnds->quick_push (vop);
2679 }
2680 }
2681 }
2682
2683
2684 /* Get vectorized definitions from SLP_NODE that contains corresponding
2685 vectorized def-stmts. */
2686
2687 static void
2688 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2689 {
2690 tree vec_oprnd;
2691 gimple vec_def_stmt;
2692 unsigned int i;
2693
2694 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2695
2696 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2697 {
2698 gcc_assert (vec_def_stmt);
2699 vec_oprnd = gimple_get_lhs (vec_def_stmt);
2700 vec_oprnds->quick_push (vec_oprnd);
2701 }
2702 }
2703
2704
2705 /* Get vectorized definitions for SLP_NODE.
2706 If the scalar definitions are loop invariants or constants, collect them and
2707 call vect_get_constant_vectors() to create vector stmts.
2708 Otherwise, the def-stmts must be already vectorized and the vectorized stmts
2709 must be stored in the corresponding child of SLP_NODE, and we call
2710 vect_get_slp_vect_defs () to retrieve them. */
2711
2712 void
2713 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2714 vec<vec<tree> > *vec_oprnds, int reduc_index)
2715 {
2716 gimple first_stmt;
2717 int number_of_vects = 0, i;
2718 unsigned int child_index = 0;
2719 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2720 slp_tree child = NULL;
2721 vec<tree> vec_defs;
2722 tree oprnd;
2723 bool vectorized_defs;
2724
2725 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
2726 FOR_EACH_VEC_ELT (ops, i, oprnd)
2727 {
2728 /* For each operand we check if it has vectorized definitions in a child
2729 node or we need to create them (for invariants and constants). We
2730 check if the LHS of the first stmt of the next child matches OPRND.
2731 If it does, we found the correct child. Otherwise, we call
2732 vect_get_constant_vectors () and do not advance CHILD_INDEX, so that
2733 this child node is checked again for the next operand. */
2734 vectorized_defs = false;
2735 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
2736 {
2737 child = SLP_TREE_CHILDREN (slp_node)[child_index];
2738
2739 /* We have to check both pattern and original def, if available. */
2740 gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
2741 gimple related = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
2742
2743 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
2744 || (related
2745 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
2746 {
2747 /* The number of vector defs is determined by the number of
2748 vector statements in the node from which we get those
2749 statements. */
2750 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
2751 vectorized_defs = true;
2752 child_index++;
2753 }
2754 }
2755
2756 if (!vectorized_defs)
2757 {
2758 if (i == 0)
2759 {
2760 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
2761 /* Number of vector stmts was calculated according to LHS in
2762 vect_schedule_slp_instance (), fix it by replacing LHS with
2763 RHS, if necessary. See vect_get_smallest_scalar_type () for
2764 details. */
2765 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
2766 &rhs_size_unit);
2767 if (rhs_size_unit != lhs_size_unit)
2768 {
2769 number_of_vects *= rhs_size_unit;
2770 number_of_vects /= lhs_size_unit;
2771 }
2772 }
2773 }
2774
2775 /* Allocate memory for vectorized defs. */
2776 vec_defs = vNULL;
2777 vec_defs.create (number_of_vects);
2778
2779 /* For reduction defs we call vect_get_constant_vectors (), since we are
2780 looking for initial loop invariant values. */
2781 if (vectorized_defs && reduc_index == -1)
2782 /* The defs are already vectorized. */
2783 vect_get_slp_vect_defs (child, &vec_defs);
2784 else
2785 /* Build vectors from scalar defs. */
2786 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
2787 number_of_vects, reduc_index);
2788
2789 vec_oprnds->quick_push (vec_defs);
2790
2791 /* For reductions, we only need initial values. */
2792 if (reduc_index != -1)
2793 return;
2794 }
2795 }
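
/* For a node whose first scalar stmt is, say, "a_1 = b_2 + 1": operand b_2
matches the LHS of the first stmt of a child node, so its defs are fetched
from that child via vect_get_slp_vect_defs, whereas the invariant operand 1
has no child and its vector defs are built by vect_get_constant_vectors. */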
2796
2797
2798 /* Create NCOPIES permutation statements using the vector mask MASK and two
2799 input vectors placed in DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for
2800 the first copy, shifting by STRIDE elements of DR_CHAIN for every further
2801 copy.
2802 (STRIDE is the number of vectorized stmts for NODE divided by the number of
2803 copies.)
2804 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE where
2805 the created stmts must be inserted. */
2806
2807 static inline void
2808 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
2809 tree mask, int first_vec_indx, int second_vec_indx,
2810 gimple_stmt_iterator *gsi, slp_tree node,
2811 tree vectype, vec<tree> dr_chain,
2812 int ncopies, int vect_stmts_counter)
2813 {
2814 tree perm_dest;
2815 gimple perm_stmt = NULL;
2816 stmt_vec_info next_stmt_info;
2817 int i, stride;
2818 tree first_vec, second_vec, data_ref;
2819
2820 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
2821
2822 /* Initialize the vect stmts of NODE to properly insert the generated
2823 stmts later. */
2824 for (i = SLP_TREE_VEC_STMTS (node).length ();
2825 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
2826 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
2827
2828 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
2829 for (i = 0; i < ncopies; i++)
2830 {
2831 first_vec = dr_chain[first_vec_indx];
2832 second_vec = dr_chain[second_vec_indx];
2833
2834 /* Generate the permute statement. */
2835 perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, perm_dest,
2836 first_vec, second_vec, mask);
2837 data_ref = make_ssa_name (perm_dest, perm_stmt);
2838 gimple_set_lhs (perm_stmt, data_ref);
2839 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
2840
2841 /* Store the vector statement in NODE. */
2842 SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
2843
2844 first_vec_indx += stride;
2845 second_vec_indx += stride;
2846 }
2847
2848 /* Mark the scalar stmt as vectorized. */
2849 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
2850 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
2851 }
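
/* E.g. with NCOPIES == 2 and four vector stmts for NODE, STRIDE is 2: copy 0
permutes dr_chain[first_vec_indx] and dr_chain[second_vec_indx] into slot
VECT_STMTS_COUNTER, and copy 1 permutes the two vectors STRIDE further down
DR_CHAIN into slot 2 + VECT_STMTS_COUNTER. */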
2852
2853
2854 /* Given FIRST_MASK_ELEMENT - the mask element in element representation,
2855 return in CURRENT_MASK_ELEMENT its equivalent in target-specific
2856 representation. Check that the mask is valid and return FALSE if not.
2857 Return TRUE in NEED_NEXT_VECTOR if the permutation requires moving to
2858 the next vector, i.e., the current first vector is not needed. */
2859
2860 static bool
2861 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
2862 int mask_nunits, bool only_one_vec, int index,
2863 unsigned char *mask, int *current_mask_element,
2864 bool *need_next_vector, int *number_of_mask_fixes,
2865 bool *mask_fixed, bool *needs_first_vector)
2866 {
2867 int i;
2868
2869 /* Convert to target specific representation. */
2870 *current_mask_element = first_mask_element + m;
2871 /* Adjust the value in case it's a mask for second and third vectors. */
2872 *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
2873
2874 if (*current_mask_element < mask_nunits)
2875 *needs_first_vector = true;
2876
2877 /* We have only one input vector to permute but the mask accesses values in
2878 the next vector as well. */
2879 if (only_one_vec && *current_mask_element >= mask_nunits)
2880 {
2881 if (dump_enabled_p ())
2882 {
2883 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2884 "permutation requires at least two vectors ");
2885 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2886 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2887 }
2888
2889 return false;
2890 }
2891
2892 /* The mask requires the next vector. */
2893 if (*current_mask_element >= mask_nunits * 2)
2894 {
2895 if (*needs_first_vector || *mask_fixed)
2896 {
2897 /* We either need the first vector too or have already moved to the
2898 next vector. In both cases, this permutation needs three
2899 vectors. */
2900 if (dump_enabled_p ())
2901 {
2902 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2903 "permutation requires at "
2904 "least three vectors ");
2905 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2906 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2907 }
2908
2909 return false;
2910 }
2911
2912 /* We move to the next vector, dropping the first one and working with
2913 the second and the third - we need to adjust the values of the mask
2914 accordingly. */
2915 *current_mask_element -= mask_nunits * *number_of_mask_fixes;
2916
2917 for (i = 0; i < index; i++)
2918 mask[i] -= mask_nunits * *number_of_mask_fixes;
2919
2920 (*number_of_mask_fixes)++;
2921 *mask_fixed = true;
2922 }
2923
2924 *need_next_vector = *mask_fixed;
2925
2926 /* This was the last element of this mask. Start a new one. */
2927 if (index == mask_nunits - 1)
2928 {
2929 *number_of_mask_fixes = 1;
2930 *mask_fixed = false;
2931 *needs_first_vector = false;
2932 }
2933
2934 return true;
2935 }
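
/* For example, with MASK_NUNITS == 4 a FIRST_MASK_ELEMENT of 6 selects
element 2 of the second input vector: in the two-operand permutation elements
0-3 come from the first vector and elements 4-7 from the second. */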
2936
2937
2938 /* Generate vector permute statements from a list of loads in DR_CHAIN.
2939 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
2940 permute statements for the SLP node NODE of the SLP instance
2941 SLP_NODE_INSTANCE. */
2942
2943 bool
2944 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
2945 gimple_stmt_iterator *gsi, int vf,
2946 slp_instance slp_node_instance, bool analyze_only)
2947 {
2948 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
2949 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2950 tree mask_element_type = NULL_TREE, mask_type;
2951 int i, j, k, nunits, vec_index = 0, scalar_index;
2952 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2953 gimple next_scalar_stmt;
2954 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
2955 int first_mask_element;
2956 int index, unroll_factor, current_mask_element, ncopies;
2957 unsigned char *mask;
2958 bool only_one_vec = false, need_next_vector = false;
2959 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
2960 int number_of_mask_fixes = 1;
2961 bool mask_fixed = false;
2962 bool needs_first_vector = false;
2963 enum machine_mode mode;
2964
2965 mode = TYPE_MODE (vectype);
2966
2967 if (!can_vec_perm_p (mode, false, NULL))
2968 {
2969 if (dump_enabled_p ())
2970 {
2971 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2972 "no vect permute for ");
2973 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2974 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2975 }
2976 return false;
2977 }
2978
2979 /* The generic VEC_PERM_EXPR code always uses an integral type of the
2980 same size as the vector element being permuted. */
2981 mask_element_type = lang_hooks.types.type_for_mode
2982 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
2983 mask_type = get_vectype_for_scalar_type (mask_element_type);
2984 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2985 mask = XALLOCAVEC (unsigned char, nunits);
2986 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
2987
2988 /* The number of vector stmts to generate based only on SLP_NODE_INSTANCE
2989 unrolling factor. */
2990 orig_vec_stmts_num = group_size *
2991 SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
2992 if (orig_vec_stmts_num == 1)
2993 only_one_vec = true;
2994
2995 /* The number of copies is determined by the final vectorization factor
2996 relative to the SLP_NODE_INSTANCE unrolling factor. */
2997 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
2998
2999 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3000 return false;
3001
3002 /* Generate permutation masks for every NODE. Number of masks for each NODE
3003 is equal to GROUP_SIZE.
3004 E.g., we have a group of three nodes with three loads from the same
3005 location in each node, and the vector size is 4. I.e., we have an
3006 a0b0c0a1b1c1... sequence and we need to create the following vectors:
3007 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3008 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3009 ...
3010
3011 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3012 The last mask is illegal since we assume two operands for the permute
3013 operation, and the mask element values can't be outside that range.
3014 Hence, the last mask must be converted into {2,5,5,5}.
3015 For the first two permutations we need the first and the second input
3016 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3017 we need the second and the third vectors: {b1,c1,a2,b2} and
3018 {c2,a3,b3,c3}. */
3019
3020 {
3021 scalar_index = 0;
3022 index = 0;
3023 vect_stmts_counter = 0;
3024 vec_index = 0;
3025 first_vec_index = vec_index++;
3026 if (only_one_vec)
3027 second_vec_index = first_vec_index;
3028 else
3029 second_vec_index = vec_index++;
3030
3031 for (j = 0; j < unroll_factor; j++)
3032 {
3033 for (k = 0; k < group_size; k++)
3034 {
3035 i = SLP_TREE_LOAD_PERMUTATION (node)[k];
3036 first_mask_element = i + j * group_size;
3037 if (!vect_get_mask_element (stmt, first_mask_element, 0,
3038 nunits, only_one_vec, index,
3039 mask, &current_mask_element,
3040 &need_next_vector,
3041 &number_of_mask_fixes, &mask_fixed,
3042 &needs_first_vector))
3043 return false;
3044 mask[index++] = current_mask_element;
3045
3046 if (index == nunits)
3047 {
3048 index = 0;
3049 if (!can_vec_perm_p (mode, false, mask))
3050 {
3051 if (dump_enabled_p ())
3052 {
3053 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3054 vect_location,
3055 "unsupported vect permute { ");
3056 for (i = 0; i < nunits; ++i)
3057 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
3058 mask[i]);
3059 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3060 }
3061 return false;
3062 }
3063
3064 if (!analyze_only)
3065 {
3066 int l;
3067 tree mask_vec, *mask_elts;
3068 mask_elts = XALLOCAVEC (tree, nunits);
3069 for (l = 0; l < nunits; ++l)
3070 mask_elts[l] = build_int_cst (mask_element_type,
3071 mask[l]);
3072 mask_vec = build_vector (mask_type, mask_elts);
3073
3074 if (need_next_vector)
3075 {
3076 first_vec_index = second_vec_index;
3077 second_vec_index = vec_index;
3078 }
3079
3080 next_scalar_stmt
3081 = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
3082
3083 vect_create_mask_and_perm (stmt, next_scalar_stmt,
3084 mask_vec, first_vec_index, second_vec_index,
3085 gsi, node, vectype, dr_chain,
3086 ncopies, vect_stmts_counter++);
3087 }
3088 }
3089 }
3090 }
3091 }
3092
3093 return true;
3094 }
3095
3096
3097
3098 /* Vectorize SLP instance tree in postorder; return TRUE for a store. */
3099
3100 static bool
3101 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3102 unsigned int vectorization_factor)
3103 {
3104 gimple stmt;
3105 bool grouped_store, is_store;
3106 gimple_stmt_iterator si;
3107 stmt_vec_info stmt_info;
3108 unsigned int vec_stmts_size, nunits, group_size;
3109 tree vectype;
3110 int i;
3111 slp_tree child;
3112
3113 if (!node)
3114 return false;
3115
3116 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3117 vect_schedule_slp_instance (child, instance, vectorization_factor);
3118
3119 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3120 stmt_info = vinfo_for_stmt (stmt);
3121
3122 /* VECTYPE is the type of the destination. */
3123 vectype = STMT_VINFO_VECTYPE (stmt_info);
3124 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3125 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3126
3127 /* For each SLP instance calculate the number of vector stmts to be created
3128 for the scalar stmts in each node of the SLP tree. The number of vector
3129 elements in one vector iteration is the number of scalar elements in
3130 one scalar iteration (GROUP_SIZE) multiplied by VF and divided by the
3131 vector size. */
3132 vec_stmts_size = (vectorization_factor * group_size) / nunits;
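/* E.g. VF == 4, GROUP_SIZE == 2 and 4-element vectors give
(4 * 2) / 4 == 2 vector stmts per node. */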
3133
3134 if (!SLP_TREE_VEC_STMTS (node).exists ())
3135 {
3136 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3137 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3138 }
3139
3140 if (dump_enabled_p ())
3141 {
3142 dump_printf_loc (MSG_NOTE, vect_location,
3143 "------>vectorizing SLP node starting from: ");
3144 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3145 dump_printf (MSG_NOTE, "\n");
3146 }
3147
3148 /* Vectorized loads should be inserted before the first scalar load. */
3149 if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
3150 && STMT_VINFO_GROUPED_ACCESS (stmt_info)
3151 && !REFERENCE_CLASS_P (gimple_get_lhs (stmt))
3152 && SLP_TREE_LOAD_PERMUTATION (node).exists ())
3153 si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
3154 else if (is_pattern_stmt_p (stmt_info))
3155 si = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
3156 else
3157 si = gsi_for_stmt (stmt);
3158
3159 /* Vectorized stores should be inserted just before the last scalar store. */
3160 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
3161 && REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
3162 {
3163 gimple last_store = vect_find_last_store_in_slp_instance (instance);
3164 if (is_pattern_stmt_p (vinfo_for_stmt (last_store)))
3165 last_store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (last_store));
3166 si = gsi_for_stmt (last_store);
3167 }
3168
3169 /* Mark the first element of the reduction chain as a reduction to properly
3170 transform the node. In the analysis phase only the last element of the
3171 chain is marked as a reduction. */
3172 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3173 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3174 {
3175 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3176 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3177 }
3178
3179 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3180 return is_store;
3181 }
3182
3183 /* Replace scalar calls from SLP node NODE with setting of their lhs to zero.
3184 For loop vectorization this is done in vectorizable_call, but for SLP
3185 it needs to be deferred until the end of vect_schedule_slp, because
3186 multiple SLP instances may refer to the same scalar stmt. */
3187
3188 static void
3189 vect_remove_slp_scalar_calls (slp_tree node)
3190 {
3191 gimple stmt, new_stmt;
3192 gimple_stmt_iterator gsi;
3193 int i;
3194 slp_tree child;
3195 tree lhs;
3196 stmt_vec_info stmt_info;
3197
3198 if (!node)
3199 return;
3200
3201 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3202 vect_remove_slp_scalar_calls (child);
3203
3204 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3205 {
3206 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3207 continue;
3208 stmt_info = vinfo_for_stmt (stmt);
3209 if (stmt_info == NULL
3210 || is_pattern_stmt_p (stmt_info)
3211 || !PURE_SLP_STMT (stmt_info))
3212 continue;
3213 lhs = gimple_call_lhs (stmt);
3214 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3215 set_vinfo_for_stmt (new_stmt, stmt_info);
3216 set_vinfo_for_stmt (stmt, NULL);
3217 STMT_VINFO_STMT (stmt_info) = new_stmt;
3218 gsi = gsi_for_stmt (stmt);
3219 gsi_replace (&gsi, new_stmt, false);
3220 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3221 }
3222 }
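
/* E.g. a pure-SLP scalar call (hypothetical)

x_1 = foo (a_2);

is rewritten above into an assignment of zero to x_1; once no scalar uses
remain, DCE can remove the zeroing assignment as well. */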
3223
3224 /* Generate vector code for all SLP instances in the loop/basic block. */
3225
3226 bool
3227 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3228 {
3229 vec<slp_instance> slp_instances;
3230 slp_instance instance;
3231 unsigned int i, vf;
3232 bool is_store = false;
3233
3234 if (loop_vinfo)
3235 {
3236 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3237 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3238 }
3239 else
3240 {
3241 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3242 vf = 1;
3243 }
3244
3245 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3246 {
3247 /* Schedule the tree of INSTANCE. */
3248 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3249 instance, vf);
3250 if (dump_enabled_p ())
3251 dump_printf_loc (MSG_NOTE, vect_location,
3252 "vectorizing stmts using SLP.\n");
3253 }
3254
3255 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3256 {
3257 slp_tree root = SLP_INSTANCE_TREE (instance);
3258 gimple store;
3259 unsigned int j;
3260 gimple_stmt_iterator gsi;
3261
3262 /* Remove scalar call stmts. Do not do this for basic-block
3263 vectorization as not all uses may be vectorized.
3264 ??? Why should this be necessary? DCE should be able to
3265 remove the stmts itself.
3266 ??? For BB vectorization we can as well remove scalar
3267 stmts starting from the SLP tree root if they have no
3268 uses. */
3269 if (loop_vinfo)
3270 vect_remove_slp_scalar_calls (root);
3271
3272 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3273 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3274 {
3275 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3276 break;
3277
3278 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3279 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3280 /* Free the attached stmt_vec_info and remove the stmt. */
3281 gsi = gsi_for_stmt (store);
3282 unlink_stmt_vdef (store);
3283 gsi_remove (&gsi, true);
3284 release_defs (store);
3285 free_stmt_vec_info (store);
3286 }
3287 }
3288
3289 return is_store;
3290 }
3291
3292
3293 /* Vectorize the basic block. */
3294
3295 void
3296 vect_slp_transform_bb (basic_block bb)
3297 {
3298 bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3299 gimple_stmt_iterator si;
3300
3301 gcc_assert (bb_vinfo);
3302
3303 if (dump_enabled_p ())
3304 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3305
3306 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3307 {
3308 gimple stmt = gsi_stmt (si);
3309 stmt_vec_info stmt_info;
3310
3311 if (dump_enabled_p ())
3312 {
3313 dump_printf_loc (MSG_NOTE, vect_location,
3314 "------>SLPing statement: ");
3315 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3316 dump_printf (MSG_NOTE, "\n");
3317 }
3318
3319 stmt_info = vinfo_for_stmt (stmt);
3320 gcc_assert (stmt_info);
3321
3322 /* Schedule all the SLP instances when the first SLP stmt is reached. */
3323 if (STMT_SLP_TYPE (stmt_info))
3324 {
3325 vect_schedule_slp (NULL, bb_vinfo);
3326 break;
3327 }
3328 }
3329
3330 if (dump_enabled_p ())
3331 dump_printf_loc (MSG_NOTE, vect_location,
3332 "BASIC BLOCK VECTORIZED\n");
3333
3334 destroy_bb_vec_info (bb_vinfo);
3335 }