/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "target.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "recog.h"  /* FIXME: for insn_data */
#include "insn-codes.h"
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"

/* Extract the location of the basic block in the source code.
   Return the basic block location if it succeeds and UNKNOWN_LOCATION
   if not.  */

source_location
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOCATION;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOCATION)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOCATION;
}


/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}


/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}


/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
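      /* A COND_EXPR's embedded comparison contributes two operands of
         its own, so count one extra operand for it.  */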
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;

  return node;
}


/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}


/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}


/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      result++;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}


/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  Return -1 on a fatal error, 1 if
   the error could be corrected by swapping operands of the operation, and 0
   if everything is OK.  */

static int
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             gimple stmt, bool first,
                             vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
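      /* The first call argument is gimple operand 3; operands 0-2 hold
         the LHS, the called function and the static chain.  */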
      first_op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        {
          first_op_cond = true;
          commutative = true;
          number_of_oprnds++;
        }
      else
        commutative = commutative_tree_code (code);
    }
  else
    return -1;

  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
    {
again:
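      /* On a retry with swapped operands, read operands 0 and 1 in
         exchanged order so the mismatch may be resolved.  */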
      if (first_op_cond)
        {
          if (i == 0 || i == 1)
            oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
                                  swapped ? !i : i);
          else
            oprnd = gimple_op (stmt, first_op_idx + i - 1);
        }
      else
        oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt)
          || (!def_stmt && dt != vect_constant_def))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't find def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
              || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
                  && gimple_code (def_stmt) != GIMPLE_PHI))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern)
            {
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              return 1;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.\n");
              return -1;
            }

          switch (gimple_code (def_stmt))
            {
            case GIMPLE_PHI:
              def = gimple_phi_result (def_stmt);
              break;

            case GIMPLE_ASSIGN:
              def = gimple_assign_lhs (def_stmt);
              break;

            default:
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported defining stmt:\n");
              return -1;
            }
        }

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          oprnd_info->first_op_type = TREE_TYPE (oprnd);
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def)
                && !((oprnd_info->first_dt == vect_external_def
                      || oprnd_info->first_dt == vect_constant_def)
                     && (dt == vect_external_def
                         || dt == vect_constant_def)))
               || !types_compatible_p (oprnd_info->first_op_type,
                                       TREE_TYPE (oprnd))))
            {
              /* Try swapping operands if we got a mismatch.  */
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Build SLP failed: different types\n");

              return 1;
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          oprnd_info->def_stmts.quick_push (def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }
    }

  /* Swap operands.  */
  if (swapped)
    {
      if (first_op_cond)
        {
          tree cond = gimple_assign_rhs1 (stmt);
          swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
                             &TREE_OPERAND (cond, 1));
          TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
        }
      else
        swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                           gimple_assign_rhs2_ptr (stmt));
    }

  return 0;
}


/* Verify that the scalar stmts STMTS are isomorphic, do not require data
   permutation, and are not of unsupported operation types.  Return true
   if so; otherwise return false and indicate in *MATCHES which stmts are
   not isomorphic to the first one.  If MATCHES[0] is false then the
   comparison could not be carried out or the stmts will never be
   vectorized by SLP.  */

static bool
vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       vec<gimple> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       unsigned int vectorization_factor, bool *matches)
{
  unsigned int i;
  gimple stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK, rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  tree cond;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == COND_EXPR
          && (cond = gimple_assign_rhs1 (stmt))
          && !COMPARISON_CLASS_P (cond))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: condition is not "
                               "comparison ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        {
          *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (bb_vinfo)
            vectorization_factor = *max_nunits;
        }

      if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (call_stmt)
              || gimple_call_tail_p (call_stmt)
              || gimple_call_noreturn_p (call_stmt)
              || !gimple_call_nothrow_p (call_stmt)
              || gimple_call_chain (call_stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    call_stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: no optab.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == BIT_FIELD_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              ;
            }
          else
            {
              /* Load.  */
              unsigned unrolling_factor
                = least_common_multiple
                    (*max_nunits, group_size) / group_size;
              /* FORNOW: Check that there is no gap between the loads
                 and no gap between the groups when we need to load
                 multiple groups at once.
                 ???  We should enhance this to only disallow gaps
                 inside vectors.  */
              if ((unrolling_factor > 1
                   && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                   && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
                      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: grouped "
                                       "loads have gaps ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              /* Check that the size of the interleaved load group is not
                 greater than the SLP group size.  */
              unsigned ncopies
                = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
              if (loop_vinfo
                  && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                  && ((GROUP_SIZE (vinfo_for_stmt (stmt))
                       - GROUP_GAP (vinfo_for_stmt (stmt)))
                      > ncopies * group_size))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: the number "
                                       "of interleaved loads is greater than "
                                       "the SLP group size ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              old_first_load = first_load;
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  */
                  if (prev_first_load != first_load)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Mismatch.  */
                      continue;
                    }
                }
              else
                prev_first_load = first_load;

              /* In some cases a group of loads is just the same load
                 repeated N times.  Only analyze its cost once.  */
              if (first_load == stmt && old_first_load != first_load)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: unsupported "
                                           "unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                }
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not a grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              /* FORNOW: Non-grouped loads are not supported.  */
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          /* Not a memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && rhs_code != COND_EXPR
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;

  return true;
}

/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation or are of unsupported types of operation.  Otherwise
   return TRUE.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor,
                     bool *matches, unsigned *npermutes, unsigned *tree_size,
                     unsigned max_tree_size)
{
  unsigned nops, i, this_npermutes = 0, this_tree_size = 0;
  gimple stmt;

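  /* If the caller passed no scratch space for the per-stmt match flags
     or the permute counter, fall back to local storage.  */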
  if (!matches)
    matches = XALLOCAVEC (bool, group_size);
  if (!npermutes)
    npermutes = &this_npermutes;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
                              SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
                              max_nunits, vectorization_factor, matches))
    return false;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      switch (vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
                                           stmt, (i == 0), &oprnds_info))
        {
        case 0:
          break;
        case -1:
          matches[0] = false;
          vect_free_oprnd_info (oprnds_info);
          return false;
        case 1:
          matches[i] = false;
          break;
        }
    }
  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      {
        vect_free_oprnd_info (oprnds_info);
        return false;
      }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      if (++this_tree_size > max_tree_size)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      bool *matches = XALLOCAVEC (bool, group_size);
      if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                               group_size, max_nunits, loads,
                               vectorization_factor, matches,
                               npermutes, &this_tree_size, max_tree_size))
        {
          oprnd_info->def_stmts = vNULL;
          SLP_TREE_CHILDREN (*node).quick_push (child);
          continue;
        }

      /* If the SLP build for operand zero failed and operand zero
         and one can be commuted, try that for the scalar stmts
         that failed the match.  */
      if (i == 0
          /* A first scalar stmt mismatch signals a fatal mismatch.  */
          && matches[0]
          /* ???  For COND_EXPRs we can swap the comparison operands
             as well as the arms under some constraints.  */
          && nops == 2
          && oprnds_info[1]->first_dt == vect_internal_def
          && is_gimple_assign (stmt)
          && commutative_tree_code (gimple_assign_rhs_code (stmt))
          /* Do so only if the number of unsuccessful permutes was not
             more than a cut-off, as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */
          && *npermutes < 4)
        {
          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          /* Swap mismatched definition stmts.  */
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Re-trying with swapped operands of stmts ");
          for (unsigned j = 0; j < group_size; ++j)
            if (!matches[j])
              {
                gimple tem = oprnds_info[0]->def_stmts[j];
                oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
                oprnds_info[1]->def_stmts[j] = tem;
                dump_printf (MSG_NOTE, "%d ", j);
              }
          dump_printf (MSG_NOTE, "\n");
          /* And try again ...  */
          if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                                   group_size, max_nunits, loads,
                                   vectorization_factor,
                                   matches, npermutes, &this_tree_size,
                                   max_tree_size))
            {
              oprnd_info->def_stmts = vNULL;
              SLP_TREE_CHILDREN (*node).quick_push (child);
              continue;
            }

          ++*npermutes;
        }

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  if (tree_size)
    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
  return true;
}

/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
}


/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}


/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}


/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

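  /* Scatter the stmts into their permuted positions: the stmt at index I
     in the old order goes to position PERMUTATION[I].  */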
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}


/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  sbitmap load_index;
  slp_tree node;
  gimple stmt, load, next_load, first_load;
  struct data_reference *dr;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        if (node->load_permutation.exists ())
          FOR_EACH_VEC_ELT (node->load_permutation, j, next)
            dump_printf (MSG_NOTE, "%d ", next);
        else
          for (k = 0; k < group_size; ++k)
            dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ???  Can't we assert this?  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      slp_tree load;
      unsigned int lidx;

      /* Compare all the permutation sequences to the first one.  We know
         that at least one load is permuted.  */
      node = SLP_INSTANCE_LOADS (slp_instn)[0];
      if (!node->load_permutation.exists ())
        return false;
      for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
        {
          if (!load->load_permutation.exists ())
            return false;
          FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
            if (lidx != node->load_permutation[j])
              return false;
        }

      /* Check that the loads in the first sequence are different and there
         are no gaps between them.  */
      load_index = sbitmap_alloc (group_size);
      bitmap_clear (load_index);
      FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
        {
          if (bitmap_bit_p (load_index, lidx))
            {
              sbitmap_free (load_index);
              return false;
            }
          bitmap_set_bit (load_index, lidx);
        }
      for (i = 0; i < group_size; i++)
        if (!bitmap_bit_p (load_index, i))
          {
            sbitmap_free (load_index);
            return false;
          }
      sbitmap_free (load_index);

      /* This permutation is valid for reduction.  Since the order of the
         statements in the nodes is not important unless they are memory
         accesses, we can rearrange the statements in all the nodes
         according to the order of the loads.  */
      vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                node->load_permutation);

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check that for every node in the instance the loads
         form a subchain.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          next_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (j != 0 && next_load != load)
                return false;
              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }
        }

      /* Check that the alignment of the first load in every subchain, i.e.,
         the first statement in every load node, is supported.
         ???  This belongs in alignment checking.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          first_load = SLP_TREE_SCALAR_STMTS (node)[0];
          if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
            {
              dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
              if (vect_supportable_dr_alignment (dr, false)
                  == dr_unaligned_unsupported)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                       vect_location,
                                       "unsupported unaligned load ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        first_load, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
            }
        }

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE and where each sequence of the same data references is of
     GROUP_SIZE length as well (unless it's reduction).  */
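  /* I.e. each load node replicates a single element of the group (e.g. for
     GROUP_SIZE 2 the two load nodes carry permutations {0,0} and {1,1});
     the checks below enforce exactly this shape.  */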
  if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
    return false;
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (!node->load_permutation.exists ())
      return false;

  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      unsigned int lidx = node->load_permutation[0];
      if (bitmap_bit_p (load_index, lidx))
        {
          sbitmap_free (load_index);
          return false;
        }
      bitmap_set_bit (load_index, lidx);
      FOR_EACH_VEC_ELT (node->load_permutation, j, k)
        if (k != lidx)
          {
            sbitmap_free (load_index);
            return false;
          }
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
        sbitmap_free (load_index);
        return false;
      }
  sbitmap_free (load_index);

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
        && !vect_transform_slp_perm_load
              (node, vNULL, NULL,
               SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;
  return true;
}


/* Find the first load in the loop that belongs to INSTANCE.
   When loads are in several SLP nodes, there can be a case in which the first
   load does not appear in the first SLP node to be transformed, causing
   incorrect order of statements.  Since we generate all the loads together,
   they must be inserted before the first load of the SLP instance and not
   before the first load of the first node of the instance.  */

static gimple
vect_find_first_load_in_slp_instance (slp_instance instance)
{
  int i, j;
  slp_tree load_node;
  gimple first_load = NULL, load;

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node)
    FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
      first_load = get_earlier_stmt (load, first_load);

  return first_load;
}


/* Find the last store in SLP INSTANCE.  */

static gimple
vect_find_last_store_in_slp_instance (slp_instance instance)
{
  int i;
  slp_tree node;
  gimple last_store = NULL, store;

  node = SLP_INSTANCE_TREE (instance);
  for (i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &store); i++)
    last_store = get_later_stmt (store, last_store);

  return last_store;
}

/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                         slp_instance instance, slp_tree node,
                         stmt_vector_for_cost *prologue_cost_vec,
                         unsigned ncopies_for_cost)
{
  stmt_vector_for_cost *body_cost_vec = &SLP_INSTANCE_BODY_COST_VEC (instance);

  unsigned i;
  slp_tree child;
  gimple stmt, s;
  stmt_vec_info stmt_info;
  tree lhs;
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                             instance, child, prologue_cost_vec,
                             ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
        vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                               vect_uninitialized_def,
                               node, prologue_cost_vec, body_cost_vec);
      else
        {
          int i;
          gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
          vect_model_load_cost (stmt_info, ncopies_for_cost, false,
                                node, prologue_cost_vec, body_cost_vec);
          /* If the load is permuted record the cost for the permutation.
             ???  Loads from multiple chains are let through here only
             for a single special case involving complex numbers where
             in the end no permutation is necessary.  */
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
            if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
                 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
                && vect_get_place_in_interleaving_chain
                     (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
              {
                record_stmt_cost (body_cost_vec, group_size, vec_perm,
                                  stmt_info, 0, vect_body);
                break;
              }
        }
    }
  else
    record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                      stmt_info, 0, vect_body);

  /* Scan operands and account for prologue cost of constants/externals.
     ???  This over-estimates cost for multiple uses and should be
     re-engineered.  */
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree def, op = gimple_op (stmt, i);
      gimple def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
        continue;
      if (vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo,
                              &def_stmt, &def, &dt)
          && (dt == vect_constant_def || dt == vect_external_def))
        record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
                          stmt_info, 0, vect_prologue);
    }
}

/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       slp_instance instance, unsigned nunits)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
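  /* E.g. GROUP_SIZE 8 with 4-element vectors gives
     least_common_multiple (4, 8) / 4 == 2 vector stmts for the group.  */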

  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  SLP_INSTANCE_BODY_COST_VEC (instance) = body_cost_vec;
  vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                           instance, SLP_INSTANCE_TREE (instance),
                           &prologue_cost_vec, ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  Unlike the body costs, we know
     the final values now regardless of the loop vectorization factor.  */
  void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
                : BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_prologue);
    }

  prologue_cost_vec.release ();
}

/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt, unsigned max_tree_size)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
        {
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);
        }
      else
        {
          gcc_assert (loop_vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
        }

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
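  /* E.g. a group of 2 stmts with 4-element vectors gives
     least_common_multiple (4, 2) / 2 == 2, i.e. the group must be
     unrolled twice to fill a vector.  */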
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
          else
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
        }
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &max_nunits, &loads,
                           vectorization_factor, NULL, NULL, NULL,
                           max_tree_size))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && !loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP\n");
          vect_free_slp_tree (node);
          loads.release ();
          return false;
        }

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_BODY_COST_VEC (new_instance) = vNULL;
      SLP_INSTANCE_LOADS (new_instance) = loads;
      SLP_INSTANCE_FIRST_LOAD_STMT (new_instance) = NULL;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
        {
          vec<unsigned> load_permutation;
          int j;
          gimple load, first_stmt;
          bool this_load_permuted = false;
          load_permutation.create (group_size);
          first_stmt = GROUP_FIRST_ELEMENT
              (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
            {
              int load_place
                = vect_get_place_in_interleaving_chain (load, first_stmt);
              gcc_assert (load_place != -1);
              if (load_place != j)
                this_load_permuted = true;
              load_permutation.safe_push (load_place);
            }
          if (!this_load_permuted)
            {
              load_permutation.release ();
              continue;
            }
          SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
          loads_permuted = true;
        }

      if (loads_permuted)
        {
          if (!vect_supported_load_permutation_p (new_instance))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported load "
                                   "permutation ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              vect_free_slp_instance (new_instance);
              return false;
            }

          SLP_INSTANCE_FIRST_LOAD_STMT (new_instance)
            = vect_find_first_load_in_slp_instance (new_instance);
        }

      /* Compute the costs of this SLP instance.  */
      vect_analyze_slp_cost (loop_vinfo, bb_vinfo,
                             new_instance, TYPE_VECTOR_SUBPARTS (vectype));

      if (loop_vinfo)
        LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
      else
        BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);

      if (dump_enabled_p ())
        vect_print_slp_tree (MSG_NOTE, node);

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();

  return false;
}


/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                  unsigned max_tree_size)
{
  unsigned int i;
  vec<gimple> grouped_stores;
  vec<gimple> reductions = vNULL;
  vec<gimple> reduc_chains = vNULL;
  gimple first_element;
  bool ok = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");

  if (loop_vinfo)
    {
      grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
      reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
      reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
    }
  else
    grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
                                   max_tree_size))
      ok = true;

  if (bb_vinfo && !ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Failed to SLP the basic block.\n");

      return false;
    }

  if (loop_vinfo
      && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
    {
      /* Find SLP sequences starting from reduction chains.  */
      FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
        if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
                                       max_tree_size))
          ok = true;
        else
          return false;

      /* Don't try to vectorize SLP reductions if reduction chain was
         detected.  */
      return ok;
    }

  /* Find SLP sequences starting from groups of reductions.  */
  if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
      && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0],
                                    max_tree_size))
    ok = true;

  return true;
}


/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
                     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Decided to SLP %d instances. Unrolling factor %d\n",
                     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
}


/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */

static void
vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
{
  gimple stmt = SLP_TREE_SCALAR_STMTS (node)[i];
  imm_use_iterator imm_iter;
  gimple use_stmt;
  stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
  slp_tree child;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int j;

  /* Propagate hybrid down the SLP tree.  */
  if (stype == hybrid)
    ;
  else if (HYBRID_SLP_STMT (stmt_vinfo))
    stype = hybrid;
  else
    {
      /* Check if a pure SLP stmt has uses in non-SLP stmts.  */
      gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
      if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
        FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
          if (gimple_bb (use_stmt)
              && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
              && (use_vinfo = vinfo_for_stmt (use_stmt))
              && !STMT_SLP_TYPE (use_vinfo)
              && (STMT_VINFO_RELEVANT (use_vinfo)
                  || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo))
                  || (STMT_VINFO_IN_PATTERN_P (use_vinfo)
                      && STMT_VINFO_RELATED_STMT (use_vinfo)
                      && !STMT_SLP_TYPE (vinfo_for_stmt
                            (STMT_VINFO_RELATED_STMT (use_vinfo)))))
              && !(gimple_code (use_stmt) == GIMPLE_PHI
                   && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
            stype = hybrid;
    }

  if (stype == hybrid)
    STMT_SLP_TYPE (stmt_vinfo) = hybrid;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
    vect_detect_hybrid_slp_stmts (child, i, stype);
}

/* Helpers for vect_detect_hybrid_slp walking pattern stmt uses.  */

static tree
vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
{
  walk_stmt_info *wi = (walk_stmt_info *)data;
  struct loop *loopp = (struct loop *)wi->info;

  if (wi->is_lhs)
    return NULL_TREE;

  if (TREE_CODE (*tp) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (*tp))
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (*tp);
      if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
          && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
        STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
    }

  return NULL_TREE;
}

static tree
vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
                          walk_stmt_info *)
{
  /* If the stmt is in a SLP instance then this isn't a reason
     to mark use definitions in other SLP instances as hybrid.  */
  if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
    *handled = true;
  return NULL_TREE;
}
1910
1911 /* Find stmts that must be both vectorized and SLPed. */
1912
1913 void
1914 vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
1915 {
1916 unsigned int i;
1917 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
1918 slp_instance instance;
1919
1920 if (dump_enabled_p ())
1921 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
1922 "\n");
1923
1924   /* First walk all pattern stmts in the loop and mark defs of their uses
1925      as hybrid, because immediate uses in pattern stmts are not recorded.  */
1926 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
1927 {
1928 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
1929 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
1930 gsi_next (&gsi))
1931 {
1932 gimple stmt = gsi_stmt (gsi);
1933 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1934 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
1935 {
1936 walk_stmt_info wi;
1937 memset (&wi, 0, sizeof (wi));
1938 wi.info = LOOP_VINFO_LOOP (loop_vinfo);
1939 gimple_stmt_iterator gsi2
1940 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
1941 walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
1942 vect_detect_hybrid_slp_1, &wi);
1943 walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
1944 vect_detect_hybrid_slp_2,
1945 vect_detect_hybrid_slp_1, &wi);
1946 }
1947 }
1948 }
1949
1950   /* Then walk the SLP instance trees, marking stmts with uses in
1951      non-SLP stmts as hybrid and propagating hybrid down the
1952      SLP tree, collecting the above info on the fly.  */
1953 FOR_EACH_VEC_ELT (slp_instances, i, instance)
1954 {
1955 for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
1956 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
1957 i, pure_slp);
1958 }
1959 }
1960
1961
1962 /* Create and initialize a new bb_vec_info struct for BB, as well as
1963 stmt_vec_info structs for all the stmts in it. */
1964
1965 static bb_vec_info
1966 new_bb_vec_info (basic_block bb)
1967 {
1968 bb_vec_info res = NULL;
1969 gimple_stmt_iterator gsi;
1970
1971 res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
1972 BB_VINFO_BB (res) = bb;
1973
1974 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1975 {
1976 gimple stmt = gsi_stmt (gsi);
1977 gimple_set_uid (stmt, 0);
1978 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
1979 }
1980
1981 BB_VINFO_GROUPED_STORES (res).create (10);
1982 BB_VINFO_SLP_INSTANCES (res).create (2);
1983 BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
1984
1985 bb->aux = res;
1986 return res;
1987 }
1988
1989
1990 /* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
1991 stmts in the basic block. */
1992
1993 static void
1994 destroy_bb_vec_info (bb_vec_info bb_vinfo)
1995 {
1996 vec<slp_instance> slp_instances;
1997 slp_instance instance;
1998 basic_block bb;
1999 gimple_stmt_iterator si;
2000 unsigned i;
2001
2002 if (!bb_vinfo)
2003 return;
2004
2005 bb = BB_VINFO_BB (bb_vinfo);
2006
2007 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2008 {
2009 gimple stmt = gsi_stmt (si);
2010 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2011
2012 if (stmt_info)
2013 /* Free stmt_vec_info. */
2014 free_stmt_vec_info (stmt);
2015 }
2016
2017 vect_destroy_datarefs (NULL, bb_vinfo);
2018 free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
2019 BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
2020 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2021 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2022 vect_free_slp_instance (instance);
2023 BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
2024 destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
2025 free (bb_vinfo);
2026 bb->aux = NULL;
2027 }
2028
2029
2030 /* Analyze statements contained in SLP tree node after recursively analyzing
2031 the subtree. Return TRUE if the operations are supported. */
2032
2033 static bool
2034 vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
2035 {
2036 bool dummy;
2037 int i;
2038 gimple stmt;
2039 slp_tree child;
2040
2041 if (!node)
2042 return true;
2043
2044 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2045 if (!vect_slp_analyze_node_operations (bb_vinfo, child))
2046 return false;
2047
2048 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2049 {
2050 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2051 gcc_assert (stmt_info);
2052 gcc_assert (PURE_SLP_STMT (stmt_info));
2053
2054 if (!vect_analyze_stmt (stmt, &dummy, node))
2055 return false;
2056 }
2057
2058 return true;
2059 }
2060
2061
2062 /* Analyze statements in SLP instances of the basic block. Return TRUE if the
2063 operations are supported. */
2064
2065 static bool
2066 vect_slp_analyze_operations (bb_vec_info bb_vinfo)
2067 {
2068 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2069 slp_instance instance;
2070 int i;
2071
2072 for (i = 0; slp_instances.iterate (i, &instance); )
2073 {
2074 if (!vect_slp_analyze_node_operations (bb_vinfo,
2075 SLP_INSTANCE_TREE (instance)))
2076 {
2077 vect_free_slp_instance (instance);
2078 slp_instances.ordered_remove (i);
2079 }
2080 else
2081 i++;
2082 }
2083
2084 if (!slp_instances.length ())
2085 return false;
2086
2087 return true;
2088 }
2089
2090
2091 /* Compute the scalar cost of the SLP node NODE and its children
2092    and return it.  Do not account for defs that are marked in LIFE,
2093    and update LIFE according to the uses of NODE.  */
2094
2095 static unsigned
2096 vect_bb_slp_scalar_cost (basic_block bb,
2097 slp_tree node, vec<bool, va_heap> *life)
2098 {
2099 unsigned scalar_cost = 0;
2100 unsigned i;
2101 gimple stmt;
2102 slp_tree child;
2103
2104 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2105 {
2106 unsigned stmt_cost;
2107 ssa_op_iter op_iter;
2108 def_operand_p def_p;
2109 stmt_vec_info stmt_info;
2110
2111 if ((*life)[i])
2112 continue;
2113
2114       /* If the defs have a non-vectorized use then the scalar stmt is
2115          kept live, in which case we do not account it, or any required
2116          defs in the SLP children, in the scalar cost.  This way we make
2117          the vectorization more costly when compared to the scalar
2118          cost.  */
2119 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2120 {
2121 imm_use_iterator use_iter;
2122 gimple use_stmt;
2123 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2124 if (!is_gimple_debug (use_stmt)
2125 && (gimple_code (use_stmt) == GIMPLE_PHI
2126 || gimple_bb (use_stmt) != bb
2127 || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
2128 {
2129 (*life)[i] = true;
2130 BREAK_FROM_IMM_USE_STMT (use_iter);
2131 }
2132 }
2133 if ((*life)[i])
2134 continue;
2135
2136 stmt_info = vinfo_for_stmt (stmt);
2137 if (STMT_VINFO_DATA_REF (stmt_info))
2138 {
2139 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2140 stmt_cost = vect_get_stmt_cost (scalar_load);
2141 else
2142 stmt_cost = vect_get_stmt_cost (scalar_store);
2143 }
2144 else
2145 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2146
2147 scalar_cost += stmt_cost;
2148 }
2149
2150 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2151 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2152
2153 return scalar_cost;
2154 }
2155
2156 /* Check if vectorization of the basic block is profitable. */
2157
2158 static bool
2159 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2160 {
2161 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2162 slp_instance instance;
2163 int i, j;
2164 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2165 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2166 void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
2167 stmt_vec_info stmt_info = NULL;
2168 stmt_vector_for_cost body_cost_vec;
2169 stmt_info_for_cost *ci;
2170
2171 /* Calculate vector costs. */
2172 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2173 {
2174 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2175
2176 FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
2177 {
2178 stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
2179 (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
2180 stmt_info, ci->misalign, vect_body);
2181 }
2182 }
2183
2184 /* Calculate scalar cost. */
2185 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2186 {
2187 auto_vec<bool, 20> life;
2188 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2189 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2190 SLP_INSTANCE_TREE (instance),
2191 &life);
2192 }
2193
2194 /* Complete the target-specific cost calculation. */
2195 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2196 &vec_inside_cost, &vec_epilogue_cost);
2197
2198 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2199
2200 if (dump_enabled_p ())
2201 {
2202 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2203 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2204 vec_inside_cost);
2205 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2206 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2207 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2208 }
2209
2210   /* Vectorization is profitable if its cost is less than the cost of the
2211      scalar version.  */
2212 if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2213 return false;
2214
2215 return true;
2216 }
2217
2218 /* Check if the basic block can be vectorized. */
2219
2220 static bb_vec_info
2221 vect_slp_analyze_bb_1 (basic_block bb)
2222 {
2223 bb_vec_info bb_vinfo;
2224 vec<slp_instance> slp_instances;
2225 slp_instance instance;
2226 int i;
2227 int min_vf = 2;
2228 unsigned n_stmts = 0;
2229
2230 bb_vinfo = new_bb_vec_info (bb);
2231 if (!bb_vinfo)
2232 return NULL;
2233
2234 if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
2235 {
2236 if (dump_enabled_p ())
2237 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2238 "not vectorized: unhandled data-ref in basic "
2239 "block.\n");
2240
2241 destroy_bb_vec_info (bb_vinfo);
2242 return NULL;
2243 }
2244
2245 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2246 {
2247 if (dump_enabled_p ())
2248 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2249 "not vectorized: not enough data-refs in "
2250 "basic block.\n");
2251
2252 destroy_bb_vec_info (bb_vinfo);
2253 return NULL;
2254 }
2255
2256 if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2257 {
2258 if (dump_enabled_p ())
2259 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2260 "not vectorized: unhandled data access in "
2261 "basic block.\n");
2262
2263 destroy_bb_vec_info (bb_vinfo);
2264 return NULL;
2265 }
2266
2267 vect_pattern_recog (NULL, bb_vinfo);
2268
2269 if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2270 {
2271 if (dump_enabled_p ())
2272 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2273 "not vectorized: bad data alignment in basic "
2274 "block.\n");
2275
2276 destroy_bb_vec_info (bb_vinfo);
2277 return NULL;
2278 }
2279
2280 /* Check the SLP opportunities in the basic block, analyze and build SLP
2281 trees. */
2282 if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
2283 {
2284 if (dump_enabled_p ())
2285 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2286 "not vectorized: failed to find SLP opportunities "
2287 "in basic block.\n");
2288
2289 destroy_bb_vec_info (bb_vinfo);
2290 return NULL;
2291 }
2292
2293 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2294
2295 /* Mark all the statements that we want to vectorize as pure SLP and
2296 relevant. */
2297 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2298 {
2299 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2300 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2301 }
2302
2303 /* Mark all the statements that we do not want to vectorize. */
2304 for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
2305 !gsi_end_p (gsi); gsi_next (&gsi))
2306 {
2307 stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
2308 if (STMT_SLP_TYPE (vinfo) != pure_slp)
2309 STMT_VINFO_VECTORIZABLE (vinfo) = false;
2310 }
2311
2312 /* Analyze dependences. At this point all stmts not participating in
2313 vectorization have to be marked. Dependence analysis assumes
2314 that we either vectorize all SLP instances or none at all. */
2315 if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2316 {
2317 if (dump_enabled_p ())
2318 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2319 "not vectorized: unhandled data dependence "
2320 "in basic block.\n");
2321
2322 destroy_bb_vec_info (bb_vinfo);
2323 return NULL;
2324 }
2325
2326 if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2327 {
2328 if (dump_enabled_p ())
2329 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2330 "not vectorized: unsupported alignment in basic "
2331 "block.\n");
2332 destroy_bb_vec_info (bb_vinfo);
2333 return NULL;
2334 }
2335
2336 if (!vect_slp_analyze_operations (bb_vinfo))
2337 {
2338 if (dump_enabled_p ())
2339 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2340 "not vectorized: bad operation in basic block.\n");
2341
2342 destroy_bb_vec_info (bb_vinfo);
2343 return NULL;
2344 }
2345
2346 /* Cost model: check if the vectorization is worthwhile. */
2347 if (!unlimited_cost_model (NULL)
2348 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2349 {
2350 if (dump_enabled_p ())
2351 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2352 "not vectorized: vectorization is not "
2353 "profitable.\n");
2354
2355 destroy_bb_vec_info (bb_vinfo);
2356 return NULL;
2357 }
2358
2359 if (dump_enabled_p ())
2360 dump_printf_loc (MSG_NOTE, vect_location,
2361 "Basic block will be vectorized using SLP\n");
2362
2363 return bb_vinfo;
2364 }
2365
2366
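/* Main entry for basic-block SLP analysis: reject blocks with too many
   statements, then repeatedly invoke vect_slp_analyze_bb_1, starting with
   an autodetected vector size and retrying with the remaining target
   vector sizes in decreasing order.  Return the bb_vec_info on success
   and NULL otherwise.  */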
2367 bb_vec_info
2368 vect_slp_analyze_bb (basic_block bb)
2369 {
2370 bb_vec_info bb_vinfo;
2371 int insns = 0;
2372 gimple_stmt_iterator gsi;
2373 unsigned int vector_sizes;
2374
2375 if (dump_enabled_p ())
2376 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2377
2378 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2379 {
2380 gimple stmt = gsi_stmt (gsi);
2381 if (!is_gimple_debug (stmt)
2382 && !gimple_nop_p (stmt)
2383 && gimple_code (stmt) != GIMPLE_LABEL)
2384 insns++;
2385 }
2386
2387 if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2388 {
2389 if (dump_enabled_p ())
2390 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2391 "not vectorized: too many instructions in "
2392 "basic block.\n");
2393
2394 return NULL;
2395 }
2396
2397 /* Autodetect first vector size we try. */
2398 current_vector_size = 0;
2399 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2400
2401 while (1)
2402 {
2403 bb_vinfo = vect_slp_analyze_bb_1 (bb);
2404 if (bb_vinfo)
2405 return bb_vinfo;
2406
2407 destroy_bb_vec_info (bb_vinfo);
2408
2409 vector_sizes &= ~current_vector_size;
2410 if (vector_sizes == 0
2411 || current_vector_size == 0)
2412 return NULL;
2413
2414 /* Try the next biggest vector size. */
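      /* VECTOR_SIZES is a bitmask of the candidate sizes still left to try,
         so 1 << floor_log2 (vector_sizes) selects the largest of them.  */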
2415 current_vector_size = 1 << floor_log2 (vector_sizes);
2416 if (dump_enabled_p ())
2417 dump_printf_loc (MSG_NOTE, vect_location,
2418 "***** Re-trying analysis with "
2419 "vector size %d\n", current_vector_size);
2420 }
2421 }
2422
2423
2424 /* SLP costs are calculated according to the SLP instance unrolling factor
2425    (i.e., the number of created vector stmts depends on the unrolling factor).
2426    However, the actual number of vector stmts for every SLP node depends on
2427    VF, which is set later in vect_analyze_operations ().  Hence, SLP costs
2428    need to be updated.  In this function we assume that the inside costs
2429    calculated by vect_model_xxx_cost are linear in ncopies.  */
2430
2431 void
2432 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
2433 {
2434 unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2435 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2436 slp_instance instance;
2437 stmt_vector_for_cost body_cost_vec;
2438 stmt_info_for_cost *si;
2439 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2440
2441 if (dump_enabled_p ())
2442 dump_printf_loc (MSG_NOTE, vect_location,
2443 "=== vect_update_slp_costs_according_to_vf ===\n");
2444
2445 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2446 {
2447 /* We assume that costs are linear in ncopies. */
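      /* For example, VF == 8 and an instance unrolling factor of 4 give
         ncopies == 2, so each cost recorded for the instance body is
         accounted twice below.  */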
2448 int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
2449
2450 /* Record the instance's instructions in the target cost model.
2451 This was delayed until here because the count of instructions
2452 isn't known beforehand. */
2453 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2454
2455 FOR_EACH_VEC_ELT (body_cost_vec, j, si)
2456 (void) add_stmt_cost (data, si->count * ncopies, si->kind,
2457 vinfo_for_stmt (si->stmt), si->misalign,
2458 vect_body);
2459 }
2460 }
2461
2462
2463 /* For constant and loop invariant defs of SLP_NODE this function returns
2464    (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2465    OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2466    scalar stmts.  NUMBER_OF_VECTORS is the number of vector defs to create.
2467    REDUC_INDEX is the index of the reduction operand in the statements, or -1
2468    if there is no reduction.  */
2469
2470 static void
2471 vect_get_constant_vectors (tree op, slp_tree slp_node,
2472 vec<tree> *vec_oprnds,
2473 unsigned int op_num, unsigned int number_of_vectors,
2474 int reduc_index)
2475 {
2476 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2477 gimple stmt = stmts[0];
2478 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2479 unsigned nunits;
2480 tree vec_cst;
2481 tree *elts;
2482 unsigned j, number_of_places_left_in_vector;
2483 tree vector_type;
2484 tree vop;
2485 int group_size = stmts.length ();
2486 unsigned int vec_num, i;
2487 unsigned number_of_copies = 1;
2488 vec<tree> voprnds;
2489 voprnds.create (number_of_vectors);
2490 bool constant_p, is_store;
2491 tree neutral_op = NULL;
2492 enum tree_code code = gimple_expr_code (stmt);
2493 gimple def_stmt;
2494 struct loop *loop;
2495 gimple_seq ctor_seq = NULL;
2496
2497 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2498 && reduc_index != -1)
2499 {
2500 op_num = reduc_index - 1;
2501 op = gimple_op (stmt, reduc_index);
2502 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2503 we need either neutral operands or the original operands. See
2504 get_initial_def_for_reduction() for details. */
2505 switch (code)
2506 {
2507 case WIDEN_SUM_EXPR:
2508 case DOT_PROD_EXPR:
2509 case PLUS_EXPR:
2510 case MINUS_EXPR:
2511 case BIT_IOR_EXPR:
2512 case BIT_XOR_EXPR:
2513 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2514 neutral_op = build_real (TREE_TYPE (op), dconst0);
2515 else
2516 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2517
2518 break;
2519
2520 case MULT_EXPR:
2521 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2522 neutral_op = build_real (TREE_TYPE (op), dconst1);
2523 else
2524 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2525
2526 break;
2527
2528 case BIT_AND_EXPR:
2529 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2530 break;
2531
2532           /* For MIN/MAX we don't have an easy neutral operand but
2533              the initial values can be used fine here.  Only for
2534              a reduction chain do we have to force a neutral element.  */
2535 case MAX_EXPR:
2536 case MIN_EXPR:
2537 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2538 neutral_op = NULL;
2539 else
2540 {
2541 def_stmt = SSA_NAME_DEF_STMT (op);
2542 loop = (gimple_bb (stmt))->loop_father;
2543 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2544 loop_preheader_edge (loop));
2545 }
2546 break;
2547
2548 default:
2549 neutral_op = NULL;
2550 }
2551 }
2552
2553 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2554 {
2555 is_store = true;
2556 op = gimple_assign_rhs1 (stmt);
2557 }
2558 else
2559 is_store = false;
2560
2561 gcc_assert (op);
2562
2563 if (CONSTANT_CLASS_P (op))
2564 constant_p = true;
2565 else
2566 constant_p = false;
2567
2568 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2569 gcc_assert (vector_type);
2570 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2571
2572 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2573 created vectors. It is greater than 1 if unrolling is performed.
2574
2575 For example, we have two scalar operands, s1 and s2 (e.g., group of
2576 strided accesses of size two), while NUNITS is four (i.e., four scalars
2577 of this type can be packed in a vector). The output vector will contain
2578 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2579 will be 2).
2580
2581 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2582 containing the operands.
2583
2584 For example, NUNITS is four as before, and the group size is 8
2585 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2586 {s5, s6, s7, s8}. */
2587
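  /* For example, NUNITS == 4 and GROUP_SIZE == 6 give
     number_of_copies = lcm (4, 6) / 6 = 12 / 6 = 2, i.e., every scalar
     operand appears twice among the created vectors.  */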
2588 number_of_copies = least_common_multiple (nunits, group_size) / group_size;
2589
2590 number_of_places_left_in_vector = nunits;
2591 elts = XALLOCAVEC (tree, nunits);
2592 for (j = 0; j < number_of_copies; j++)
2593 {
2594 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2595 {
2596 if (is_store)
2597 op = gimple_assign_rhs1 (stmt);
2598 else
2599 {
2600 switch (code)
2601 {
2602 case COND_EXPR:
2603 if (op_num == 0 || op_num == 1)
2604 {
2605 tree cond = gimple_assign_rhs1 (stmt);
2606 op = TREE_OPERAND (cond, op_num);
2607 }
2608 else
2609 {
2610 if (op_num == 2)
2611 op = gimple_assign_rhs2 (stmt);
2612 else
2613 op = gimple_assign_rhs3 (stmt);
2614 }
2615 break;
2616
2617 case CALL_EXPR:
2618 op = gimple_call_arg (stmt, op_num);
2619 break;
2620
2621 case LSHIFT_EXPR:
2622 case RSHIFT_EXPR:
2623 case LROTATE_EXPR:
2624 case RROTATE_EXPR:
2625 op = gimple_op (stmt, op_num + 1);
2626                   /* Unlike the other binary operators, the shift count of
2627                      shifts/rotates is int rather than the same type as the
2628                      lhs, so make sure the scalar is of the right type if we
2629                      are dealing with vectors of
2630                      long long/long/short/char.  */
2631 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2632 op = fold_convert (TREE_TYPE (vector_type), op);
2633 break;
2634
2635 default:
2636 op = gimple_op (stmt, op_num + 1);
2637 break;
2638 }
2639 }
2640
2641 if (reduc_index != -1)
2642 {
2643 loop = (gimple_bb (stmt))->loop_father;
2644 def_stmt = SSA_NAME_DEF_STMT (op);
2645
2646 gcc_assert (loop);
2647
2648               /* Get the def before the loop.  In a reduction chain we have
2649                  only one initial value.  */
2650 if ((j != (number_of_copies - 1)
2651 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2652 && i != 0))
2653 && neutral_op)
2654 op = neutral_op;
2655 else
2656 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2657 loop_preheader_edge (loop));
2658 }
2659
2660 /* Create 'vect_ = {op0,op1,...,opn}'. */
2661 number_of_places_left_in_vector--;
2662 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2663 {
2664 if (CONSTANT_CLASS_P (op))
2665 {
2666 op = fold_unary (VIEW_CONVERT_EXPR,
2667 TREE_TYPE (vector_type), op);
2668 gcc_assert (op && CONSTANT_CLASS_P (op));
2669 }
2670 else
2671 {
2672 tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
2673 gimple init_stmt;
2674 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
2675 init_stmt
2676 = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
2677 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2678 op = new_temp;
2679 }
2680 }
2681 elts[number_of_places_left_in_vector] = op;
2682 if (!CONSTANT_CLASS_P (op))
2683 constant_p = false;
2684
2685 if (number_of_places_left_in_vector == 0)
2686 {
2687 number_of_places_left_in_vector = nunits;
2688
2689 if (constant_p)
2690 vec_cst = build_vector (vector_type, elts);
2691 else
2692 {
2693 vec<constructor_elt, va_gc> *v;
2694 unsigned k;
2695 vec_alloc (v, nunits);
2696 for (k = 0; k < nunits; ++k)
2697 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2698 vec_cst = build_constructor (vector_type, v);
2699 }
2700 voprnds.quick_push (vect_init_vector (stmt, vec_cst,
2701 vector_type, NULL));
2702 if (ctor_seq != NULL)
2703 {
2704 gimple init_stmt = SSA_NAME_DEF_STMT (voprnds.last ());
2705 gimple_stmt_iterator gsi = gsi_for_stmt (init_stmt);
2706 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2707 GSI_SAME_STMT);
2708 ctor_seq = NULL;
2709 }
2710 }
2711 }
2712 }
2713
2714   /* Since the vectors were created in reverse order, traverse them
2715      backwards here to restore the original order.  */
2716 vec_num = voprnds.length ();
2717 for (j = vec_num; j != 0; j--)
2718 {
2719 vop = voprnds[j - 1];
2720 vec_oprnds->quick_push (vop);
2721 }
2722
2723 voprnds.release ();
2724
2725   /* If VF is greater than the unrolling factor needed for the SLP group of
2726      stmts, the NUMBER_OF_VECTORS to be created is greater than
2727      NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
2728      to replicate the vectors.  */
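  /* A neutral vector (for reductions) is used for the extra copies when one
     is available; otherwise the already created vectors are repeated in
     order.  */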
2729 while (number_of_vectors > vec_oprnds->length ())
2730 {
2731 tree neutral_vec = NULL;
2732
2733 if (neutral_op)
2734 {
2735 if (!neutral_vec)
2736 neutral_vec = build_vector_from_val (vector_type, neutral_op);
2737
2738 vec_oprnds->quick_push (neutral_vec);
2739 }
2740 else
2741 {
2742 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2743 vec_oprnds->quick_push (vop);
2744 }
2745 }
2746 }
2747
2748
2749 /* Get the vectorized definitions from SLP_NODE, which contains the
2750    corresponding vectorized def-stmts.  */
2751
2752 static void
2753 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2754 {
2755 tree vec_oprnd;
2756 gimple vec_def_stmt;
2757 unsigned int i;
2758
2759 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2760
2761 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2762 {
2763 gcc_assert (vec_def_stmt);
2764 vec_oprnd = gimple_get_lhs (vec_def_stmt);
2765 vec_oprnds->quick_push (vec_oprnd);
2766 }
2767 }
2768
2769
2770 /* Get vectorized definitions for SLP_NODE.
2771 If the scalar definitions are loop invariants or constants, collect them and
2772 call vect_get_constant_vectors() to create vector stmts.
2773 Otherwise, the def-stmts must be already vectorized and the vectorized stmts
2774 must be stored in the corresponding child of SLP_NODE, and we call
2775 vect_get_slp_vect_defs () to retrieve them. */
2776
2777 void
2778 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2779 vec<vec<tree> > *vec_oprnds, int reduc_index)
2780 {
2781 gimple first_stmt;
2782 int number_of_vects = 0, i;
2783 unsigned int child_index = 0;
2784 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2785 slp_tree child = NULL;
2786 vec<tree> vec_defs;
2787 tree oprnd;
2788 bool vectorized_defs;
2789
2790 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
2791 FOR_EACH_VEC_ELT (ops, i, oprnd)
2792 {
2793 /* For each operand we check if it has vectorized definitions in a child
2794 node or we need to create them (for invariants and constants). We
2795 check if the LHS of the first stmt of the next child matches OPRND.
2796          If it does, we found the correct child.  Otherwise, we call
2797          vect_get_constant_vectors () and do not advance CHILD_INDEX, so that
2798          this child node is checked again for the next operand.  */
2799 vectorized_defs = false;
2800 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
2801 {
2802 child = SLP_TREE_CHILDREN (slp_node)[child_index];
2803
2804 /* We have to check both pattern and original def, if available. */
2805 gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
2806 gimple related = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
2807
2808 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
2809 || (related
2810 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
2811 {
2812 /* The number of vector defs is determined by the number of
2813 vector statements in the node from which we get those
2814 statements. */
2815 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
2816 vectorized_defs = true;
2817 child_index++;
2818 }
2819 }
2820
2821 if (!vectorized_defs)
2822 {
2823 if (i == 0)
2824 {
2825 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
2826 /* Number of vector stmts was calculated according to LHS in
2827 vect_schedule_slp_instance (), fix it by replacing LHS with
2828 RHS, if necessary. See vect_get_smallest_scalar_type () for
2829 details. */
2830 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
2831 &rhs_size_unit);
2832 if (rhs_size_unit != lhs_size_unit)
2833 {
2834 number_of_vects *= rhs_size_unit;
2835 number_of_vects /= lhs_size_unit;
2836 }
2837 }
2838 }
2839
2840 /* Allocate memory for vectorized defs. */
2841 vec_defs = vNULL;
2842 vec_defs.create (number_of_vects);
2843
2844 /* For reduction defs we call vect_get_constant_vectors (), since we are
2845 looking for initial loop invariant values. */
2846 if (vectorized_defs && reduc_index == -1)
2847 /* The defs are already vectorized. */
2848 vect_get_slp_vect_defs (child, &vec_defs);
2849 else
2850 /* Build vectors from scalar defs. */
2851 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
2852 number_of_vects, reduc_index);
2853
2854 vec_oprnds->quick_push (vec_defs);
2855
2856 /* For reductions, we only need initial values. */
2857 if (reduc_index != -1)
2858 return;
2859 }
2860 }
2861
2862
2863 /* Create NCOPIES permutation statements using the vector mask MASK and two
2864    input vectors placed in DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for
2865    the first copy, shifting both indexes by STRIDE elements of DR_CHAIN for
2866    every following copy.
2867    (STRIDE is the number of vectorized stmts for NODE divided by the number of
2868    copies.)
2869    VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE where
2870    the created stmts must be inserted.  */
2871
2872 static inline void
2873 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
2874 tree mask, int first_vec_indx, int second_vec_indx,
2875 gimple_stmt_iterator *gsi, slp_tree node,
2876 tree vectype, vec<tree> dr_chain,
2877 int ncopies, int vect_stmts_counter)
2878 {
2879 tree perm_dest;
2880 gimple perm_stmt = NULL;
2881 stmt_vec_info next_stmt_info;
2882 int i, stride;
2883 tree first_vec, second_vec, data_ref;
2884
2885 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
2886
2887 /* Initialize the vect stmts of NODE to properly insert the generated
2888 stmts later. */
2889 for (i = SLP_TREE_VEC_STMTS (node).length ();
2890 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
2891 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
2892
2893 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
2894 for (i = 0; i < ncopies; i++)
2895 {
2896 first_vec = dr_chain[first_vec_indx];
2897 second_vec = dr_chain[second_vec_indx];
2898
2899 /* Generate the permute statement. */
2900 perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
2901 first_vec, second_vec, mask);
2902 data_ref = make_ssa_name (perm_dest, perm_stmt);
2903 gimple_set_lhs (perm_stmt, data_ref);
2904 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
2905
2906 /* Store the vector statement in NODE. */
2907 SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
2908
2909 first_vec_indx += stride;
2910 second_vec_indx += stride;
2911 }
2912
2913 /* Mark the scalar stmt as vectorized. */
2914 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
2915 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
2916 }
2917
2918
2919 /* Given FIRST_MASK_ELEMENT, the mask element in element representation,
2920    return in CURRENT_MASK_ELEMENT its equivalent in target-specific
2921    representation.  Check that the mask is valid and return FALSE if not.
2922    Set NEED_NEXT_VECTOR to TRUE if the permutation requires moving to the
2923    next vector, i.e., the current first vector is not needed.  */
2924
2925 static bool
2926 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
2927 int mask_nunits, bool only_one_vec, int index,
2928 unsigned char *mask, int *current_mask_element,
2929 bool *need_next_vector, int *number_of_mask_fixes,
2930 bool *mask_fixed, bool *needs_first_vector)
2931 {
2932 int i;
2933
2934 /* Convert to target specific representation. */
2935 *current_mask_element = first_mask_element + m;
2936   /* Adjust the value in case it's a mask for the second and third vectors.  */
2937 *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
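  /* For example, with MASK_NUNITS == 4 and *NUMBER_OF_MASK_FIXES == 2
     (the original first vector has already been dropped), an element value
     of 6 is adjusted to 6 - 4 = 2, i.e., element 2 of the current first
     vector.  */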
2938
2939 if (*current_mask_element < mask_nunits)
2940 *needs_first_vector = true;
2941
2942 /* We have only one input vector to permute but the mask accesses values in
2943 the next vector as well. */
2944 if (only_one_vec && *current_mask_element >= mask_nunits)
2945 {
2946 if (dump_enabled_p ())
2947 {
2948 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2949 "permutation requires at least two vectors ");
2950 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2951 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2952 }
2953
2954 return false;
2955 }
2956
2957 /* The mask requires the next vector. */
2958 while (*current_mask_element >= mask_nunits * 2)
2959 {
2960 if (*needs_first_vector || *mask_fixed)
2961 {
2962 /* We either need the first vector too or have already moved to the
2963 next vector. In both cases, this permutation needs three
2964 vectors. */
2965 if (dump_enabled_p ())
2966 {
2967 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2968 "permutation requires at "
2969 "least three vectors ");
2970 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2971 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2972 }
2973
2974 return false;
2975 }
2976
2977 /* We move to the next vector, dropping the first one and working with
2978 the second and the third - we need to adjust the values of the mask
2979 accordingly. */
2980 *current_mask_element -= mask_nunits * *number_of_mask_fixes;
2981
2982 for (i = 0; i < index; i++)
2983 mask[i] -= mask_nunits * *number_of_mask_fixes;
2984
2985 (*number_of_mask_fixes)++;
2986 *mask_fixed = true;
2987 }
2988
2989 *need_next_vector = *mask_fixed;
2990
2991 /* This was the last element of this mask. Start a new one. */
2992 if (index == mask_nunits - 1)
2993 {
2994 *number_of_mask_fixes = 1;
2995 *mask_fixed = false;
2996 *needs_first_vector = false;
2997 }
2998
2999 return true;
3000 }
3001
3002
3003 /* Generate vector permute statements from a list of loads in DR_CHAIN.
3004 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
3005 permute statements for the SLP node NODE of the SLP instance
3006 SLP_NODE_INSTANCE. */
3007
3008 bool
3009 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
3010 gimple_stmt_iterator *gsi, int vf,
3011 slp_instance slp_node_instance, bool analyze_only)
3012 {
3013 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3014 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3015 tree mask_element_type = NULL_TREE, mask_type;
3016 int i, j, k, nunits, vec_index = 0, scalar_index;
3017 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3018 gimple next_scalar_stmt;
3019 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
3020 int first_mask_element;
3021 int index, unroll_factor, current_mask_element, ncopies;
3022 unsigned char *mask;
3023 bool only_one_vec = false, need_next_vector = false;
3024 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
3025 int number_of_mask_fixes = 1;
3026 bool mask_fixed = false;
3027 bool needs_first_vector = false;
3028 machine_mode mode;
3029
3030 mode = TYPE_MODE (vectype);
3031
3032 if (!can_vec_perm_p (mode, false, NULL))
3033 {
3034 if (dump_enabled_p ())
3035 {
3036 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3037 "no vect permute for ");
3038 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3039 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3040 }
3041 return false;
3042 }
3043
3044 /* The generic VEC_PERM_EXPR code always uses an integral type of the
3045 same size as the vector element being permuted. */
3046 mask_element_type = lang_hooks.types.type_for_mode
3047 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
3048 mask_type = get_vectype_for_scalar_type (mask_element_type);
3049 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3050 mask = XALLOCAVEC (unsigned char, nunits);
3051 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3052
3053   /* The number of vector stmts to generate, based only on the
3054      SLP_NODE_INSTANCE unrolling factor.  */
3055 orig_vec_stmts_num = group_size *
3056 SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
3057 if (orig_vec_stmts_num == 1)
3058 only_one_vec = true;
3059
3060   /* The number of copies is determined by the final vectorization factor
3061      relative to the SLP_NODE_INSTANCE unrolling factor.  */
3062 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
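  /* For example, VF == 8 and an instance unrolling factor of 2 give
     ncopies == 4, i.e., the permutation sequence for this node is
     generated four times.  */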
3063
3064 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3065 return false;
3066
3067   /* Generate permutation masks for every NODE.  The number of masks for
3068      each NODE is equal to GROUP_SIZE.
3069      E.g., we have a group of three nodes with three loads from the same
3070      location in each node, and the vector size is 4.  I.e., we have an
3071      a0b0c0a1b1c1... sequence and we need to create the following vectors:
3072 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3073 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3074 ...
3075
3076 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3077      The last mask is illegal since we assume two input operands for the
3078      permute operation, so mask element values must lie in [0, 2 * NUNITS).
3079      Hence, the last mask must be converted into {2,5,5,5}.
3080 For the first two permutations we need the first and the second input
3081 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3082 we need the second and the third vectors: {b1,c1,a2,b2} and
3083 {c2,a3,b3,c3}. */
3084
3085 {
3086 scalar_index = 0;
3087 index = 0;
3088 vect_stmts_counter = 0;
3089 vec_index = 0;
3090 first_vec_index = vec_index++;
3091 if (only_one_vec)
3092 second_vec_index = first_vec_index;
3093 else
3094 second_vec_index = vec_index++;
3095
3096 for (j = 0; j < unroll_factor; j++)
3097 {
3098 for (k = 0; k < group_size; k++)
3099 {
3100 i = SLP_TREE_LOAD_PERMUTATION (node)[k];
3101 first_mask_element = i + j * group_size;
3102 if (!vect_get_mask_element (stmt, first_mask_element, 0,
3103 nunits, only_one_vec, index,
3104 mask, &current_mask_element,
3105 &need_next_vector,
3106 &number_of_mask_fixes, &mask_fixed,
3107 &needs_first_vector))
3108 return false;
3109 gcc_assert (current_mask_element < 2 * nunits);
3110 mask[index++] = current_mask_element;
3111
3112 if (index == nunits)
3113 {
3114 index = 0;
3115 if (!can_vec_perm_p (mode, false, mask))
3116 {
3117 if (dump_enabled_p ())
3118 {
3119 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3120 vect_location,
3121 "unsupported vect permute { ");
3122 for (i = 0; i < nunits; ++i)
3123 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
3124 mask[i]);
3125 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3126 }
3127 return false;
3128 }
3129
3130 if (!analyze_only)
3131 {
3132 int l;
3133 tree mask_vec, *mask_elts;
3134 mask_elts = XALLOCAVEC (tree, nunits);
3135 for (l = 0; l < nunits; ++l)
3136 mask_elts[l] = build_int_cst (mask_element_type,
3137 mask[l]);
3138 mask_vec = build_vector (mask_type, mask_elts);
3139
3140 if (need_next_vector)
3141 {
3142 first_vec_index = second_vec_index;
3143 second_vec_index = vec_index;
3144 }
3145
3146 next_scalar_stmt
3147 = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
3148
3149 vect_create_mask_and_perm (stmt, next_scalar_stmt,
3150 mask_vec, first_vec_index, second_vec_index,
3151 gsi, node, vectype, dr_chain,
3152 ncopies, vect_stmts_counter++);
3153 }
3154 }
3155 }
3156 }
3157 }
3158
3159 return true;
3160 }
3161
3162
3163
3164 /* Vectorize SLP instance tree in postorder. */
3165
3166 static bool
3167 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3168 unsigned int vectorization_factor)
3169 {
3170 gimple stmt;
3171 bool grouped_store, is_store;
3172 gimple_stmt_iterator si;
3173 stmt_vec_info stmt_info;
3174 unsigned int vec_stmts_size, nunits, group_size;
3175 tree vectype;
3176 int i;
3177 slp_tree child;
3178
3179 if (!node)
3180 return false;
3181
3182 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3183 vect_schedule_slp_instance (child, instance, vectorization_factor);
3184
3185 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3186 stmt_info = vinfo_for_stmt (stmt);
3187
3188 /* VECTYPE is the type of the destination. */
3189 vectype = STMT_VINFO_VECTYPE (stmt_info);
3190 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3191 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3192
3193   /* For each SLP instance calculate the number of vector stmts to be created
3194      for the scalar stmts in each node of the SLP tree.  The number of vector
3195      elements in one vector iteration is the number of scalar elements in
3196      one scalar iteration (GROUP_SIZE) multiplied by VF divided by the vector
3197      size.  */
3198 vec_stmts_size = (vectorization_factor * group_size) / nunits;
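  /* For example, VECTORIZATION_FACTOR == 4, GROUP_SIZE == 2 and NUNITS == 4
     give vec_stmts_size = (4 * 2) / 4 = 2 vector stmts for this node.  */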
3199
3200 if (!SLP_TREE_VEC_STMTS (node).exists ())
3201 {
3202 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3203 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3204 }
3205
3206 if (dump_enabled_p ())
3207 {
3208       dump_printf_loc (MSG_NOTE, vect_location,
3209 "------>vectorizing SLP node starting from: ");
3210 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3211 dump_printf (MSG_NOTE, "\n");
3212 }
3213
3214 /* Loads should be inserted before the first load. */
3215 if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
3216 && STMT_VINFO_GROUPED_ACCESS (stmt_info)
3217 && !REFERENCE_CLASS_P (gimple_get_lhs (stmt))
3218 && SLP_TREE_LOAD_PERMUTATION (node).exists ())
3219 si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
3220 else if (is_pattern_stmt_p (stmt_info))
3221 si = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
3222 else
3223 si = gsi_for_stmt (stmt);
3224
3225 /* Stores should be inserted just before the last store. */
3226 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
3227 && REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
3228 {
3229 gimple last_store = vect_find_last_store_in_slp_instance (instance);
3230 if (is_pattern_stmt_p (vinfo_for_stmt (last_store)))
3231 last_store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (last_store));
3232 si = gsi_for_stmt (last_store);
3233 }
3234
3235 /* Mark the first element of the reduction chain as reduction to properly
3236 transform the node. In the analysis phase only the last element of the
3237 chain is marked as reduction. */
3238 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3239 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3240 {
3241 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3242 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3243 }
3244
3245 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3246 return is_store;
3247 }
3248
3249 /* Replace the scalar calls in SLP node NODE by setting their lhs to zero.
3250    For loop vectorization this is done in vectorizable_call, but for SLP
3251    it needs to be deferred until the end of vect_schedule_slp, because
3252    multiple SLP instances may refer to the same scalar stmt.  */
3253
3254 static void
3255 vect_remove_slp_scalar_calls (slp_tree node)
3256 {
3257 gimple stmt, new_stmt;
3258 gimple_stmt_iterator gsi;
3259 int i;
3260 slp_tree child;
3261 tree lhs;
3262 stmt_vec_info stmt_info;
3263
3264 if (!node)
3265 return;
3266
3267 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3268 vect_remove_slp_scalar_calls (child);
3269
3270 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3271 {
3272 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3273 continue;
3274 stmt_info = vinfo_for_stmt (stmt);
3275 if (stmt_info == NULL
3276 || is_pattern_stmt_p (stmt_info)
3277 || !PURE_SLP_STMT (stmt_info))
3278 continue;
3279 lhs = gimple_call_lhs (stmt);
3280 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3281 set_vinfo_for_stmt (new_stmt, stmt_info);
3282 set_vinfo_for_stmt (stmt, NULL);
3283 STMT_VINFO_STMT (stmt_info) = new_stmt;
3284 gsi = gsi_for_stmt (stmt);
3285 gsi_replace (&gsi, new_stmt, false);
3286 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3287 }
3288 }
3289
3290 /* Generate vector code for all SLP instances in the loop/basic block. */
3291
3292 bool
3293 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3294 {
3295 vec<slp_instance> slp_instances;
3296 slp_instance instance;
3297 unsigned int i, vf;
3298 bool is_store = false;
3299
3300 if (loop_vinfo)
3301 {
3302 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3303 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3304 }
3305 else
3306 {
3307 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3308 vf = 1;
3309 }
3310
3311 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3312 {
3313 /* Schedule the tree of INSTANCE. */
3314 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3315 instance, vf);
3316 if (dump_enabled_p ())
3317 dump_printf_loc (MSG_NOTE, vect_location,
3318 "vectorizing stmts using SLP.\n");
3319 }
3320
3321 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3322 {
3323 slp_tree root = SLP_INSTANCE_TREE (instance);
3324 gimple store;
3325 unsigned int j;
3326 gimple_stmt_iterator gsi;
3327
3328 /* Remove scalar call stmts. Do not do this for basic-block
3329 vectorization as not all uses may be vectorized.
3330 ??? Why should this be necessary? DCE should be able to
3331 remove the stmts itself.
3332 ??? For BB vectorization we can as well remove scalar
3333 stmts starting from the SLP tree root if they have no
3334 uses. */
3335 if (loop_vinfo)
3336 vect_remove_slp_scalar_calls (root);
3337
3338 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3339 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3340 {
3341 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3342 break;
3343
3344 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3345 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3346 /* Free the attached stmt_vec_info and remove the stmt. */
3347 gsi = gsi_for_stmt (store);
3348 unlink_stmt_vdef (store);
3349 gsi_remove (&gsi, true);
3350 release_defs (store);
3351 free_stmt_vec_info (store);
3352 }
3353 }
3354
3355 return is_store;
3356 }
3357
3358
3359 /* Vectorize the basic block. */
3360
3361 void
3362 vect_slp_transform_bb (basic_block bb)
3363 {
3364 bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3365 gimple_stmt_iterator si;
3366
3367 gcc_assert (bb_vinfo);
3368
3369 if (dump_enabled_p ())
3370 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3371
3372 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3373 {
3374 gimple stmt = gsi_stmt (si);
3375 stmt_vec_info stmt_info;
3376
3377 if (dump_enabled_p ())
3378 {
3379 dump_printf_loc (MSG_NOTE, vect_location,
3380 "------>SLPing statement: ");
3381 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3382 dump_printf (MSG_NOTE, "\n");
3383 }
3384
3385 stmt_info = vinfo_for_stmt (stmt);
3386 gcc_assert (stmt_info);
3387
3388 /* Schedule all the SLP instances when the first SLP stmt is reached. */
3389 if (STMT_SLP_TYPE (stmt_info))
3390 {
3391 vect_schedule_slp (NULL, bb_vinfo);
3392 break;
3393 }
3394 }
3395
3396 if (dump_enabled_p ())
3397 dump_printf_loc (MSG_NOTE, vect_location,
3398 "BASIC BLOCK VECTORIZED\n");
3399
3400 destroy_bb_vec_info (bb_vinfo);
3401 }