/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "target.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "hashtab.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "recog.h"  /* FIXME: for insn_data */
#include "insn-codes.h"
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"

/* Extract the location of the basic block in the source code.
   Return the location of the first located statement in BB, or
   UNKNOWN_LOCATION if none is found.  */

source_location
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOCATION;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOCATION)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOCATION;
}


/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}


/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}


/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
  SLP_TREE_TWO_OPERATORS (node) = false;

  return node;
}


/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}


/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}


/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */
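/* A worked example (hypothetical chain, for illustration only): for an
   interleaving chain  a[0] -> a[1] -> a[3]  where GROUP_GAP of a[1] is 1
   and GROUP_GAP of a[3] is 2 (one unused element between a[1] and a[3]),
   the places returned for the three stmts are 0, 1 and 3: each iteration
   below adds the gap of the *next* element in the chain, so gaps left by
   elements that are not in the SLP group are accounted for.  */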

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
      if (next_stmt)
        result += GROUP_GAP (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}


/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check
   that they are of a valid type and that they match the defs of the first
   stmt of the SLP group (stored in OPRNDS_INFO).  Return -1 on fatal
   error, 1 if the error could be corrected by swapping operands of the
   operation, and 0 if everything is OK.  */

static int
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             gimple stmt, bool first,
                             vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      first_op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        {
          first_op_cond = true;
          commutative = true;
          number_of_oprnds++;
        }
      else
        commutative = commutative_tree_code (code);
    }
  else
    return -1;

  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
    {
again:
      if (first_op_cond)
        {
          if (i == 0 || i == 1)
            oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
                                  swapped ? !i : i);
          else
            oprnd = gimple_op (stmt, first_op_idx + i - 1);
        }
      else
        oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt)
          || (!def_stmt && dt != vect_constant_def))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't find def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
              || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
                  && gimple_code (def_stmt) != GIMPLE_PHI))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern)
            {
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              return 1;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.\n");
              return -1;
            }

          switch (gimple_code (def_stmt))
            {
            case GIMPLE_PHI:
              def = gimple_phi_result (def_stmt);
              break;

            case GIMPLE_ASSIGN:
              def = gimple_assign_lhs (def_stmt);
              break;

            default:
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported defining stmt:\n");
              return -1;
            }
        }

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          oprnd_info->first_op_type = TREE_TYPE (oprnd);
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def)
                && !((oprnd_info->first_dt == vect_external_def
                      || oprnd_info->first_dt == vect_constant_def)
                     && (dt == vect_external_def
                         || dt == vect_constant_def)))
               || !types_compatible_p (oprnd_info->first_op_type,
                                       TREE_TYPE (oprnd))))
            {
              /* Try swapping operands if we got a mismatch.  */
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Build SLP failed: different types\n");

              return 1;
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          oprnd_info->def_stmts.quick_push (def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }
    }

  /* Swap operands.  */
  if (swapped)
    {
      if (first_op_cond)
        {
          tree cond = gimple_assign_rhs1 (stmt);
          swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
                             &TREE_OPERAND (cond, 1));
          TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
        }
      else
        swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                           gimple_assign_rhs2_ptr (stmt));
    }

  return 0;
}
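
/* A worked example, for illustration only: if the first stmt of a group is
   x0 = a[0] + c and the second is x1 = c + a[1], operand 0 of the second
   stmt (an external/constant def where an internal def is expected) fails
   to match, so the routine above retries with the operands swapped,
   succeeds, and finally swaps them in the GIMPLE IL as well so that
   operand indexes line up across the whole group.  */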


/* Verify that the scalar stmts STMTS are isomorphic, do not require any
   data permutation and are of supported types of operation.  Return true
   if so, otherwise return false and indicate in *MATCHES which stmts are
   not isomorphic to the first one.  If MATCHES[0] is false then this
   indicates that the comparison could not be carried out or the stmts
   will never be vectorized by SLP.  */

static bool
vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       vec<gimple> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       unsigned int vectorization_factor, bool *matches,
                       bool *two_operators)
{
  unsigned int i;
  gimple first_stmt = stmts[0], stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK;
  enum tree_code alt_stmt_code = ERROR_MARK;
  enum tree_code rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  tree cond;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == COND_EXPR
          && (cond = gimple_assign_rhs1 (stmt))
          && !COMPARISON_CLASS_P (cond))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: condition is not "
                               "comparison ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* If populating the vector type requires unrolling then fail
         before adjusting *max_nunits for basic-block vectorization.  */
      if (bb_vinfo
          && TYPE_VECTOR_SUBPARTS (vectype) > group_size)
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unrolling required "
                           "in basic block SLP\n");
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        {
          *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (bb_vinfo)
            vectorization_factor = *max_nunits;
        }

      if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (call_stmt)
              || gimple_call_tail_p (call_stmt)
              || gimple_call_noreturn_p (call_stmt)
              || !gimple_call_nothrow_p (call_stmt)
              || gimple_call_chain (call_stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    call_stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: no optab.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && alt_stmt_code == ERROR_MARK)
            alt_stmt_code = rhs_code;
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              /* Handle mismatches in plus/minus by computing both
                 and merging the results.  */
              && !((first_stmt_code == PLUS_EXPR
                    || first_stmt_code == MINUS_EXPR)
                   && (alt_stmt_code == PLUS_EXPR
                       || alt_stmt_code == MINUS_EXPR)
                   && rhs_code == alt_stmt_code)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == BIT_FIELD_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "original stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    first_stmt, 0);
                }
              /* Mismatch.  */
              continue;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              ;
            }
          else
            {
              /* Load.  */
              unsigned unrolling_factor
                = least_common_multiple
                    (*max_nunits, group_size) / group_size;
              /* FORNOW: Check that there is no gap between the loads
                 and no gap between the groups when we need to load
                 multiple groups at once.
                 ??? We should enhance this to only disallow gaps
                 inside vectors.  */
              if ((unrolling_factor > 1
                   && ((GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                        && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                       /* If the group is split up then GROUP_GAP
                          isn't correct here, nor is GROUP_FIRST_ELEMENT.  */
                       || GROUP_SIZE (vinfo_for_stmt (stmt)) > group_size))
                  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
                      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: grouped "
                                       "loads have gaps ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              /* Check that the size of the interleaved loads group is not
                 greater than the SLP group size.  */
              unsigned ncopies
                = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
              if (loop_vinfo
                  && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                  && ((GROUP_SIZE (vinfo_for_stmt (stmt))
                       - GROUP_GAP (vinfo_for_stmt (stmt)))
                      > ncopies * group_size))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: the number "
                                       "of interleaved loads is greater than "
                                       "the SLP group size ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              old_first_load = first_load;
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  */
                  if (prev_first_load != first_load)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Mismatch.  */
                      continue;
                    }
                }
              else
                prev_first_load = first_load;

              /* In some cases a group of loads is just the same load
                 repeated N times.  Only analyze its cost once.  */
              if (first_load == stmt && old_first_load != first_load)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: unsupported "
                                           "unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                }
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not a grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              /* FORNOW: Non-grouped loads are not supported.  */
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          /* Not a memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && rhs_code != COND_EXPR
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;

  /* If we allowed a two-operation SLP node verify the target can cope
     with the permute we are going to use.  */
  if (alt_stmt_code != ERROR_MARK
      && TREE_CODE_CLASS (alt_stmt_code) != tcc_reference)
    {
      unsigned char *sel
        = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype));
      for (i = 0; i < TYPE_VECTOR_SUBPARTS (vectype); ++i)
        {
          sel[i] = i;
          if (gimple_assign_rhs_code (stmts[i % group_size]) == alt_stmt_code)
            sel[i] += TYPE_VECTOR_SUBPARTS (vectype);
        }
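      /* Illustration only: with a four-element vector and stmts alternating
         between FIRST_STMT_CODE (say PLUS_EXPR) and ALT_STMT_CODE (say
         MINUS_EXPR), the mask built above is { 0, 5, 2, 7 }, i.e. it picks
         elements 0 and 2 from the all-PLUS result vector and elements 1
         and 3 from the all-MINUS result vector.  */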
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          for (i = 0; i < group_size; ++i)
            if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
              {
                matches[i] = false;
                if (dump_enabled_p ())
                  {
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Build SLP failed: different operation "
                                     "in stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      stmts[i], 0);
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "original stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      first_stmt, 0);
                  }
              }
          return false;
        }
      *two_operators = true;
    }

  return true;
}

/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation or are of unsupported types of operation.  Otherwise,
   return TRUE; MATCHES indicates which scalar stmts (if any) failed to
   match the first one.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor,
                     bool *matches, unsigned *npermutes, unsigned *tree_size,
                     unsigned max_tree_size)
{
  unsigned nops, i, this_tree_size = 0;
  gimple stmt;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  bool two_operators = false;
  if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
                              SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
                              max_nunits, vectorization_factor, matches,
                              &two_operators))
    return false;
  SLP_TREE_TWO_OPERATORS (*node) = two_operators;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      switch (vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
                                           stmt, (i == 0), &oprnds_info))
        {
        case 0:
          break;
        case -1:
          matches[0] = false;
          vect_free_oprnd_info (oprnds_info);
          return false;
        case 1:
          matches[i] = false;
          break;
        }
    }
  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      {
        vect_free_oprnd_info (oprnds_info);
        return false;
      }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      if (++this_tree_size > max_tree_size)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                               group_size, max_nunits, loads,
                               vectorization_factor, matches,
                               npermutes, &this_tree_size, max_tree_size))
        {
          oprnd_info->def_stmts = vNULL;
          SLP_TREE_CHILDREN (*node).quick_push (child);
          continue;
        }

      /* If the SLP build failed fatally and we analyze a basic-block
         simply treat nodes we fail to build as externally defined
         (and thus build vectors from the scalar defs).
         The cost model will reject outright expensive cases.
         ??? This doesn't treat cases where permutation ultimately
         fails (or we don't try permutation below).  Ideally we'd
         even compute a permutation that will end up with the maximum
         SLP tree size...  */
1099 if (bb_vinfo
1100 && !matches[0]
1101 /* ??? Rejecting patterns this way doesn't work. We'd have to
1102 do extra work to cancel the pattern so the uses see the
1103 scalar version. */
1104 && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))
1105 {
1106 dump_printf_loc (MSG_NOTE, vect_location,
1107 "Building vector operands from scalars\n");
1108 oprnd_info->def_stmts = vNULL;
1109 vect_free_slp_tree (child);
1110 SLP_TREE_CHILDREN (*node).quick_push (NULL);
1111 continue;
1112 }
1113
1114 /* If the SLP build for operand zero failed and operand zero
1115 and one can be commutated try that for the scalar stmts
1116 that failed the match. */
1117 if (i == 0
1118 /* A first scalar stmt mismatch signals a fatal mismatch. */
1119 && matches[0]
1120 /* ??? For COND_EXPRs we can swap the comparison operands
1121 as well as the arms under some constraints. */
1122 && nops == 2
1123 && oprnds_info[1]->first_dt == vect_internal_def
1124 && is_gimple_assign (stmt)
1125 && commutative_tree_code (gimple_assign_rhs_code (stmt))
1126 && !SLP_TREE_TWO_OPERATORS (*node)
          /* Do so only if the number of unsuccessful permutes was not more
             than a cut-off, as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */
          && *npermutes < 4)
        {
          unsigned int j;
          slp_tree grandchild;

          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

          /* Swap mismatched definition stmts.  */
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Re-trying with swapped operands of stmts ");
          for (j = 0; j < group_size; ++j)
            if (!matches[j])
              {
                gimple tem = oprnds_info[0]->def_stmts[j];
                oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
                oprnds_info[1]->def_stmts[j] = tem;
                dump_printf (MSG_NOTE, "%d ", j);
              }
          dump_printf (MSG_NOTE, "\n");
          /* And try again with scratch 'matches' ...  */
          bool *tem = XALLOCAVEC (bool, group_size);
          if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                                   group_size, max_nunits, loads,
                                   vectorization_factor,
                                   tem, npermutes, &this_tree_size,
                                   max_tree_size))
            {
              /* ... so if successful we can apply the operand swapping
                 to the GIMPLE IL.  This is necessary because for example
                 vect_get_slp_defs uses operand indexes and thus expects
                 canonical operand order.  */
              for (j = 0; j < group_size; ++j)
                if (!matches[j])
                  {
                    gimple stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
                    swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                                       gimple_assign_rhs2_ptr (stmt));
                  }
              oprnd_info->def_stmts = vNULL;
              SLP_TREE_CHILDREN (*node).quick_push (child);
              continue;
            }

          ++*npermutes;
        }

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  if (tree_size)
    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
  return true;
}

/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
}


/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}


/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}


/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
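
/* For example (illustration only): with GROUP_SIZE 3, scalar stmts
   { s0, s1, s2 } and PERMUTATION { 2, 0, 1 } the stmts are rearranged
   to { s1, s2, s0 }: the stmt at index I moves to index PERMUTATION[I].  */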


/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  sbitmap load_index;
  slp_tree node;
  gimple stmt, load, next_load, first_load;
  struct data_reference *dr;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        if (node->load_permutation.exists ())
          FOR_EACH_VEC_ELT (node->load_permutation, j, next)
            dump_printf (MSG_NOTE, "%d ", next);
        else
          for (k = 0; k < group_size; ++k)
            dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ??? Can't we assert this?  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In a reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      slp_tree load;
      unsigned int lidx;

      /* Compare all the permutation sequences to the first one.  We know
         that at least one load is permuted.  */
      node = SLP_INSTANCE_LOADS (slp_instn)[0];
      if (!node->load_permutation.exists ())
        return false;
      for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
        {
          if (!load->load_permutation.exists ())
            return false;
          FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
            if (lidx != node->load_permutation[j])
              return false;
        }

      /* Check that the loads in the first sequence are different and there
         are no gaps between them.  */
      load_index = sbitmap_alloc (group_size);
      bitmap_clear (load_index);
      FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
        {
          if (bitmap_bit_p (load_index, lidx))
            {
              sbitmap_free (load_index);
              return false;
            }
          bitmap_set_bit (load_index, lidx);
        }
      for (i = 0; i < group_size; i++)
        if (!bitmap_bit_p (load_index, i))
          {
            sbitmap_free (load_index);
            return false;
          }
      sbitmap_free (load_index);

      /* This permutation is valid for reduction.  Since the order of the
         statements in the nodes is not important unless they are memory
         accesses, we can rearrange the statements in all the nodes
         according to the order of the loads.  */
      vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                node->load_permutation);

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check whether the loads in an instance form a subchain and thus
         no permutation is necessary.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          bool subchain_p = true;
          next_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (j != 0 && next_load != load)
                {
                  subchain_p = false;
                  break;
                }
              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }
          if (subchain_p)
            SLP_TREE_LOAD_PERMUTATION (node).release ();
          else
            {
              /* Verify the permutation can be generated.  */
              vec<tree> tem;
              if (!vect_transform_slp_perm_load (node, tem, NULL,
                                                 1, slp_instn, true))
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                   vect_location,
                                   "unsupported load permutation\n");
                  return false;
                }
            }
        }

      /* Check that the alignment of the first load in every subchain, i.e.,
         the first statement in every load node, is supported.
         ??? This belongs in alignment checking.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          first_load = SLP_TREE_SCALAR_STMTS (node)[0];
          if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
            {
              dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
              if (vect_supportable_dr_alignment (dr, false)
                  == dr_unaligned_unsupported)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                       vect_location,
                                       "unsupported unaligned load ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        first_load, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
            }
        }

      return true;
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE and where each sequence of the same drs is of GROUP_SIZE
     length as well (unless it's a reduction).  */
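  /* For example (illustration only): with GROUP_SIZE 2 this accepts the
     load permutations { 0, 0 } and { 1, 1 } in two load nodes, i.e. each
     node broadcasts a single dr, but it rejects { 0, 1 } / { 1, 0 }.  */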
  if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
    return false;
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (!node->load_permutation.exists ())
      return false;

  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      unsigned int lidx = node->load_permutation[0];
      if (bitmap_bit_p (load_index, lidx))
        {
          sbitmap_free (load_index);
          return false;
        }
      bitmap_set_bit (load_index, lidx);
      FOR_EACH_VEC_ELT (node->load_permutation, j, k)
        if (k != lidx)
          {
            sbitmap_free (load_index);
            return false;
          }
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
        sbitmap_free (load_index);
        return false;
      }
  sbitmap_free (load_index);

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
        && !vect_transform_slp_perm_load
              (node, vNULL, NULL,
               SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;
  return true;
}


/* Find the last scalar stmt in the SLP group rooted at NODE
   (e.g. the last store of an SLP instance).  */

static gimple
vect_find_last_scalar_stmt_in_slp (slp_tree node)
{
  gimple last = NULL, stmt;

  for (int i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &stmt); i++)
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      if (is_pattern_stmt_p (stmt_vinfo))
        last = get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo), last);
      else
        last = get_later_stmt (stmt, last);
    }

  return last;
}

/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                         slp_instance instance, slp_tree node,
                         stmt_vector_for_cost *prologue_cost_vec,
                         unsigned ncopies_for_cost)
{
  stmt_vector_for_cost *body_cost_vec = &SLP_INSTANCE_BODY_COST_VEC (instance);

  unsigned i;
  slp_tree child;
  gimple stmt, s;
  stmt_vec_info stmt_info;
  tree lhs;
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (child)
      vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                               instance, child, prologue_cost_vec,
                               ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
        vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                               vect_uninitialized_def,
                               node, prologue_cost_vec, body_cost_vec);
      else
        {
          int i;
          gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
          vect_model_load_cost (stmt_info, ncopies_for_cost, false,
                                node, prologue_cost_vec, body_cost_vec);
          /* If the load is permuted record the cost for the permutation.
             ??? Loads from multiple chains are let through here only
             for a single special case involving complex numbers where
             in the end no permutation is necessary.  */
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
            if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
                 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
                && vect_get_place_in_interleaving_chain
                     (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
              {
                record_stmt_cost (body_cost_vec, group_size, vec_perm,
                                  stmt_info, 0, vect_body);
                break;
              }
        }
    }
  else
    {
      record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                        stmt_info, 0, vect_body);
      if (SLP_TREE_TWO_OPERATORS (node))
        {
          record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                            stmt_info, 0, vect_body);
          record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
                            stmt_info, 0, vect_body);
        }
    }

  /* Scan operands and account for prologue cost of constants/externals.
     ??? This over-estimates cost for multiple uses and should be
     re-engineered.  */
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree def, op = gimple_op (stmt, i);
      gimple def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
        continue;
      if (vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo,
                              &def_stmt, &def, &dt))
        {
          /* Without looking at the actual initializer a vector of
             constants can be implemented as load from the constant pool.
             ??? We need to pass down stmt_info for a vector type
             even if it points to the wrong stmt.  */
          if (dt == vect_constant_def)
            record_stmt_cost (prologue_cost_vec, 1, vector_load,
                              stmt_info, 0, vect_prologue);
          else if (dt == vect_external_def)
            record_stmt_cost (prologue_cost_vec, 1, vec_construct,
                              stmt_info, 0, vect_prologue);
        }
    }
}

/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       slp_instance instance, unsigned nunits)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     least_common_multiple (NUNITS, GROUP_SIZE) / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
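  /* E.g. (illustration only) NUNITS 4 and GROUP_SIZE 6 give
     least_common_multiple (4, 6) / 4 = 3 vector stmts per scalar stmt
     of the group.  */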

  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  SLP_INSTANCE_BODY_COST_VEC (instance) = body_cost_vec;
  vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                           instance, SLP_INSTANCE_TREE (instance),
                           &prologue_cost_vec, ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  Unlike the body costs, we know
     the final values now regardless of the loop vectorization factor.  */
  void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
                : BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_prologue);
    }

  prologue_cost_vec.release ();
}

/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt, unsigned max_tree_size)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
        {
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);
        }
      else
        {
          gcc_assert (loop_vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
        }

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
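  /* E.g. (illustration only) NUNITS 4 and GROUP_SIZE 6 give an unrolling
     factor of least_common_multiple (4, 6) / 6 = 2: two copies of the
     group fill three vectors exactly.  */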
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
          else
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
        }
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  bool *matches = XALLOCAVEC (bool, group_size);
  unsigned npermutes = 0;
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &max_nunits, &loads,
                           vectorization_factor, matches, &npermutes, NULL,
                           max_tree_size))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && !loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP\n");
          vect_free_slp_tree (node);
          loads.release ();
          return false;
        }

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_BODY_COST_VEC (new_instance) = vNULL;
      SLP_INSTANCE_LOADS (new_instance) = loads;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
        {
          vec<unsigned> load_permutation;
          int j;
          gimple load, first_stmt;
          bool this_load_permuted = false;
          load_permutation.create (group_size);
          first_stmt = GROUP_FIRST_ELEMENT
              (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
            {
              int load_place
                = vect_get_place_in_interleaving_chain (load, first_stmt);
              gcc_assert (load_place != -1);
              if (load_place != j)
                this_load_permuted = true;
              load_permutation.safe_push (load_place);
            }
          if (!this_load_permuted)
            {
              load_permutation.release ();
              continue;
            }
          SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
          loads_permuted = true;
        }

      if (loads_permuted)
        {
          if (!vect_supported_load_permutation_p (new_instance))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported load "
                                   "permutation ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              vect_free_slp_instance (new_instance);
              return false;
            }
        }


      if (loop_vinfo)
        {
          /* Compute the costs of this SLP instance.  Delay this for BB
             vectorization as we don't have vector types computed yet.  */
          vect_analyze_slp_cost (loop_vinfo, bb_vinfo,
                                 new_instance, TYPE_VECTOR_SUBPARTS (vectype));
          LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
        }
      else
        BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);

      if (dump_enabled_p ())
        vect_print_slp_tree (MSG_NOTE, node);

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();

  return false;
}


/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */
1871
1872 bool
1873 vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1874 unsigned max_tree_size)
1875 {
1876 unsigned int i;
1877 vec<gimple> grouped_stores;
1878 vec<gimple> reductions = vNULL;
1879 vec<gimple> reduc_chains = vNULL;
1880 gimple first_element;
1881 bool ok = false;
1882
1883 if (dump_enabled_p ())
1884 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");
1885
1886 if (loop_vinfo)
1887 {
1888 grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
1889 reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
1890 reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
1891 }
1892 else
1893 grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);
1894
1895 /* Find SLP sequences starting from groups of grouped stores. */
1896 FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
1897 if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
1898 max_tree_size))
1899 ok = true;
1900
1901 if (bb_vinfo && !ok)
1902 {
1903 if (dump_enabled_p ())
1904 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1905 "Failed to SLP the basic block.\n");
1906
1907 return false;
1908 }
1909
1910 if (loop_vinfo
1911 && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
1912 {
1913 /* Find SLP sequences starting from reduction chains. */
1914 FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
1915 if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
1916 max_tree_size))
1917 ok = true;
1918 else
1919 return false;
1920
1921 /* Don't try to vectorize SLP reductions if a reduction chain was
1922 detected. */
1923 return ok;
1924 }
1925
1926 /* Find SLP sequences starting from groups of reductions. */
1927 if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
1928 && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0],
1929 max_tree_size))
1930 ok = true;
1931
1932 return true;
1933 }
1934
1935
1936 /* For each possible SLP instance decide whether to SLP it and calculate the
1937 overall unrolling factor needed to SLP the loop. Return TRUE if we decided
1938 to SLP at least one instance. */
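/* For illustration (made-up sizes): an instance with group size 2 and a
   4-lane vector type was given SLP_INSTANCE_UNROLLING_FACTOR
   least_common_multiple (4, 2) / 2 == 2; the loop-level unrolling factor
   recorded below is the maximum such factor over all instances. */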
1939
1940 bool
1941 vect_make_slp_decision (loop_vec_info loop_vinfo)
1942 {
1943 unsigned int i, unrolling_factor = 1;
1944 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
1945 slp_instance instance;
1946 int decided_to_slp = 0;
1947
1948 if (dump_enabled_p ())
1949 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
1950 "\n");
1951
1952 FOR_EACH_VEC_ELT (slp_instances, i, instance)
1953 {
1954 /* FORNOW: SLP if you can. */
1955 if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
1956 unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);
1957
1958 /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts. Later we
1959 call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
1960 loop-based vectorization. Such stmts will be marked as HYBRID. */
1961 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
1962 decided_to_slp++;
1963 }
1964
1965 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
1966
1967 if (decided_to_slp && dump_enabled_p ())
1968 dump_printf_loc (MSG_NOTE, vect_location,
1969 "Decided to SLP %d instances. Unrolling factor %d\n",
1970 decided_to_slp, unrolling_factor);
1971
1972 return (decided_to_slp > 0);
1973 }
1974
1975
1976 /* Find stmts that must be both vectorized and SLPed (since they feed stmts that
1977 can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */
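/* A sketch of the situation detected here (made-up stmts):
     x_1 = b[2*i];          <-- pure SLP
     sum_2 = sum_3 + x_1;   <-- not part of any SLP instance
   The pure-SLP def x_1 has a use in a non-SLP stmt, so its defining stmt
   becomes hybrid and is also vectorized by the loop-based scheme. */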
1978
1979 static void
1980 vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
1981 {
1982 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[i];
1983 imm_use_iterator imm_iter;
1984 gimple use_stmt;
1985 stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
1986 slp_tree child;
1987 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1988 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1989 int j;
1990
1991 /* Propagate hybrid down the SLP tree. */
1992 if (stype == hybrid)
1993 ;
1994 else if (HYBRID_SLP_STMT (stmt_vinfo))
1995 stype = hybrid;
1996 else
1997 {
1998 /* Check if a pure SLP stmt has uses in non-SLP stmts. */
1999 gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
2000 if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2001 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
2002 if (gimple_bb (use_stmt)
2003 && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2004 && (use_vinfo = vinfo_for_stmt (use_stmt))
2005 && !STMT_SLP_TYPE (use_vinfo)
2006 && (STMT_VINFO_RELEVANT (use_vinfo)
2007 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo))
2008 || (STMT_VINFO_IN_PATTERN_P (use_vinfo)
2009 && STMT_VINFO_RELATED_STMT (use_vinfo)
2010 && !STMT_SLP_TYPE (vinfo_for_stmt
2011 (STMT_VINFO_RELATED_STMT (use_vinfo)))))
2012 && !(gimple_code (use_stmt) == GIMPLE_PHI
2013 && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
2014 stype = hybrid;
2015 }
2016
2017 if (stype == hybrid)
2018 STMT_SLP_TYPE (stmt_vinfo) = hybrid;
2019
2020 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2021 if (child)
2022 vect_detect_hybrid_slp_stmts (child, i, stype);
2023 }
2024
2025 /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */
2026
2027 static tree
2028 vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
2029 {
2030 walk_stmt_info *wi = (walk_stmt_info *)data;
2031 struct loop *loopp = (struct loop *)wi->info;
2032
2033 if (wi->is_lhs)
2034 return NULL_TREE;
2035
2036 if (TREE_CODE (*tp) == SSA_NAME
2037 && !SSA_NAME_IS_DEFAULT_DEF (*tp))
2038 {
2039 gimple def_stmt = SSA_NAME_DEF_STMT (*tp);
2040 if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
2041 && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
2042 STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
2043 }
2044
2045 return NULL_TREE;
2046 }
2047
2048 static tree
2049 vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
2050 walk_stmt_info *)
2051 {
2052 /* If the stmt is in an SLP instance then this isn't a reason
2053 to mark use definitions in other SLP instances as hybrid. */
2054 if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
2055 *handled = true;
2056 return NULL_TREE;
2057 }
2058
2059 /* Find stmts that must be both vectorized and SLPed. */
2060
2061 void
2062 vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
2063 {
2064 unsigned int i;
2065 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2066 slp_instance instance;
2067
2068 if (dump_enabled_p ())
2069 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
2070 "\n");
2071
2072 /* First walk all pattern stmts in the loop and mark defs of their uses as
2073 hybrid, because immediate uses in pattern stmts are not recorded. */
2074 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2075 {
2076 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2077 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
2078 gsi_next (&gsi))
2079 {
2080 gimple stmt = gsi_stmt (gsi);
2081 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2082 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2083 {
2084 walk_stmt_info wi;
2085 memset (&wi, 0, sizeof (wi));
2086 wi.info = LOOP_VINFO_LOOP (loop_vinfo);
2087 gimple_stmt_iterator gsi2
2088 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
2089 walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
2090 vect_detect_hybrid_slp_1, &wi);
2091 walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
2092 vect_detect_hybrid_slp_2,
2093 vect_detect_hybrid_slp_1, &wi);
2094 }
2095 }
2096 }
2097
2098 /* Then walk the SLP instance trees marking stmts with uses in
2099 non-SLP stmts as hybrid, also propagating hybrid down the
2100 SLP tree, collecting the above info on-the-fly. */
2101 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2102 {
2103 for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
2104 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
2105 i, pure_slp);
2106 }
2107 }
2108
2109
2110 /* Create and initialize a new bb_vec_info struct for BB, as well as
2111 stmt_vec_info structs for all the stmts in it. */
2112
2113 static bb_vec_info
2114 new_bb_vec_info (basic_block bb)
2115 {
2116 bb_vec_info res = NULL;
2117 gimple_stmt_iterator gsi;
2118
2119 res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
2120 BB_VINFO_BB (res) = bb;
2121
2122 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2123 {
2124 gimple stmt = gsi_stmt (gsi);
2125 gimple_set_uid (stmt, 0);
2126 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
2127 }
2128
2129 BB_VINFO_GROUPED_STORES (res).create (10);
2130 BB_VINFO_SLP_INSTANCES (res).create (2);
2131 BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
2132
2133 bb->aux = res;
2134 return res;
2135 }
2136
2137
2138 /* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
2139 stmts in the basic block. */
2140
2141 static void
2142 destroy_bb_vec_info (bb_vec_info bb_vinfo)
2143 {
2144 vec<slp_instance> slp_instances;
2145 slp_instance instance;
2146 basic_block bb;
2147 gimple_stmt_iterator si;
2148 unsigned i;
2149
2150 if (!bb_vinfo)
2151 return;
2152
2153 bb = BB_VINFO_BB (bb_vinfo);
2154
2155 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2156 {
2157 gimple stmt = gsi_stmt (si);
2158 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2159
2160 if (stmt_info)
2161 /* Free stmt_vec_info. */
2162 free_stmt_vec_info (stmt);
2163 }
2164
2165 vect_destroy_datarefs (NULL, bb_vinfo);
2166 free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
2167 BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
2168 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2169 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2170 vect_free_slp_instance (instance);
2171 BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
2172 destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
2173 free (bb_vinfo);
2174 bb->aux = NULL;
2175 }
2176
2177
2178 /* Analyze statements contained in SLP tree node after recursively analyzing
2179 the subtree. Return TRUE if the operations are supported. */
2180
2181 static bool
2182 vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
2183 {
2184 bool dummy;
2185 int i;
2186 gimple stmt;
2187 slp_tree child;
2188
2189 if (!node)
2190 return true;
2191
2192 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2193 if (!vect_slp_analyze_node_operations (bb_vinfo, child))
2194 return false;
2195
2196 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2197 {
2198 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2199 gcc_assert (stmt_info);
2200 gcc_assert (PURE_SLP_STMT (stmt_info));
2201
2202 if (!vect_analyze_stmt (stmt, &dummy, node))
2203 return false;
2204 }
2205
2206 return true;
2207 }
2208
2209
2210 /* Analyze statements in SLP instances of the basic block. Return TRUE if the
2211 operations are supported. */
2212
2213 static bool
2214 vect_slp_analyze_operations (bb_vec_info bb_vinfo)
2215 {
2216 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2217 slp_instance instance;
2218 int i;
2219
2220 for (i = 0; slp_instances.iterate (i, &instance); )
2221 {
2222 if (!vect_slp_analyze_node_operations (bb_vinfo,
2223 SLP_INSTANCE_TREE (instance)))
2224 {
2225 vect_free_slp_instance (instance);
2226 slp_instances.ordered_remove (i);
2227 }
2228 else
2229 i++;
2230 }
2231
2232 if (!slp_instances.length ())
2233 return false;
2234
2235 return true;
2236 }
2237
2238
2239 /* Compute the scalar cost of the SLP node NODE and its children
2240 and return it. Do not account for defs that are marked in LIFE;
2241 update LIFE according to uses of NODE. */
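/* For example (a sketch): for a node with two scalar stmts s0 and s1,
   if s0's def is also used by a PHI or by a stmt outside the basic block,
   (*life)[0] is set and s0 contributes nothing to the returned cost;
   only s1's scalar_load/scalar_store/scalar_stmt cost is summed. */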
2242
2243 static unsigned
2244 vect_bb_slp_scalar_cost (basic_block bb,
2245 slp_tree node, vec<bool, va_heap> *life)
2246 {
2247 unsigned scalar_cost = 0;
2248 unsigned i;
2249 gimple stmt;
2250 slp_tree child;
2251
2252 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2253 {
2254 unsigned stmt_cost;
2255 ssa_op_iter op_iter;
2256 def_operand_p def_p;
2257 stmt_vec_info stmt_info;
2258
2259 if ((*life)[i])
2260 continue;
2261
2262 /* If there is a non-vectorized use of the defs then the scalar
2263 stmt is kept live, in which case we do not account for it or any
2264 required defs in the SLP children in the scalar cost. This
2265 way the vectorization appears more costly when compared to
2266 the scalar code. */
2267 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2268 {
2269 imm_use_iterator use_iter;
2270 gimple use_stmt;
2271 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2272 if (!is_gimple_debug (use_stmt)
2273 && (gimple_code (use_stmt) == GIMPLE_PHI
2274 || gimple_bb (use_stmt) != bb
2275 || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
2276 {
2277 (*life)[i] = true;
2278 BREAK_FROM_IMM_USE_STMT (use_iter);
2279 }
2280 }
2281 if ((*life)[i])
2282 continue;
2283
2284 stmt_info = vinfo_for_stmt (stmt);
2285 if (STMT_VINFO_DATA_REF (stmt_info))
2286 {
2287 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2288 stmt_cost = vect_get_stmt_cost (scalar_load);
2289 else
2290 stmt_cost = vect_get_stmt_cost (scalar_store);
2291 }
2292 else
2293 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2294
2295 scalar_cost += stmt_cost;
2296 }
2297
2298 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2299 if (child)
2300 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2301
2302 return scalar_cost;
2303 }
2304
2305 /* Check if vectorization of the basic block is profitable. */
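/* With made-up numbers: vector prologue plus epilogue cost of 2 and an
   inside cost of 4 beat a scalar cost of 8 (2 + 4 < 8), while the same
   vector costs against a scalar cost of 6 do not (2 + 4 >= 6). */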
2306
2307 static bool
2308 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2309 {
2310 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2311 slp_instance instance;
2312 int i, j;
2313 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2314 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2315 void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
2316 stmt_vec_info stmt_info = NULL;
2317 stmt_vector_for_cost body_cost_vec;
2318 stmt_info_for_cost *ci;
2319
2320 /* Calculate vector costs. */
2321 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2322 {
2323 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2324
2325 FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
2326 {
2327 stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
2328 (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
2329 stmt_info, ci->misalign, vect_body);
2330 }
2331 }
2332
2333 /* Calculate scalar cost. */
2334 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2335 {
2336 auto_vec<bool, 20> life;
2337 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2338 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2339 SLP_INSTANCE_TREE (instance),
2340 &life);
2341 }
2342
2343 /* Complete the target-specific cost calculation. */
2344 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2345 &vec_inside_cost, &vec_epilogue_cost);
2346
2347 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2348
2349 if (dump_enabled_p ())
2350 {
2351 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2352 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2353 vec_inside_cost);
2354 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2355 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2356 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2357 }
2358
2359 /* Vectorization is profitable if its cost is less than the cost of the
2360 scalar version. */
2361 if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2362 return false;
2363
2364 return true;
2365 }
2366
2367 /* Check if the basic block can be vectorized. */
2368
2369 static bb_vec_info
2370 vect_slp_analyze_bb_1 (basic_block bb)
2371 {
2372 bb_vec_info bb_vinfo;
2373 vec<slp_instance> slp_instances;
2374 slp_instance instance;
2375 int i;
2376 int min_vf = 2;
2377 unsigned n_stmts = 0;
2378
2379 bb_vinfo = new_bb_vec_info (bb);
2380 if (!bb_vinfo)
2381 return NULL;
2382
2383 if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
2384 {
2385 if (dump_enabled_p ())
2386 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2387 "not vectorized: unhandled data-ref in basic "
2388 "block.\n");
2389
2390 destroy_bb_vec_info (bb_vinfo);
2391 return NULL;
2392 }
2393
2394 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2395 {
2396 if (dump_enabled_p ())
2397 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2398 "not vectorized: not enough data-refs in "
2399 "basic block.\n");
2400
2401 destroy_bb_vec_info (bb_vinfo);
2402 return NULL;
2403 }
2404
2405 if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2406 {
2407 if (dump_enabled_p ())
2408 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2409 "not vectorized: unhandled data access in "
2410 "basic block.\n");
2411
2412 destroy_bb_vec_info (bb_vinfo);
2413 return NULL;
2414 }
2415
2416 vect_pattern_recog (NULL, bb_vinfo);
2417
2418 if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2419 {
2420 if (dump_enabled_p ())
2421 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2422 "not vectorized: bad data alignment in basic "
2423 "block.\n");
2424
2425 destroy_bb_vec_info (bb_vinfo);
2426 return NULL;
2427 }
2428
2429 /* Check the SLP opportunities in the basic block, analyze and build SLP
2430 trees. */
2431 if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
2432 {
2433 if (dump_enabled_p ())
2434 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2435 "not vectorized: failed to find SLP opportunities "
2436 "in basic block.\n");
2437
2438 destroy_bb_vec_info (bb_vinfo);
2439 return NULL;
2440 }
2441
2442 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2443
2444 /* Mark all the statements that we want to vectorize as pure SLP and
2445 relevant. */
2446 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2447 {
2448 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2449 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2450 }
2451
2452 /* Mark all the statements that we do not want to vectorize. */
2453 for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
2454 !gsi_end_p (gsi); gsi_next (&gsi))
2455 {
2456 stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
2457 if (STMT_SLP_TYPE (vinfo) != pure_slp)
2458 STMT_VINFO_VECTORIZABLE (vinfo) = false;
2459 }
2460
2461 /* Analyze dependences. At this point all stmts not participating in
2462 vectorization have to be marked. Dependence analysis assumes
2463 that we either vectorize all SLP instances or none at all. */
2464 if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2465 {
2466 if (dump_enabled_p ())
2467 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2468 "not vectorized: unhandled data dependence "
2469 "in basic block.\n");
2470
2471 destroy_bb_vec_info (bb_vinfo);
2472 return NULL;
2473 }
2474
2475 if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2476 {
2477 if (dump_enabled_p ())
2478 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2479 "not vectorized: unsupported alignment in basic "
2480 "block.\n");
2481 destroy_bb_vec_info (bb_vinfo);
2482 return NULL;
2483 }
2484
2485 if (!vect_slp_analyze_operations (bb_vinfo))
2486 {
2487 if (dump_enabled_p ())
2488 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2489 "not vectorized: bad operation in basic block.\n");
2490
2491 destroy_bb_vec_info (bb_vinfo);
2492 return NULL;
2493 }
2494
2495 /* Compute the costs of the SLP instances. */
2496 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2497 {
2498 gimple stmt = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2499 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
2500 vect_analyze_slp_cost (NULL, bb_vinfo,
2501 instance, TYPE_VECTOR_SUBPARTS (vectype));
2502 }
2503
2504 /* Cost model: check if the vectorization is worthwhile. */
2505 if (!unlimited_cost_model (NULL)
2506 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2507 {
2508 if (dump_enabled_p ())
2509 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2510 "not vectorized: vectorization is not "
2511 "profitable.\n");
2512
2513 destroy_bb_vec_info (bb_vinfo);
2514 return NULL;
2515 }
2516
2517 if (dump_enabled_p ())
2518 dump_printf_loc (MSG_NOTE, vect_location,
2519 "Basic block will be vectorized using SLP\n");
2520
2521 return bb_vinfo;
2522 }
2523
2524
2525 bb_vec_info
2526 vect_slp_analyze_bb (basic_block bb)
2527 {
2528 bb_vec_info bb_vinfo;
2529 int insns = 0;
2530 gimple_stmt_iterator gsi;
2531 unsigned int vector_sizes;
2532
2533 if (dump_enabled_p ())
2534 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2535
2536 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2537 {
2538 gimple stmt = gsi_stmt (gsi);
2539 if (!is_gimple_debug (stmt)
2540 && !gimple_nop_p (stmt)
2541 && gimple_code (stmt) != GIMPLE_LABEL)
2542 insns++;
2543 }
2544
2545 if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2546 {
2547 if (dump_enabled_p ())
2548 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2549 "not vectorized: too many instructions in "
2550 "basic block.\n");
2551
2552 return NULL;
2553 }
2554
2555 /* Autodetect the first vector size we try. */
2556 current_vector_size = 0;
2557 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2558
2559 while (1)
2560 {
2561 bb_vinfo = vect_slp_analyze_bb_1 (bb);
2562 if (bb_vinfo)
2563 return bb_vinfo;
2564
2565 destroy_bb_vec_info (bb_vinfo);
2566
2567 vector_sizes &= ~current_vector_size;
2568 if (vector_sizes == 0
2569 || current_vector_size == 0)
2570 return NULL;
2571
2572 /* Try the next biggest vector size. */
2573 current_vector_size = 1 << floor_log2 (vector_sizes);
2574 if (dump_enabled_p ())
2575 dump_printf_loc (MSG_NOTE, vect_location,
2576 "***** Re-trying analysis with "
2577 "vector size %d\n", current_vector_size);
2578 }
2579 }
2580
2581
2582 /* SLP costs are calculated according to the SLP instance unrolling factor
2583 (i.e., the number of created vector stmts depends on the unrolling factor).
2584 However, the actual number of vector stmts for every SLP node depends on
2585 VF, which is set later in vect_analyze_operations (). Hence, SLP costs
2586 should be updated. In this function we assume that the inside costs
2587 calculated in vect_model_xxx_cost are linear in ncopies. */
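/* E.g. (illustrative numbers): with VF == 8 and an instance unrolling
   factor of 2, ncopies below is 4, so a body cost entry recorded with
   count 3 is added to the target cost data with count 12. */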
2588
2589 void
2590 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
2591 {
2592 unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2593 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2594 slp_instance instance;
2595 stmt_vector_for_cost body_cost_vec;
2596 stmt_info_for_cost *si;
2597 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2598
2599 if (dump_enabled_p ())
2600 dump_printf_loc (MSG_NOTE, vect_location,
2601 "=== vect_update_slp_costs_according_to_vf ===\n");
2602
2603 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2604 {
2605 /* We assume that costs are linear in ncopies. */
2606 int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
2607
2608 /* Record the instance's instructions in the target cost model.
2609 This was delayed until here because the count of instructions
2610 isn't known beforehand. */
2611 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2612
2613 FOR_EACH_VEC_ELT (body_cost_vec, j, si)
2614 (void) add_stmt_cost (data, si->count * ncopies, si->kind,
2615 vinfo_for_stmt (si->stmt), si->misalign,
2616 vect_body);
2617 }
2618 }
2619
2620
2621 /* For constant and loop invariant defs of SLP_NODE this function returns
2622 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2623 OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2624 scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to create.
2625 REDUC_INDEX is the index of the reduction operand in the statements, unless
2626 it is -1. */
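/* As a sketch (made-up operands): for a group of two scalar stmts whose
   operand OP is the constant 3 resp. 5 and a 4-element vector type, this
   builds, e.g., the vector constant { 3, 5, 3, 5 } and pushes it to
   VEC_OPRNDS. */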
2627
2628 static void
2629 vect_get_constant_vectors (tree op, slp_tree slp_node,
2630 vec<tree> *vec_oprnds,
2631 unsigned int op_num, unsigned int number_of_vectors,
2632 int reduc_index)
2633 {
2634 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2635 gimple stmt = stmts[0];
2636 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2637 unsigned nunits;
2638 tree vec_cst;
2639 tree *elts;
2640 unsigned j, number_of_places_left_in_vector;
2641 tree vector_type;
2642 tree vop;
2643 int group_size = stmts.length ();
2644 unsigned int vec_num, i;
2645 unsigned number_of_copies = 1;
2646 vec<tree> voprnds;
2647 voprnds.create (number_of_vectors);
2648 bool constant_p, is_store;
2649 tree neutral_op = NULL;
2650 enum tree_code code = gimple_expr_code (stmt);
2651 gimple def_stmt;
2652 struct loop *loop;
2653 gimple_seq ctor_seq = NULL;
2654
2655 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2656 && reduc_index != -1)
2657 {
2658 op_num = reduc_index - 1;
2659 op = gimple_op (stmt, reduc_index);
2660 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2661 we need either neutral operands or the original operands. See
2662 get_initial_def_for_reduction() for details. */
2663 switch (code)
2664 {
2665 case WIDEN_SUM_EXPR:
2666 case DOT_PROD_EXPR:
2667 case PLUS_EXPR:
2668 case MINUS_EXPR:
2669 case BIT_IOR_EXPR:
2670 case BIT_XOR_EXPR:
2671 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2672 neutral_op = build_real (TREE_TYPE (op), dconst0);
2673 else
2674 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2675
2676 break;
2677
2678 case MULT_EXPR:
2679 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2680 neutral_op = build_real (TREE_TYPE (op), dconst1);
2681 else
2682 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2683
2684 break;
2685
2686 case BIT_AND_EXPR:
2687 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2688 break;
2689
2690 /* For MIN/MAX we don't have an easy neutral operand but
2691 the initial values can be used fine here. Only for
2692 a reduction chain do we have to force a neutral element. */
2693 case MAX_EXPR:
2694 case MIN_EXPR:
2695 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2696 neutral_op = NULL;
2697 else
2698 {
2699 def_stmt = SSA_NAME_DEF_STMT (op);
2700 loop = (gimple_bb (stmt))->loop_father;
2701 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2702 loop_preheader_edge (loop));
2703 }
2704 break;
2705
2706 default:
2707 neutral_op = NULL;
2708 }
2709 }
2710
2711 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2712 {
2713 is_store = true;
2714 op = gimple_assign_rhs1 (stmt);
2715 }
2716 else
2717 is_store = false;
2718
2719 gcc_assert (op);
2720
2721 if (CONSTANT_CLASS_P (op))
2722 constant_p = true;
2723 else
2724 constant_p = false;
2725
2726 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2727 gcc_assert (vector_type);
2728 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2729
2730 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2731 created vectors. It is greater than 1 if unrolling is performed.
2732
2733 For example, we have two scalar operands, s1 and s2 (e.g., group of
2734 strided accesses of size two), while NUNITS is four (i.e., four scalars
2735 of this type can be packed in a vector). The output vector will contain
2736 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2737 will be 2).
2738
2739 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2740 containing the operands.
2741
2742 For example, NUNITS is four as before, and the group size is 8
2743 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2744 {s5, s6, s7, s8}. */
2745
2746 number_of_copies = least_common_multiple (nunits, group_size) / group_size;
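/* E.g. (made-up sizes): nunits == 4 and group_size == 6 give
   least_common_multiple (4, 6) / 6 == 2 copies; the six scalar operands
   are used twice to fill 12 vector elements, i.e. three vectors. */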
2747
2748 number_of_places_left_in_vector = nunits;
2749 elts = XALLOCAVEC (tree, nunits);
2750 bool place_after_defs = false;
2751 for (j = 0; j < number_of_copies; j++)
2752 {
2753 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2754 {
2755 if (is_store)
2756 op = gimple_assign_rhs1 (stmt);
2757 else
2758 {
2759 switch (code)
2760 {
2761 case COND_EXPR:
2762 if (op_num == 0 || op_num == 1)
2763 {
2764 tree cond = gimple_assign_rhs1 (stmt);
2765 op = TREE_OPERAND (cond, op_num);
2766 }
2767 else
2768 {
2769 if (op_num == 2)
2770 op = gimple_assign_rhs2 (stmt);
2771 else
2772 op = gimple_assign_rhs3 (stmt);
2773 }
2774 break;
2775
2776 case CALL_EXPR:
2777 op = gimple_call_arg (stmt, op_num);
2778 break;
2779
2780 case LSHIFT_EXPR:
2781 case RSHIFT_EXPR:
2782 case LROTATE_EXPR:
2783 case RROTATE_EXPR:
2784 op = gimple_op (stmt, op_num + 1);
2785 /* Unlike the other binary operators, shifts/rotates take
2786 a shift count of type int instead of the same type as
2787 the lhs, so make sure the scalar is the right type if
2788 we are dealing with vectors of
2789 long long/long/short/char. */
2790 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2791 op = fold_convert (TREE_TYPE (vector_type), op);
2792 break;
2793
2794 default:
2795 op = gimple_op (stmt, op_num + 1);
2796 break;
2797 }
2798 }
2799
2800 if (reduc_index != -1)
2801 {
2802 loop = (gimple_bb (stmt))->loop_father;
2803 def_stmt = SSA_NAME_DEF_STMT (op);
2804
2805 gcc_assert (loop);
2806
2807 /* Get the def before the loop. In a reduction chain we have only
2808 one initial value. */
2809 if ((j != (number_of_copies - 1)
2810 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2811 && i != 0))
2812 && neutral_op)
2813 op = neutral_op;
2814 else
2815 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2816 loop_preheader_edge (loop));
2817 }
2818
2819 /* Create 'vect_ = {op0,op1,...,opn}'. */
2820 number_of_places_left_in_vector--;
2821 tree orig_op = op;
2822 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2823 {
2824 if (CONSTANT_CLASS_P (op))
2825 {
2826 op = fold_unary (VIEW_CONVERT_EXPR,
2827 TREE_TYPE (vector_type), op);
2828 gcc_assert (op && CONSTANT_CLASS_P (op));
2829 }
2830 else
2831 {
2832 tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
2833 gimple init_stmt;
2834 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
2835 init_stmt
2836 = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
2837 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2838 op = new_temp;
2839 }
2840 }
2841 elts[number_of_places_left_in_vector] = op;
2842 if (!CONSTANT_CLASS_P (op))
2843 constant_p = false;
2844 if (TREE_CODE (orig_op) == SSA_NAME
2845 && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
2846 && STMT_VINFO_BB_VINFO (stmt_vinfo)
2847 && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
2848 == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
2849 place_after_defs = true;
2850
2851 if (number_of_places_left_in_vector == 0)
2852 {
2853 number_of_places_left_in_vector = nunits;
2854
2855 if (constant_p)
2856 vec_cst = build_vector (vector_type, elts);
2857 else
2858 {
2859 vec<constructor_elt, va_gc> *v;
2860 unsigned k;
2861 vec_alloc (v, nunits);
2862 for (k = 0; k < nunits; ++k)
2863 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2864 vec_cst = build_constructor (vector_type, v);
2865 }
2866 tree init;
2867 gimple_stmt_iterator gsi;
2868 if (place_after_defs)
2869 {
2870 gsi = gsi_for_stmt
2871 (vect_find_last_scalar_stmt_in_slp (slp_node));
2872 init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
2873 }
2874 else
2875 init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
2876 if (ctor_seq != NULL)
2877 {
2878 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
2879 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2880 GSI_SAME_STMT);
2881 ctor_seq = NULL;
2882 }
2883 voprnds.quick_push (init);
2884 place_after_defs = false;
2885 }
2886 }
2887 }
2888
2889 /* Since the vectors are created in reverse order, we should reverse
2890 them. */
2891 vec_num = voprnds.length ();
2892 for (j = vec_num; j != 0; j--)
2893 {
2894 vop = voprnds[j - 1];
2895 vec_oprnds->quick_push (vop);
2896 }
2897
2898 voprnds.release ();
2899
2900 /* If VF is greater than the unrolling factor needed for the SLP group of
2901 stmts, the NUMBER_OF_VECTORS to be created is greater than
2902 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
2903 to replicate the vectors. */
2904 while (number_of_vectors > vec_oprnds->length ())
2905 {
2906 tree neutral_vec = NULL;
2907
2908 if (neutral_op)
2909 {
2910 if (!neutral_vec)
2911 neutral_vec = build_vector_from_val (vector_type, neutral_op);
2912
2913 vec_oprnds->quick_push (neutral_vec);
2914 }
2915 else
2916 {
2917 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2918 vec_oprnds->quick_push (vop);
2919 }
2920 }
2921 }
2922
2923
2924 /* Get vectorized definitions from SLP_NODE, which contains the
2925 corresponding vectorized def-stmts. */
2926
2927 static void
2928 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2929 {
2930 tree vec_oprnd;
2931 gimple vec_def_stmt;
2932 unsigned int i;
2933
2934 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2935
2936 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2937 {
2938 gcc_assert (vec_def_stmt);
2939 vec_oprnd = gimple_get_lhs (vec_def_stmt);
2940 vec_oprnds->quick_push (vec_oprnd);
2941 }
2942 }
2943
2944
2945 /* Get vectorized definitions for SLP_NODE.
2946 If the scalar definitions are loop invariants or constants, collect them and
2947 call vect_get_constant_vectors() to create vector stmts.
2948 Otherwise, the def-stmts must already be vectorized and the vectorized stmts
2949 must be stored in the corresponding child of SLP_NODE, and we call
2950 vect_get_slp_vect_defs () to retrieve them. */
2951
2952 void
2953 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2954 vec<vec<tree> > *vec_oprnds, int reduc_index)
2955 {
2956 gimple first_stmt;
2957 int number_of_vects = 0, i;
2958 unsigned int child_index = 0;
2959 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2960 slp_tree child = NULL;
2961 vec<tree> vec_defs;
2962 tree oprnd;
2963 bool vectorized_defs;
2964
2965 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
2966 FOR_EACH_VEC_ELT (ops, i, oprnd)
2967 {
2968 /* For each operand we check if it has vectorized definitions in a child
2969 node or we need to create them (for invariants and constants). We
2970 check if the LHS of the first stmt of the next child matches OPRND.
2971 If it does, we found the correct child. Otherwise, we call
2972 vect_get_constant_vectors () and do not advance CHILD_INDEX, in order
2973 to check this child node for the next operand. */
2974 vectorized_defs = false;
2975 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
2976 {
2977 child = SLP_TREE_CHILDREN (slp_node)[child_index];
2978
2979 /* We have to check both pattern and original def, if available. */
2980 if (child)
2981 {
2982 gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
2983 gimple related
2984 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
2985
2986 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
2987 || (related
2988 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
2989 {
2990 /* The number of vector defs is determined by the number of
2991 vector statements in the node from which we get those
2992 statements. */
2993 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
2994 vectorized_defs = true;
2995 child_index++;
2996 }
2997 }
2998 else
2999 child_index++;
3000 }
3001
3002 if (!vectorized_defs)
3003 {
3004 if (i == 0)
3005 {
3006 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3007 /* The number of vector stmts was calculated according to LHS in
3008 vect_schedule_slp_instance (); fix it by replacing LHS with
3009 RHS, if necessary. See vect_get_smallest_scalar_type () for
3010 details. */
3011 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
3012 &rhs_size_unit);
3013 if (rhs_size_unit != lhs_size_unit)
3014 {
3015 number_of_vects *= rhs_size_unit;
3016 number_of_vects /= lhs_size_unit;
3017 }
3018 }
3019 }
3020
3021 /* Allocate memory for vectorized defs. */
3022 vec_defs = vNULL;
3023 vec_defs.create (number_of_vects);
3024
3025 /* For reduction defs we call vect_get_constant_vectors (), since we are
3026 looking for initial loop invariant values. */
3027 if (vectorized_defs && reduc_index == -1)
3028 /* The defs are already vectorized. */
3029 vect_get_slp_vect_defs (child, &vec_defs);
3030 else
3031 /* Build vectors from scalar defs. */
3032 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
3033 number_of_vects, reduc_index);
3034
3035 vec_oprnds->quick_push (vec_defs);
3036
3037 /* For reductions, we only need initial values. */
3038 if (reduc_index != -1)
3039 return;
3040 }
3041 }
3042
3043
3044 /* Create NCOPIES permutation statements using the mask vector MASK and two
3045 input vectors placed in DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX
3046 for the first copy, shifting by STRIDE elements of DR_CHAIN for every
3047 subsequent copy.
3048 (STRIDE is the number of vectorized stmts for NODE divided by the number
3049 of copies.)
3050 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE at
3051 which the created stmts must be inserted. */
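/* Illustrative numbers: with SLP_TREE_NUMBER_OF_VEC_STMTS == 6 and
   NCOPIES == 2, STRIDE is 3; copy 0 permutes DR_CHAIN[first_vec_indx]
   and DR_CHAIN[second_vec_indx], copy 1 permutes the elements 3 slots
   further on, and the results are stored at slots VECT_STMTS_COUNTER
   and VECT_STMTS_COUNTER + 3 of NODE. */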
3052
3053 static inline void
3054 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
3055 tree mask, int first_vec_indx, int second_vec_indx,
3056 gimple_stmt_iterator *gsi, slp_tree node,
3057 tree vectype, vec<tree> dr_chain,
3058 int ncopies, int vect_stmts_counter)
3059 {
3060 tree perm_dest;
3061 gimple perm_stmt = NULL;
3062 stmt_vec_info next_stmt_info;
3063 int i, stride;
3064 tree first_vec, second_vec, data_ref;
3065
3066 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
3067
3068 /* Initialize the vect stmts of NODE to properly insert the generated
3069 stmts later. */
3070 for (i = SLP_TREE_VEC_STMTS (node).length ();
3071 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
3072 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
3073
3074 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3075 for (i = 0; i < ncopies; i++)
3076 {
3077 first_vec = dr_chain[first_vec_indx];
3078 second_vec = dr_chain[second_vec_indx];
3079
3080 /* Generate the permute statement. */
3081 perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
3082 first_vec, second_vec, mask);
3083 data_ref = make_ssa_name (perm_dest, perm_stmt);
3084 gimple_set_lhs (perm_stmt, data_ref);
3085 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3086
3087 /* Store the vector statement in NODE. */
3088 SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
3089
3090 first_vec_indx += stride;
3091 second_vec_indx += stride;
3092 }
3093
3094 /* Mark the scalar stmt as vectorized. */
3095 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
3096 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
3097 }
3098
3099
3100 /* Given FIRST_MASK_ELEMENT - the mask element in element representation -
3101 return in CURRENT_MASK_ELEMENT its equivalent in the target-specific
3102 representation. Check that the mask is valid and return FALSE if not.
3103 Set NEED_NEXT_VECTOR to TRUE if the permutation requires moving to the
3104 next vector, i.e., the current first vector is not needed. */
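/* A small worked case (made-up values): with MASK_NUNITS == 4,
   FIRST_MASK_ELEMENT == 6, M == 0 and *NUMBER_OF_MASK_FIXES == 1,
   *CURRENT_MASK_ELEMENT stays 6, which is fine for a two-vector permute
   (6 < 2 * 4) but fails when ONLY_ONE_VEC, since 6 >= MASK_NUNITS. */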
3105
3106 static bool
3107 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
3108 int mask_nunits, bool only_one_vec, int index,
3109 unsigned char *mask, int *current_mask_element,
3110 bool *need_next_vector, int *number_of_mask_fixes,
3111 bool *mask_fixed, bool *needs_first_vector)
3112 {
3113 int i;
3114
3115 /* Convert to target specific representation. */
3116 *current_mask_element = first_mask_element + m;
3117 /* Adjust the value in case it's a mask for the second and third vectors. */
3118 *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
3119
3120 if (*current_mask_element < 0)
3121 {
3122 if (dump_enabled_p ())
3123 {
3124 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3125 "permutation requires past vector ");
3126 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3127 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3128 }
3129 return false;
3130 }
3131
3132 if (*current_mask_element < mask_nunits)
3133 *needs_first_vector = true;
3134
3135 /* We have only one input vector to permute but the mask accesses values in
3136 the next vector as well. */
3137 if (only_one_vec && *current_mask_element >= mask_nunits)
3138 {
3139 if (dump_enabled_p ())
3140 {
3141 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3142 "permutation requires at least two vectors ");
3143 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3144 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3145 }
3146
3147 return false;
3148 }
3149
3150 /* The mask requires the next vector. */
3151 while (*current_mask_element >= mask_nunits * 2)
3152 {
3153 if (*needs_first_vector || *mask_fixed)
3154 {
3155 /* We either need the first vector too or have already moved to the
3156 next vector. In both cases, this permutation needs three
3157 vectors. */
3158 if (dump_enabled_p ())
3159 {
3160 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3161 "permutation requires at "
3162 "least three vectors ");
3163 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3164 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3165 }
3166
3167 return false;
3168 }
3169
3170 /* We move to the next vector, dropping the first one and working with
3171 the second and the third - we need to adjust the values of the mask
3172 accordingly. */
3173 *current_mask_element -= mask_nunits * *number_of_mask_fixes;
3174
3175 for (i = 0; i < index; i++)
3176 mask[i] -= mask_nunits * *number_of_mask_fixes;
3177
3178 (*number_of_mask_fixes)++;
3179 *mask_fixed = true;
3180 }
3181
3182 *need_next_vector = *mask_fixed;
3183
3184 /* This was the last element of this mask. Start a new one. */
3185 if (index == mask_nunits - 1)
3186 {
3187 *number_of_mask_fixes = 1;
3188 *mask_fixed = false;
3189 *needs_first_vector = false;
3190 }
3191
3192 return true;
3193 }
3194
3195
3196 /* Generate vector permute statements from a list of loads in DR_CHAIN.
3197 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
3198 permute statements for the SLP node NODE of the SLP instance
3199 SLP_NODE_INSTANCE. */
3200
3201 bool
3202 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
3203 gimple_stmt_iterator *gsi, int vf,
3204 slp_instance slp_node_instance, bool analyze_only)
3205 {
3206 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3207 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3208 tree mask_element_type = NULL_TREE, mask_type;
3209 int i, j, k, nunits, vec_index = 0, scalar_index;
3210 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3211 gimple next_scalar_stmt;
3212 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
3213 int first_mask_element;
3214 int index, unroll_factor, current_mask_element, ncopies;
3215 unsigned char *mask;
3216 bool only_one_vec = false, need_next_vector = false;
3217 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
3218 int number_of_mask_fixes = 1;
3219 bool mask_fixed = false;
3220 bool needs_first_vector = false;
3221 machine_mode mode;
3222
3223 mode = TYPE_MODE (vectype);
3224
3225 if (!can_vec_perm_p (mode, false, NULL))
3226 {
3227 if (dump_enabled_p ())
3228 {
3229 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3230 "no vect permute for ");
3231 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3232 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3233 }
3234 return false;
3235 }
3236
3237 /* The generic VEC_PERM_EXPR code always uses an integral type of the
3238 same size as the vector element being permuted. */
3239 mask_element_type = lang_hooks.types.type_for_mode
3240 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
3241 mask_type = get_vectype_for_scalar_type (mask_element_type);
3242 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3243 mask = XALLOCAVEC (unsigned char, nunits);
3244 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3245
3246 /* The number of vector stmts to generate based only on the
3247 SLP_NODE_INSTANCE unrolling factor. */
3248 orig_vec_stmts_num = group_size *
3249 SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
3250 if (orig_vec_stmts_num == 1)
3251 only_one_vec = true;
3252
3253 /* The number of copies is determined by the final vectorization factor
3254 relative to the SLP_NODE_INSTANCE unrolling factor. */
3255 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3256
3257 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3258 return false;
3259
3260 /* Generate permutation masks for every NODE. The number of masks for each
3261 NODE is equal to GROUP_SIZE.
3262 E.g., we have a group of three nodes with three loads from the same
3263 location in each node, and the vector size is 4. I.e., we have an
3264 a0b0c0a1b1c1... sequence and we need to create the following vectors:
3265 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3266 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3267 ...
3268
3269 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3270 The last mask is illegal since we assume two operands for the permute
3271 operation, and the mask element values can't be outside that range.
3272 Hence, the last mask must be converted into {2,5,5,5}.
3273 For the first two permutations we need the first and the second input
3274 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3275 we need the second and the third vectors: {b1,c1,a2,b2} and
3276 {c2,a3,b3,c3}. */
3277
3278 {
3279 scalar_index = 0;
3280 index = 0;
3281 vect_stmts_counter = 0;
3282 vec_index = 0;
3283 first_vec_index = vec_index++;
3284 if (only_one_vec)
3285 second_vec_index = first_vec_index;
3286 else
3287 second_vec_index = vec_index++;
3288
3289 for (j = 0; j < unroll_factor; j++)
3290 {
3291 for (k = 0; k < group_size; k++)
3292 {
3293 i = SLP_TREE_LOAD_PERMUTATION (node)[k];
3294 first_mask_element = i + j * group_size;
3295 if (!vect_get_mask_element (stmt, first_mask_element, 0,
3296 nunits, only_one_vec, index,
3297 mask, &current_mask_element,
3298 &need_next_vector,
3299 &number_of_mask_fixes, &mask_fixed,
3300 &needs_first_vector))
3301 return false;
3302 gcc_assert (current_mask_element >= 0
3303 && current_mask_element < 2 * nunits);
3304 mask[index++] = current_mask_element;
3305
3306 if (index == nunits)
3307 {
3308 index = 0;
3309 if (!can_vec_perm_p (mode, false, mask))
3310 {
3311 if (dump_enabled_p ())
3312 {
3313 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3314 vect_location,
3315 "unsupported vect permute { ");
3316 for (i = 0; i < nunits; ++i)
3317 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
3318 mask[i]);
3319 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3320 }
3321 return false;
3322 }
3323
3324 if (!analyze_only)
3325 {
3326 int l;
3327 tree mask_vec, *mask_elts;
3328 mask_elts = XALLOCAVEC (tree, nunits);
3329 for (l = 0; l < nunits; ++l)
3330 mask_elts[l] = build_int_cst (mask_element_type,
3331 mask[l]);
3332 mask_vec = build_vector (mask_type, mask_elts);
3333
3334 if (need_next_vector)
3335 {
3336 first_vec_index = second_vec_index;
3337 second_vec_index = vec_index;
3338 }
3339
3340 next_scalar_stmt
3341 = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
3342
3343 vect_create_mask_and_perm (stmt, next_scalar_stmt,
3344 mask_vec, first_vec_index, second_vec_index,
3345 gsi, node, vectype, dr_chain,
3346 ncopies, vect_stmts_counter++);
3347 }
3348 }
3349 }
3350 }
3351 }
3352
3353 return true;
3354 }
3355
3356
3357
3358 /* Vectorize SLP instance tree in postorder. */
3359
3360 static bool
3361 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3362 unsigned int vectorization_factor)
3363 {
3364 gimple stmt;
3365 bool grouped_store, is_store;
3366 gimple_stmt_iterator si;
3367 stmt_vec_info stmt_info;
3368 unsigned int vec_stmts_size, nunits, group_size;
3369 tree vectype;
3370 int i;
3371 slp_tree child;
3372
3373 if (!node)
3374 return false;
3375
3376 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3377 vect_schedule_slp_instance (child, instance, vectorization_factor);
3378
3379 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3380 stmt_info = vinfo_for_stmt (stmt);
3381
3382 /* VECTYPE is the type of the destination. */
3383 vectype = STMT_VINFO_VECTYPE (stmt_info);
3384 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3385 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3386
3387 /* For each SLP instance calculate the number of vector stmts to be created
3388 for the scalar stmts in each node of the SLP tree. The number of vector
3389 elements in one vector iteration is the number of scalar elements in
3390 one scalar iteration (GROUP_SIZE) multiplied by VF divided by the vector
3391 size. */
3392 vec_stmts_size = (vectorization_factor * group_size) / nunits;
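/* E.g. (illustrative sizes): VF == 4, GROUP_SIZE == 2 and NUNITS == 4
   give vec_stmts_size == (4 * 2) / 4 == 2 vector stmts for this node. */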
3393
3394 if (!SLP_TREE_VEC_STMTS (node).exists ())
3395 {
3396 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3397 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3398 }
3399
3400 if (dump_enabled_p ())
3401 {
3402 dump_printf_loc (MSG_NOTE,vect_location,
3403 "------>vectorizing SLP node starting from: ");
3404 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3405 dump_printf (MSG_NOTE, "\n");
3406 }
3407
3408 /* Vectorized stmts go before the last scalar stmt which is where
3409 all uses are ready. */
3410 si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));
3411
3412 /* Mark the first element of the reduction chain as a reduction to properly
3413 transform the node. In the analysis phase only the last element of the
3414 chain is marked as a reduction. */
3415 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3416 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3417 {
3418 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3419 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3420 }
3421
3422 /* Handle two-operation SLP nodes by vectorizing the group with
3423 both operations and then performing a merge. */
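/* For a made-up group { x0 + y0, x1 - y1 } with code0 == PLUS_EXPR the
   node is vectorized once as all-plus (v0) and once as all-minus (v1),
   and a VEC_PERM_EXPR with mask { 0, nunits + 1, ... } then selects
   lane 0 from v0 and lane 1 from v1. */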
3424 if (SLP_TREE_TWO_OPERATORS (node))
3425 {
3426 enum tree_code code0 = gimple_assign_rhs_code (stmt);
3427 enum tree_code ocode;
3428 gimple ostmt;
3429 unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
3430 bool allsame = true;
3431 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
3432 if (gimple_assign_rhs_code (ostmt) != code0)
3433 {
3434 mask[i] = 1;
3435 allsame = false;
3436 ocode = gimple_assign_rhs_code (ostmt);
3437 }
3438 else
3439 mask[i] = 0;
3440 if (!allsame)
3441 {
3442 vec<gimple> v0;
3443 vec<gimple> v1;
3444 unsigned j;
3445 tree tmask = NULL_TREE;
3446 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3447 v0 = SLP_TREE_VEC_STMTS (node).copy ();
3448 SLP_TREE_VEC_STMTS (node).truncate (0);
3449 gimple_assign_set_rhs_code (stmt, ocode);
3450 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3451 gimple_assign_set_rhs_code (stmt, code0);
3452 v1 = SLP_TREE_VEC_STMTS (node).copy ();
3453 SLP_TREE_VEC_STMTS (node).truncate (0);
3454 tree meltype = build_nonstandard_integer_type
3455 (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
3456 tree mvectype = get_same_sized_vectype (meltype, vectype);
3457 unsigned k = 0, l;
3458 for (j = 0; j < v0.length (); ++j)
3459 {
3460 tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
3461 for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
3462 {
3463 if (k >= group_size)
3464 k = 0;
3465 melts[l] = build_int_cst
3466 (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
3467 }
3468 tmask = build_vector (mvectype, melts);
3469
3470 /* ??? Not all targets support a VEC_PERM_EXPR with a
3471 constant mask that would translate to a vec_merge RTX
3472 (with their vec_perm_const_ok). We can either not
3473 vectorize in that case or let veclower do its job.
3474 Unfortunately that isn't too great and at least for
3475 plus/minus we'd eventually like to match targets'
3476 vector addsub instructions. */
3477 gimple vstmt;
3478 vstmt = gimple_build_assign (make_ssa_name (vectype),
3479 VEC_PERM_EXPR,
3480 gimple_assign_lhs (v0[j]),
3481 gimple_assign_lhs (v1[j]), tmask);
3482 vect_finish_stmt_generation (stmt, vstmt, &si);
3483 SLP_TREE_VEC_STMTS (node).quick_push (vstmt);
3484 }
3485 v0.release ();
3486 v1.release ();
3487 return false;
3488 }
3489 }
3490 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3491 return is_store;
3492 }
3493
3494 /* Replace scalar calls from SLP node NODE with setting of their lhs to zero.
3495 For loop vectorization this is done in vectorizable_call, but for SLP
3496 it needs to be deferred until the end of vect_schedule_slp, because
3497 multiple SLP instances may refer to the same scalar stmt. */
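/* I.e. (a sketch) a vectorized scalar call such as
     x_1 = sqrtf (a_2);
   is replaced by the trivially dead
     x_1 = 0.0;
   which later DCE can remove once no scalar uses remain. */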
3498
3499 static void
3500 vect_remove_slp_scalar_calls (slp_tree node)
3501 {
3502 gimple stmt, new_stmt;
3503 gimple_stmt_iterator gsi;
3504 int i;
3505 slp_tree child;
3506 tree lhs;
3507 stmt_vec_info stmt_info;
3508
3509 if (!node)
3510 return;
3511
3512 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3513 vect_remove_slp_scalar_calls (child);
3514
3515 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3516 {
3517 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3518 continue;
3519 stmt_info = vinfo_for_stmt (stmt);
3520 if (stmt_info == NULL
3521 || is_pattern_stmt_p (stmt_info)
3522 || !PURE_SLP_STMT (stmt_info))
3523 continue;
3524 lhs = gimple_call_lhs (stmt);
3525 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3526 set_vinfo_for_stmt (new_stmt, stmt_info);
3527 set_vinfo_for_stmt (stmt, NULL);
3528 STMT_VINFO_STMT (stmt_info) = new_stmt;
3529 gsi = gsi_for_stmt (stmt);
3530 gsi_replace (&gsi, new_stmt, false);
3531 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3532 }
3533 }
3534
3535 /* Generate vector code for all SLP instances in the loop/basic block. */
3536
3537 bool
3538 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3539 {
3540 vec<slp_instance> slp_instances;
3541 slp_instance instance;
3542 unsigned int i, vf;
3543 bool is_store = false;
3544
3545 if (loop_vinfo)
3546 {
3547 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3548 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3549 }
3550 else
3551 {
3552 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3553 vf = 1;
3554 }
3555
3556 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3557 {
3558 /* Schedule the tree of INSTANCE. */
3559 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3560 instance, vf);
3561 if (dump_enabled_p ())
3562 dump_printf_loc (MSG_NOTE, vect_location,
3563 "vectorizing stmts using SLP.\n");
3564 }
3565
3566 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3567 {
3568 slp_tree root = SLP_INSTANCE_TREE (instance);
3569 gimple store;
3570 unsigned int j;
3571 gimple_stmt_iterator gsi;
3572
3573 /* Remove scalar call stmts. Do not do this for basic-block
3574 vectorization as not all uses may be vectorized.
3575 ??? Why should this be necessary? DCE should be able to
3576 remove the stmts itself.
3577 ??? For BB vectorization we can as well remove scalar
3578 stmts starting from the SLP tree root if they have no
3579 uses. */
3580 if (loop_vinfo)
3581 vect_remove_slp_scalar_calls (root);
3582
3583 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3584 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3585 {
3586 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3587 break;
3588
3589 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3590 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3591 /* Free the attached stmt_vec_info and remove the stmt. */
3592 gsi = gsi_for_stmt (store);
3593 unlink_stmt_vdef (store);
3594 gsi_remove (&gsi, true);
3595 release_defs (store);
3596 free_stmt_vec_info (store);
3597 }
3598 }
3599
3600 return is_store;
3601 }
3602
3603
3604 /* Vectorize the basic block. */
3605
3606 void
3607 vect_slp_transform_bb (basic_block bb)
3608 {
3609 bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3610 gimple_stmt_iterator si;
3611
3612 gcc_assert (bb_vinfo);
3613
3614 if (dump_enabled_p ())
3615 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3616
3617 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3618 {
3619 gimple stmt = gsi_stmt (si);
3620 stmt_vec_info stmt_info;
3621
3622 if (dump_enabled_p ())
3623 {
3624 dump_printf_loc (MSG_NOTE, vect_location,
3625 "------>SLPing statement: ");
3626 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3627 dump_printf (MSG_NOTE, "\n");
3628 }
3629
3630 stmt_info = vinfo_for_stmt (stmt);
3631 gcc_assert (stmt_info);
3632
3633 /* Schedule all the SLP instances when the first SLP stmt is reached. */
3634 if (STMT_SLP_TYPE (stmt_info))
3635 {
3636 vect_schedule_slp (NULL, bb_vinfo);
3637 break;
3638 }
3639 }
3640
3641 if (dump_enabled_p ())
3642 dump_printf_loc (MSG_NOTE, vect_location,
3643 "BASIC BLOCK VECTORIZED\n");
3644
3645 destroy_bb_vec_info (bb_vinfo);
3646 }