/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "target.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "hashtab.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "insn-codes.h"
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"

/* Extract the location of the basic block in the source code.
   Return the basic block location if one is found, and UNKNOWN_LOCATION
   otherwise.  */

source_location
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOCATION;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOCATION)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOCATION;
}


/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}


/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}


/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
  SLP_TREE_TWO_OPERATORS (node) = false;

  return node;
}
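
/* Illustration of the NOPS computation above (an added example, not from
   the original source): for a COND_EXPR assignment such as
   x = a < b ? c : d, gimple_num_ops is 4 (the lhs plus three rhs operands),
   so nops starts at 3 and is bumped to 4 because the embedded comparison
   a < b contributes two operand slots of its own; the child slots then
   correspond to {a, b, c, d}, matching how vect_get_and_check_slp_defs
   walks them below.  */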


/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}


/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}


/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
      if (next_stmt)
        result += GROUP_GAP (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
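
/* Worked example (an added illustration, assuming the usual GROUP_GAP
   convention where each group member records its distance in scalar
   elements from the previous member, 1 for consecutive accesses): for a
   chain loading a[0], a[1] and a[3], the second and third members carry
   GROUP_GAP 1 and 2, so the accumulated places returned above are 1 and 3
   respectively.  */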


/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  Return -1 on a fatal error, 1 if
   the error could be corrected by swapping the operands of the operation,
   and 0 if everything is OK.  */

static int
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             gimple stmt, bool first,
                             vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      first_op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        {
          first_op_cond = true;
          commutative = true;
          number_of_oprnds++;
        }
      else
        commutative = commutative_tree_code (code);
    }
  else
    return -1;

  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
    {
again:
      if (first_op_cond)
        {
          if (i == 0 || i == 1)
            oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
                                  swapped ? !i : i);
          else
            oprnd = gimple_op (stmt, first_op_idx + i - 1);
        }
      else
        oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt)
          || (!def_stmt && dt != vect_constant_def))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't find def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
              || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
                  && gimple_code (def_stmt) != GIMPLE_PHI))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern)
            {
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              return 1;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.\n");
              return -1;
            }

          switch (gimple_code (def_stmt))
            {
            case GIMPLE_PHI:
              def = gimple_phi_result (def_stmt);
              break;

            case GIMPLE_ASSIGN:
              def = gimple_assign_lhs (def_stmt);
              break;

            default:
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported defining stmt:\n");
              return -1;
            }
        }

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          oprnd_info->first_op_type = TREE_TYPE (oprnd);
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def)
                && !((oprnd_info->first_dt == vect_external_def
                      || oprnd_info->first_dt == vect_constant_def)
                     && (dt == vect_external_def
                         || dt == vect_constant_def)))
               || !types_compatible_p (oprnd_info->first_op_type,
                                       TREE_TYPE (oprnd))))
            {
              /* Try swapping operands if we got a mismatch.  */
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Build SLP failed: different types\n");

              return 1;
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          oprnd_info->def_stmts.quick_push (def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }
    }

  /* Swap operands.  */
  if (swapped)
    {
      if (first_op_cond)
        {
          tree cond = gimple_assign_rhs1 (stmt);
          swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
                             &TREE_OPERAND (cond, 1));
          TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
        }
      else
        swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                           gimple_assign_rhs2_ptr (stmt));
    }

  return 0;
}
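
/* Illustration of the operand-swap retry above (an added example, not from
   the original source): given the group
     x0 = a0 + c;   x1 = c + a1;
   the first stmt records operand 0 as an internal def (a0) and operand 1 as
   external (c).  For the second stmt operand 0 (c) mismatches, so the loop
   retries with SWAPPED set and, on success, physically swaps rhs1 and rhs2
   of the second stmt at the end so later code can rely on a canonical
   operand order.  */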


/* Verify that the scalar stmts STMTS are isomorphic, do not require data
   permutation, and are of supported types of operation.  Return true if
   they are; otherwise return false and indicate in *MATCHES which stmts
   are not isomorphic to the first one.  If MATCHES[0] is false then this
   indicates that the comparison could not be carried out or that the stmts
   will never be vectorized by SLP.  */

static bool
vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       vec<gimple> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       unsigned int vectorization_factor, bool *matches,
                       bool *two_operators)
{
  unsigned int i;
  gimple first_stmt = stmts[0], stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK;
  enum tree_code alt_stmt_code = ERROR_MARK;
  enum tree_code rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  tree cond;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == COND_EXPR
          && (cond = gimple_assign_rhs1 (stmt))
          && !COMPARISON_CLASS_P (cond))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: condition is not "
                               "comparison ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* If populating the vector type requires unrolling then fail
         before adjusting *max_nunits for basic-block vectorization.  */
      if (bb_vinfo
          && TYPE_VECTOR_SUBPARTS (vectype) > group_size)
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unrolling required "
                           "in basic block SLP\n");
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        {
          *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (bb_vinfo)
            vectorization_factor = *max_nunits;
        }

      if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (call_stmt)
              || gimple_call_tail_p (call_stmt)
              || gimple_call_noreturn_p (call_stmt)
              || !gimple_call_nothrow_p (call_stmt)
              || gimple_call_chain (call_stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    call_stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: no optab.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && alt_stmt_code == ERROR_MARK)
            alt_stmt_code = rhs_code;
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              /* Handle mismatches in plus/minus by computing both
                 and merging the results.  */
              && !((first_stmt_code == PLUS_EXPR
                    || first_stmt_code == MINUS_EXPR)
                   && (alt_stmt_code == PLUS_EXPR
                       || alt_stmt_code == MINUS_EXPR)
                   && rhs_code == alt_stmt_code)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == BIT_FIELD_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "original stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    first_stmt, 0);
                }
              /* Mismatch.  */
              continue;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              ;
            }
          else
            {
              /* Load.  */
              unsigned unrolling_factor
                = least_common_multiple
                    (*max_nunits, group_size) / group_size;
              /* FORNOW: Check that there is no gap between the loads
                 and no gap between the groups when we need to load
                 multiple groups at once.
                 ??? We should enhance this to only disallow gaps
                 inside vectors.  */
              if ((unrolling_factor > 1
                   && ((GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                        && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                       /* If the group is split up then GROUP_GAP
                          isn't correct here, nor is GROUP_FIRST_ELEMENT.  */
                       || GROUP_SIZE (vinfo_for_stmt (stmt)) > group_size))
                  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
                      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: grouped "
                                       "loads have gaps ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              /* Check that the size of interleaved loads group is not
                 greater than the SLP group size.  */
              unsigned ncopies
                = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
              if (loop_vinfo
                  && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                  && ((GROUP_SIZE (vinfo_for_stmt (stmt))
                       - GROUP_GAP (vinfo_for_stmt (stmt)))
                      > ncopies * group_size))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: the number "
                                       "of interleaved loads is greater than "
                                       "the SLP group size ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              old_first_load = first_load;
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  */
                  if (prev_first_load != first_load)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Mismatch.  */
                      continue;
                    }
                }
              else
                prev_first_load = first_load;

              /* In some cases a group of loads is just the same load
                 repeated N times.  Only analyze its cost once.  */
              if (first_load == stmt && old_first_load != first_load)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: unsupported "
                                           "unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                }
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              /* FORNOW: Not grouped loads are not supported.  */
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && rhs_code != COND_EXPR
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;

  /* If we allowed a two-operation SLP node verify the target can cope
     with the permute we are going to use.  */
  if (alt_stmt_code != ERROR_MARK
      && TREE_CODE_CLASS (alt_stmt_code) != tcc_reference)
    {
      unsigned char *sel
        = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype));
      for (i = 0; i < TYPE_VECTOR_SUBPARTS (vectype); ++i)
        {
          sel[i] = i;
          if (gimple_assign_rhs_code (stmts[i % group_size]) == alt_stmt_code)
            sel[i] += TYPE_VECTOR_SUBPARTS (vectype);
        }
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          for (i = 0; i < group_size; ++i)
            if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
              {
                matches[i] = false;
                if (dump_enabled_p ())
                  {
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Build SLP failed: different operation "
                                     "in stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      stmts[i], 0);
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "original stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      first_stmt, 0);
                  }
              }
          return false;
        }
      *two_operators = true;
    }

  return true;
}
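
/* Illustration of the two-operator blend above (an added example, not from
   the original source): for a group { a+b, a-b, c+d, c-d } with a
   four-element vector type, both a vector addition and a vector subtraction
   are emitted and the select vector built above is { 0, 5, 2, 7 }, i.e.
   even lanes come from the add result and odd lanes from the subtract
   result; the node is only accepted when can_vec_perm_p reports the target
   supports that mask.  */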

/* Recursively build an SLP tree starting from NODE.
   Return FALSE if the def-stmts are not isomorphic, require data
   permutation, or are of unsupported types of operation; otherwise
   return TRUE.  MATCHES records which stmts, if any, failed to match
   the first one.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor,
                     bool *matches, unsigned *npermutes, unsigned *tree_size,
                     unsigned max_tree_size)
{
  unsigned nops, i, this_tree_size = 0;
  gimple stmt;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  bool two_operators = false;
  if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
                              SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
                              max_nunits, vectorization_factor, matches,
                              &two_operators))
    return false;
  SLP_TREE_TWO_OPERATORS (*node) = two_operators;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      switch (vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
                                           stmt, (i == 0), &oprnds_info))
        {
        case 0:
          break;
        case -1:
          matches[0] = false;
          vect_free_oprnd_info (oprnds_info);
          return false;
        case 1:
          matches[i] = false;
          break;
        }
    }
  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      {
        vect_free_oprnd_info (oprnds_info);
        return false;
      }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      if (++this_tree_size > max_tree_size)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                               group_size, max_nunits, loads,
                               vectorization_factor, matches,
                               npermutes, &this_tree_size, max_tree_size))
        {
          oprnd_info->def_stmts = vNULL;
          SLP_TREE_CHILDREN (*node).quick_push (child);
          continue;
        }

      /* If the SLP build failed fatally and we analyze a basic-block
         simply treat nodes we fail to build as externally defined
         (and thus build vectors from the scalar defs).
         The cost model will reject outright expensive cases.
         ??? This doesn't treat cases where permutation ultimately
         fails (or we don't try permutation below).  Ideally we'd
         even compute a permutation that will end up with the maximum
         SLP tree size...  */
      if (bb_vinfo
          && !matches[0]
          /* ??? Rejecting patterns this way doesn't work.  We'd have to
             do extra work to cancel the pattern so the uses see the
             scalar version.  */
          && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))
        {
          unsigned int j;
          slp_tree grandchild;

          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

          dump_printf_loc (MSG_NOTE, vect_location,
                           "Building vector operands from scalars\n");
          oprnd_info->def_stmts = vNULL;
          vect_free_slp_tree (child);
          SLP_TREE_CHILDREN (*node).quick_push (NULL);
          continue;
        }

      /* If the SLP build for operand zero failed and operands zero
         and one can be commuted, try that for the scalar stmts
         that failed the match.  */
      if (i == 0
          /* A first scalar stmt mismatch signals a fatal mismatch.  */
          && matches[0]
          /* ??? For COND_EXPRs we can swap the comparison operands
             as well as the arms under some constraints.  */
          && nops == 2
          && oprnds_info[1]->first_dt == vect_internal_def
          && is_gimple_assign (stmt)
          && commutative_tree_code (gimple_assign_rhs_code (stmt))
          && !SLP_TREE_TWO_OPERATORS (*node)
          /* Do so only if the number of unsuccessful permutes was not more
             than a cut-off, as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */
          && *npermutes < 4)
        {
          unsigned int j;
          slp_tree grandchild;

          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

          /* Swap mismatched definition stmts.  */
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Re-trying with swapped operands of stmts ");
          for (j = 0; j < group_size; ++j)
            if (!matches[j])
              {
                gimple tem = oprnds_info[0]->def_stmts[j];
                oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
                oprnds_info[1]->def_stmts[j] = tem;
                dump_printf (MSG_NOTE, "%d ", j);
              }
          dump_printf (MSG_NOTE, "\n");
          /* And try again with scratch 'matches' ...  */
          bool *tem = XALLOCAVEC (bool, group_size);
          if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                                   group_size, max_nunits, loads,
                                   vectorization_factor,
                                   tem, npermutes, &this_tree_size,
                                   max_tree_size))
            {
              /* ... so if successful we can apply the operand swapping
                 to the GIMPLE IL.  This is necessary because for example
                 vect_get_slp_defs uses operand indexes and thus expects
                 canonical operand order.  */
              for (j = 0; j < group_size; ++j)
                if (!matches[j])
                  {
                    gimple stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
                    swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                                       gimple_assign_rhs2_ptr (stmt));
                  }
              oprnd_info->def_stmts = vNULL;
              SLP_TREE_CHILDREN (*node).quick_push (child);
              continue;
            }

          ++*npermutes;
        }

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  if (tree_size)
    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
  return true;
}
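
/* A note on the retry above (an added observation, not from the original
   source): the swap is first tried on the collected def-stmts only; the
   GIMPLE statements themselves are rewritten with swap_ssa_operands only
   after the recursive build with the scratch 'matches' array succeeds, so
   a failed retry leaves the IL untouched.  */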

/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
}


/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}


/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}


/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
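
/* Illustration (an added example, not from the original source): the stmt
   at position I moves to position PERMUTATION[I], so with stmts
   {s0, s1, s2} and permutation {1, 2, 0} the rearranged node holds
   {s2, s0, s1}.  */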


/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  sbitmap load_index;
  slp_tree node;
  gimple stmt, load, next_load, first_load;
  struct data_reference *dr;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        if (node->load_permutation.exists ())
          FOR_EACH_VEC_ELT (node->load_permutation, j, next)
            dump_printf (MSG_NOTE, "%d ", next);
        else
          for (k = 0; k < group_size; ++k)
            dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ??? Can't we assert this?  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      slp_tree load;
      unsigned int lidx;

      /* Compare all the permutation sequences to the first one.  We know
         that at least one load is permuted.  */
      node = SLP_INSTANCE_LOADS (slp_instn)[0];
      if (!node->load_permutation.exists ())
        return false;
      for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
        {
          if (!load->load_permutation.exists ())
            return false;
          FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
            if (lidx != node->load_permutation[j])
              return false;
        }

      /* Check that the loads in the first sequence are different and there
         are no gaps between them.  */
      load_index = sbitmap_alloc (group_size);
      bitmap_clear (load_index);
      FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
        {
          if (bitmap_bit_p (load_index, lidx))
            {
              sbitmap_free (load_index);
              return false;
            }
          bitmap_set_bit (load_index, lidx);
        }
      for (i = 0; i < group_size; i++)
        if (!bitmap_bit_p (load_index, i))
          {
            sbitmap_free (load_index);
            return false;
          }
      sbitmap_free (load_index);

      /* This permutation is valid for reduction.  Since the order of the
         statements in the nodes is not important unless they are memory
         accesses, we can rearrange the statements in all the nodes
         according to the order of the loads.  */
      vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                node->load_permutation);

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check whether the loads in an instance form a subchain and thus
         no permutation is necessary.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          if (!SLP_TREE_LOAD_PERMUTATION (node).exists ())
            continue;
          bool subchain_p = true;
          next_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (j != 0 && next_load != load)
                {
                  subchain_p = false;
                  break;
                }
              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }
          if (subchain_p)
            SLP_TREE_LOAD_PERMUTATION (node).release ();
          else
            {
              /* Verify the permutation can be generated.  */
              vec<tree> tem;
              if (!vect_transform_slp_perm_load (node, tem, NULL,
                                                 1, slp_instn, true))
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                   vect_location,
                                   "unsupported load permutation\n");
                  return false;
                }
            }
        }

      /* Check that the alignment of the first load in every subchain, i.e.,
         the first statement in every load node, is supported.
         ??? This belongs in alignment checking.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          first_load = SLP_TREE_SCALAR_STMTS (node)[0];
          if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
            {
              dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
              if (vect_supportable_dr_alignment (dr, false)
                  == dr_unaligned_unsupported)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                       vect_location,
                                       "unsupported unaligned load ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        first_load, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
            }
        }

      return true;
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE, and where each sequence of the same dr is of GROUP_SIZE
     length as well (unless it's a reduction).  */
  if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
    return false;
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (!node->load_permutation.exists ())
      return false;

  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      unsigned int lidx = node->load_permutation[0];
      if (bitmap_bit_p (load_index, lidx))
        {
          sbitmap_free (load_index);
          return false;
        }
      bitmap_set_bit (load_index, lidx);
      FOR_EACH_VEC_ELT (node->load_permutation, j, k)
        if (k != lidx)
          {
            sbitmap_free (load_index);
            return false;
          }
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
        sbitmap_free (load_index);
        return false;
      }
  sbitmap_free (load_index);

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
        && !vect_transform_slp_perm_load
              (node, vNULL, NULL,
               SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;
  return true;
}
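
/* Illustration of the permutation shape checked above (an added example,
   not from the original source): with GROUP_SIZE 2, load permutations
   { 0, 0 } and { 1, 1 } across two load nodes fit the "0..01..1.." pattern
   and are accepted, whereas a single node carrying { 0, 1 } fails the
   per-node uniformity check and would have to be handled by the
   basic-block subchain logic instead.  */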


/* Find the last scalar stmt of the SLP group rooted at NODE; when NODE is
   the root of an instance built from grouped stores, this is the last
   store in the SLP instance.  */

static gimple
vect_find_last_scalar_stmt_in_slp (slp_tree node)
{
  gimple last = NULL, stmt;

  for (int i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &stmt); i++)
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      if (is_pattern_stmt_p (stmt_vinfo))
        last = get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo), last);
      else
        last = get_later_stmt (stmt, last);
    }

  return last;
}

/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                         slp_instance instance, slp_tree node,
                         stmt_vector_for_cost *prologue_cost_vec,
                         unsigned ncopies_for_cost)
{
  stmt_vector_for_cost *body_cost_vec = &SLP_INSTANCE_BODY_COST_VEC (instance);

  unsigned i;
  slp_tree child;
  gimple stmt, s;
  stmt_vec_info stmt_info;
  tree lhs;
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (child)
      vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                               instance, child, prologue_cost_vec,
                               ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
        vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                               vect_uninitialized_def,
                               node, prologue_cost_vec, body_cost_vec);
      else
        {
          int i;
          gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
          vect_model_load_cost (stmt_info, ncopies_for_cost, false,
                                node, prologue_cost_vec, body_cost_vec);
          /* If the load is permuted record the cost for the permutation.
             ??? Loads from multiple chains are let through here only
             for a single special case involving complex numbers where
             in the end no permutation is necessary.  */
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
            if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
                 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
                && vect_get_place_in_interleaving_chain
                     (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
              {
                record_stmt_cost (body_cost_vec, group_size, vec_perm,
                                  stmt_info, 0, vect_body);
                break;
              }
        }
    }
  else
    {
      record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                        stmt_info, 0, vect_body);
      if (SLP_TREE_TWO_OPERATORS (node))
        {
          record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                            stmt_info, 0, vect_body);
          record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
                            stmt_info, 0, vect_body);
        }
    }

  /* Scan operands and account for prologue cost of constants/externals.
     ??? This over-estimates cost for multiple uses and should be
     re-engineered.  */
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree def, op = gimple_op (stmt, i);
      gimple def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
        continue;
      if (vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo,
                              &def_stmt, &def, &dt))
        {
          /* Without looking at the actual initializer a vector of
             constants can be implemented as load from the constant pool.
             ??? We need to pass down stmt_info for a vector type
             even if it points to the wrong stmt.  */
          if (dt == vect_constant_def)
            record_stmt_cost (prologue_cost_vec, 1, vector_load,
                              stmt_info, 0, vect_prologue);
          else if (dt == vect_external_def)
            record_stmt_cost (prologue_cost_vec, 1, vec_construct,
                              stmt_info, 0, vect_prologue);
        }
    }
}
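
/* Note on the two-operator case above (an added observation, not from the
   original source): such a node is costed as two vector stmts plus one
   permutation per copy, since both alternatives (e.g. the add and the
   subtract) are materialized and then blended.  */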

/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       slp_instance instance, unsigned nunits)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;
  /* Calculate the number of vector stmts to create based on the unrolling
     factor (the number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;

  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  SLP_INSTANCE_BODY_COST_VEC (instance) = body_cost_vec;
  vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                           instance, SLP_INSTANCE_TREE (instance),
                           &prologue_cost_vec, ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  Unlike the body costs, we know
     the final values now regardless of the loop vectorization factor.  */
  void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
                : BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_prologue);
    }

  prologue_cost_vec.release ();
}
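
/* Worked example for ncopies_for_cost (an added illustration, not from the
   original source): with NUNITS 4 and GROUP_SIZE 6, least_common_multiple
   returns 12, so three vector stmts per scalar stmt of the group are
   costed; with GROUP_SIZE 2 the lcm is 4 and a single copy suffices.  */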

/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt, unsigned max_tree_size)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
        {
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);
        }
      else
        {
          gcc_assert (loop_vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
        }

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
          else
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
        }
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  bool *matches = XALLOCAVEC (bool, group_size);
  unsigned npermutes = 0;
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &max_nunits, &loads,
                           vectorization_factor, matches, &npermutes, NULL,
                           max_tree_size))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && !loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP\n");
          vect_free_slp_tree (node);
          loads.release ();
          return false;
        }

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_BODY_COST_VEC (new_instance) = vNULL;
      SLP_INSTANCE_LOADS (new_instance) = loads;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
        {
          vec<unsigned> load_permutation;
          int j;
          gimple load, first_stmt;
          bool this_load_permuted = false;
          load_permutation.create (group_size);
          first_stmt = GROUP_FIRST_ELEMENT
              (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
            {
              int load_place
                = vect_get_place_in_interleaving_chain (load, first_stmt);
              gcc_assert (load_place != -1);
              if (load_place != j)
                this_load_permuted = true;
              load_permutation.safe_push (load_place);
            }
          if (!this_load_permuted)
            {
              load_permutation.release ();
              continue;
            }
          SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
          loads_permuted = true;
        }

      if (loads_permuted)
        {
          if (!vect_supported_load_permutation_p (new_instance))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported load "
                                   "permutation ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              vect_free_slp_instance (new_instance);
              return false;
            }
        }

      if (loop_vinfo)
        {
          /* Compute the costs of this SLP instance.  Delay this for BB
             vectorization as we don't have vector types computed yet.  */
          vect_analyze_slp_cost (loop_vinfo, bb_vinfo,
                                 new_instance, TYPE_VECTOR_SUBPARTS (vectype));
          LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
        }
      else
        BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);

      if (dump_enabled_p ())
        vect_print_slp_tree (MSG_NOTE, node);

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();

  return false;
}
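
/* Illustration of the load permutation computed above (an added example,
   not from the original source): if a store group b[0..3] is fed by loads
   of a[1], a[0], a[3] and a[2] from one interleaving chain, the recorded
   load permutation is { 1, 0, 3, 2 }, which
   vect_supported_load_permutation_p then has to validate or simplify.  */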


/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */
1883
1884 bool
1885 vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1886 unsigned max_tree_size)
1887 {
1888 unsigned int i;
1889 vec<gimple> grouped_stores;
1890 vec<gimple> reductions = vNULL;
1891 vec<gimple> reduc_chains = vNULL;
1892 gimple first_element;
1893 bool ok = false;
1894
1895 if (dump_enabled_p ())
1896 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");
1897
1898 if (loop_vinfo)
1899 {
1900 grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
1901 reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
1902 reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
1903 }
1904 else
1905 grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);
1906
1907 /* Find SLP sequences starting from groups of grouped stores. */
1908 FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
1909 if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
1910 max_tree_size))
1911 ok = true;
1912
1913 if (bb_vinfo && !ok)
1914 {
1915 if (dump_enabled_p ())
1916 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1917 "Failed to SLP the basic block.\n");
1918
1919 return false;
1920 }
1921
1922 if (loop_vinfo
1923 && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
1924 {
1925 /* Find SLP sequences starting from reduction chains. */
1926 FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
1927 if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
1928 max_tree_size))
1929 ok = true;
1930 else
1931 return false;
1932
1933 /* Don't try to vectorize SLP reductions if a reduction chain was
1934 detected. */
1935 return ok;
1936 }
1937
1938 /* Find SLP sequences starting from groups of reductions. */
1939 if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
1940 && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0],
1941 max_tree_size))
1942 ok = true;
1943
1944 return true;
1945 }
1946
1947
1948 /* For each possible SLP instance decide whether to SLP it and calculate the
1949 overall unrolling factor needed to SLP the loop. Return TRUE if we decided
1950 to SLP at least one instance. */
1951
1952 bool
1953 vect_make_slp_decision (loop_vec_info loop_vinfo)
1954 {
1955 unsigned int i, unrolling_factor = 1;
1956 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
1957 slp_instance instance;
1958 int decided_to_slp = 0;
1959
1960 if (dump_enabled_p ())
1961 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
1962 "\n");
1963
1964 FOR_EACH_VEC_ELT (slp_instances, i, instance)
1965 {
1966 /* FORNOW: SLP if you can. */
1967 if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
1968 unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);
1969
1970 /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts. Later we
1971 call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
1972 loop-based vectorization. Such stmts will be marked as HYBRID. */
1973 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
1974 decided_to_slp++;
1975 }
1976
1977 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
1978
1979 if (decided_to_slp && dump_enabled_p ())
1980 dump_printf_loc (MSG_NOTE, vect_location,
1981 "Decided to SLP %d instances. Unrolling factor %d\n",
1982 decided_to_slp, unrolling_factor);
1983
1984 return (decided_to_slp > 0);
1985 }
1986
1987
1988 /* Find stmts that must be both vectorized and SLPed (since they feed stmts that
1989 can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */
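/* E.g., a PURE_SLP multiplication whose result also feeds a relevant stmt
   outside any SLP instance must be vectorized by the loop-based vectorizer
   as well, and is therefore marked HYBRID here.  */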
1990
1991 static void
1992 vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
1993 {
1994 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[i];
1995 imm_use_iterator imm_iter;
1996 gimple use_stmt;
1997 stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
1998 slp_tree child;
1999 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2000 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2001 int j;
2002
2003 /* Propagate hybrid down the SLP tree. */
2004 if (stype == hybrid)
2005 ;
2006 else if (HYBRID_SLP_STMT (stmt_vinfo))
2007 stype = hybrid;
2008 else
2009 {
2010 /* Check if a pure SLP stmt has uses in non-SLP stmts. */
2011 gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
2012 if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2013 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
2014 if (gimple_bb (use_stmt)
2015 && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2016 && (use_vinfo = vinfo_for_stmt (use_stmt))
2017 && !STMT_SLP_TYPE (use_vinfo)
2018 && (STMT_VINFO_RELEVANT (use_vinfo)
2019 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo))
2020 || (STMT_VINFO_IN_PATTERN_P (use_vinfo)
2021 && STMT_VINFO_RELATED_STMT (use_vinfo)
2022 && !STMT_SLP_TYPE (vinfo_for_stmt
2023 (STMT_VINFO_RELATED_STMT (use_vinfo)))))
2024 && !(gimple_code (use_stmt) == GIMPLE_PHI
2025 && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
2026 stype = hybrid;
2027 }
2028
2029 if (stype == hybrid)
2030 STMT_SLP_TYPE (stmt_vinfo) = hybrid;
2031
2032 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2033 if (child)
2034 vect_detect_hybrid_slp_stmts (child, i, stype);
2035 }
2036
2037 /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */
2038
2039 static tree
2040 vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
2041 {
2042 walk_stmt_info *wi = (walk_stmt_info *)data;
2043 struct loop *loopp = (struct loop *)wi->info;
2044
2045 if (wi->is_lhs)
2046 return NULL_TREE;
2047
2048 if (TREE_CODE (*tp) == SSA_NAME
2049 && !SSA_NAME_IS_DEFAULT_DEF (*tp))
2050 {
2051 gimple def_stmt = SSA_NAME_DEF_STMT (*tp);
2052 if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
2053 && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
2054 STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
2055 }
2056
2057 return NULL_TREE;
2058 }
2059
2060 static tree
2061 vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
2062 walk_stmt_info *)
2063 {
2064 /* If the stmt is in an SLP instance then this isn't a reason
2065 to mark use definitions in other SLP instances as hybrid. */
2066 if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
2067 *handled = true;
2068 return NULL_TREE;
2069 }
2070
2071 /* Find stmts that must be both vectorized and SLPed. */
2072
2073 void
2074 vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
2075 {
2076 unsigned int i;
2077 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2078 slp_instance instance;
2079
2080 if (dump_enabled_p ())
2081 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
2082 "\n");
2083
2084 /* First walk all pattern stmts in the loop and mark defs of their uses as
2085 hybrid, because immediate uses inside pattern stmts are not recorded. */
2086 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2087 {
2088 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2089 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
2090 gsi_next (&gsi))
2091 {
2092 gimple stmt = gsi_stmt (gsi);
2093 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2094 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2095 {
2096 walk_stmt_info wi;
2097 memset (&wi, 0, sizeof (wi));
2098 wi.info = LOOP_VINFO_LOOP (loop_vinfo);
2099 gimple_stmt_iterator gsi2
2100 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
2101 walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
2102 vect_detect_hybrid_slp_1, &wi);
2103 walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
2104 vect_detect_hybrid_slp_2,
2105 vect_detect_hybrid_slp_1, &wi);
2106 }
2107 }
2108 }
2109
2110 /* Then walk the SLP instance trees marking stmts with uses in
2111 non-SLP stmts as hybrid, also propagating hybrid down the
2112 SLP tree, collecting the above info on-the-fly. */
2113 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2114 {
2115 for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
2116 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
2117 i, pure_slp);
2118 }
2119 }
2120
2121
2122 /* Create and initialize a new bb_vec_info struct for BB, as well as
2123 stmt_vec_info structs for all the stmts in it. */
2124
2125 static bb_vec_info
2126 new_bb_vec_info (basic_block bb)
2127 {
2128 bb_vec_info res = NULL;
2129 gimple_stmt_iterator gsi;
2130
2131 res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
2132 BB_VINFO_BB (res) = bb;
2133
2134 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2135 {
2136 gimple stmt = gsi_stmt (gsi);
2137 gimple_set_uid (stmt, 0);
2138 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
2139 }
2140
2141 BB_VINFO_GROUPED_STORES (res).create (10);
2142 BB_VINFO_SLP_INSTANCES (res).create (2);
2143 BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
2144
2145 bb->aux = res;
2146 return res;
2147 }
2148
2149
2150 /* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
2151 stmts in the basic block. */
2152
2153 static void
2154 destroy_bb_vec_info (bb_vec_info bb_vinfo)
2155 {
2156 vec<slp_instance> slp_instances;
2157 slp_instance instance;
2158 basic_block bb;
2159 gimple_stmt_iterator si;
2160 unsigned i;
2161
2162 if (!bb_vinfo)
2163 return;
2164
2165 bb = BB_VINFO_BB (bb_vinfo);
2166
2167 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2168 {
2169 gimple stmt = gsi_stmt (si);
2170 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2171
2172 if (stmt_info)
2173 /* Free stmt_vec_info. */
2174 free_stmt_vec_info (stmt);
2175 }
2176
2177 vect_destroy_datarefs (NULL, bb_vinfo);
2178 free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
2179 BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
2180 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2181 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2182 vect_free_slp_instance (instance);
2183 BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
2184 destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
2185 free (bb_vinfo);
2186 bb->aux = NULL;
2187 }
2188
2189
2190 /* Analyze statements contained in SLP tree node after recursively analyzing
2191 the subtree. Return TRUE if the operations are supported. */
2192
2193 static bool
2194 vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
2195 {
2196 bool dummy;
2197 int i;
2198 gimple stmt;
2199 slp_tree child;
2200
2201 if (!node)
2202 return true;
2203
2204 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2205 if (!vect_slp_analyze_node_operations (bb_vinfo, child))
2206 return false;
2207
2208 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2209 {
2210 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2211 gcc_assert (stmt_info);
2212 gcc_assert (PURE_SLP_STMT (stmt_info));
2213
2214 if (!vect_analyze_stmt (stmt, &dummy, node))
2215 return false;
2216 }
2217
2218 return true;
2219 }
2220
2221
2222 /* Analyze statements in SLP instances of the basic block. Return TRUE if the
2223 operations are supported. */
2224
2225 static bool
2226 vect_slp_analyze_operations (bb_vec_info bb_vinfo)
2227 {
2228 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2229 slp_instance instance;
2230 int i;
2231
2232 for (i = 0; slp_instances.iterate (i, &instance); )
2233 {
2234 if (!vect_slp_analyze_node_operations (bb_vinfo,
2235 SLP_INSTANCE_TREE (instance)))
2236 {
2237 vect_free_slp_instance (instance);
2238 slp_instances.ordered_remove (i);
2239 }
2240 else
2241 i++;
2242 }
2243
2244 if (!slp_instances.length ())
2245 return false;
2246
2247 return true;
2248 }
2249
2250
2251 /* Compute the scalar cost of the SLP node NODE and its children
2252 and return it. Do not account for defs that are marked in LIFE,
2253 and update LIFE according to the uses of NODE. */
2254
2255 static unsigned
2256 vect_bb_slp_scalar_cost (basic_block bb,
2257 slp_tree node, vec<bool, va_heap> *life)
2258 {
2259 unsigned scalar_cost = 0;
2260 unsigned i;
2261 gimple stmt;
2262 slp_tree child;
2263
2264 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2265 {
2266 unsigned stmt_cost;
2267 ssa_op_iter op_iter;
2268 def_operand_p def_p;
2269 stmt_vec_info stmt_info;
2270
2271 if ((*life)[i])
2272 continue;
2273
2274 /* If there is a non-vectorized use of the defs then the scalar
2275 stmt is kept live, in which case we do not account for it, or for
2276 any required defs in the SLP children, in the scalar cost. This
2277 way we make the vectorization look more costly compared to
2278 the scalar cost. */
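/* E.g., a def with a use in another basic block, in a PHI, or in a stmt
   that will not be vectorized keeps the scalar stmt live; its cost (and
   that of its defs in the SLP children) then stays out of the scalar
   cost.  */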
2279 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2280 {
2281 imm_use_iterator use_iter;
2282 gimple use_stmt;
2283 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2284 if (!is_gimple_debug (use_stmt)
2285 && (gimple_code (use_stmt) == GIMPLE_PHI
2286 || gimple_bb (use_stmt) != bb
2287 || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
2288 {
2289 (*life)[i] = true;
2290 BREAK_FROM_IMM_USE_STMT (use_iter);
2291 }
2292 }
2293 if ((*life)[i])
2294 continue;
2295
2296 stmt_info = vinfo_for_stmt (stmt);
2297 if (STMT_VINFO_DATA_REF (stmt_info))
2298 {
2299 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2300 stmt_cost = vect_get_stmt_cost (scalar_load);
2301 else
2302 stmt_cost = vect_get_stmt_cost (scalar_store);
2303 }
2304 else
2305 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2306
2307 scalar_cost += stmt_cost;
2308 }
2309
2310 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2311 if (child)
2312 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2313
2314 return scalar_cost;
2315 }
2316
2317 /* Check if vectorization of the basic block is profitable. */
2318
2319 static bool
2320 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2321 {
2322 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2323 slp_instance instance;
2324 int i, j;
2325 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2326 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2327 void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
2328 stmt_vec_info stmt_info = NULL;
2329 stmt_vector_for_cost body_cost_vec;
2330 stmt_info_for_cost *ci;
2331
2332 /* Calculate vector costs. */
2333 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2334 {
2335 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2336
2337 FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
2338 {
2339 stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
2340 (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
2341 stmt_info, ci->misalign, vect_body);
2342 }
2343 }
2344
2345 /* Calculate scalar cost. */
2346 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2347 {
2348 auto_vec<bool, 20> life;
2349 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2350 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2351 SLP_INSTANCE_TREE (instance),
2352 &life);
2353 }
2354
2355 /* Complete the target-specific cost calculation. */
2356 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2357 &vec_inside_cost, &vec_epilogue_cost);
2358
2359 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
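/* E.g., a vector inside cost of 4 with prologue and epilogue costs of 1
   each is profitable against a scalar cost of 8, since 1 + 1 + 4 < 8.  */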
2360
2361 if (dump_enabled_p ())
2362 {
2363 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2364 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2365 vec_inside_cost);
2366 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2367 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2368 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2369 }
2370
2371 /* Vectorization is profitable if its cost is less than the cost of the
2372 scalar version. */
2373 if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2374 return false;
2375
2376 return true;
2377 }
2378
2379 /* Check if the basic block can be vectorized. */
2380
2381 static bb_vec_info
2382 vect_slp_analyze_bb_1 (basic_block bb)
2383 {
2384 bb_vec_info bb_vinfo;
2385 vec<slp_instance> slp_instances;
2386 slp_instance instance;
2387 int i;
2388 int min_vf = 2;
2389 unsigned n_stmts = 0;
2390
2391 bb_vinfo = new_bb_vec_info (bb);
2392 if (!bb_vinfo)
2393 return NULL;
2394
2395 if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
2396 {
2397 if (dump_enabled_p ())
2398 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2399 "not vectorized: unhandled data-ref in basic "
2400 "block.\n");
2401
2402 destroy_bb_vec_info (bb_vinfo);
2403 return NULL;
2404 }
2405
2406 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2407 {
2408 if (dump_enabled_p ())
2409 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2410 "not vectorized: not enough data-refs in "
2411 "basic block.\n");
2412
2413 destroy_bb_vec_info (bb_vinfo);
2414 return NULL;
2415 }
2416
2417 if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2418 {
2419 if (dump_enabled_p ())
2420 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2421 "not vectorized: unhandled data access in "
2422 "basic block.\n");
2423
2424 destroy_bb_vec_info (bb_vinfo);
2425 return NULL;
2426 }
2427
2428 vect_pattern_recog (NULL, bb_vinfo);
2429
2430 if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2431 {
2432 if (dump_enabled_p ())
2433 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2434 "not vectorized: bad data alignment in basic "
2435 "block.\n");
2436
2437 destroy_bb_vec_info (bb_vinfo);
2438 return NULL;
2439 }
2440
2441 /* Check the SLP opportunities in the basic block, analyze and build SLP
2442 trees. */
2443 if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
2444 {
2445 if (dump_enabled_p ())
2446 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2447 "not vectorized: failed to find SLP opportunities "
2448 "in basic block.\n");
2449
2450 destroy_bb_vec_info (bb_vinfo);
2451 return NULL;
2452 }
2453
2454 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2455
2456 /* Mark all the statements that we want to vectorize as pure SLP and
2457 relevant. */
2458 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2459 {
2460 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2461 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2462 }
2463
2464 /* Mark all the statements that we do not want to vectorize. */
2465 for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
2466 !gsi_end_p (gsi); gsi_next (&gsi))
2467 {
2468 stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
2469 if (STMT_SLP_TYPE (vinfo) != pure_slp)
2470 STMT_VINFO_VECTORIZABLE (vinfo) = false;
2471 }
2472
2473 /* Analyze dependences. At this point all stmts not participating in
2474 vectorization have to be marked. Dependence analysis assumes
2475 that we either vectorize all SLP instances or none at all. */
2476 if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2477 {
2478 if (dump_enabled_p ())
2479 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2480 "not vectorized: unhandled data dependence "
2481 "in basic block.\n");
2482
2483 destroy_bb_vec_info (bb_vinfo);
2484 return NULL;
2485 }
2486
2487 if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2488 {
2489 if (dump_enabled_p ())
2490 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2491 "not vectorized: unsupported alignment in basic "
2492 "block.\n");
2493 destroy_bb_vec_info (bb_vinfo);
2494 return NULL;
2495 }
2496
2497 if (!vect_slp_analyze_operations (bb_vinfo))
2498 {
2499 if (dump_enabled_p ())
2500 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2501 "not vectorized: bad operation in basic block.\n");
2502
2503 destroy_bb_vec_info (bb_vinfo);
2504 return NULL;
2505 }
2506
2507 /* Compute the costs of the SLP instances. */
2508 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2509 {
2510 gimple stmt = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2511 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
2512 vect_analyze_slp_cost (NULL, bb_vinfo,
2513 instance, TYPE_VECTOR_SUBPARTS (vectype));
2514 }
2515
2516 /* Cost model: check if the vectorization is worthwhile. */
2517 if (!unlimited_cost_model (NULL)
2518 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2519 {
2520 if (dump_enabled_p ())
2521 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2522 "not vectorized: vectorization is not "
2523 "profitable.\n");
2524
2525 destroy_bb_vec_info (bb_vinfo);
2526 return NULL;
2527 }
2528
2529 if (dump_enabled_p ())
2530 dump_printf_loc (MSG_NOTE, vect_location,
2531 "Basic block will be vectorized using SLP\n");
2532
2533 return bb_vinfo;
2534 }
2535
2536
2537 bb_vec_info
2538 vect_slp_analyze_bb (basic_block bb)
2539 {
2540 bb_vec_info bb_vinfo;
2541 int insns = 0;
2542 gimple_stmt_iterator gsi;
2543 unsigned int vector_sizes;
2544
2545 if (dump_enabled_p ())
2546 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2547
2548 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2549 {
2550 gimple stmt = gsi_stmt (gsi);
2551 if (!is_gimple_debug (stmt)
2552 && !gimple_nop_p (stmt)
2553 && gimple_code (stmt) != GIMPLE_LABEL)
2554 insns++;
2555 }
2556
2557 if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2558 {
2559 if (dump_enabled_p ())
2560 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2561 "not vectorized: too many instructions in "
2562 "basic block.\n");
2563
2564 return NULL;
2565 }
2566
2567 /* Autodetect first vector size we try. */
2568 current_vector_size = 0;
2569 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2570
2571 while (1)
2572 {
2573 bb_vinfo = vect_slp_analyze_bb_1 (bb);
2574 if (bb_vinfo)
2575 return bb_vinfo;
2576
2577 destroy_bb_vec_info (bb_vinfo);
2578
2579 vector_sizes &= ~current_vector_size;
2580 if (vector_sizes == 0
2581 || current_vector_size == 0)
2582 return NULL;
2583
2584 /* Try the largest vector size remaining. */
2585 current_vector_size = 1 << floor_log2 (vector_sizes);
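/* E.g., a target advertising 32-byte and 16-byte vectors yields the size
   mask 32 | 16; after a failed 32-byte attempt the mask is reduced to 16
   and the block is re-analyzed with 16-byte vectors.  */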
2586 if (dump_enabled_p ())
2587 dump_printf_loc (MSG_NOTE, vect_location,
2588 "***** Re-trying analysis with "
2589 "vector size %d\n", current_vector_size);
2590 }
2591 }
2592
2593
2594 /* SLP costs are calculated according to the SLP instance unrolling factor
2595 (i.e., the number of created vector stmts depends on the unrolling factor).
2596 However, the actual number of vector stmts for every SLP node depends on
2597 the VF, which is set later in vect_analyze_operations (). Hence, SLP costs
2598 should be updated. In this function we assume that the inside costs
2599 calculated in vect_model_xxx_cost are linear in ncopies. */
2600
2601 void
2602 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
2603 {
2604 unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2605 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2606 slp_instance instance;
2607 stmt_vector_for_cost body_cost_vec;
2608 stmt_info_for_cost *si;
2609 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2610
2611 if (dump_enabled_p ())
2612 dump_printf_loc (MSG_NOTE, vect_location,
2613 "=== vect_update_slp_costs_according_to_vf ===\n");
2614
2615 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2616 {
2617 /* We assume that costs are linear in ncopies. */
2618 int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
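/* E.g., an instance with unrolling factor 2 in a loop vectorized with
   VF 8 is emitted 8 / 2 == 4 times, so every recorded cost below is
   scaled by NCOPIES == 4.  */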
2619
2620 /* Record the instance's instructions in the target cost model.
2621 This was delayed until here because the count of instructions
2622 isn't known beforehand. */
2623 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2624
2625 FOR_EACH_VEC_ELT (body_cost_vec, j, si)
2626 (void) add_stmt_cost (data, si->count * ncopies, si->kind,
2627 vinfo_for_stmt (si->stmt), si->misalign,
2628 vect_body);
2629 }
2630 }
2631
2632
2633 /* For constant and loop invariant defs of SLP_NODE this function returns
2634 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2635 OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2636 scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to create.
2637 REDUC_INDEX is the index of the reduction operand in the statements, unless
2638 it is -1. */
2639
2640 static void
2641 vect_get_constant_vectors (tree op, slp_tree slp_node,
2642 vec<tree> *vec_oprnds,
2643 unsigned int op_num, unsigned int number_of_vectors,
2644 int reduc_index)
2645 {
2646 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2647 gimple stmt = stmts[0];
2648 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2649 unsigned nunits;
2650 tree vec_cst;
2651 tree *elts;
2652 unsigned j, number_of_places_left_in_vector;
2653 tree vector_type;
2654 tree vop;
2655 int group_size = stmts.length ();
2656 unsigned int vec_num, i;
2657 unsigned number_of_copies = 1;
2658 vec<tree> voprnds;
2659 voprnds.create (number_of_vectors);
2660 bool constant_p, is_store;
2661 tree neutral_op = NULL;
2662 enum tree_code code = gimple_expr_code (stmt);
2663 gimple def_stmt;
2664 struct loop *loop;
2665 gimple_seq ctor_seq = NULL;
2666
2667 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2668 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2669
2670 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2671 && reduc_index != -1)
2672 {
2673 op_num = reduc_index;
2674 op = gimple_op (stmt, op_num + 1);
2675 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2676 we need either neutral operands or the original operands. See
2677 get_initial_def_for_reduction() for details. */
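/* E.g., the extra lanes of a sum reduction are padded with 0 and those
   of a product reduction with 1, leaving the final result unchanged.  */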
2678 switch (code)
2679 {
2680 case WIDEN_SUM_EXPR:
2681 case DOT_PROD_EXPR:
2682 case SAD_EXPR:
2683 case PLUS_EXPR:
2684 case MINUS_EXPR:
2685 case BIT_IOR_EXPR:
2686 case BIT_XOR_EXPR:
2687 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2688 neutral_op = build_real (TREE_TYPE (op), dconst0);
2689 else
2690 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2691
2692 break;
2693
2694 case MULT_EXPR:
2695 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2696 neutral_op = build_real (TREE_TYPE (op), dconst1);
2697 else
2698 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2699
2700 break;
2701
2702 case BIT_AND_EXPR:
2703 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2704 break;
2705
2706 /* For MIN/MAX we don't have an easy neutral operand but
2707 the initial values can be used fine here. Only for
2708 a reduction chain do we have to force a neutral element. */
2709 case MAX_EXPR:
2710 case MIN_EXPR:
2711 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2712 neutral_op = NULL;
2713 else
2714 {
2715 def_stmt = SSA_NAME_DEF_STMT (op);
2716 loop = (gimple_bb (stmt))->loop_father;
2717 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2718 loop_preheader_edge (loop));
2719 }
2720 break;
2721
2722 default:
2723 gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));
2724 neutral_op = NULL;
2725 }
2726 }
2727
2728 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2729 {
2730 is_store = true;
2731 op = gimple_assign_rhs1 (stmt);
2732 }
2733 else
2734 is_store = false;
2735
2736 gcc_assert (op);
2737
2738 if (CONSTANT_CLASS_P (op))
2739 constant_p = true;
2740 else
2741 constant_p = false;
2742
2743 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2744 created vectors. It is greater than 1 if unrolling is performed.
2745
2746 For example, we have two scalar operands, s1 and s2 (e.g., group of
2747 strided accesses of size two), while NUNITS is four (i.e., four scalars
2748 of this type can be packed in a vector). The output vector will contain
2749 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2750 will be 2).
2751
2752 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2753 containing the operands.
2754
2755 For example, NUNITS is four as before, and the group size is 8
2756 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2757 {s5, s6, s7, s8}. */
2758
2759 number_of_copies = least_common_multiple (nunits, group_size) / group_size;
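/* In the first example above this is least_common_multiple (4, 2) / 2 == 2,
   in the second least_common_multiple (4, 8) / 8 == 1.  */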
2760
2761 number_of_places_left_in_vector = nunits;
2762 elts = XALLOCAVEC (tree, nunits);
2763 bool place_after_defs = false;
2764 for (j = 0; j < number_of_copies; j++)
2765 {
2766 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2767 {
2768 if (is_store)
2769 op = gimple_assign_rhs1 (stmt);
2770 else
2771 {
2772 switch (code)
2773 {
2774 case COND_EXPR:
2775 if (op_num == 0 || op_num == 1)
2776 {
2777 tree cond = gimple_assign_rhs1 (stmt);
2778 op = TREE_OPERAND (cond, op_num);
2779 }
2780 else
2781 {
2782 if (op_num == 2)
2783 op = gimple_assign_rhs2 (stmt);
2784 else
2785 op = gimple_assign_rhs3 (stmt);
2786 }
2787 break;
2788
2789 case CALL_EXPR:
2790 op = gimple_call_arg (stmt, op_num);
2791 break;
2792
2793 case LSHIFT_EXPR:
2794 case RSHIFT_EXPR:
2795 case LROTATE_EXPR:
2796 case RROTATE_EXPR:
2797 op = gimple_op (stmt, op_num + 1);
2798 /* Unlike the other binary operators, shifts/rotates take
2799 an int shift count instead of one of the same type as
2800 the lhs, so make sure the scalar is of the right type if
2801 we are dealing with vectors of
2802 long long/long/short/char. */
2803 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2804 op = fold_convert (TREE_TYPE (vector_type), op);
2805 break;
2806
2807 default:
2808 op = gimple_op (stmt, op_num + 1);
2809 break;
2810 }
2811 }
2812
2813 if (reduc_index != -1)
2814 {
2815 loop = (gimple_bb (stmt))->loop_father;
2816 def_stmt = SSA_NAME_DEF_STMT (op);
2817
2818 gcc_assert (loop);
2819
2820 /* Get the def before the loop. In a reduction chain we have only
2821 one initial value. */
2822 if ((j != (number_of_copies - 1)
2823 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2824 && i != 0))
2825 && neutral_op)
2826 op = neutral_op;
2827 else
2828 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2829 loop_preheader_edge (loop));
2830 }
2831
2832 /* Create 'vect_ = {op0,op1,...,opn}'. */
2833 number_of_places_left_in_vector--;
2834 tree orig_op = op;
2835 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2836 {
2837 if (CONSTANT_CLASS_P (op))
2838 {
2839 op = fold_unary (VIEW_CONVERT_EXPR,
2840 TREE_TYPE (vector_type), op);
2841 gcc_assert (op && CONSTANT_CLASS_P (op));
2842 }
2843 else
2844 {
2845 tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
2846 gimple init_stmt;
2847 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
2848 init_stmt
2849 = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
2850 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2851 op = new_temp;
2852 }
2853 }
2854 elts[number_of_places_left_in_vector] = op;
2855 if (!CONSTANT_CLASS_P (op))
2856 constant_p = false;
2857 if (TREE_CODE (orig_op) == SSA_NAME
2858 && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
2859 && STMT_VINFO_BB_VINFO (stmt_vinfo)
2860 && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
2861 == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
2862 place_after_defs = true;
2863
2864 if (number_of_places_left_in_vector == 0)
2865 {
2866 number_of_places_left_in_vector = nunits;
2867
2868 if (constant_p)
2869 vec_cst = build_vector (vector_type, elts);
2870 else
2871 {
2872 vec<constructor_elt, va_gc> *v;
2873 unsigned k;
2874 vec_alloc (v, nunits);
2875 for (k = 0; k < nunits; ++k)
2876 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2877 vec_cst = build_constructor (vector_type, v);
2878 }
2879 tree init;
2880 gimple_stmt_iterator gsi;
2881 if (place_after_defs)
2882 {
2883 gsi = gsi_for_stmt
2884 (vect_find_last_scalar_stmt_in_slp (slp_node));
2885 init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
2886 }
2887 else
2888 init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
2889 if (ctor_seq != NULL)
2890 {
2891 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
2892 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2893 GSI_SAME_STMT);
2894 ctor_seq = NULL;
2895 }
2896 voprnds.quick_push (init);
2897 place_after_defs = false;
2898 }
2899 }
2900 }
2901
2902 /* Since the vectors are created in the reverse order, we should invert
2903 them. */
2904 vec_num = voprnds.length ();
2905 for (j = vec_num; j != 0; j--)
2906 {
2907 vop = voprnds[j - 1];
2908 vec_oprnds->quick_push (vop);
2909 }
2910
2911 voprnds.release ();
2912
2913 /* In case VF is greater than the unrolling factor needed for the SLP
2914 group of stmts, the NUMBER_OF_VECTORS to be created is greater than
2915 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
2916 to replicate the vectors. */
2917 while (number_of_vectors > vec_oprnds->length ())
2918 {
2919 tree neutral_vec = NULL;
2920
2921 if (neutral_op)
2922 {
2923 if (!neutral_vec)
2924 neutral_vec = build_vector_from_val (vector_type, neutral_op);
2925
2926 vec_oprnds->quick_push (neutral_vec);
2927 }
2928 else
2929 {
2930 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2931 vec_oprnds->quick_push (vop);
2932 }
2933 }
2934 }
2935
2936
2937 /* Get vectorized definitions from SLP_NODE, which contains the
2938 corresponding vectorized def-stmts. */
2939
2940 static void
2941 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2942 {
2943 tree vec_oprnd;
2944 gimple vec_def_stmt;
2945 unsigned int i;
2946
2947 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2948
2949 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2950 {
2951 gcc_assert (vec_def_stmt);
2952 vec_oprnd = gimple_get_lhs (vec_def_stmt);
2953 vec_oprnds->quick_push (vec_oprnd);
2954 }
2955 }
2956
2957
2958 /* Get vectorized definitions for SLP_NODE.
2959 If the scalar definitions are loop invariants or constants, collect them and
2960 call vect_get_constant_vectors() to create vector stmts.
2961 Otherwise, the def-stmts must be already vectorized and the vectorized stmts
2962 must be stored in the corresponding child of SLP_NODE, and we call
2963 vect_get_slp_vect_defs () to retrieve them. */
2964
2965 void
2966 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2967 vec<vec<tree> > *vec_oprnds, int reduc_index)
2968 {
2969 gimple first_stmt;
2970 int number_of_vects = 0, i;
2971 unsigned int child_index = 0;
2972 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2973 slp_tree child = NULL;
2974 vec<tree> vec_defs;
2975 tree oprnd;
2976 bool vectorized_defs;
2977
2978 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
2979 FOR_EACH_VEC_ELT (ops, i, oprnd)
2980 {
2981 /* For each operand we check if it has vectorized definitions in a child
2982 node or we need to create them (for invariants and constants). We
2983 check if the LHS of the first stmt of the next child matches OPRND.
2984 If it does, we have found the correct child. Otherwise, we call
2985 vect_get_constant_vectors () and do not advance CHILD_INDEX, in order
2986 to check this child node for the next operand. */
2987 vectorized_defs = false;
2988 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
2989 {
2990 child = SLP_TREE_CHILDREN (slp_node)[child_index];
2991
2992 /* We have to check both pattern and original def, if available. */
2993 if (child)
2994 {
2995 gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
2996 gimple related
2997 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
2998
2999 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
3000 || (related
3001 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
3002 {
3003 /* The number of vector defs is determined by the number of
3004 vector statements in the node from which we get those
3005 statements. */
3006 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
3007 vectorized_defs = true;
3008 child_index++;
3009 }
3010 }
3011 else
3012 child_index++;
3013 }
3014
3015 if (!vectorized_defs)
3016 {
3017 if (i == 0)
3018 {
3019 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3020 /* Number of vector stmts was calculated according to LHS in
3021 vect_schedule_slp_instance (), fix it by replacing LHS with
3022 RHS, if necessary. See vect_get_smallest_scalar_type () for
3023 details. */
3024 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
3025 &rhs_size_unit);
3026 if (rhs_size_unit != lhs_size_unit)
3027 {
3028 number_of_vects *= rhs_size_unit;
3029 number_of_vects /= lhs_size_unit;
3030 }
3031 }
3032 }
3033
3034 /* Allocate memory for vectorized defs. */
3035 vec_defs = vNULL;
3036 vec_defs.create (number_of_vects);
3037
3038 /* For reduction defs we call vect_get_constant_vectors (), since we are
3039 looking for initial loop invariant values. */
3040 if (vectorized_defs && reduc_index == -1)
3041 /* The defs are already vectorized. */
3042 vect_get_slp_vect_defs (child, &vec_defs);
3043 else
3044 /* Build vectors from scalar defs. */
3045 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
3046 number_of_vects, reduc_index);
3047
3048 vec_oprnds->quick_push (vec_defs);
3049
3050 /* For reductions, we only need initial values. */
3051 if (reduc_index != -1)
3052 return;
3053 }
3054 }
3055
3056
3057 /* Create NCOPIES permutation statements using the permutation mask MASK
3058 (a vector of type MASK_TYPE) and two input vectors placed in DR_CHAIN
3059 at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy, shifting by
3060 STRIDE elements of DR_CHAIN for every further copy.
3061 (STRIDE is the number of vectorized stmts for NODE divided by the number
3062 of copies.)
3063 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE at
3064 which the created stmts must be stored. */
3065
3066 static inline void
3067 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
3068 tree mask, int first_vec_indx, int second_vec_indx,
3069 gimple_stmt_iterator *gsi, slp_tree node,
3070 tree vectype, vec<tree> dr_chain,
3071 int ncopies, int vect_stmts_counter)
3072 {
3073 tree perm_dest;
3074 gimple perm_stmt = NULL;
3075 stmt_vec_info next_stmt_info;
3076 int i, stride;
3077 tree first_vec, second_vec, data_ref;
3078
3079 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
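/* E.g., a NODE with 4 vector stmts and NCOPIES 2 has STRIDE 2: copy I of
   the permutation is stored at index 2 * I + VECT_STMTS_COUNTER and reads
   its inputs 2 * I vectors further down DR_CHAIN.  */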
3080
3081 /* Initialize the vect stmts of NODE to properly insert the generated
3082 stmts later. */
3083 for (i = SLP_TREE_VEC_STMTS (node).length ();
3084 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
3085 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
3086
3087 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3088 for (i = 0; i < ncopies; i++)
3089 {
3090 first_vec = dr_chain[first_vec_indx];
3091 second_vec = dr_chain[second_vec_indx];
3092
3093 /* Generate the permute statement. */
3094 perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
3095 first_vec, second_vec, mask);
3096 data_ref = make_ssa_name (perm_dest, perm_stmt);
3097 gimple_set_lhs (perm_stmt, data_ref);
3098 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3099
3100 /* Store the vector statement in NODE. */
3101 SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
3102
3103 first_vec_indx += stride;
3104 second_vec_indx += stride;
3105 }
3106
3107 /* Mark the scalar stmt as vectorized. */
3108 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
3109 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
3110 }
3111
3112
3113 /* Given FIRST_MASK_ELEMENT, the mask element in element representation,
3114 return in CURRENT_MASK_ELEMENT its equivalent in target-specific
3115 representation. Check that the mask is valid and return FALSE if not.
3116 Return TRUE in NEED_NEXT_VECTOR if the permutation requires moving to
3117 the next vector, i.e., the current first vector is not needed. */
3118
3119 static bool
3120 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
3121 int mask_nunits, bool only_one_vec, int index,
3122 unsigned char *mask, int *current_mask_element,
3123 bool *need_next_vector, int *number_of_mask_fixes,
3124 bool *mask_fixed, bool *needs_first_vector)
3125 {
3126 int i;
3127
3128 /* Convert to target specific representation. */
3129 *current_mask_element = first_mask_element + m;
3130 /* Adjust the value in case it's a mask for the second and third vectors. */
3131 *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
3132
3133 if (*current_mask_element < 0)
3134 {
3135 if (dump_enabled_p ())
3136 {
3137 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3138 "permutation requires past vector ");
3139 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3140 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3141 }
3142 return false;
3143 }
3144
3145 if (*current_mask_element < mask_nunits)
3146 *needs_first_vector = true;
3147
3148 /* We have only one input vector to permute but the mask accesses values in
3149 the next vector as well. */
3150 if (only_one_vec && *current_mask_element >= mask_nunits)
3151 {
3152 if (dump_enabled_p ())
3153 {
3154 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3155 "permutation requires at least two vectors ");
3156 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3157 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3158 }
3159
3160 return false;
3161 }
3162
3163 /* The mask requires the next vector. */
3164 while (*current_mask_element >= mask_nunits * 2)
3165 {
3166 if (*needs_first_vector || *mask_fixed)
3167 {
3168 /* We either need the first vector too or have already moved to the
3169 next vector. In both cases, this permutation needs three
3170 vectors. */
3171 if (dump_enabled_p ())
3172 {
3173 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3174 "permutation requires at "
3175 "least three vectors ");
3176 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3177 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3178 }
3179
3180 return false;
3181 }
3182
3183 /* We move to the next vector, dropping the first one and working with
3184 the second and the third - we need to adjust the values of the mask
3185 accordingly. */
3186 *current_mask_element -= mask_nunits * *number_of_mask_fixes;
3187
3188 for (i = 0; i < index; i++)
3189 mask[i] -= mask_nunits * *number_of_mask_fixes;
3190
3191 (*number_of_mask_fixes)++;
3192 *mask_fixed = true;
3193 }
3194
3195 *need_next_vector = *mask_fixed;
3196
3197 /* This was the last element of this mask. Start a new one. */
3198 if (index == mask_nunits - 1)
3199 {
3200 *number_of_mask_fixes = 1;
3201 *mask_fixed = false;
3202 *needs_first_vector = false;
3203 }
3204
3205 return true;
3206 }
3207
3208
3209 /* Generate vector permute statements from a list of loads in DR_CHAIN.
3210 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
3211 permute statements for the SLP node NODE of the SLP instance
3212 SLP_NODE_INSTANCE. */
3213
3214 bool
3215 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
3216 gimple_stmt_iterator *gsi, int vf,
3217 slp_instance slp_node_instance, bool analyze_only)
3218 {
3219 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3220 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3221 tree mask_element_type = NULL_TREE, mask_type;
3222 int i, j, k, nunits, vec_index = 0, scalar_index;
3223 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3224 gimple next_scalar_stmt;
3225 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
3226 int first_mask_element;
3227 int index, unroll_factor, current_mask_element, ncopies;
3228 unsigned char *mask;
3229 bool only_one_vec = false, need_next_vector = false;
3230 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
3231 int number_of_mask_fixes = 1;
3232 bool mask_fixed = false;
3233 bool needs_first_vector = false;
3234 machine_mode mode;
3235
3236 mode = TYPE_MODE (vectype);
3237
3238 if (!can_vec_perm_p (mode, false, NULL))
3239 {
3240 if (dump_enabled_p ())
3241 {
3242 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3243 "no vect permute for ");
3244 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3245 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3246 }
3247 return false;
3248 }
3249
3250 /* The generic VEC_PERM_EXPR code always uses an integral type of the
3251 same size as the vector element being permuted. */
3252 mask_element_type = lang_hooks.types.type_for_mode
3253 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
3254 mask_type = get_vectype_for_scalar_type (mask_element_type);
3255 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3256 mask = XALLOCAVEC (unsigned char, nunits);
3257 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3258
3259 /* The number of vector stmts to generate based only on the
3260 SLP_NODE_INSTANCE unrolling factor. */
3261 orig_vec_stmts_num = group_size *
3262 SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
3263 if (orig_vec_stmts_num == 1)
3264 only_one_vec = true;
3265
3266 /* The number of copies is determined by the final vectorization factor
3267 relative to the SLP_NODE_INSTANCE unrolling factor. */
3268 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3269
3270 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3271 return false;
3272
3273 /* Generate permutation masks for every NODE. The number of masks for each
3274 NODE is equal to GROUP_SIZE.
3275 E.g., we have a group of three nodes with three loads from the same
3276 location in each node, and the vector size is 4. I.e., we have an
3277 a0b0c0a1b1c1... sequence and we need to create the following vectors:
3278 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3279 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3280 ...
3281
3282 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3283 The last mask is illegal since we assume two operands for the permute
3284 operation, and mask element values can't index outside those two vectors.
3285 Hence, the last mask must be converted into {2,5,5,5}.
3286 For the first two permutations we need the first and the second input
3287 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3288 we need the second and the third vectors: {b1,c1,a2,b2} and
3289 {c2,a3,b3,c3}. */
3290
3291 {
3292 scalar_index = 0;
3293 index = 0;
3294 vect_stmts_counter = 0;
3295 vec_index = 0;
3296 first_vec_index = vec_index++;
3297 if (only_one_vec)
3298 second_vec_index = first_vec_index;
3299 else
3300 second_vec_index = vec_index++;
3301
3302 for (j = 0; j < unroll_factor; j++)
3303 {
3304 for (k = 0; k < group_size; k++)
3305 {
3306 i = SLP_TREE_LOAD_PERMUTATION (node)[k];
3307 first_mask_element = i + j * group_size;
3308 if (!vect_get_mask_element (stmt, first_mask_element, 0,
3309 nunits, only_one_vec, index,
3310 mask, &current_mask_element,
3311 &need_next_vector,
3312 &number_of_mask_fixes, &mask_fixed,
3313 &needs_first_vector))
3314 return false;
3315 gcc_assert (current_mask_element >= 0
3316 && current_mask_element < 2 * nunits);
3317 mask[index++] = current_mask_element;
3318
3319 if (index == nunits)
3320 {
3321 index = 0;
3322 if (!can_vec_perm_p (mode, false, mask))
3323 {
3324 if (dump_enabled_p ())
3325 {
3326 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3327 vect_location,
3328 "unsupported vect permute { ");
3329 for (i = 0; i < nunits; ++i)
3330 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
3331 mask[i]);
3332 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3333 }
3334 return false;
3335 }
3336
3337 if (!analyze_only)
3338 {
3339 int l;
3340 tree mask_vec, *mask_elts;
3341 mask_elts = XALLOCAVEC (tree, nunits);
3342 for (l = 0; l < nunits; ++l)
3343 mask_elts[l] = build_int_cst (mask_element_type,
3344 mask[l]);
3345 mask_vec = build_vector (mask_type, mask_elts);
3346
3347 if (need_next_vector)
3348 {
3349 first_vec_index = second_vec_index;
3350 second_vec_index = vec_index;
3351 }
3352
3353 next_scalar_stmt
3354 = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
3355
3356 vect_create_mask_and_perm (stmt, next_scalar_stmt,
3357 mask_vec, first_vec_index, second_vec_index,
3358 gsi, node, vectype, dr_chain,
3359 ncopies, vect_stmts_counter++);
3360 }
3361 }
3362 }
3363 }
3364 }
3365
3366 return true;
3367 }
3368
3369
3370
3371 /* Vectorize SLP instance tree in postorder. */
3372
3373 static bool
3374 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3375 unsigned int vectorization_factor)
3376 {
3377 gimple stmt;
3378 bool grouped_store, is_store;
3379 gimple_stmt_iterator si;
3380 stmt_vec_info stmt_info;
3381 unsigned int vec_stmts_size, nunits, group_size;
3382 tree vectype;
3383 int i;
3384 slp_tree child;
3385
3386 if (!node)
3387 return false;
3388
3389 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3390 vect_schedule_slp_instance (child, instance, vectorization_factor);
3391
3392 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3393 stmt_info = vinfo_for_stmt (stmt);
3394
3395 /* VECTYPE is the type of the destination. */
3396 vectype = STMT_VINFO_VECTYPE (stmt_info);
3397 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3398 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3399
3400 /* For each SLP instance calculate the number of vector stmts to be created
3401 for the scalar stmts in each node of the SLP tree. The number of vector
3402 elements processed in one vector iteration is the number of scalar
3403 elements in one scalar iteration (GROUP_SIZE) multiplied by VF divided by
3404 the vector size. */
3405 vec_stmts_size = (vectorization_factor * group_size) / nunits;
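/* E.g., GROUP_SIZE 4 with VF 2 and NUNITS 4 gives (2 * 4) / 4 == 2 vector
   stmts for each node.  */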
3406
3407 if (!SLP_TREE_VEC_STMTS (node).exists ())
3408 {
3409 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3410 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3411 }
3412
3413 if (dump_enabled_p ())
3414 {
3415 dump_printf_loc (MSG_NOTE,vect_location,
3416 "------>vectorizing SLP node starting from: ");
3417 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3418 dump_printf (MSG_NOTE, "\n");
3419 }
3420
3421 /* Vectorized stmts go before the last scalar stmt, which is where
3422 all uses are ready. */
3423 si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));
3424
3425 /* Mark the first element of the reduction chain as a reduction to properly
3426 transform the node. In the analysis phase only the last element of the
3427 chain is marked as a reduction. */
3428 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3429 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3430 {
3431 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3432 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3433 }
3434
3435 /* Handle two-operation SLP nodes by vectorizing the group with
3436 both operations and then performing a merge. */
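/* E.g., for the group { a + b, c - d, e + f, g - h } the node is
   vectorized once as an addition and once as a subtraction, and the two
   results are blended with a VEC_PERM_EXPR mask that selects the even
   lanes from the addition result and the odd lanes from the subtraction
   result.  */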
3437 if (SLP_TREE_TWO_OPERATORS (node))
3438 {
3439 enum tree_code code0 = gimple_assign_rhs_code (stmt);
3440 enum tree_code ocode;
3441 gimple ostmt;
3442 unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
3443 bool allsame = true;
3444 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
3445 if (gimple_assign_rhs_code (ostmt) != code0)
3446 {
3447 mask[i] = 1;
3448 allsame = false;
3449 ocode = gimple_assign_rhs_code (ostmt);
3450 }
3451 else
3452 mask[i] = 0;
3453 if (!allsame)
3454 {
3455 vec<gimple> v0;
3456 vec<gimple> v1;
3457 unsigned j;
3458 tree tmask = NULL_TREE;
3459 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3460 v0 = SLP_TREE_VEC_STMTS (node).copy ();
3461 SLP_TREE_VEC_STMTS (node).truncate (0);
3462 gimple_assign_set_rhs_code (stmt, ocode);
3463 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3464 gimple_assign_set_rhs_code (stmt, code0);
3465 v1 = SLP_TREE_VEC_STMTS (node).copy ();
3466 SLP_TREE_VEC_STMTS (node).truncate (0);
3467 tree meltype = build_nonstandard_integer_type
3468 (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
3469 tree mvectype = get_same_sized_vectype (meltype, vectype);
3470 unsigned k = 0, l;
3471 for (j = 0; j < v0.length (); ++j)
3472 {
3473 tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
3474 for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
3475 {
3476 if (k >= group_size)
3477 k = 0;
3478 melts[l] = build_int_cst
3479 (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
3480 }
3481 tmask = build_vector (mvectype, melts);
3482
3483 /* ??? Not all targets support a VEC_PERM_EXPR with a
3484 constant mask that would translate to a vec_merge RTX
3485 (with their vec_perm_const_ok). We can either not
3486 vectorize in that case or let veclower do its job.
3487 Unfortunately that isn't too great and at least for
3488 plus/minus we'd eventually like to match targets
3489 vector addsub instructions. */
3490 gimple vstmt;
3491 vstmt = gimple_build_assign (make_ssa_name (vectype),
3492 VEC_PERM_EXPR,
3493 gimple_assign_lhs (v0[j]),
3494 gimple_assign_lhs (v1[j]), tmask);
3495 vect_finish_stmt_generation (stmt, vstmt, &si);
3496 SLP_TREE_VEC_STMTS (node).quick_push (vstmt);
3497 }
3498 v0.release ();
3499 v1.release ();
3500 return false;
3501 }
3502 }
3503 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3504 return is_store;
3505 }
3506
3507 /* Replace the scalar calls in SLP node NODE with assignments setting their
3508 lhs to zero. For loop vectorization this is done in vectorizable_call,
3509 but for SLP it needs to be deferred until the end of vect_schedule_slp,
3510 because multiple SLP instances may refer to the same scalar stmt. */
3511
3512 static void
3513 vect_remove_slp_scalar_calls (slp_tree node)
3514 {
3515 gimple stmt, new_stmt;
3516 gimple_stmt_iterator gsi;
3517 int i;
3518 slp_tree child;
3519 tree lhs;
3520 stmt_vec_info stmt_info;
3521
3522 if (!node)
3523 return;
3524
3525 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3526 vect_remove_slp_scalar_calls (child);
3527
3528 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3529 {
3530 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3531 continue;
3532 stmt_info = vinfo_for_stmt (stmt);
3533 if (stmt_info == NULL
3534 || is_pattern_stmt_p (stmt_info)
3535 || !PURE_SLP_STMT (stmt_info))
3536 continue;
3537 lhs = gimple_call_lhs (stmt);
3538 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3539 set_vinfo_for_stmt (new_stmt, stmt_info);
3540 set_vinfo_for_stmt (stmt, NULL);
3541 STMT_VINFO_STMT (stmt_info) = new_stmt;
3542 gsi = gsi_for_stmt (stmt);
3543 gsi_replace (&gsi, new_stmt, false);
3544 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3545 }
3546 }
3547
3548 /* Generate vector code for all SLP instances in the loop/basic block. */
3549
3550 bool
3551 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3552 {
3553 vec<slp_instance> slp_instances;
3554 slp_instance instance;
3555 unsigned int i, vf;
3556 bool is_store = false;
3557
3558 if (loop_vinfo)
3559 {
3560 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3561 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3562 }
3563 else
3564 {
3565 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3566 vf = 1;
3567 }
3568
3569 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3570 {
3571 /* Schedule the tree of INSTANCE. */
3572 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3573 instance, vf);
3574 if (dump_enabled_p ())
3575 dump_printf_loc (MSG_NOTE, vect_location,
3576 "vectorizing stmts using SLP.\n");
3577 }
3578
3579 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3580 {
3581 slp_tree root = SLP_INSTANCE_TREE (instance);
3582 gimple store;
3583 unsigned int j;
3584 gimple_stmt_iterator gsi;
3585
3586 /* Remove scalar call stmts. Do not do this for basic-block
3587 vectorization as not all uses may be vectorized.
3588 ??? Why should this be necessary? DCE should be able to
3589 remove the stmts itself.
3590 ??? For BB vectorization we can as well remove scalar
3591 stmts starting from the SLP tree root if they have no
3592 uses. */
3593 if (loop_vinfo)
3594 vect_remove_slp_scalar_calls (root);
3595
3596 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3597 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3598 {
3599 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3600 break;
3601
3602 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3603 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3604 /* Free the attached stmt_vec_info and remove the stmt. */
3605 gsi = gsi_for_stmt (store);
3606 unlink_stmt_vdef (store);
3607 gsi_remove (&gsi, true);
3608 release_defs (store);
3609 free_stmt_vec_info (store);
3610 }
3611 }
3612
3613 return is_store;
3614 }
3615
3616
3617 /* Vectorize the basic block. */
3618
3619 void
3620 vect_slp_transform_bb (basic_block bb)
3621 {
3622 bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3623 gimple_stmt_iterator si;
3624
3625 gcc_assert (bb_vinfo);
3626
3627 if (dump_enabled_p ())
3628 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3629
3630 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3631 {
3632 gimple stmt = gsi_stmt (si);
3633 stmt_vec_info stmt_info;
3634
3635 if (dump_enabled_p ())
3636 {
3637 dump_printf_loc (MSG_NOTE, vect_location,
3638 "------>SLPing statement: ");
3639 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3640 dump_printf (MSG_NOTE, "\n");
3641 }
3642
3643 stmt_info = vinfo_for_stmt (stmt);
3644 gcc_assert (stmt_info);
3645
3646 /* Schedule all the SLP instances when the first SLP stmt is reached. */
3647 if (STMT_SLP_TYPE (stmt_info))
3648 {
3649 vect_schedule_slp (NULL, bb_vinfo);
3650 break;
3651 }
3652 }
3653
3654 if (dump_enabled_p ())
3655 dump_printf_loc (MSG_NOTE, vect_location,
3656 "BASIC BLOCK VECTORIZED\n");
3657
3658 destroy_bb_vec_info (bb_vinfo);
3659 }