1 /* SLP - Basic Block Vectorization
2 Copyright (C) 2007-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "hash-set.h"
28 #include "machmode.h"
29 #include "vec.h"
30 #include "double-int.h"
31 #include "input.h"
32 #include "alias.h"
33 #include "symtab.h"
34 #include "wide-int.h"
35 #include "inchash.h"
36 #include "tree.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "target.h"
40 #include "predict.h"
41 #include "hard-reg-set.h"
42 #include "function.h"
43 #include "basic-block.h"
44 #include "gimple-pretty-print.h"
45 #include "tree-ssa-alias.h"
46 #include "internal-fn.h"
47 #include "gimple-expr.h"
48 #include "is-a.h"
49 #include "gimple.h"
50 #include "gimple-iterator.h"
51 #include "gimple-ssa.h"
52 #include "tree-phinodes.h"
53 #include "ssa-iterators.h"
54 #include "stringpool.h"
55 #include "tree-ssanames.h"
56 #include "tree-pass.h"
57 #include "cfgloop.h"
58 #include "hashtab.h"
59 #include "rtl.h"
60 #include "flags.h"
61 #include "statistics.h"
62 #include "real.h"
63 #include "fixed-value.h"
64 #include "insn-config.h"
65 #include "expmed.h"
66 #include "dojump.h"
67 #include "explow.h"
68 #include "calls.h"
69 #include "emit-rtl.h"
70 #include "varasm.h"
71 #include "stmt.h"
72 #include "expr.h"
73 #include "recog.h" /* FIXME: for insn_data */
74 #include "insn-codes.h"
75 #include "optabs.h"
76 #include "tree-vectorizer.h"
77 #include "langhooks.h"
78 #include "gimple-walk.h"
79
 80 /* Extract the location of the basic block in the source code.
 81 Return the basic block location if found, UNKNOWN_LOCATION otherwise. */
82
83 source_location
84 find_bb_location (basic_block bb)
85 {
86 gimple stmt = NULL;
87 gimple_stmt_iterator si;
88
89 if (!bb)
90 return UNKNOWN_LOCATION;
91
92 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
93 {
94 stmt = gsi_stmt (si);
95 if (gimple_location (stmt) != UNKNOWN_LOCATION)
96 return gimple_location (stmt);
97 }
98
99 return UNKNOWN_LOCATION;
100 }
101
102
103 /* Recursively free the memory allocated for the SLP tree rooted at NODE. */
104
105 static void
106 vect_free_slp_tree (slp_tree node)
107 {
108 int i;
109 slp_tree child;
110
111 if (!node)
112 return;
113
114 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
115 vect_free_slp_tree (child);
116
117 SLP_TREE_CHILDREN (node).release ();
118 SLP_TREE_SCALAR_STMTS (node).release ();
119 SLP_TREE_VEC_STMTS (node).release ();
120 SLP_TREE_LOAD_PERMUTATION (node).release ();
121
122 free (node);
123 }
124
125
126 /* Free the memory allocated for the SLP instance. */
127
128 void
129 vect_free_slp_instance (slp_instance instance)
130 {
131 vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
132 SLP_INSTANCE_LOADS (instance).release ();
133 SLP_INSTANCE_BODY_COST_VEC (instance).release ();
134 free (instance);
135 }
136
137
138 /* Create an SLP node for SCALAR_STMTS. */
139
140 static slp_tree
141 vect_create_new_slp_node (vec<gimple> scalar_stmts)
142 {
143 slp_tree node;
144 gimple stmt = scalar_stmts[0];
145 unsigned int nops;
146
147 if (is_gimple_call (stmt))
148 nops = gimple_call_num_args (stmt);
149 else if (is_gimple_assign (stmt))
150 {
151 nops = gimple_num_ops (stmt) - 1;
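      /* A COND_EXPR keeps its comparison as the first rhs; the two
	 operands of that comparison are collected separately (see
	 vect_get_and_check_slp_defs), hence one extra operand slot.  */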
152 if (gimple_assign_rhs_code (stmt) == COND_EXPR)
153 nops++;
154 }
155 else
156 return NULL;
157
158 node = XNEW (struct _slp_tree);
159 SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
160 SLP_TREE_VEC_STMTS (node).create (0);
161 SLP_TREE_CHILDREN (node).create (nops);
162 SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
163 SLP_TREE_TWO_OPERATORS (node) = false;
164
165 return node;
166 }
167
168
169 /* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
170 operand. */
171 static vec<slp_oprnd_info>
172 vect_create_oprnd_info (int nops, int group_size)
173 {
174 int i;
175 slp_oprnd_info oprnd_info;
176 vec<slp_oprnd_info> oprnds_info;
177
178 oprnds_info.create (nops);
179 for (i = 0; i < nops; i++)
180 {
181 oprnd_info = XNEW (struct _slp_oprnd_info);
182 oprnd_info->def_stmts.create (group_size);
183 oprnd_info->first_dt = vect_uninitialized_def;
184 oprnd_info->first_op_type = NULL_TREE;
185 oprnd_info->first_pattern = false;
186 oprnd_info->second_pattern = false;
187 oprnds_info.quick_push (oprnd_info);
188 }
189
190 return oprnds_info;
191 }
192
193
194 /* Free operands info. */
195
196 static void
197 vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
198 {
199 int i;
200 slp_oprnd_info oprnd_info;
201
202 FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
203 {
204 oprnd_info->def_stmts.release ();
205 XDELETE (oprnd_info);
206 }
207
208 oprnds_info.release ();
209 }
210
211
212 /* Find the place of the data-ref in STMT in the interleaving chain that starts
213 from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */
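   Illustrative example (assuming GROUP_GAP of a non-first element
   counts the distance from the previous chain member): for a chain
   loading a[0], a[1], a[3] the gaps are 1 and 2, so the returned
   places are 0, 1 and 3 respectively.  */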
214
215 static int
216 vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
217 {
218 gimple next_stmt = first_stmt;
219 int result = 0;
220
221 if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
222 return -1;
223
224 do
225 {
226 if (next_stmt == stmt)
227 return result;
228 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
229 if (next_stmt)
230 result += GROUP_GAP (vinfo_for_stmt (next_stmt));
231 }
232 while (next_stmt);
233
234 return -1;
235 }
236
237
238 /* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
239 they are of a valid type and that they match the defs of the first stmt of
 240 the SLP group (stored in OPRNDS_INFO). Return -1 on a fatal
 241 error, 1 if the error could be corrected by swapping operands
 242 of the operation, and 0 if everything is OK. */
243
244 static int
245 vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
246 gimple stmt, unsigned stmt_num,
247 vec<slp_oprnd_info> *oprnds_info)
248 {
249 tree oprnd;
250 unsigned int i, number_of_oprnds;
251 tree def;
252 gimple def_stmt;
253 enum vect_def_type dt = vect_uninitialized_def;
254 struct loop *loop = NULL;
255 bool pattern = false;
256 slp_oprnd_info oprnd_info;
257 int first_op_idx = 1;
258 bool commutative = false;
259 bool first_op_cond = false;
260 bool first = stmt_num == 0;
261 bool second = stmt_num == 1;
262
263 if (loop_vinfo)
264 loop = LOOP_VINFO_LOOP (loop_vinfo);
265
266 if (is_gimple_call (stmt))
267 {
268 number_of_oprnds = gimple_call_num_args (stmt);
269 first_op_idx = 3;
270 }
271 else if (is_gimple_assign (stmt))
272 {
273 enum tree_code code = gimple_assign_rhs_code (stmt);
274 number_of_oprnds = gimple_num_ops (stmt) - 1;
275 if (gimple_assign_rhs_code (stmt) == COND_EXPR)
276 {
277 first_op_cond = true;
278 commutative = true;
279 number_of_oprnds++;
280 }
281 else
282 commutative = commutative_tree_code (code);
283 }
284 else
285 return -1;
286
287 bool swapped = false;
288 for (i = 0; i < number_of_oprnds; i++)
289 {
290 again:
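      /* When SWAPPED, exchange the first two operands: '!i' fetches
	 operand 1 for i == 0 and operand 0 for i == 1; operands past
	 the first two are never swapped.  */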
291 if (first_op_cond)
292 {
293 if (i == 0 || i == 1)
294 oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
295 swapped ? !i : i);
296 else
297 oprnd = gimple_op (stmt, first_op_idx + i - 1);
298 }
299 else
300 oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));
301
302 oprnd_info = (*oprnds_info)[i];
303
304 if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
305 &def, &dt)
306 || (!def_stmt && dt != vect_constant_def))
307 {
308 if (dump_enabled_p ())
309 {
310 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
311 "Build SLP failed: can't find def for ");
312 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
313 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
314 }
315
316 return -1;
317 }
318
319 /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
320 from the pattern. Check that all the stmts of the node are in the
321 pattern. */
322 if (def_stmt && gimple_bb (def_stmt)
323 && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
324 || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
325 && gimple_code (def_stmt) != GIMPLE_PHI))
326 && vinfo_for_stmt (def_stmt)
327 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
328 && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
329 && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
330 {
331 pattern = true;
332 if (!first && !oprnd_info->first_pattern
333 /* Allow different pattern state for the defs of the
334 first stmt in reduction chains. */
335 && (oprnd_info->first_dt != vect_reduction_def
336 || (!second && !oprnd_info->second_pattern)))
337 {
338 if (i == 0
339 && !swapped
340 && commutative)
341 {
342 swapped = true;
343 goto again;
344 }
345
346 if (dump_enabled_p ())
347 {
348 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
349 "Build SLP failed: some of the stmts"
350 " are in a pattern, and others are not ");
351 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
352 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
353 }
354
355 return 1;
356 }
357
358 def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
359 dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));
360
361 if (dt == vect_unknown_def_type)
362 {
363 if (dump_enabled_p ())
364 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
365 "Unsupported pattern.\n");
366 return -1;
367 }
368
369 switch (gimple_code (def_stmt))
370 {
371 case GIMPLE_PHI:
372 def = gimple_phi_result (def_stmt);
373 break;
374
375 case GIMPLE_ASSIGN:
376 def = gimple_assign_lhs (def_stmt);
377 break;
378
379 default:
380 if (dump_enabled_p ())
381 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
382 "unsupported defining stmt:\n");
383 return -1;
384 }
385 }
386
387 if (second)
388 oprnd_info->second_pattern = pattern;
389
390 if (first)
391 {
392 oprnd_info->first_dt = dt;
393 oprnd_info->first_pattern = pattern;
394 oprnd_info->first_op_type = TREE_TYPE (oprnd);
395 }
396 else
397 {
398 /* Not first stmt of the group, check that the def-stmt/s match
399 the def-stmt/s of the first stmt. Allow different definition
400 types for reduction chains: the first stmt must be a
401 vect_reduction_def (a phi node), and the rest
402 vect_internal_def. */
403 if (((oprnd_info->first_dt != dt
404 && !(oprnd_info->first_dt == vect_reduction_def
405 && dt == vect_internal_def)
406 && !((oprnd_info->first_dt == vect_external_def
407 || oprnd_info->first_dt == vect_constant_def)
408 && (dt == vect_external_def
409 || dt == vect_constant_def)))
410 || !types_compatible_p (oprnd_info->first_op_type,
411 TREE_TYPE (oprnd))))
412 {
413 /* Try swapping operands if we got a mismatch. */
414 if (i == 0
415 && !swapped
416 && commutative)
417 {
418 swapped = true;
419 goto again;
420 }
421
422 if (dump_enabled_p ())
423 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
424 "Build SLP failed: different types\n");
425
426 return 1;
427 }
428 }
429
430 /* Check the types of the definitions. */
431 switch (dt)
432 {
433 case vect_constant_def:
434 case vect_external_def:
435 case vect_reduction_def:
436 break;
437
438 case vect_internal_def:
439 oprnd_info->def_stmts.quick_push (def_stmt);
440 break;
441
442 default:
443 /* FORNOW: Not supported. */
444 if (dump_enabled_p ())
445 {
446 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
447 "Build SLP failed: illegal type of def ");
448 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
449 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
450 }
451
452 return -1;
453 }
454 }
455
456 /* Swap operands. */
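  /* For a COND_EXPR both the comparison operands and the comparison
     code are adjusted, so e.g. a < b ? x : y becomes b > a ? x : y,
     which is semantically equivalent but presents the operands in the
     order that matched the first stmt of the group.  */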
457 if (swapped)
458 {
459 if (first_op_cond)
460 {
461 tree cond = gimple_assign_rhs1 (stmt);
462 swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
463 &TREE_OPERAND (cond, 1));
464 TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
465 }
466 else
467 swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
468 gimple_assign_rhs2_ptr (stmt));
469 }
470
471 return 0;
472 }
473
474
 475 /* Verify that the scalar stmts STMTS are isomorphic, do not require
 476 data permutation and are of supported types of operation. Return
 477 true if so; otherwise return false and indicate in *MATCHES
 478 which stmts are not isomorphic to the first one. If MATCHES[0]
 479 is false then this indicates the comparison could not be
 480 carried out or the stmts will never be vectorized by SLP. */
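   /* Illustrative example: for the group { a+b, c+d, e*f, g+h } the
      MULT_EXPR stmt mismatches, so *MATCHES becomes
      { true, true, false, true } and the function returns false;
      since MATCHES[0] stays true the caller may still retry with
      swapped operands.  */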
481
482 static bool
483 vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
484 vec<gimple> stmts, unsigned int group_size,
485 unsigned nops, unsigned int *max_nunits,
486 unsigned int vectorization_factor, bool *matches,
487 bool *two_operators)
488 {
489 unsigned int i;
490 gimple first_stmt = stmts[0], stmt = stmts[0];
491 enum tree_code first_stmt_code = ERROR_MARK;
492 enum tree_code alt_stmt_code = ERROR_MARK;
493 enum tree_code rhs_code = ERROR_MARK;
494 enum tree_code first_cond_code = ERROR_MARK;
495 tree lhs;
496 bool need_same_oprnds = false;
497 tree vectype, scalar_type, first_op1 = NULL_TREE;
498 optab optab;
499 int icode;
500 machine_mode optab_op2_mode;
501 machine_mode vec_mode;
502 struct data_reference *first_dr;
503 HOST_WIDE_INT dummy;
504 gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
505 tree cond;
506
507 /* For every stmt in NODE find its def stmt/s. */
508 FOR_EACH_VEC_ELT (stmts, i, stmt)
509 {
510 matches[i] = false;
511
512 if (dump_enabled_p ())
513 {
514 dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
515 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
516 dump_printf (MSG_NOTE, "\n");
517 }
518
519 /* Fail to vectorize statements marked as unvectorizable. */
520 if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
521 {
522 if (dump_enabled_p ())
523 {
524 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
525 "Build SLP failed: unvectorizable statement ");
526 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
527 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
528 }
529 /* Fatal mismatch. */
530 matches[0] = false;
531 return false;
532 }
533
534 lhs = gimple_get_lhs (stmt);
535 if (lhs == NULL_TREE)
536 {
537 if (dump_enabled_p ())
538 {
539 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
540 "Build SLP failed: not GIMPLE_ASSIGN nor "
541 "GIMPLE_CALL ");
542 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
543 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
544 }
545 /* Fatal mismatch. */
546 matches[0] = false;
547 return false;
548 }
549
550 if (is_gimple_assign (stmt)
551 && gimple_assign_rhs_code (stmt) == COND_EXPR
552 && (cond = gimple_assign_rhs1 (stmt))
553 && !COMPARISON_CLASS_P (cond))
554 {
555 if (dump_enabled_p ())
556 {
557 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
558 "Build SLP failed: condition is not "
559 "comparison ");
560 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
561 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
562 }
563 /* Fatal mismatch. */
564 matches[0] = false;
565 return false;
566 }
567
568 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
569 vectype = get_vectype_for_scalar_type (scalar_type);
570 if (!vectype)
571 {
572 if (dump_enabled_p ())
573 {
574 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
575 "Build SLP failed: unsupported data-type ");
576 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
577 scalar_type);
578 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
579 }
580 /* Fatal mismatch. */
581 matches[0] = false;
582 return false;
583 }
584
585 /* If populating the vector type requires unrolling then fail
586 before adjusting *max_nunits for basic-block vectorization. */
587 if (bb_vinfo
588 && TYPE_VECTOR_SUBPARTS (vectype) > group_size)
589 {
590 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
591 "Build SLP failed: unrolling required "
592 "in basic block SLP\n");
593 /* Fatal mismatch. */
594 matches[0] = false;
595 return false;
596 }
597
598 /* In case of multiple types we need to detect the smallest type. */
599 if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
600 {
601 *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
602 if (bb_vinfo)
603 vectorization_factor = *max_nunits;
604 }
605
606 if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
607 {
608 rhs_code = CALL_EXPR;
609 if (gimple_call_internal_p (call_stmt)
610 || gimple_call_tail_p (call_stmt)
611 || gimple_call_noreturn_p (call_stmt)
612 || !gimple_call_nothrow_p (call_stmt)
613 || gimple_call_chain (call_stmt))
614 {
615 if (dump_enabled_p ())
616 {
617 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
618 "Build SLP failed: unsupported call type ");
619 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
620 call_stmt, 0);
621 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
622 }
623 /* Fatal mismatch. */
624 matches[0] = false;
625 return false;
626 }
627 }
628 else
629 rhs_code = gimple_assign_rhs_code (stmt);
630
631 /* Check the operation. */
632 if (i == 0)
633 {
634 first_stmt_code = rhs_code;
635
636 /* Shift arguments should be equal in all the packed stmts for a
637 vector shift with scalar shift operand. */
638 if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
639 || rhs_code == LROTATE_EXPR
640 || rhs_code == RROTATE_EXPR)
641 {
642 vec_mode = TYPE_MODE (vectype);
643
644 /* First see if we have a vector/vector shift. */
645 optab = optab_for_tree_code (rhs_code, vectype,
646 optab_vector);
647
648 if (!optab
649 || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
650 {
651 /* No vector/vector shift, try for a vector/scalar shift. */
652 optab = optab_for_tree_code (rhs_code, vectype,
653 optab_scalar);
654
655 if (!optab)
656 {
657 if (dump_enabled_p ())
658 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
659 "Build SLP failed: no optab.\n");
660 /* Fatal mismatch. */
661 matches[0] = false;
662 return false;
663 }
664 icode = (int) optab_handler (optab, vec_mode);
665 if (icode == CODE_FOR_nothing)
666 {
667 if (dump_enabled_p ())
668 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
669 "Build SLP failed: "
670 "op not supported by target.\n");
671 /* Fatal mismatch. */
672 matches[0] = false;
673 return false;
674 }
675 optab_op2_mode = insn_data[icode].operand[2].mode;
676 if (!VECTOR_MODE_P (optab_op2_mode))
677 {
678 need_same_oprnds = true;
679 first_op1 = gimple_assign_rhs2 (stmt);
680 }
681 }
682 }
683 else if (rhs_code == WIDEN_LSHIFT_EXPR)
684 {
685 need_same_oprnds = true;
686 first_op1 = gimple_assign_rhs2 (stmt);
687 }
688 }
689 else
690 {
691 if (first_stmt_code != rhs_code
692 && alt_stmt_code == ERROR_MARK)
693 alt_stmt_code = rhs_code;
694 if (first_stmt_code != rhs_code
695 && (first_stmt_code != IMAGPART_EXPR
696 || rhs_code != REALPART_EXPR)
697 && (first_stmt_code != REALPART_EXPR
698 || rhs_code != IMAGPART_EXPR)
699 /* Handle mismatches in plus/minus by computing both
700 and merging the results. */
701 && !((first_stmt_code == PLUS_EXPR
702 || first_stmt_code == MINUS_EXPR)
703 && (alt_stmt_code == PLUS_EXPR
704 || alt_stmt_code == MINUS_EXPR)
705 && rhs_code == alt_stmt_code)
706 && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
707 && (first_stmt_code == ARRAY_REF
708 || first_stmt_code == BIT_FIELD_REF
709 || first_stmt_code == INDIRECT_REF
710 || first_stmt_code == COMPONENT_REF
711 || first_stmt_code == MEM_REF)))
712 {
713 if (dump_enabled_p ())
714 {
715 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
716 "Build SLP failed: different operation "
717 "in stmt ");
718 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
719 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
720 "original stmt ");
721 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
722 first_stmt, 0);
723 }
724 /* Mismatch. */
725 continue;
726 }
727
728 if (need_same_oprnds
729 && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
730 {
731 if (dump_enabled_p ())
732 {
733 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
734 "Build SLP failed: different shift "
735 "arguments in ");
736 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
737 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
738 }
739 /* Mismatch. */
740 continue;
741 }
742
743 if (rhs_code == CALL_EXPR)
744 {
745 gimple first_stmt = stmts[0];
746 if (gimple_call_num_args (stmt) != nops
747 || !operand_equal_p (gimple_call_fn (first_stmt),
748 gimple_call_fn (stmt), 0)
749 || gimple_call_fntype (first_stmt)
750 != gimple_call_fntype (stmt))
751 {
752 if (dump_enabled_p ())
753 {
754 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
755 "Build SLP failed: different calls in ");
756 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
757 stmt, 0);
758 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
759 }
760 /* Mismatch. */
761 continue;
762 }
763 }
764 }
765
766 /* Grouped store or load. */
767 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
768 {
769 if (REFERENCE_CLASS_P (lhs))
770 {
771 /* Store. */
772 ;
773 }
774 else
775 {
776 /* Load. */
777 unsigned unrolling_factor
778 = least_common_multiple
779 (*max_nunits, group_size) / group_size;
780 /* FORNOW: Check that there is no gap between the loads
781 and no gap between the groups when we need to load
782 multiple groups at once.
783 ??? We should enhance this to only disallow gaps
784 inside vectors. */
785 if ((unrolling_factor > 1
786 && ((GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
787 && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
788 /* If the group is split up then GROUP_GAP
789 isn't correct here, nor is GROUP_FIRST_ELEMENT. */
790 || GROUP_SIZE (vinfo_for_stmt (stmt)) > group_size))
791 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
792 && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
793 {
794 if (dump_enabled_p ())
795 {
796 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
797 "Build SLP failed: grouped "
798 "loads have gaps ");
799 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
800 stmt, 0);
801 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
802 }
803 /* Fatal mismatch. */
804 matches[0] = false;
805 return false;
806 }
807
808 /* Check that the size of interleaved loads group is not
809 greater than the SLP group size. */
810 unsigned ncopies
811 = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
812 if (loop_vinfo
813 && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
814 && ((GROUP_SIZE (vinfo_for_stmt (stmt))
815 - GROUP_GAP (vinfo_for_stmt (stmt)))
816 > ncopies * group_size))
817 {
818 if (dump_enabled_p ())
819 {
820 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
821 "Build SLP failed: the number "
822 "of interleaved loads is greater than "
823 "the SLP group size ");
824 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
825 stmt, 0);
826 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
827 }
828 /* Fatal mismatch. */
829 matches[0] = false;
830 return false;
831 }
832
833 old_first_load = first_load;
834 first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
835 if (prev_first_load)
836 {
837 /* Check that there are no loads from different interleaving
838 chains in the same node. */
839 if (prev_first_load != first_load)
840 {
841 if (dump_enabled_p ())
842 {
843 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
844 vect_location,
845 "Build SLP failed: different "
846 "interleaving chains in one node ");
847 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
848 stmt, 0);
849 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
850 }
851 /* Mismatch. */
852 continue;
853 }
854 }
855 else
856 prev_first_load = first_load;
857
858 /* In some cases a group of loads is just the same load
859 repeated N times. Only analyze its cost once. */
860 if (first_load == stmt && old_first_load != first_load)
861 {
862 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
863 if (vect_supportable_dr_alignment (first_dr, false)
864 == dr_unaligned_unsupported)
865 {
866 if (dump_enabled_p ())
867 {
868 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
869 vect_location,
870 "Build SLP failed: unsupported "
871 "unaligned load ");
872 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
873 stmt, 0);
874 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
875 }
876 /* Fatal mismatch. */
877 matches[0] = false;
878 return false;
879 }
880 }
881 }
882 } /* Grouped access. */
883 else
884 {
885 if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
886 {
 887 /* Non-grouped load. */
888 if (dump_enabled_p ())
889 {
890 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
891 "Build SLP failed: not grouped load ");
892 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
893 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
894 }
895
 896 /* FORNOW: Non-grouped loads are not supported. */
897 /* Fatal mismatch. */
898 matches[0] = false;
899 return false;
900 }
901
 902 /* Not a memory operation. */
903 if (TREE_CODE_CLASS (rhs_code) != tcc_binary
904 && TREE_CODE_CLASS (rhs_code) != tcc_unary
905 && TREE_CODE_CLASS (rhs_code) != tcc_expression
906 && rhs_code != CALL_EXPR)
907 {
908 if (dump_enabled_p ())
909 {
910 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
911 "Build SLP failed: operation");
912 dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
913 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
914 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
915 }
916 /* Fatal mismatch. */
917 matches[0] = false;
918 return false;
919 }
920
921 if (rhs_code == COND_EXPR)
922 {
923 tree cond_expr = gimple_assign_rhs1 (stmt);
924
925 if (i == 0)
926 first_cond_code = TREE_CODE (cond_expr);
927 else if (first_cond_code != TREE_CODE (cond_expr))
928 {
929 if (dump_enabled_p ())
930 {
931 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
932 "Build SLP failed: different"
933 " operation");
934 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
935 stmt, 0);
936 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
937 }
938 /* Mismatch. */
939 continue;
940 }
941 }
942 }
943
944 matches[i] = true;
945 }
946
947 for (i = 0; i < group_size; ++i)
948 if (!matches[i])
949 return false;
950
951 /* If we allowed a two-operation SLP node verify the target can cope
952 with the permute we are going to use. */
953 if (alt_stmt_code != ERROR_MARK
954 && TREE_CODE_CLASS (alt_stmt_code) != tcc_reference)
955 {
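      /* Build a blend mask selecting lane i from the vector of
	 FIRST_STMT_CODE results when stmt i uses that code and from
	 the vector of ALT_STMT_CODE results otherwise; indices at or
	 above TYPE_VECTOR_SUBPARTS address the second input vector.
	 E.g. for a 4-lane +,-,+,- group the mask is { 0, 5, 2, 7 }.  */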
956 unsigned char *sel
957 = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype));
958 for (i = 0; i < TYPE_VECTOR_SUBPARTS (vectype); ++i)
959 {
960 sel[i] = i;
961 if (gimple_assign_rhs_code (stmts[i % group_size]) == alt_stmt_code)
962 sel[i] += TYPE_VECTOR_SUBPARTS (vectype);
963 }
964 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
965 {
966 for (i = 0; i < group_size; ++i)
967 if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
968 {
969 matches[i] = false;
970 if (dump_enabled_p ())
971 {
972 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
973 "Build SLP failed: different operation "
974 "in stmt ");
975 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
976 stmts[i], 0);
977 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
978 "original stmt ");
979 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
980 first_stmt, 0);
981 }
982 }
983 return false;
984 }
985 *two_operators = true;
986 }
987
988 return true;
989 }
990
 991 /* Recursively build an SLP tree starting from NODE.
 992 Fail (and return false) if def-stmts are not isomorphic, require
 993 data permutation or are of unsupported types of operation.
 994 Otherwise, return true. Mismatched stmts are flagged in MATCHES;
 995 MATCHES[0] set to false indicates a fatal, non-recoverable
 996 mismatch. */
997
998 static bool
999 vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1000 slp_tree *node, unsigned int group_size,
1001 unsigned int *max_nunits,
1002 vec<slp_tree> *loads,
1003 unsigned int vectorization_factor,
1004 bool *matches, unsigned *npermutes, unsigned *tree_size,
1005 unsigned max_tree_size)
1006 {
1007 unsigned nops, i, this_tree_size = 0;
1008 gimple stmt;
1009
1010 matches[0] = false;
1011
1012 stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
1013 if (is_gimple_call (stmt))
1014 nops = gimple_call_num_args (stmt);
1015 else if (is_gimple_assign (stmt))
1016 {
1017 nops = gimple_num_ops (stmt) - 1;
1018 if (gimple_assign_rhs_code (stmt) == COND_EXPR)
1019 nops++;
1020 }
1021 else
1022 return false;
1023
1024 bool two_operators = false;
1025 if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
1026 SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
1027 max_nunits, vectorization_factor, matches,
1028 &two_operators))
1029 return false;
1030 SLP_TREE_TWO_OPERATORS (*node) = two_operators;
1031
1032 /* If the SLP node is a load, terminate the recursion. */
1033 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
1034 && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
1035 {
1036 loads->safe_push (*node);
1037 return true;
1038 }
1039
1040 /* Get at the operands, verifying they are compatible. */
1041 vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
1042 slp_oprnd_info oprnd_info;
1043 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
1044 {
1045 switch (vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
1046 stmt, i, &oprnds_info))
1047 {
1048 case 0:
1049 break;
1050 case -1:
1051 matches[0] = false;
1052 vect_free_oprnd_info (oprnds_info);
1053 return false;
1054 case 1:
1055 matches[i] = false;
1056 break;
1057 }
1058 }
1059 for (i = 0; i < group_size; ++i)
1060 if (!matches[i])
1061 {
1062 vect_free_oprnd_info (oprnds_info);
1063 return false;
1064 }
1065
1066 stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
1067
1068 /* Create SLP_TREE nodes for the definition node/s. */
1069 FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
1070 {
1071 slp_tree child;
1072 unsigned old_nloads = loads->length ();
1073 unsigned old_max_nunits = *max_nunits;
1074
1075 if (oprnd_info->first_dt != vect_internal_def)
1076 continue;
1077
1078 if (++this_tree_size > max_tree_size)
1079 {
1080 vect_free_oprnd_info (oprnds_info);
1081 return false;
1082 }
1083
1084 child = vect_create_new_slp_node (oprnd_info->def_stmts);
1085 if (!child)
1086 {
1087 vect_free_oprnd_info (oprnds_info);
1088 return false;
1089 }
1090
1091 if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
1092 group_size, max_nunits, loads,
1093 vectorization_factor, matches,
1094 npermutes, &this_tree_size, max_tree_size))
1095 {
1096 oprnd_info->def_stmts = vNULL;
1097 SLP_TREE_CHILDREN (*node).quick_push (child);
1098 continue;
1099 }
1100
1101 /* If the SLP build failed fatally and we analyze a basic-block
1102 simply treat nodes we fail to build as externally defined
1103 (and thus build vectors from the scalar defs).
1104 The cost model will reject outright expensive cases.
 1105 ??? This doesn't treat cases where permutation ultimately
1106 fails (or we don't try permutation below). Ideally we'd
1107 even compute a permutation that will end up with the maximum
1108 SLP tree size... */
1109 if (bb_vinfo
1110 && !matches[0]
1111 /* ??? Rejecting patterns this way doesn't work. We'd have to
1112 do extra work to cancel the pattern so the uses see the
1113 scalar version. */
1114 && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))
1115 {
1116 unsigned int j;
1117 slp_tree grandchild;
1118
1119 /* Roll back. */
1120 *max_nunits = old_max_nunits;
1121 loads->truncate (old_nloads);
1122 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
1123 vect_free_slp_tree (grandchild);
1124 SLP_TREE_CHILDREN (child).truncate (0);
1125
1126 dump_printf_loc (MSG_NOTE, vect_location,
1127 "Building vector operands from scalars\n");
1128 oprnd_info->def_stmts = vNULL;
1129 vect_free_slp_tree (child);
1130 SLP_TREE_CHILDREN (*node).quick_push (NULL);
1131 continue;
1132 }
1133
1134 /* If the SLP build for operand zero failed and operand zero
 1135 and one can be commuted, try that for the scalar stmts
1136 that failed the match. */
1137 if (i == 0
1138 /* A first scalar stmt mismatch signals a fatal mismatch. */
1139 && matches[0]
1140 /* ??? For COND_EXPRs we can swap the comparison operands
1141 as well as the arms under some constraints. */
1142 && nops == 2
1143 && oprnds_info[1]->first_dt == vect_internal_def
1144 && is_gimple_assign (stmt)
1145 && commutative_tree_code (gimple_assign_rhs_code (stmt))
1146 && !SLP_TREE_TWO_OPERATORS (*node)
 1147 /* Do so only if the number of unsuccessful permutes was no more
 1148 than a cut-off, as re-trying the recursive match on
1149 possibly each level of the tree would expose exponential
1150 behavior. */
1151 && *npermutes < 4)
1152 {
1153 unsigned int j;
1154 slp_tree grandchild;
1155
1156 /* Roll back. */
1157 *max_nunits = old_max_nunits;
1158 loads->truncate (old_nloads);
1159 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
1160 vect_free_slp_tree (grandchild);
1161 SLP_TREE_CHILDREN (child).truncate (0);
1162
1163 /* Swap mismatched definition stmts. */
1164 dump_printf_loc (MSG_NOTE, vect_location,
1165 "Re-trying with swapped operands of stmts ");
1166 for (j = 0; j < group_size; ++j)
1167 if (!matches[j])
1168 {
1169 gimple tem = oprnds_info[0]->def_stmts[j];
1170 oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
1171 oprnds_info[1]->def_stmts[j] = tem;
1172 dump_printf (MSG_NOTE, "%d ", j);
1173 }
1174 dump_printf (MSG_NOTE, "\n");
1175 /* And try again with scratch 'matches' ... */
1176 bool *tem = XALLOCAVEC (bool, group_size);
1177 if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
1178 group_size, max_nunits, loads,
1179 vectorization_factor,
1180 tem, npermutes, &this_tree_size,
1181 max_tree_size))
1182 {
1183 /* ... so if successful we can apply the operand swapping
1184 to the GIMPLE IL. This is necessary because for example
1185 vect_get_slp_defs uses operand indexes and thus expects
1186 canonical operand order. */
1187 for (j = 0; j < group_size; ++j)
1188 if (!matches[j])
1189 {
1190 gimple stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
1191 swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
1192 gimple_assign_rhs2_ptr (stmt));
1193 }
1194 oprnd_info->def_stmts = vNULL;
1195 SLP_TREE_CHILDREN (*node).quick_push (child);
1196 continue;
1197 }
1198
1199 ++*npermutes;
1200 }
1201
1202 oprnd_info->def_stmts = vNULL;
1203 vect_free_slp_tree (child);
1204 vect_free_oprnd_info (oprnds_info);
1205 return false;
1206 }
1207
1208 if (tree_size)
1209 *tree_size += this_tree_size;
1210
1211 vect_free_oprnd_info (oprnds_info);
1212 return true;
1213 }
1214
1215 /* Dump a slp tree NODE using flags specified in DUMP_KIND. */
1216
1217 static void
1218 vect_print_slp_tree (int dump_kind, slp_tree node)
1219 {
1220 int i;
1221 gimple stmt;
1222 slp_tree child;
1223
1224 if (!node)
1225 return;
1226
1227 dump_printf (dump_kind, "node ");
1228 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
1229 {
1230 dump_printf (dump_kind, "\n\tstmt %d ", i);
1231 dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
1232 }
1233 dump_printf (dump_kind, "\n");
1234
1235 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
1236 vect_print_slp_tree (dump_kind, child);
1237 }
1238
1239
1240 /* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
1241 If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
1242 J). Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
1243 stmts in NODE are to be marked. */
1244
1245 static void
1246 vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
1247 {
1248 int i;
1249 gimple stmt;
1250 slp_tree child;
1251
1252 if (!node)
1253 return;
1254
1255 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
1256 if (j < 0 || i == j)
1257 STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;
1258
1259 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
1260 vect_mark_slp_stmts (child, mark, j);
1261 }
1262
1263
1264 /* Mark the statements of the tree rooted at NODE as relevant (vect_used). */
1265
1266 static void
1267 vect_mark_slp_stmts_relevant (slp_tree node)
1268 {
1269 int i;
1270 gimple stmt;
1271 stmt_vec_info stmt_info;
1272 slp_tree child;
1273
1274 if (!node)
1275 return;
1276
1277 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
1278 {
1279 stmt_info = vinfo_for_stmt (stmt);
1280 gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
1281 || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
1282 STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
1283 }
1284
1285 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
1286 vect_mark_slp_stmts_relevant (child);
1287 }
1288
1289
1290 /* Rearrange the statements of NODE according to PERMUTATION. */
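/* The stmt at index i moves to slot PERMUTATION[i]; e.g. stmts
   { s0, s1, s2 } with PERMUTATION = { 2, 0, 1 } become
   { s1, s2, s0 }.  */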
1291
1292 static void
1293 vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
1294 vec<unsigned> permutation)
1295 {
1296 gimple stmt;
1297 vec<gimple> tmp_stmts;
1298 unsigned int i;
1299 slp_tree child;
1300
1301 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
1302 vect_slp_rearrange_stmts (child, group_size, permutation);
1303
1304 gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
1305 tmp_stmts.create (group_size);
1306 tmp_stmts.quick_grow_cleared (group_size);
1307
1308 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
1309 tmp_stmts[permutation[i]] = stmt;
1310
1311 SLP_TREE_SCALAR_STMTS (node).release ();
1312 SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
1313 }
1314
1315
1316 /* Check if the required load permutations in the SLP instance
1317 SLP_INSTN are supported. */
1318
1319 static bool
1320 vect_supported_load_permutation_p (slp_instance slp_instn)
1321 {
1322 unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
1323 unsigned int i, j, k, next;
1324 sbitmap load_index;
1325 slp_tree node;
1326 gimple stmt, load, next_load, first_load;
1327 struct data_reference *dr;
1328
1329 if (dump_enabled_p ())
1330 {
1331 dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
1332 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1333 if (node->load_permutation.exists ())
1334 FOR_EACH_VEC_ELT (node->load_permutation, j, next)
1335 dump_printf (MSG_NOTE, "%d ", next);
1336 else
1337 for (k = 0; k < group_size; ++k)
1338 dump_printf (MSG_NOTE, "%d ", k);
1339 dump_printf (MSG_NOTE, "\n");
1340 }
1341
1342 /* In case of reduction every load permutation is allowed, since the order
1343 of the reduction statements is not important (as opposed to the case of
1344 grouped stores). The only condition we need to check is that all the
1345 load nodes are of the same size and have the same permutation (and then
1346 rearrange all the nodes of the SLP instance according to this
1347 permutation). */
1348
1349 /* Check that all the load nodes are of the same size. */
1350 /* ??? Can't we assert this? */
1351 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1352 if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
1353 return false;
1354
1355 node = SLP_INSTANCE_TREE (slp_instn);
1356 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
1357
1358 /* Reduction (there are no data-refs in the root).
1359 In reduction chain the order of the loads is important. */
1360 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
1361 && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
1362 {
1363 slp_tree load;
1364 unsigned int lidx;
1365
1366 /* Compare all the permutation sequences to the first one. We know
1367 that at least one load is permuted. */
1368 node = SLP_INSTANCE_LOADS (slp_instn)[0];
1369 if (!node->load_permutation.exists ())
1370 return false;
1371 for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
1372 {
1373 if (!load->load_permutation.exists ())
1374 return false;
1375 FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
1376 if (lidx != node->load_permutation[j])
1377 return false;
1378 }
1379
1380 /* Check that the loads in the first sequence are different and there
1381 are no gaps between them. */
1382 load_index = sbitmap_alloc (group_size);
1383 bitmap_clear (load_index);
1384 FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
1385 {
1386 if (bitmap_bit_p (load_index, lidx))
1387 {
1388 sbitmap_free (load_index);
1389 return false;
1390 }
1391 bitmap_set_bit (load_index, lidx);
1392 }
1393 for (i = 0; i < group_size; i++)
1394 if (!bitmap_bit_p (load_index, i))
1395 {
1396 sbitmap_free (load_index);
1397 return false;
1398 }
1399 sbitmap_free (load_index);
1400
1401 /* This permutation is valid for reduction. Since the order of the
1402 statements in the nodes is not important unless they are memory
1403 accesses, we can rearrange the statements in all the nodes
1404 according to the order of the loads. */
1405 vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
1406 node->load_permutation);
1407
1408 /* We are done, no actual permutations need to be generated. */
1409 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1410 SLP_TREE_LOAD_PERMUTATION (node).release ();
1411 return true;
1412 }
1413
1414 /* In basic block vectorization we allow any subchain of an interleaving
1415 chain.
 1416 FORNOW: not supported in loop SLP because of realignment complications. */
1417 if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
1418 {
1419 /* Check whether the loads in an instance form a subchain and thus
1420 no permutation is necessary. */
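      /* A subchain is a contiguous run of the interleaving chain,
	 e.g. the loads a[1], a[2] out of a chain a[0]..a[3]; such a
	 run is walked below via GROUP_NEXT_ELEMENT and needs no
	 permutation.  */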
1421 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1422 {
1423 if (!SLP_TREE_LOAD_PERMUTATION (node).exists ())
1424 continue;
1425 bool subchain_p = true;
1426 next_load = NULL;
1427 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
1428 {
1429 if (j != 0 && next_load != load)
1430 {
1431 subchain_p = false;
1432 break;
1433 }
1434 next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
1435 }
1436 if (subchain_p)
1437 SLP_TREE_LOAD_PERMUTATION (node).release ();
1438 else
1439 {
1440 /* Verify the permutation can be generated. */
1441 vec<tree> tem;
1442 if (!vect_transform_slp_perm_load (node, tem, NULL,
1443 1, slp_instn, true))
1444 {
1445 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
1446 vect_location,
1447 "unsupported load permutation\n");
1448 return false;
1449 }
1450 }
1451 }
1452
1453 /* Check that the alignment of the first load in every subchain, i.e.,
1454 the first statement in every load node, is supported.
1455 ??? This belongs in alignment checking. */
1456 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1457 {
1458 first_load = SLP_TREE_SCALAR_STMTS (node)[0];
1459 if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
1460 {
1461 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
1462 if (vect_supportable_dr_alignment (dr, false)
1463 == dr_unaligned_unsupported)
1464 {
1465 if (dump_enabled_p ())
1466 {
1467 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
1468 vect_location,
1469 "unsupported unaligned load ");
1470 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
1471 first_load, 0);
1472 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
1473 }
1474 return false;
1475 }
1476 }
1477 }
1478
1479 return true;
1480 }
1481
 1482 /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
 1483 GROUP_SIZE, where each sequence of identical drs is of GROUP_SIZE
 1484 length as well (unless this is a reduction). */
1485 if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
1486 return false;
1487 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1488 if (!node->load_permutation.exists ())
1489 return false;
1490
1491 load_index = sbitmap_alloc (group_size);
1492 bitmap_clear (load_index);
1493 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1494 {
1495 unsigned int lidx = node->load_permutation[0];
1496 if (bitmap_bit_p (load_index, lidx))
1497 {
1498 sbitmap_free (load_index);
1499 return false;
1500 }
1501 bitmap_set_bit (load_index, lidx);
1502 FOR_EACH_VEC_ELT (node->load_permutation, j, k)
1503 if (k != lidx)
1504 {
1505 sbitmap_free (load_index);
1506 return false;
1507 }
1508 }
1509 for (i = 0; i < group_size; i++)
1510 if (!bitmap_bit_p (load_index, i))
1511 {
1512 sbitmap_free (load_index);
1513 return false;
1514 }
1515 sbitmap_free (load_index);
1516
1517 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1518 if (node->load_permutation.exists ()
1519 && !vect_transform_slp_perm_load
1520 (node, vNULL, NULL,
1521 SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
1522 return false;
1523 return true;
1524 }
1525
1526
 1527 /* Find the last scalar statement of the SLP tree NODE. */
1528
1529 static gimple
1530 vect_find_last_scalar_stmt_in_slp (slp_tree node)
1531 {
1532 gimple last = NULL, stmt;
1533
1534 for (int i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &stmt); i++)
1535 {
1536 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1537 if (is_pattern_stmt_p (stmt_vinfo))
1538 last = get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo), last);
1539 else
1540 last = get_later_stmt (stmt, last);
1541 }
1542
1543 return last;
1544 }
1545
1546 /* Compute the cost for the SLP node NODE in the SLP instance INSTANCE. */
1547
1548 static void
1549 vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1550 slp_instance instance, slp_tree node,
1551 stmt_vector_for_cost *prologue_cost_vec,
1552 unsigned ncopies_for_cost)
1553 {
1554 stmt_vector_for_cost *body_cost_vec = &SLP_INSTANCE_BODY_COST_VEC (instance);
1555
1556 unsigned i;
1557 slp_tree child;
1558 gimple stmt, s;
1559 stmt_vec_info stmt_info;
1560 tree lhs;
1561 unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
1562
1563 /* Recurse down the SLP tree. */
1564 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
1565 if (child)
1566 vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
1567 instance, child, prologue_cost_vec,
1568 ncopies_for_cost);
1569
1570 /* Look at the first scalar stmt to determine the cost. */
1571 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
1572 stmt_info = vinfo_for_stmt (stmt);
1573 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1574 {
1575 if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
1576 vect_model_store_cost (stmt_info, ncopies_for_cost, false,
1577 vect_uninitialized_def,
1578 node, prologue_cost_vec, body_cost_vec);
1579 else
1580 {
1581 int i;
1582 gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
1583 vect_model_load_cost (stmt_info, ncopies_for_cost, false,
1584 node, prologue_cost_vec, body_cost_vec);
1585 /* If the load is permuted record the cost for the permutation.
1586 ??? Loads from multiple chains are let through here only
1587 for a single special case involving complex numbers where
1588 in the end no permutation is necessary. */
1589 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
1590 if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
1591 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
1592 && vect_get_place_in_interleaving_chain
1593 (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
1594 {
1595 record_stmt_cost (body_cost_vec, group_size, vec_perm,
1596 stmt_info, 0, vect_body);
1597 break;
1598 }
1599 }
1600 }
1601 else
1602 {
1603 record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
1604 stmt_info, 0, vect_body);
1605 if (SLP_TREE_TWO_OPERATORS (node))
1606 {
1607 record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
1608 stmt_info, 0, vect_body);
1609 record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
1610 stmt_info, 0, vect_body);
1611 }
1612 }
1613
1614 /* Scan operands and account for prologue cost of constants/externals.
1615 ??? This over-estimates cost for multiple uses and should be
1616 re-engineered. */
1617 lhs = gimple_get_lhs (stmt);
1618 for (i = 0; i < gimple_num_ops (stmt); ++i)
1619 {
1620 tree def, op = gimple_op (stmt, i);
1621 gimple def_stmt;
1622 enum vect_def_type dt;
1623 if (!op || op == lhs)
1624 continue;
1625 if (vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo,
1626 &def_stmt, &def, &dt))
1627 {
1628 /* Without looking at the actual initializer a vector of
1629 constants can be implemented as load from the constant pool.
1630 ??? We need to pass down stmt_info for a vector type
1631 even if it points to the wrong stmt. */
1632 if (dt == vect_constant_def)
1633 record_stmt_cost (prologue_cost_vec, 1, vector_load,
1634 stmt_info, 0, vect_prologue);
1635 else if (dt == vect_external_def)
1636 record_stmt_cost (prologue_cost_vec, 1, vec_construct,
1637 stmt_info, 0, vect_prologue);
1638 }
1639 }
1640 }
1641
1642 /* Compute the cost for the SLP instance INSTANCE. */
1643
1644 static void
1645 vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1646 slp_instance instance, unsigned nunits)
1647 {
1648 stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
1649 unsigned ncopies_for_cost;
1650 stmt_info_for_cost *si;
1651 unsigned i;
1652
1653 /* Calculate the number of vector stmts to create based on the unrolling
1654 factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
 1655 GROUP_SIZE / NUNITS otherwise). */
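  /* E.g. with NUNITS == 4 and GROUP_SIZE == 6 this gives
     least_common_multiple (4, 6) / 4 == 3 vector stmts.  */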
1656 unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
1657 ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
1658
1659 prologue_cost_vec.create (10);
1660 body_cost_vec.create (10);
1661 SLP_INSTANCE_BODY_COST_VEC (instance) = body_cost_vec;
1662 vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
1663 instance, SLP_INSTANCE_TREE (instance),
1664 &prologue_cost_vec, ncopies_for_cost);
1665
1666 /* Record the prologue costs, which were delayed until we were
1667 sure that SLP was successful. Unlike the body costs, we know
1668 the final values now regardless of the loop vectorization factor. */
1669 void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
1670 : BB_VINFO_TARGET_COST_DATA (bb_vinfo));
1671 FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
1672 {
1673 struct _stmt_vec_info *stmt_info
1674 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
1675 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
1676 si->misalign, vect_prologue);
1677 }
1678
1679 prologue_cost_vec.release ();
1680 }
1681
1682 /* Analyze an SLP instance starting from a group of grouped stores. Call
1683 vect_build_slp_tree to build a tree of packed stmts if possible.
1684 Return FALSE if it's impossible to SLP any stmt in the loop. */
1685
1686 static bool
1687 vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1688 gimple stmt, unsigned max_tree_size)
1689 {
1690 slp_instance new_instance;
1691 slp_tree node;
1692 unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
1693 unsigned int unrolling_factor = 1, nunits;
1694 tree vectype, scalar_type = NULL_TREE;
1695 gimple next;
1696 unsigned int vectorization_factor = 0;
1697 int i;
1698 unsigned int max_nunits = 0;
1699 vec<slp_tree> loads;
1700 struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
1701 vec<gimple> scalar_stmts;
1702
1703 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
1704 {
1705 if (dr)
1706 {
1707 scalar_type = TREE_TYPE (DR_REF (dr));
1708 vectype = get_vectype_for_scalar_type (scalar_type);
1709 }
1710 else
1711 {
1712 gcc_assert (loop_vinfo);
1713 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1714 }
1715
1716 group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
1717 }
1718 else
1719 {
1720 gcc_assert (loop_vinfo);
1721 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1722 group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
1723 }
1724
1725 if (!vectype)
1726 {
1727 if (dump_enabled_p ())
1728 {
1729 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1730 "Build SLP failed: unsupported data-type ");
1731 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
1732 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
1733 }
1734
1735 return false;
1736 }
1737
1738 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1739 if (loop_vinfo)
1740 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1741 else
1742 vectorization_factor = nunits;
1743
1744 /* Calculate the unrolling factor. */
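  /* E.g. with NUNITS == 4 and GROUP_SIZE == 6 the unrolling factor is
     least_common_multiple (4, 6) / 6 == 2: two copies of the group are
     needed to fill whole vectors.  */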
1745 unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
1746 if (unrolling_factor != 1 && !loop_vinfo)
1747 {
1748 if (dump_enabled_p ())
1749 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1750 "Build SLP failed: unrolling required in basic"
1751 " block SLP\n");
1752
1753 return false;
1754 }
1755
1756 /* Create a node (a root of the SLP tree) for the packed grouped stores. */
1757 scalar_stmts.create (group_size);
1758 next = stmt;
1759 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
1760 {
1761 /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS. */
1762 while (next)
1763 {
1764 if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
1765 && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
1766 scalar_stmts.safe_push (
1767 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
1768 else
1769 scalar_stmts.safe_push (next);
1770 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
1771 }
1772 }
1773 else
1774 {
1775 /* Collect reduction statements. */
1776 vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
1777 for (i = 0; reductions.iterate (i, &next); i++)
1778 scalar_stmts.safe_push (next);
1779 }
1780
1781 node = vect_create_new_slp_node (scalar_stmts);
1782
1783 loads.create (group_size);
1784
1785 /* Build the tree for the SLP instance. */
1786 bool *matches = XALLOCAVEC (bool, group_size);
1787 unsigned npermutes = 0;
1788 if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
1789 &max_nunits, &loads,
1790 vectorization_factor, matches, &npermutes, NULL,
1791 max_tree_size))
1792 {
1793 /* Calculate the unrolling factor based on the smallest type. */
1794 if (max_nunits > nunits)
1795 unrolling_factor = least_common_multiple (max_nunits, group_size)
1796 / group_size;
1797
1798 if (unrolling_factor != 1 && !loop_vinfo)
1799 {
1800 if (dump_enabled_p ())
1801 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1802 "Build SLP failed: unrolling required in basic"
1803 " block SLP\n");
1804 vect_free_slp_tree (node);
1805 loads.release ();
1806 return false;
1807 }
1808
1809 /* Create a new SLP instance. */
1810 new_instance = XNEW (struct _slp_instance);
1811 SLP_INSTANCE_TREE (new_instance) = node;
1812 SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
1813 SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
1814 SLP_INSTANCE_BODY_COST_VEC (new_instance) = vNULL;
1815 SLP_INSTANCE_LOADS (new_instance) = loads;
1816
1817 /* Compute the load permutation. */
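      /* LOAD_PERMUTATION[j] records the position in the interleaving
	 chain of the j-th scalar load of a node; e.g. a node loading
	 a[2], a[3], a[0], a[1] from the chain a[0]..a[3] gets
	 { 2, 3, 0, 1 }. Identity permutations are discarded.  */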
1818 slp_tree load_node;
1819 bool loads_permuted = false;
1820 FOR_EACH_VEC_ELT (loads, i, load_node)
1821 {
1822 vec<unsigned> load_permutation;
1823 int j;
1824 gimple load, first_stmt;
1825 bool this_load_permuted = false;
1826 load_permutation.create (group_size);
1827 first_stmt = GROUP_FIRST_ELEMENT
1828 (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
1829 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
1830 {
1831 int load_place
1832 = vect_get_place_in_interleaving_chain (load, first_stmt);
1833 gcc_assert (load_place != -1);
1834 if (load_place != j)
1835 this_load_permuted = true;
1836 load_permutation.safe_push (load_place);
1837 }
1838 if (!this_load_permuted)
1839 {
1840 load_permutation.release ();
1841 continue;
1842 }
1843 SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
1844 loads_permuted = true;
1845 }
1846
1847 if (loads_permuted)
1848 {
1849 if (!vect_supported_load_permutation_p (new_instance))
1850 {
1851 if (dump_enabled_p ())
1852 {
1853 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1854 "Build SLP failed: unsupported load "
1855 "permutation ");
1856 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
1857 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
1858 }
1859 vect_free_slp_instance (new_instance);
1860 return false;
1861 }
1862 }
1863
1864
1865 if (loop_vinfo)
1866 {
1867 /* Compute the costs of this SLP instance. Delay this for BB
1868 vectorization as we don't have vector types computed yet. */
1869 vect_analyze_slp_cost (loop_vinfo, bb_vinfo,
1870 new_instance, TYPE_VECTOR_SUBPARTS (vectype));
1871 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
1872 }
1873 else
1874 BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);
1875
1876 if (dump_enabled_p ())
1877 vect_print_slp_tree (MSG_NOTE, node);
1878
1879 return true;
1880 }
1881
1882 /* Failed to SLP. */
1883 /* Free the allocated memory. */
1884 vect_free_slp_tree (node);
1885 loads.release ();
1886
1887 return false;
1888 }
1889
1890
1891 /* Check if there are stmts in the loop that can be vectorized using SLP.
1892 Build SLP trees of packed scalar stmts if SLP is possible. */
1893
1894 bool
1895 vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1896 unsigned max_tree_size)
1897 {
1898 unsigned int i;
1899 vec<gimple> grouped_stores;
1900 vec<gimple> reductions = vNULL;
1901 vec<gimple> reduc_chains = vNULL;
1902 gimple first_element;
1903 bool ok = false;
1904
1905 if (dump_enabled_p ())
1906 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");
1907
1908 if (loop_vinfo)
1909 {
1910 grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
1911 reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
1912 reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
1913 }
1914 else
1915 grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);
1916
1917 /* Find SLP sequences starting from groups of grouped stores. */
1918 FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
1919 if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
1920 max_tree_size))
1921 ok = true;
1922
1923 if (reduc_chains.length () > 0)
1924 {
1925 /* Find SLP sequences starting from reduction chains. */
1926 FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
1927 if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
1928 max_tree_size))
1929 ok = true;
1930 else
1931 return false;
1932
1933 /* Don't try to vectorize SLP reductions if reduction chain was
1934 detected. */
1935 return ok;
1936 }
1937
1938 /* Find SLP sequences starting from groups of reductions. */
1939 if (reductions.length () > 1
1940 && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0],
1941 max_tree_size))
1942 ok = true;
1943
1944 return true;
1945 }
1946
1947
1948 /* For each possible SLP instance decide whether to SLP it and calculate the
1949 overall unrolling factor needed to SLP the loop. Return TRUE if we decided
1950 to SLP at least one instance. */
1951
1952 bool
1953 vect_make_slp_decision (loop_vec_info loop_vinfo)
1954 {
1955 unsigned int i, unrolling_factor = 1;
1956 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
1957 slp_instance instance;
1958 int decided_to_slp = 0;
1959
1960 if (dump_enabled_p ())
1961 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
1962 "\n");
1963
1964 FOR_EACH_VEC_ELT (slp_instances, i, instance)
1965 {
1966 /* FORNOW: SLP if you can. */
1967 if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
1968 unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);
1969
1970 /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts. Later we
1971 call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
1972 loop-based vectorization. Such stmts will be marked as HYBRID. */
1973 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
1974 decided_to_slp++;
1975 }
1976
1977 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
1978
1979 if (decided_to_slp && dump_enabled_p ())
1980 dump_printf_loc (MSG_NOTE, vect_location,
1981 "Decided to SLP %d instances. Unrolling factor %d\n",
1982 decided_to_slp, unrolling_factor);
1983
1984 return (decided_to_slp > 0);
1985 }
1986
1987
1988 /* Find stmts that must be both vectorized and SLPed (since they feed stmts that
1989 can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */
1990
1991 static void
1992 vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
1993 {
1994 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[i];
1995 imm_use_iterator imm_iter;
1996 gimple use_stmt;
1997 stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
1998 slp_tree child;
1999 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2000 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2001 int j;
2002
2003 /* Propagate hybrid down the SLP tree. */
2004 if (stype == hybrid)
2005 ;
2006 else if (HYBRID_SLP_STMT (stmt_vinfo))
2007 stype = hybrid;
2008 else
2009 {
2010 /* Check if a pure SLP stmt has uses in non-SLP stmts. */
2011 gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
2012 if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2013 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
2014 if (gimple_bb (use_stmt)
2015 && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2016 && (use_vinfo = vinfo_for_stmt (use_stmt))
2017 && !STMT_SLP_TYPE (use_vinfo)
2018 && (STMT_VINFO_RELEVANT (use_vinfo)
2019 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo))
2020 || (STMT_VINFO_IN_PATTERN_P (use_vinfo)
2021 && STMT_VINFO_RELATED_STMT (use_vinfo)
2022 && !STMT_SLP_TYPE (vinfo_for_stmt
2023 (STMT_VINFO_RELATED_STMT (use_vinfo)))))
2024 && !(gimple_code (use_stmt) == GIMPLE_PHI
2025 && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
2026 stype = hybrid;
2027 }
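      /* For instance (illustration only): a stmt in an SLP group whose
	 result is also consumed by a relevant stmt that is not part of any
	 SLP instance must be vectorized by both the SLP and the loop-based
	 schemes, so it is marked hybrid here.  */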
2028
2029 if (stype == hybrid)
2030 {
2031 if (dump_enabled_p ())
2032 {
2033 dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
2034 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
2035 }
2036 STMT_SLP_TYPE (stmt_vinfo) = hybrid;
2037 }
2038
2039 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2040 if (child)
2041 vect_detect_hybrid_slp_stmts (child, i, stype);
2042 }
2043
2044 /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */
2045
2046 static tree
2047 vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
2048 {
2049 walk_stmt_info *wi = (walk_stmt_info *)data;
2050 struct loop *loopp = (struct loop *)wi->info;
2051
2052 if (wi->is_lhs)
2053 return NULL_TREE;
2054
2055 if (TREE_CODE (*tp) == SSA_NAME
2056 && !SSA_NAME_IS_DEFAULT_DEF (*tp))
2057 {
2058 gimple def_stmt = SSA_NAME_DEF_STMT (*tp);
2059 if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
2060 && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
2061 {
2062 if (dump_enabled_p ())
2063 {
2064 dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
2065 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
2066 }
2067 STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
2068 }
2069 }
2070
2071 return NULL_TREE;
2072 }
2073
2074 static tree
2075 vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
2076 walk_stmt_info *)
2077 {
2078 /* If the stmt is in a SLP instance then this isn't a reason
2079 to mark use definitions in other SLP instances as hybrid. */
2080 if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
2081 *handled = true;
2082 return NULL_TREE;
2083 }
2084
2085 /* Find stmts that must be both vectorized and SLPed. */
2086
2087 void
2088 vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
2089 {
2090 unsigned int i;
2091 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2092 slp_instance instance;
2093
2094 if (dump_enabled_p ())
2095 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
2096 "\n");
2097
2098 /* First walk all pattern stmts in the loop and mark the defs of their uses
2099 as hybrid, because immediate uses inside pattern stmts are not recorded. */
2100 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2101 {
2102 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2103 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
2104 gsi_next (&gsi))
2105 {
2106 gimple stmt = gsi_stmt (gsi);
2107 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2108 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2109 {
2110 walk_stmt_info wi;
2111 memset (&wi, 0, sizeof (wi));
2112 wi.info = LOOP_VINFO_LOOP (loop_vinfo);
2113 gimple_stmt_iterator gsi2
2114 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
2115 walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
2116 vect_detect_hybrid_slp_1, &wi);
2117 walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
2118 vect_detect_hybrid_slp_2,
2119 vect_detect_hybrid_slp_1, &wi);
2120 }
2121 }
2122 }
2123
2124 /* Then walk the SLP instance trees marking stmts with uses in
2125 non-SLP stmts as hybrid, also propagating hybrid down the
2126 SLP tree, collecting the above info on-the-fly. */
2127 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2128 {
2129 for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
2130 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
2131 i, pure_slp);
2132 }
2133 }
2134
2135
2136 /* Create and initialize a new bb_vec_info struct for BB, as well as
2137 stmt_vec_info structs for all the stmts in it. */
2138
2139 static bb_vec_info
2140 new_bb_vec_info (basic_block bb)
2141 {
2142 bb_vec_info res = NULL;
2143 gimple_stmt_iterator gsi;
2144
2145 res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
2146 BB_VINFO_BB (res) = bb;
2147
2148 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2149 {
2150 gimple stmt = gsi_stmt (gsi);
2151 gimple_set_uid (stmt, 0);
2152 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
2153 }
2154
2155 BB_VINFO_GROUPED_STORES (res).create (10);
2156 BB_VINFO_SLP_INSTANCES (res).create (2);
2157 BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
2158
2159 bb->aux = res;
2160 return res;
2161 }
2162
2163
2164 /* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
2165 stmts in the basic block. */
2166
2167 static void
2168 destroy_bb_vec_info (bb_vec_info bb_vinfo)
2169 {
2170 vec<slp_instance> slp_instances;
2171 slp_instance instance;
2172 basic_block bb;
2173 gimple_stmt_iterator si;
2174 unsigned i;
2175
2176 if (!bb_vinfo)
2177 return;
2178
2179 bb = BB_VINFO_BB (bb_vinfo);
2180
2181 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2182 {
2183 gimple stmt = gsi_stmt (si);
2184 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2185
2186 if (stmt_info)
2187 /* Free stmt_vec_info. */
2188 free_stmt_vec_info (stmt);
2189 }
2190
2191 vect_destroy_datarefs (NULL, bb_vinfo);
2192 free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
2193 BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
2194 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2195 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2196 vect_free_slp_instance (instance);
2197 BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
2198 destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
2199 free (bb_vinfo);
2200 bb->aux = NULL;
2201 }
2202
2203
2204 /* Analyze statements contained in SLP tree node after recursively analyzing
2205 the subtree. Return TRUE if the operations are supported. */
2206
2207 static bool
2208 vect_slp_analyze_node_operations (slp_tree node)
2209 {
2210 bool dummy;
2211 int i;
2212 gimple stmt;
2213 slp_tree child;
2214
2215 if (!node)
2216 return true;
2217
2218 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2219 if (!vect_slp_analyze_node_operations (child))
2220 return false;
2221
2222 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2223 {
2224 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2225 gcc_assert (stmt_info);
2226 gcc_assert (STMT_SLP_TYPE (stmt_info) != loop_vect);
2227
2228 if (!vect_analyze_stmt (stmt, &dummy, node))
2229 return false;
2230 }
2231
2232 return true;
2233 }
2234
2235
2236 /* Analyze statements in SLP instances of the basic block. Return TRUE if the
2237 operations are supported. */
2238
2239 bool
2240 vect_slp_analyze_operations (vec<slp_instance> slp_instances)
2241 {
2242 slp_instance instance;
2243 int i;
2244
2245 if (dump_enabled_p ())
2246 dump_printf_loc (MSG_NOTE, vect_location,
2247 "=== vect_slp_analyze_operations ===\n");
2248
2249 for (i = 0; slp_instances.iterate (i, &instance); )
2250 {
2251 if (!vect_slp_analyze_node_operations (SLP_INSTANCE_TREE (instance)))
2252 {
2253 dump_printf_loc (MSG_NOTE, vect_location,
2254 "removing SLP instance operations starting from: ");
2255 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
2256 SLP_TREE_SCALAR_STMTS
2257 (SLP_INSTANCE_TREE (instance))[0], 0);
2258 vect_free_slp_instance (instance);
2259 slp_instances.ordered_remove (i);
2260 }
2261 else
2262 i++;
2263 }
2264
2265 if (!slp_instances.length ())
2266 return false;
2267
2268 return true;
2269 }
2270
2271
2272 /* Compute the scalar cost of the SLP node NODE and its children
2273 and return it. Do not account defs that are marked in LIFE and
2274 update LIFE according to uses of NODE. */
2275
2276 static unsigned
2277 vect_bb_slp_scalar_cost (basic_block bb,
2278 slp_tree node, vec<bool, va_heap> *life)
2279 {
2280 unsigned scalar_cost = 0;
2281 unsigned i;
2282 gimple stmt;
2283 slp_tree child;
2284
2285 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2286 {
2287 unsigned stmt_cost;
2288 ssa_op_iter op_iter;
2289 def_operand_p def_p;
2290 stmt_vec_info stmt_info;
2291
2292 if ((*life)[i])
2293 continue;
2294
2295 /* If there is a non-vectorized use of the defs then the scalar
2296 stmt is kept live in which case we do not account it or any
2297 required defs in the SLP children in the scalar cost. This
2298 way we make the vectorization more costly when compared to
2299 the scalar cost. */
2300 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2301 {
2302 imm_use_iterator use_iter;
2303 gimple use_stmt;
2304 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2305 if (!is_gimple_debug (use_stmt)
2306 && (gimple_code (use_stmt) == GIMPLE_PHI
2307 || gimple_bb (use_stmt) != bb
2308 || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
2309 {
2310 (*life)[i] = true;
2311 BREAK_FROM_IMM_USE_STMT (use_iter);
2312 }
2313 }
2314 if ((*life)[i])
2315 continue;
2316
2317 stmt_info = vinfo_for_stmt (stmt);
2318 if (STMT_VINFO_DATA_REF (stmt_info))
2319 {
2320 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2321 stmt_cost = vect_get_stmt_cost (scalar_load);
2322 else
2323 stmt_cost = vect_get_stmt_cost (scalar_store);
2324 }
2325 else
2326 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2327
2328 scalar_cost += stmt_cost;
2329 }
2330
2331 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2332 if (child)
2333 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2334
2335 return scalar_cost;
2336 }
2337
2338 /* Check if vectorization of the basic block is profitable. */
2339
2340 static bool
2341 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2342 {
2343 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2344 slp_instance instance;
2345 int i, j;
2346 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2347 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2348 void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
2349 stmt_vec_info stmt_info = NULL;
2350 stmt_vector_for_cost body_cost_vec;
2351 stmt_info_for_cost *ci;
2352
2353 /* Calculate vector costs. */
2354 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2355 {
2356 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2357
2358 FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
2359 {
2360 stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
2361 (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
2362 stmt_info, ci->misalign, vect_body);
2363 }
2364 }
2365
2366 /* Calculate scalar cost. */
2367 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2368 {
2369 auto_vec<bool, 20> life;
2370 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2371 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2372 SLP_INSTANCE_TREE (instance),
2373 &life);
2374 }
2375
2376 /* Complete the target-specific cost calculation. */
2377 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2378 &vec_inside_cost, &vec_epilogue_cost);
2379
2380 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2381
2382 if (dump_enabled_p ())
2383 {
2384 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2385 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2386 vec_inside_cost);
2387 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2388 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2389 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2390 }
2391
2392 /* Vectorization is profitable if its cost is less than the cost of the
2393 scalar version. */
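  /* E.g. (numbers for illustration only): with a scalar cost of 12 and
     vector prologue/epilogue/inside costs of 2, 1 and 6, the test below
     compares 2 + 1 + 6 == 9 against 12 and accepts the vectorization.  */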
2394 if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2395 return false;
2396
2397 return true;
2398 }
2399
2400 /* Check if the basic block can be vectorized. */
2401
2402 static bb_vec_info
2403 vect_slp_analyze_bb_1 (basic_block bb)
2404 {
2405 bb_vec_info bb_vinfo;
2406 vec<slp_instance> slp_instances;
2407 slp_instance instance;
2408 int i;
2409 int min_vf = 2;
2410 unsigned n_stmts = 0;
2411
2412 bb_vinfo = new_bb_vec_info (bb);
2413 if (!bb_vinfo)
2414 return NULL;
2415
2416 if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
2417 {
2418 if (dump_enabled_p ())
2419 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2420 "not vectorized: unhandled data-ref in basic "
2421 "block.\n");
2422
2423 destroy_bb_vec_info (bb_vinfo);
2424 return NULL;
2425 }
2426
2427 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2428 {
2429 if (dump_enabled_p ())
2430 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2431 "not vectorized: not enough data-refs in "
2432 "basic block.\n");
2433
2434 destroy_bb_vec_info (bb_vinfo);
2435 return NULL;
2436 }
2437
2438 if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2439 {
2440 if (dump_enabled_p ())
2441 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2442 "not vectorized: unhandled data access in "
2443 "basic block.\n");
2444
2445 destroy_bb_vec_info (bb_vinfo);
2446 return NULL;
2447 }
2448
2449 vect_pattern_recog (NULL, bb_vinfo);
2450
2451 if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2452 {
2453 if (dump_enabled_p ())
2454 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2455 "not vectorized: bad data alignment in basic "
2456 "block.\n");
2457
2458 destroy_bb_vec_info (bb_vinfo);
2459 return NULL;
2460 }
2461
2462 /* Check the SLP opportunities in the basic block, analyze and build SLP
2463 trees. */
2464 if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
2465 {
2466 if (dump_enabled_p ())
2467 {
2468 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2469 "Failed to SLP the basic block.\n");
2470 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2471 "not vectorized: failed to find SLP opportunities "
2472 "in basic block.\n");
2473 }
2474
2475 destroy_bb_vec_info (bb_vinfo);
2476 return NULL;
2477 }
2478
2479 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2480
2481 /* Mark all the statements that we want to vectorize as pure SLP and
2482 relevant. */
2483 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2484 {
2485 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2486 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2487 }
2488
2489 /* Mark all the statements that we do not want to vectorize. */
2490 for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
2491 !gsi_end_p (gsi); gsi_next (&gsi))
2492 {
2493 stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
2494 if (STMT_SLP_TYPE (vinfo) != pure_slp)
2495 STMT_VINFO_VECTORIZABLE (vinfo) = false;
2496 }
2497
2498 /* Analyze dependences. At this point all stmts not participating in
2499 vectorization have to be marked. Dependence analysis assumes
2500 that we either vectorize all SLP instances or none at all. */
2501 if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2502 {
2503 if (dump_enabled_p ())
2504 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2505 "not vectorized: unhandled data dependence "
2506 "in basic block.\n");
2507
2508 destroy_bb_vec_info (bb_vinfo);
2509 return NULL;
2510 }
2511
2512 if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2513 {
2514 if (dump_enabled_p ())
2515 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2516 "not vectorized: unsupported alignment in basic "
2517 "block.\n");
2518 destroy_bb_vec_info (bb_vinfo);
2519 return NULL;
2520 }
2521
2522 if (!vect_slp_analyze_operations (BB_VINFO_SLP_INSTANCES (bb_vinfo)))
2523 {
2524 if (dump_enabled_p ())
2525 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2526 "not vectorized: bad operation in basic block.\n");
2527
2528 destroy_bb_vec_info (bb_vinfo);
2529 return NULL;
2530 }
2531
2532 /* Compute the costs of the SLP instances. */
2533 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2534 {
2535 gimple stmt = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2536 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
2537 vect_analyze_slp_cost (NULL, bb_vinfo,
2538 instance, TYPE_VECTOR_SUBPARTS (vectype));
2539 }
2540
2541 /* Cost model: check if the vectorization is worthwhile. */
2542 if (!unlimited_cost_model (NULL)
2543 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2544 {
2545 if (dump_enabled_p ())
2546 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2547 "not vectorized: vectorization is not "
2548 "profitable.\n");
2549
2550 destroy_bb_vec_info (bb_vinfo);
2551 return NULL;
2552 }
2553
2554 if (dump_enabled_p ())
2555 dump_printf_loc (MSG_NOTE, vect_location,
2556 "Basic block will be vectorized using SLP\n");
2557
2558 return bb_vinfo;
2559 }
2560
2561
2562 bb_vec_info
2563 vect_slp_analyze_bb (basic_block bb)
2564 {
2565 bb_vec_info bb_vinfo;
2566 int insns = 0;
2567 gimple_stmt_iterator gsi;
2568 unsigned int vector_sizes;
2569
2570 if (dump_enabled_p ())
2571 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2572
2573 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2574 {
2575 gimple stmt = gsi_stmt (gsi);
2576 if (!is_gimple_debug (stmt)
2577 && !gimple_nop_p (stmt)
2578 && gimple_code (stmt) != GIMPLE_LABEL)
2579 insns++;
2580 }
2581
2582 if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2583 {
2584 if (dump_enabled_p ())
2585 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2586 "not vectorized: too many instructions in "
2587 "basic block.\n");
2588
2589 return NULL;
2590 }
2591
2592 /* Autodetect first vector size we try. */
2593 current_vector_size = 0;
2594 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2595
2596 while (1)
2597 {
2598 bb_vinfo = vect_slp_analyze_bb_1 (bb);
2599 if (bb_vinfo)
2600 return bb_vinfo;
2601
2602 destroy_bb_vec_info (bb_vinfo);
2603
2604 vector_sizes &= ~current_vector_size;
2605 if (vector_sizes == 0
2606 || current_vector_size == 0)
2607 return NULL;
2608
2609 /* Try the next biggest vector size. */
2610 current_vector_size = 1 << floor_log2 (vector_sizes);
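      /* E.g. (hypothetical target, for illustration only): a target
	 advertising 16- and 8-byte vectors yields VECTOR_SIZES == 0x18.
	 After a failed attempt with CURRENT_VECTOR_SIZE == 16 the mask
	 becomes 0x8, and 1 << floor_log2 (0x8) selects 8 as the next
	 size to try.  */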
2611 if (dump_enabled_p ())
2612 dump_printf_loc (MSG_NOTE, vect_location,
2613 "***** Re-trying analysis with "
2614 "vector size %d\n", current_vector_size);
2615 }
2616 }
2617
2618
2619 /* SLP costs are calculated according to the SLP instance unrolling factor
2620 (i.e., the number of created vector stmts depends on the unrolling factor).
2621 However, the actual number of vector stmts for every SLP node depends on
2622 the VF, which is only set later in vect_analyze_loop_operations (). Hence,
2623 the SLP costs should be updated. In this function we assume that the
2624 inside costs calculated in vect_model_xxx_cost are linear in ncopies. */
2625
2626 void
2627 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
2628 {
2629 unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2630 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2631 slp_instance instance;
2632 stmt_vector_for_cost body_cost_vec;
2633 stmt_info_for_cost *si;
2634 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2635
2636 if (dump_enabled_p ())
2637 dump_printf_loc (MSG_NOTE, vect_location,
2638 "=== vect_update_slp_costs_according_to_vf ===\n");
2639
2640 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2641 {
2642 /* We assume that costs are linear in ncopies. */
2643 int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
2644
2645 /* Record the instance's instructions in the target cost model.
2646 This was delayed until here because the count of instructions
2647 isn't known beforehand. */
2648 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2649
2650 FOR_EACH_VEC_ELT (body_cost_vec, j, si)
2651 (void) add_stmt_cost (data, si->count * ncopies, si->kind,
2652 vinfo_for_stmt (si->stmt), si->misalign,
2653 vect_body);
2654 }
2655 }
2656
2657
2658 /* For constant and loop invariant defs of SLP_NODE this function returns
2659 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2660 OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2661 scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to create.
2662 REDUC_INDEX is the index of the reduction operand in the statements, or -1
2663 if there is no reduction. */
2664
2665 static void
2666 vect_get_constant_vectors (tree op, slp_tree slp_node,
2667 vec<tree> *vec_oprnds,
2668 unsigned int op_num, unsigned int number_of_vectors,
2669 int reduc_index)
2670 {
2671 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2672 gimple stmt = stmts[0];
2673 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2674 unsigned nunits;
2675 tree vec_cst;
2676 tree *elts;
2677 unsigned j, number_of_places_left_in_vector;
2678 tree vector_type;
2679 tree vop;
2680 int group_size = stmts.length ();
2681 unsigned int vec_num, i;
2682 unsigned number_of_copies = 1;
2683 vec<tree> voprnds;
2684 voprnds.create (number_of_vectors);
2685 bool constant_p, is_store;
2686 tree neutral_op = NULL;
2687 enum tree_code code = gimple_expr_code (stmt);
2688 gimple def_stmt;
2689 struct loop *loop;
2690 gimple_seq ctor_seq = NULL;
2691
2692 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2693 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2694
2695 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2696 && reduc_index != -1)
2697 {
2698 op_num = reduc_index;
2699 op = gimple_op (stmt, op_num + 1);
2700 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2701 we need either neutral operands or the original operands. See
2702 get_initial_def_for_reduction() for details. */
2703 switch (code)
2704 {
2705 case WIDEN_SUM_EXPR:
2706 case DOT_PROD_EXPR:
2707 case SAD_EXPR:
2708 case PLUS_EXPR:
2709 case MINUS_EXPR:
2710 case BIT_IOR_EXPR:
2711 case BIT_XOR_EXPR:
2712 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2713 neutral_op = build_real (TREE_TYPE (op), dconst0);
2714 else
2715 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2716
2717 break;
2718
2719 case MULT_EXPR:
2720 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2721 neutral_op = build_real (TREE_TYPE (op), dconst1);
2722 else
2723 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2724
2725 break;
2726
2727 case BIT_AND_EXPR:
2728 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2729 break;
2730
2731 /* For MIN/MAX we don't have an easy neutral operand but
2732 the initial values can be used fine here. Only for
2733 a reduction chain we have to force a neutral element. */
2734 case MAX_EXPR:
2735 case MIN_EXPR:
2736 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2737 neutral_op = NULL;
2738 else
2739 {
2740 def_stmt = SSA_NAME_DEF_STMT (op);
2741 loop = (gimple_bb (stmt))->loop_father;
2742 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2743 loop_preheader_edge (loop));
2744 }
2745 break;
2746
2747 default:
2748 gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));
2749 neutral_op = NULL;
2750 }
2751 }
2752
2753 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2754 {
2755 is_store = true;
2756 op = gimple_assign_rhs1 (stmt);
2757 }
2758 else
2759 is_store = false;
2760
2761 gcc_assert (op);
2762
2763 if (CONSTANT_CLASS_P (op))
2764 constant_p = true;
2765 else
2766 constant_p = false;
2767
2768 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2769 created vectors. It is greater than 1 if unrolling is performed.
2770
2771 For example, we have two scalar operands, s1 and s2 (e.g., group of
2772 strided accesses of size two), while NUNITS is four (i.e., four scalars
2773 of this type can be packed in a vector). The output vector will contain
2774 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2775 will be 2).
2776
2777 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2778 containing the operands.
2779
2780 For example, NUNITS is four as before, and the group size is 8
2781 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2782 {s5, s6, s7, s8}. */
2783
2784 number_of_copies = least_common_multiple (nunits, group_size) / group_size;
2785
2786 number_of_places_left_in_vector = nunits;
2787 elts = XALLOCAVEC (tree, nunits);
2788 bool place_after_defs = false;
2789 for (j = 0; j < number_of_copies; j++)
2790 {
2791 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2792 {
2793 if (is_store)
2794 op = gimple_assign_rhs1 (stmt);
2795 else
2796 {
2797 switch (code)
2798 {
2799 case COND_EXPR:
2800 if (op_num == 0 || op_num == 1)
2801 {
2802 tree cond = gimple_assign_rhs1 (stmt);
2803 op = TREE_OPERAND (cond, op_num);
2804 }
2805 else
2806 {
2807 if (op_num == 2)
2808 op = gimple_assign_rhs2 (stmt);
2809 else
2810 op = gimple_assign_rhs3 (stmt);
2811 }
2812 break;
2813
2814 case CALL_EXPR:
2815 op = gimple_call_arg (stmt, op_num);
2816 break;
2817
2818 case LSHIFT_EXPR:
2819 case RSHIFT_EXPR:
2820 case LROTATE_EXPR:
2821 case RROTATE_EXPR:
2822 op = gimple_op (stmt, op_num + 1);
2823 /* Unlike the other binary operators, shifts/rotates have
2824 the shift count being int, instead of the same type as
2825 the lhs, so make sure the scalar is the right type if
2826 we are dealing with vectors of
2827 long long/long/short/char. */
2828 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2829 op = fold_convert (TREE_TYPE (vector_type), op);
2830 break;
2831
2832 default:
2833 op = gimple_op (stmt, op_num + 1);
2834 break;
2835 }
2836 }
2837
2838 if (reduc_index != -1)
2839 {
2840 loop = (gimple_bb (stmt))->loop_father;
2841 def_stmt = SSA_NAME_DEF_STMT (op);
2842
2843 gcc_assert (loop);
2844
2845 /* Get the def before the loop. In reduction chain we have only
2846 one initial value. */
2847 if ((j != (number_of_copies - 1)
2848 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2849 && i != 0))
2850 && neutral_op)
2851 op = neutral_op;
2852 else
2853 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2854 loop_preheader_edge (loop));
2855 }
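	  /* E.g. (illustration only): for an SLP PLUS reduction that needs
	     two copies, only one copy receives the real initial values from
	     the loop preheader edge; the other copy uses the neutral
	     element (0 for PLUS) so the final result is unchanged.  */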
2856
2857 /* Create 'vect_ = {op0,op1,...,opn}'. */
2858 number_of_places_left_in_vector--;
2859 tree orig_op = op;
2860 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2861 {
2862 if (CONSTANT_CLASS_P (op))
2863 {
2864 op = fold_unary (VIEW_CONVERT_EXPR,
2865 TREE_TYPE (vector_type), op);
2866 gcc_assert (op && CONSTANT_CLASS_P (op));
2867 }
2868 else
2869 {
2870 tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
2871 gimple init_stmt;
2872 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
2873 init_stmt
2874 = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
2875 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2876 op = new_temp;
2877 }
2878 }
2879 elts[number_of_places_left_in_vector] = op;
2880 if (!CONSTANT_CLASS_P (op))
2881 constant_p = false;
2882 if (TREE_CODE (orig_op) == SSA_NAME
2883 && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
2884 && STMT_VINFO_BB_VINFO (stmt_vinfo)
2885 && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
2886 == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
2887 place_after_defs = true;
2888
2889 if (number_of_places_left_in_vector == 0)
2890 {
2891 number_of_places_left_in_vector = nunits;
2892
2893 if (constant_p)
2894 vec_cst = build_vector (vector_type, elts);
2895 else
2896 {
2897 vec<constructor_elt, va_gc> *v;
2898 unsigned k;
2899 vec_alloc (v, nunits);
2900 for (k = 0; k < nunits; ++k)
2901 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2902 vec_cst = build_constructor (vector_type, v);
2903 }
2904 tree init;
2905 gimple_stmt_iterator gsi;
2906 if (place_after_defs)
2907 {
2908 gsi = gsi_for_stmt
2909 (vect_find_last_scalar_stmt_in_slp (slp_node));
2910 init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
2911 }
2912 else
2913 init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
2914 if (ctor_seq != NULL)
2915 {
2916 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
2917 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2918 GSI_SAME_STMT);
2919 ctor_seq = NULL;
2920 }
2921 voprnds.quick_push (init);
2922 place_after_defs = false;
2923 }
2924 }
2925 }
2926
2927 /* Since the vectors are created in reverse order, we have to reverse them
2928 here. */
2929 vec_num = voprnds.length ();
2930 for (j = vec_num; j != 0; j--)
2931 {
2932 vop = voprnds[j - 1];
2933 vec_oprnds->quick_push (vop);
2934 }
2935
2936 voprnds.release ();
2937
2938 /* In case the VF is greater than the unrolling factor needed for the SLP
2939 group of stmts, the number of vectors built above is smaller than
2940 NUMBER_OF_VECTORS, and hence we have to replicate the vectors already
2941 created. */
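  /* E.g. (illustration only): if only {s1,s2,s1,s2} was built above but
     NUMBER_OF_VECTORS is two, the loop below pushes the same vector again
     (or a neutral vector, for a reduction with a neutral element).  */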
2942 while (number_of_vectors > vec_oprnds->length ())
2943 {
2944 tree neutral_vec = NULL;
2945
2946 if (neutral_op)
2947 {
2948 if (!neutral_vec)
2949 neutral_vec = build_vector_from_val (vector_type, neutral_op);
2950
2951 vec_oprnds->quick_push (neutral_vec);
2952 }
2953 else
2954 {
2955 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2956 vec_oprnds->quick_push (vop);
2957 }
2958 }
2959 }
2960
2961
2962 /* Get the vectorized definitions from SLP_NODE, which contains the
2963 corresponding vectorized def-stmts. */
2964
2965 static void
2966 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2967 {
2968 tree vec_oprnd;
2969 gimple vec_def_stmt;
2970 unsigned int i;
2971
2972 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2973
2974 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2975 {
2976 gcc_assert (vec_def_stmt);
2977 vec_oprnd = gimple_get_lhs (vec_def_stmt);
2978 vec_oprnds->quick_push (vec_oprnd);
2979 }
2980 }
2981
2982
2983 /* Get vectorized definitions for SLP_NODE.
2984 If the scalar definitions are loop invariants or constants, collect them and
2985 call vect_get_constant_vectors() to create vector stmts.
2986 Otherwise, the def-stmts must already be vectorized and the vectorized stmts
2987 must be stored in the corresponding child of SLP_NODE, and we call
2988 vect_get_slp_vect_defs () to retrieve them. */
2989
2990 void
2991 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2992 vec<vec<tree> > *vec_oprnds, int reduc_index)
2993 {
2994 gimple first_stmt;
2995 int number_of_vects = 0, i;
2996 unsigned int child_index = 0;
2997 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2998 slp_tree child = NULL;
2999 vec<tree> vec_defs;
3000 tree oprnd;
3001 bool vectorized_defs;
3002
3003 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
3004 FOR_EACH_VEC_ELT (ops, i, oprnd)
3005 {
3006 /* For each operand we check if it has vectorized definitions in a child
3007 node or we need to create them (for invariants and constants). We
3008 check if the LHS of the first stmt of the next child matches OPRND.
3009 If it does, we found the correct child. Otherwise, we call
3010 vect_get_constant_vectors () and do not advance CHILD_INDEX, so that
3011 this child node is checked again for the next operand. */
3012 vectorized_defs = false;
3013 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
3014 {
3015 child = SLP_TREE_CHILDREN (slp_node)[child_index];
3016
3017 /* We have to check both pattern and original def, if available. */
3018 if (child)
3019 {
3020 gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
3021 gimple related
3022 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
3023
3024 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
3025 || (related
3026 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
3027 {
3028 /* The number of vector defs is determined by the number of
3029 vector statements in the node from which we get those
3030 statements. */
3031 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
3032 vectorized_defs = true;
3033 child_index++;
3034 }
3035 }
3036 else
3037 child_index++;
3038 }
3039
3040 if (!vectorized_defs)
3041 {
3042 if (i == 0)
3043 {
3044 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3045 /* Number of vector stmts was calculated according to LHS in
3046 vect_schedule_slp_instance (), fix it by replacing LHS with
3047 RHS, if necessary. See vect_get_smallest_scalar_type () for
3048 details. */
3049 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
3050 &rhs_size_unit);
3051 if (rhs_size_unit != lhs_size_unit)
3052 {
3053 number_of_vects *= rhs_size_unit;
3054 number_of_vects /= lhs_size_unit;
3055 }
3056 }
3057 }
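      /* E.g. (illustration only): a store of chars computed from int
	 operands scales NUMBER_OF_VECTS by 4 (RHS size) over 1 (LHS size),
	 since four times as many vectors are needed to hold the wider RHS
	 elements.  */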
3058
3059 /* Allocate memory for vectorized defs. */
3060 vec_defs = vNULL;
3061 vec_defs.create (number_of_vects);
3062
3063 /* For reduction defs we call vect_get_constant_vectors (), since we are
3064 looking for initial loop invariant values. */
3065 if (vectorized_defs && reduc_index == -1)
3066 /* The defs are already vectorized. */
3067 vect_get_slp_vect_defs (child, &vec_defs);
3068 else
3069 /* Build vectors from scalar defs. */
3070 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
3071 number_of_vects, reduc_index);
3072
3073 vec_oprnds->quick_push (vec_defs);
3074
3075 /* For reductions, we only need initial values. */
3076 if (reduc_index != -1)
3077 return;
3078 }
3079 }
3080
3081
3082 /* Create NCOPIES permutation statements using the vector mask MASK and two
3083 input vectors placed in
3084 DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy and
3085 shifting by STRIDE elements of DR_CHAIN for every copy.
3086 (STRIDE is the number of vectorized stmts for NODE divided by the number of
3087 copies).
3088 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
3089 the created stmts must be inserted. */
3090
3091 static inline void
3092 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
3093 tree mask, int first_vec_indx, int second_vec_indx,
3094 gimple_stmt_iterator *gsi, slp_tree node,
3095 tree vectype, vec<tree> dr_chain,
3096 int ncopies, int vect_stmts_counter)
3097 {
3098 tree perm_dest;
3099 gimple perm_stmt = NULL;
3100 stmt_vec_info next_stmt_info;
3101 int i, stride;
3102 tree first_vec, second_vec, data_ref;
3103
3104 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
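  /* E.g. (numbers for illustration only): with four vector stmts for NODE
     and NCOPIES == 2 the stride is 2, so copy I stores its permute stmt in
     slot I * 2 + VECT_STMTS_COUNTER below.  */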
3105
3106 /* Initialize the vect stmts of NODE to properly insert the generated
3107 stmts later. */
3108 for (i = SLP_TREE_VEC_STMTS (node).length ();
3109 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
3110 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
3111
3112 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3113 for (i = 0; i < ncopies; i++)
3114 {
3115 first_vec = dr_chain[first_vec_indx];
3116 second_vec = dr_chain[second_vec_indx];
3117
3118 /* Generate the permute statement. */
3119 perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
3120 first_vec, second_vec, mask);
3121 data_ref = make_ssa_name (perm_dest, perm_stmt);
3122 gimple_set_lhs (perm_stmt, data_ref);
3123 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3124
3125 /* Store the vector statement in NODE. */
3126 SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
3127
3128 first_vec_indx += stride;
3129 second_vec_indx += stride;
3130 }
3131
3132 /* Mark the scalar stmt as vectorized. */
3133 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
3134 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
3135 }
3136
3137
3138 /* Given FIRST_MASK_ELEMENT - the mask element in element representation,
3139 return in CURRENT_MASK_ELEMENT its equivalent in target specific
3140 representation. Check that the mask is valid and return FALSE if not.
3141 Return TRUE in NEED_NEXT_VECTOR if the permutation requires moving to
3142 the next vector, i.e., the current first vector is not needed. */
3143
3144 static bool
3145 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
3146 int mask_nunits, bool only_one_vec, int index,
3147 unsigned char *mask, int *current_mask_element,
3148 bool *need_next_vector, int *number_of_mask_fixes,
3149 bool *mask_fixed, bool *needs_first_vector)
3150 {
3151 int i;
3152
3153 /* Convert to target specific representation. */
3154 *current_mask_element = first_mask_element + m;
3155 /* Adjust the value in case it's a mask for second and third vectors. */
3156 *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
3157
3158 if (*current_mask_element < 0)
3159 {
3160 if (dump_enabled_p ())
3161 {
3162 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3163 "permutation requires past vector ");
3164 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3165 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3166 }
3167 return false;
3168 }
3169
3170 if (*current_mask_element < mask_nunits)
3171 *needs_first_vector = true;
3172
3173 /* We have only one input vector to permute but the mask accesses values in
3174 the next vector as well. */
3175 if (only_one_vec && *current_mask_element >= mask_nunits)
3176 {
3177 if (dump_enabled_p ())
3178 {
3179 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3180 "permutation requires at least two vectors ");
3181 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3182 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3183 }
3184
3185 return false;
3186 }
3187
3188 /* The mask requires the next vector. */
3189 while (*current_mask_element >= mask_nunits * 2)
3190 {
3191 if (*needs_first_vector || *mask_fixed)
3192 {
3193 /* We either need the first vector too or have already moved to the
3194 next vector. In both cases, this permutation needs three
3195 vectors. */
3196 if (dump_enabled_p ())
3197 {
3198 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3199 "permutation requires at "
3200 "least three vectors ");
3201 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3202 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3203 }
3204
3205 return false;
3206 }
3207
3208 /* We move to the next vector, dropping the first one and working with
3209 the second and the third - we need to adjust the values of the mask
3210 accordingly. */
3211 *current_mask_element -= mask_nunits * *number_of_mask_fixes;
3212
3213 for (i = 0; i < index; i++)
3214 mask[i] -= mask_nunits * *number_of_mask_fixes;
3215
3216 (*number_of_mask_fixes)++;
3217 *mask_fixed = true;
3218 }
3219
3220 *need_next_vector = *mask_fixed;
3221
3222 /* This was the last element of this mask. Start a new one. */
3223 if (index == mask_nunits - 1)
3224 {
3225 *number_of_mask_fixes = 1;
3226 *mask_fixed = false;
3227 *needs_first_vector = false;
3228 }
3229
3230 return true;
3231 }
3232
3233
3234 /* Generate vector permute statements from a list of loads in DR_CHAIN.
3235 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
3236 permute statements for the SLP node NODE of the SLP instance
3237 SLP_NODE_INSTANCE. */
3238
3239 bool
3240 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
3241 gimple_stmt_iterator *gsi, int vf,
3242 slp_instance slp_node_instance, bool analyze_only)
3243 {
3244 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3245 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3246 tree mask_element_type = NULL_TREE, mask_type;
3247 int i, j, k, nunits, vec_index = 0, scalar_index;
3248 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3249 gimple next_scalar_stmt;
3250 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
3251 int first_mask_element;
3252 int index, unroll_factor, current_mask_element, ncopies;
3253 unsigned char *mask;
3254 bool only_one_vec = false, need_next_vector = false;
3255 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
3256 int number_of_mask_fixes = 1;
3257 bool mask_fixed = false;
3258 bool needs_first_vector = false;
3259 machine_mode mode;
3260
3261 mode = TYPE_MODE (vectype);
3262
3263 if (!can_vec_perm_p (mode, false, NULL))
3264 {
3265 if (dump_enabled_p ())
3266 {
3267 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3268 "no vect permute for ");
3269 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3270 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3271 }
3272 return false;
3273 }
3274
3275 /* The generic VEC_PERM_EXPR code always uses an integral type of the
3276 same size as the vector element being permuted. */
3277 mask_element_type = lang_hooks.types.type_for_mode
3278 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
3279 mask_type = get_vectype_for_scalar_type (mask_element_type);
3280 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3281 mask = XALLOCAVEC (unsigned char, nunits);
3282 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3283
3284 /* The number of vector stmts to generate based only on the SLP_NODE_INSTANCE
3285 unrolling factor. */
3286 orig_vec_stmts_num = group_size *
3287 SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
3288 if (orig_vec_stmts_num == 1)
3289 only_one_vec = true;
3290
3291 /* The number of copies is determined by the final vectorization factor
3292 relative to the SLP_NODE_INSTANCE unrolling factor. */
3293 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
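  /* E.g. (numbers for illustration only): GROUP_SIZE == 3, NUNITS == 4 and
     an instance unrolling factor of 4 give 3 * 4 / 4 == 3 original vector
     stmts, and a final VF of 8 gives NCOPIES == 8 / 4 == 2.  */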
3294
3295 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3296 return false;
3297
3298 /* Generate permutation masks for every NODE. The number of masks for each
3299 NODE is equal to GROUP_SIZE.
3300 E.g., we have a group of three nodes with three loads from the same
3301 location in each node, and the vector size is 4. I.e., we have an
3302 a0b0c0a1b1c1... sequence and we need to create the following vectors:
3303 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3304 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3305 ...
3306
3307 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3308 The last mask is illegal since we assume two operands for a permute
3309 operation, so mask element values can't exceed 2 * NUNITS - 1 (here 7).
3310 Hence, the last mask must be converted into {2,5,5,5}.
3311 For the first two permutations we need the first and the second input
3312 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3313 we need the second and the third vectors: {b1,c1,a2,b2} and
3314 {c2,a3,b3,c3}. */
3315
3316 {
3317 scalar_index = 0;
3318 index = 0;
3319 vect_stmts_counter = 0;
3320 vec_index = 0;
3321 first_vec_index = vec_index++;
3322 if (only_one_vec)
3323 second_vec_index = first_vec_index;
3324 else
3325 second_vec_index = vec_index++;
3326
3327 for (j = 0; j < unroll_factor; j++)
3328 {
3329 for (k = 0; k < group_size; k++)
3330 {
3331 i = SLP_TREE_LOAD_PERMUTATION (node)[k];
3332 first_mask_element = i + j * group_size;
3333 if (!vect_get_mask_element (stmt, first_mask_element, 0,
3334 nunits, only_one_vec, index,
3335 mask, &current_mask_element,
3336 &need_next_vector,
3337 &number_of_mask_fixes, &mask_fixed,
3338 &needs_first_vector))
3339 return false;
3340 gcc_assert (current_mask_element >= 0
3341 && current_mask_element < 2 * nunits);
3342 mask[index++] = current_mask_element;
3343
3344 if (index == nunits)
3345 {
3346 index = 0;
3347 if (!can_vec_perm_p (mode, false, mask))
3348 {
3349 if (dump_enabled_p ())
3350 {
3351 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3352 vect_location,
3353 "unsupported vect permute { ");
3354 for (i = 0; i < nunits; ++i)
3355 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
3356 mask[i]);
3357 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3358 }
3359 return false;
3360 }
3361
3362 if (!analyze_only)
3363 {
3364 int l;
3365 tree mask_vec, *mask_elts;
3366 mask_elts = XALLOCAVEC (tree, nunits);
3367 for (l = 0; l < nunits; ++l)
3368 mask_elts[l] = build_int_cst (mask_element_type,
3369 mask[l]);
3370 mask_vec = build_vector (mask_type, mask_elts);
3371
3372 if (need_next_vector)
3373 {
3374 first_vec_index = second_vec_index;
3375 second_vec_index = vec_index;
3376 }
3377
3378 next_scalar_stmt
3379 = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
3380
3381 vect_create_mask_and_perm (stmt, next_scalar_stmt,
3382 mask_vec, first_vec_index, second_vec_index,
3383 gsi, node, vectype, dr_chain,
3384 ncopies, vect_stmts_counter++);
3385 }
3386 }
3387 }
3388 }
3389 }
3390
3391 return true;
3392 }
3393
3394
3395
3396 /* Vectorize SLP instance tree in postorder. */
3397
3398 static bool
3399 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3400 unsigned int vectorization_factor)
3401 {
3402 gimple stmt;
3403 bool grouped_store, is_store;
3404 gimple_stmt_iterator si;
3405 stmt_vec_info stmt_info;
3406 unsigned int vec_stmts_size, nunits, group_size;
3407 tree vectype;
3408 int i;
3409 slp_tree child;
3410
3411 if (!node)
3412 return false;
3413
3414 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3415 vect_schedule_slp_instance (child, instance, vectorization_factor);
3416
3417 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3418 stmt_info = vinfo_for_stmt (stmt);
3419
3420 /* VECTYPE is the type of the destination. */
3421 vectype = STMT_VINFO_VECTYPE (stmt_info);
3422 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3423 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3424
3425 /* For each SLP instance calculate number of vector stmts to be created
3426 for the scalar stmts in each node of the SLP tree. Number of vector
3427 elements in one vector iteration is the number of scalar elements in
3428 one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector
3429 size. */
3430 vec_stmts_size = (vectorization_factor * group_size) / nunits;
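  /* E.g. (numbers for illustration only): with VF == 4, GROUP_SIZE == 2
     and NUNITS == 4 each node gets 4 * 2 / 4 == 2 vector stmts.  */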
3431
3432 if (!SLP_TREE_VEC_STMTS (node).exists ())
3433 {
3434 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3435 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3436 }
3437
3438 if (dump_enabled_p ())
3439 {
3440 dump_printf_loc (MSG_NOTE,vect_location,
3441 "------>vectorizing SLP node starting from: ");
3442 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3443 dump_printf (MSG_NOTE, "\n");
3444 }
3445
3446 /* Vectorized stmts go before the last scalar stmt which is where
3447 all uses are ready. */
3448 si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));
3449
3450 /* Mark the first element of the reduction chain as reduction to properly
3451 transform the node. In the analysis phase only the last element of the
3452 chain is marked as reduction. */
3453 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3454 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3455 {
3456 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3457 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3458 }
3459
3460 /* Handle two-operation SLP nodes by vectorizing the group with
3461 both operations and then performing a merge. */
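  /* E.g. (illustration only): for the group {a0 + b0, a1 - b1} the whole
     group is vectorized once as PLUS_EXPR and once as MINUS_EXPR, and the
     two result vectors are blended with a VEC_PERM_EXPR mask that selects
     lane 0 from the PLUS vector and lane 1 from the MINUS vector.  */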
3462 if (SLP_TREE_TWO_OPERATORS (node))
3463 {
3464 enum tree_code code0 = gimple_assign_rhs_code (stmt);
3465 enum tree_code ocode;
3466 gimple ostmt;
3467 unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
3468 bool allsame = true;
3469 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
3470 if (gimple_assign_rhs_code (ostmt) != code0)
3471 {
3472 mask[i] = 1;
3473 allsame = false;
3474 ocode = gimple_assign_rhs_code (ostmt);
3475 }
3476 else
3477 mask[i] = 0;
3478 if (!allsame)
3479 {
3480 vec<gimple> v0;
3481 vec<gimple> v1;
3482 unsigned j;
3483 tree tmask = NULL_TREE;
3484 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3485 v0 = SLP_TREE_VEC_STMTS (node).copy ();
3486 SLP_TREE_VEC_STMTS (node).truncate (0);
3487 gimple_assign_set_rhs_code (stmt, ocode);
3488 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3489 gimple_assign_set_rhs_code (stmt, code0);
3490 v1 = SLP_TREE_VEC_STMTS (node).copy ();
3491 SLP_TREE_VEC_STMTS (node).truncate (0);
3492 tree meltype = build_nonstandard_integer_type
3493 (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
3494 tree mvectype = get_same_sized_vectype (meltype, vectype);
3495 unsigned k = 0, l;
3496 for (j = 0; j < v0.length (); ++j)
3497 {
3498 tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
3499 for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
3500 {
3501 if (k >= group_size)
3502 k = 0;
3503 melts[l] = build_int_cst
3504 (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
3505 }
3506 tmask = build_vector (mvectype, melts);
3507
3508 /* ??? Not all targets support a VEC_PERM_EXPR with a
3509 constant mask that would translate to a vec_merge RTX
3510 (with their vec_perm_const_ok). We can either not
3511 vectorize in that case or let veclower do its job.
3512 Unfortunately that isn't too great and at least for
3513 plus/minus we'd eventually like to match targets
3514 vector addsub instructions. */
3515 gimple vstmt;
3516 vstmt = gimple_build_assign (make_ssa_name (vectype),
3517 VEC_PERM_EXPR,
3518 gimple_assign_lhs (v0[j]),
3519 gimple_assign_lhs (v1[j]), tmask);
3520 vect_finish_stmt_generation (stmt, vstmt, &si);
3521 SLP_TREE_VEC_STMTS (node).quick_push (vstmt);
3522 }
3523 v0.release ();
3524 v1.release ();
3525 return false;
3526 }
3527 }
3528 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3529 return is_store;
3530 }
3531
3532 /* Replace scalar calls from SLP node NODE with setting of their lhs to zero.
3533 For loop vectorization this is done in vectorizable_call, but for SLP
3534 it needs to be deferred until the end of vect_schedule_slp, because multiple
3535 SLP instances may refer to the same scalar stmt. */
3536
3537 static void
3538 vect_remove_slp_scalar_calls (slp_tree node)
3539 {
3540 gimple stmt, new_stmt;
3541 gimple_stmt_iterator gsi;
3542 int i;
3543 slp_tree child;
3544 tree lhs;
3545 stmt_vec_info stmt_info;
3546
3547 if (!node)
3548 return;
3549
3550 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3551 vect_remove_slp_scalar_calls (child);
3552
3553 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3554 {
3555 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3556 continue;
3557 stmt_info = vinfo_for_stmt (stmt);
3558 if (stmt_info == NULL
3559 || is_pattern_stmt_p (stmt_info)
3560 || !PURE_SLP_STMT (stmt_info))
3561 continue;
3562 lhs = gimple_call_lhs (stmt);
3563 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3564 set_vinfo_for_stmt (new_stmt, stmt_info);
3565 set_vinfo_for_stmt (stmt, NULL);
3566 STMT_VINFO_STMT (stmt_info) = new_stmt;
3567 gsi = gsi_for_stmt (stmt);
3568 gsi_replace (&gsi, new_stmt, false);
3569 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3570 }
3571 }
3572
3573 /* Generate vector code for all SLP instances in the loop/basic block. */
3574
3575 bool
3576 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3577 {
3578 vec<slp_instance> slp_instances;
3579 slp_instance instance;
3580 unsigned int i, vf;
3581 bool is_store = false;
3582
3583 if (loop_vinfo)
3584 {
3585 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3586 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3587 }
3588 else
3589 {
3590 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3591 vf = 1;
3592 }
3593
3594 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3595 {
3596 /* Schedule the tree of INSTANCE. */
3597 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3598 instance, vf);
3599 if (dump_enabled_p ())
3600 dump_printf_loc (MSG_NOTE, vect_location,
3601 "vectorizing stmts using SLP.\n");
3602 }
3603
3604 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3605 {
3606 slp_tree root = SLP_INSTANCE_TREE (instance);
3607 gimple store;
3608 unsigned int j;
3609 gimple_stmt_iterator gsi;
3610
3611 /* Remove scalar call stmts. Do not do this for basic-block
3612 vectorization as not all uses may be vectorized.
3613 ??? Why should this be necessary? DCE should be able to
3614 remove the stmts itself.
3615 ??? For BB vectorization we can as well remove scalar
3616 stmts starting from the SLP tree root if they have no
3617 uses. */
3618 if (loop_vinfo)
3619 vect_remove_slp_scalar_calls (root);
3620
3621 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3622 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3623 {
3624 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3625 break;
3626
3627 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3628 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3629 /* Free the attached stmt_vec_info and remove the stmt. */
3630 gsi = gsi_for_stmt (store);
3631 unlink_stmt_vdef (store);
3632 gsi_remove (&gsi, true);
3633 release_defs (store);
3634 free_stmt_vec_info (store);
3635 }
3636 }
3637
3638 return is_store;
3639 }
3640
3641
3642 /* Vectorize the basic block. */
3643
3644 void
3645 vect_slp_transform_bb (basic_block bb)
3646 {
3647 bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3648 gimple_stmt_iterator si;
3649
3650 gcc_assert (bb_vinfo);
3651
3652 if (dump_enabled_p ())
3653 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3654
3655 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3656 {
3657 gimple stmt = gsi_stmt (si);
3658 stmt_vec_info stmt_info;
3659
3660 if (dump_enabled_p ())
3661 {
3662 dump_printf_loc (MSG_NOTE, vect_location,
3663 "------>SLPing statement: ");
3664 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3665 dump_printf (MSG_NOTE, "\n");
3666 }
3667
3668 stmt_info = vinfo_for_stmt (stmt);
3669 gcc_assert (stmt_info);
3670
3671 /* Schedule all the SLP instances when the first SLP stmt is reached. */
3672 if (STMT_SLP_TYPE (stmt_info))
3673 {
3674 vect_schedule_slp (NULL, bb_vinfo);
3675 break;
3676 }
3677 }
3678
3679 if (dump_enabled_p ())
3680 dump_printf_loc (MSG_NOTE, vect_location,
3681 "BASIC BLOCK VECTORIZED\n");
3682
3683 destroy_bb_vec_info (bb_vinfo);
3684 }