1 /* SLP - Basic Block Vectorization
2 Copyright (C) 2007-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "hash-set.h"
28 #include "machmode.h"
29 #include "vec.h"
30 #include "double-int.h"
31 #include "input.h"
32 #include "alias.h"
33 #include "symtab.h"
34 #include "wide-int.h"
35 #include "inchash.h"
36 #include "tree.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "target.h"
40 #include "predict.h"
41 #include "hard-reg-set.h"
42 #include "function.h"
43 #include "basic-block.h"
44 #include "gimple-pretty-print.h"
45 #include "tree-ssa-alias.h"
46 #include "internal-fn.h"
47 #include "gimple-expr.h"
48 #include "is-a.h"
49 #include "gimple.h"
50 #include "gimple-iterator.h"
51 #include "gimple-ssa.h"
52 #include "tree-phinodes.h"
53 #include "ssa-iterators.h"
54 #include "stringpool.h"
55 #include "tree-ssanames.h"
56 #include "tree-pass.h"
57 #include "cfgloop.h"
58 #include "hashtab.h"
59 #include "rtl.h"
60 #include "flags.h"
61 #include "statistics.h"
62 #include "real.h"
63 #include "fixed-value.h"
64 #include "insn-config.h"
65 #include "expmed.h"
66 #include "dojump.h"
67 #include "explow.h"
68 #include "calls.h"
69 #include "emit-rtl.h"
70 #include "varasm.h"
71 #include "stmt.h"
72 #include "expr.h"
73 #include "recog.h" /* FIXME: for insn_data */
74 #include "insn-codes.h"
75 #include "optabs.h"
76 #include "tree-vectorizer.h"
77 #include "langhooks.h"
78 #include "gimple-walk.h"
79
80 /* Extract the location of the basic block in the source code.
  81    Return the basic block location if found, and UNKNOWN_LOCATION otherwise.  */
82
83 source_location
84 find_bb_location (basic_block bb)
85 {
86 gimple stmt = NULL;
87 gimple_stmt_iterator si;
88
89 if (!bb)
90 return UNKNOWN_LOCATION;
91
92 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
93 {
94 stmt = gsi_stmt (si);
95 if (gimple_location (stmt) != UNKNOWN_LOCATION)
96 return gimple_location (stmt);
97 }
98
99 return UNKNOWN_LOCATION;
100 }
101
102
103 /* Recursively free the memory allocated for the SLP tree rooted at NODE. */
104
105 static void
106 vect_free_slp_tree (slp_tree node)
107 {
108 int i;
109 slp_tree child;
110
111 if (!node)
112 return;
113
114 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
115 vect_free_slp_tree (child);
116
117 SLP_TREE_CHILDREN (node).release ();
118 SLP_TREE_SCALAR_STMTS (node).release ();
119 SLP_TREE_VEC_STMTS (node).release ();
120 SLP_TREE_LOAD_PERMUTATION (node).release ();
121
122 free (node);
123 }
124
125
126 /* Free the memory allocated for the SLP instance. */
127
128 void
129 vect_free_slp_instance (slp_instance instance)
130 {
131 vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
132 SLP_INSTANCE_LOADS (instance).release ();
133 SLP_INSTANCE_BODY_COST_VEC (instance).release ();
134 free (instance);
135 }
136
137
138 /* Create an SLP node for SCALAR_STMTS. */
139
140 static slp_tree
141 vect_create_new_slp_node (vec<gimple> scalar_stmts)
142 {
143 slp_tree node;
144 gimple stmt = scalar_stmts[0];
145 unsigned int nops;
146
147 if (is_gimple_call (stmt))
148 nops = gimple_call_num_args (stmt);
149 else if (is_gimple_assign (stmt))
150 {
151 nops = gimple_num_ops (stmt) - 1;
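      /* For a COND_EXPR the condition counts as a single gimple operand
	 but contributes two SLP operands (the two arguments of the
	 comparison), hence the adjustment below.  E.g. x = a < b ? c : d
	 has the four SLP operands a, b, c and d.  */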
152 if (gimple_assign_rhs_code (stmt) == COND_EXPR)
153 nops++;
154 }
155 else
156 return NULL;
157
158 node = XNEW (struct _slp_tree);
159 SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
160 SLP_TREE_VEC_STMTS (node).create (0);
161 SLP_TREE_CHILDREN (node).create (nops);
162 SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
163
164 return node;
165 }
166
167
168 /* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
169 operand. */
170 static vec<slp_oprnd_info>
171 vect_create_oprnd_info (int nops, int group_size)
172 {
173 int i;
174 slp_oprnd_info oprnd_info;
175 vec<slp_oprnd_info> oprnds_info;
176
177 oprnds_info.create (nops);
178 for (i = 0; i < nops; i++)
179 {
180 oprnd_info = XNEW (struct _slp_oprnd_info);
181 oprnd_info->def_stmts.create (group_size);
182 oprnd_info->first_dt = vect_uninitialized_def;
183 oprnd_info->first_op_type = NULL_TREE;
184 oprnd_info->first_pattern = false;
185 oprnds_info.quick_push (oprnd_info);
186 }
187
188 return oprnds_info;
189 }
190
191
192 /* Free operands info. */
193
194 static void
195 vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
196 {
197 int i;
198 slp_oprnd_info oprnd_info;
199
200 FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
201 {
202 oprnd_info->def_stmts.release ();
203 XDELETE (oprnd_info);
204 }
205
206 oprnds_info.release ();
207 }
208
209
210 /* Find the place of the data-ref in STMT in the interleaving chain that starts
211 from FIRST_STMT. Return -1 if the data-ref is not a part of the chain. */
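   /* For example, in a chain built from loads of a[0], a[1] and a[3],
      the places are 0, 1 and 3: the element following the gap carries a
      GROUP_GAP of 2, which the walk below accumulates.  */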
212
213 static int
214 vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
215 {
216 gimple next_stmt = first_stmt;
217 int result = 0;
218
219 if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
220 return -1;
221
222 do
223 {
224 if (next_stmt == stmt)
225 return result;
226 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
227 if (next_stmt)
228 result += GROUP_GAP (vinfo_for_stmt (next_stmt));
229 }
230 while (next_stmt);
231
232 return -1;
233 }
234
235
236 /* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
237 they are of a valid type and that they match the defs of the first stmt of
 238    the SLP group (stored in OPRNDS_INFO).  Return -1 if there was a
 239    fatal error; return 1 if the error could be corrected by swapping
 240    operands of the operation; return 0 if everything is OK.  */
241
242 static int
243 vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
244 gimple stmt, bool first,
245 vec<slp_oprnd_info> *oprnds_info)
246 {
247 tree oprnd;
248 unsigned int i, number_of_oprnds;
249 tree def;
250 gimple def_stmt;
251 enum vect_def_type dt = vect_uninitialized_def;
252 struct loop *loop = NULL;
253 bool pattern = false;
254 slp_oprnd_info oprnd_info;
255 int first_op_idx = 1;
256 bool commutative = false;
257 bool first_op_cond = false;
258
259 if (loop_vinfo)
260 loop = LOOP_VINFO_LOOP (loop_vinfo);
261
262 if (is_gimple_call (stmt))
263 {
264 number_of_oprnds = gimple_call_num_args (stmt);
265 first_op_idx = 3;
266 }
267 else if (is_gimple_assign (stmt))
268 {
269 enum tree_code code = gimple_assign_rhs_code (stmt);
270 number_of_oprnds = gimple_num_ops (stmt) - 1;
271 if (gimple_assign_rhs_code (stmt) == COND_EXPR)
272 {
273 first_op_cond = true;
274 commutative = true;
275 number_of_oprnds++;
276 }
277 else
278 commutative = commutative_tree_code (code);
279 }
280 else
281 return -1;
282
283 bool swapped = false;
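  /* If the defs of the first operand fail to match those recorded from
     the group's first stmt, the scan is retried once with the two
     commutative operands swapped (see the "again" label); the GIMPLE
     operands themselves are only swapped once all operands have been
     checked successfully, see below.  */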
284 for (i = 0; i < number_of_oprnds; i++)
285 {
286 again:
287 if (first_op_cond)
288 {
289 if (i == 0 || i == 1)
290 oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
291 swapped ? !i : i);
292 else
293 oprnd = gimple_op (stmt, first_op_idx + i - 1);
294 }
295 else
296 oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));
297
298 oprnd_info = (*oprnds_info)[i];
299
300 if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
301 &def, &dt)
302 || (!def_stmt && dt != vect_constant_def))
303 {
304 if (dump_enabled_p ())
305 {
306 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
307 "Build SLP failed: can't find def for ");
308 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
309 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
310 }
311
312 return -1;
313 }
314
315 /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
316 from the pattern. Check that all the stmts of the node are in the
317 pattern. */
318 if (def_stmt && gimple_bb (def_stmt)
319 && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
320 || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
321 && gimple_code (def_stmt) != GIMPLE_PHI))
322 && vinfo_for_stmt (def_stmt)
323 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
324 && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
325 && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
326 {
327 pattern = true;
328 if (!first && !oprnd_info->first_pattern)
329 {
330 if (i == 0
331 && !swapped
332 && commutative)
333 {
334 swapped = true;
335 goto again;
336 }
337
338 if (dump_enabled_p ())
339 {
340 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
341 "Build SLP failed: some of the stmts"
342 " are in a pattern, and others are not ");
343 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
344 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
345 }
346
347 return 1;
348 }
349
350 def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
351 dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));
352
353 if (dt == vect_unknown_def_type)
354 {
355 if (dump_enabled_p ())
356 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
357 "Unsupported pattern.\n");
358 return -1;
359 }
360
361 switch (gimple_code (def_stmt))
362 {
363 case GIMPLE_PHI:
364 def = gimple_phi_result (def_stmt);
365 break;
366
367 case GIMPLE_ASSIGN:
368 def = gimple_assign_lhs (def_stmt);
369 break;
370
371 default:
372 if (dump_enabled_p ())
373 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
374 "unsupported defining stmt:\n");
375 return -1;
376 }
377 }
378
379 if (first)
380 {
381 oprnd_info->first_dt = dt;
382 oprnd_info->first_pattern = pattern;
383 oprnd_info->first_op_type = TREE_TYPE (oprnd);
384 }
385 else
386 {
387 /* Not first stmt of the group, check that the def-stmt/s match
388 the def-stmt/s of the first stmt. Allow different definition
389 types for reduction chains: the first stmt must be a
390 vect_reduction_def (a phi node), and the rest
391 vect_internal_def. */
392 if (((oprnd_info->first_dt != dt
393 && !(oprnd_info->first_dt == vect_reduction_def
394 && dt == vect_internal_def)
395 && !((oprnd_info->first_dt == vect_external_def
396 || oprnd_info->first_dt == vect_constant_def)
397 && (dt == vect_external_def
398 || dt == vect_constant_def)))
399 || !types_compatible_p (oprnd_info->first_op_type,
400 TREE_TYPE (oprnd))))
401 {
402 /* Try swapping operands if we got a mismatch. */
403 if (i == 0
404 && !swapped
405 && commutative)
406 {
407 swapped = true;
408 goto again;
409 }
410
411 if (dump_enabled_p ())
412 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
413 "Build SLP failed: different types\n");
414
415 return 1;
416 }
417 }
418
419 /* Check the types of the definitions. */
420 switch (dt)
421 {
422 case vect_constant_def:
423 case vect_external_def:
424 case vect_reduction_def:
425 break;
426
427 case vect_internal_def:
428 oprnd_info->def_stmts.quick_push (def_stmt);
429 break;
430
431 default:
432 /* FORNOW: Not supported. */
433 if (dump_enabled_p ())
434 {
435 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
436 "Build SLP failed: illegal type of def ");
437 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
438 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
439 }
440
441 return -1;
442 }
443 }
444
445 /* Swap operands. */
446 if (swapped)
447 {
448 if (first_op_cond)
449 {
450 tree cond = gimple_assign_rhs1 (stmt);
451 swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
452 &TREE_OPERAND (cond, 1));
453 TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
454 }
455 else
456 swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
457 gimple_assign_rhs2_ptr (stmt));
458 }
459
460 return 0;
461 }
462
463
 464 /* Verify that the scalar stmts STMTS are isomorphic, do not require
 465    data permutation, and are not of unsupported types of operation.
 466    Return true if so; otherwise return false and indicate in *MATCHES
 467    which stmts are not isomorphic to the first one.  If MATCHES[0]
 468    is false then this indicates that the comparison could not be
 469    carried out or that the stmts will never be vectorized by SLP.  */
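/* For example, the four scalar stmts

     a[0] = b[0] + c[0];
     a[1] = b[1] + c[1];
     a[2] = b[2] + c[2];
     a[3] = b[3] + c[3];

   are isomorphic: each performs the same operation on operands of the
   same types, so the group can become a single vector addition.  */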
470
471 static bool
472 vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
473 vec<gimple> stmts, unsigned int group_size,
474 unsigned nops, unsigned int *max_nunits,
475 unsigned int vectorization_factor, bool *matches)
476 {
477 unsigned int i;
478 gimple stmt = stmts[0];
479 enum tree_code first_stmt_code = ERROR_MARK, rhs_code = ERROR_MARK;
480 enum tree_code first_cond_code = ERROR_MARK;
481 tree lhs;
482 bool need_same_oprnds = false;
483 tree vectype, scalar_type, first_op1 = NULL_TREE;
484 optab optab;
485 int icode;
486 machine_mode optab_op2_mode;
487 machine_mode vec_mode;
488 struct data_reference *first_dr;
489 HOST_WIDE_INT dummy;
490 gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
491 tree cond;
492
493 /* For every stmt in NODE find its def stmt/s. */
494 FOR_EACH_VEC_ELT (stmts, i, stmt)
495 {
496 matches[i] = false;
497
498 if (dump_enabled_p ())
499 {
500 dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
501 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
502 dump_printf (MSG_NOTE, "\n");
503 }
504
505 /* Fail to vectorize statements marked as unvectorizable. */
506 if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
507 {
508 if (dump_enabled_p ())
509 {
510 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
511 "Build SLP failed: unvectorizable statement ");
512 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
513 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
514 }
515 /* Fatal mismatch. */
516 matches[0] = false;
517 return false;
518 }
519
520 lhs = gimple_get_lhs (stmt);
521 if (lhs == NULL_TREE)
522 {
523 if (dump_enabled_p ())
524 {
525 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
526 "Build SLP failed: not GIMPLE_ASSIGN nor "
527 "GIMPLE_CALL ");
528 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
529 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
530 }
531 /* Fatal mismatch. */
532 matches[0] = false;
533 return false;
534 }
535
536 if (is_gimple_assign (stmt)
537 && gimple_assign_rhs_code (stmt) == COND_EXPR
538 && (cond = gimple_assign_rhs1 (stmt))
539 && !COMPARISON_CLASS_P (cond))
540 {
541 if (dump_enabled_p ())
542 {
543 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
544 "Build SLP failed: condition is not "
545 "comparison ");
546 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
547 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
548 }
549 /* Fatal mismatch. */
550 matches[0] = false;
551 return false;
552 }
553
554 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
555 vectype = get_vectype_for_scalar_type (scalar_type);
556 if (!vectype)
557 {
558 if (dump_enabled_p ())
559 {
560 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
561 "Build SLP failed: unsupported data-type ");
562 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
563 scalar_type);
564 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
565 }
566 /* Fatal mismatch. */
567 matches[0] = false;
568 return false;
569 }
570
571 /* In case of multiple types we need to detect the smallest type. */
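      /* E.g. a group mixing int and short operations on a target with
	 128-bit vectors yields V4SI and V8HI vector types; *max_nunits
	 then becomes 8 and, for basic-block SLP, so does the
	 vectorization factor.  */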
572 if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
573 {
574 *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
575 if (bb_vinfo)
576 vectorization_factor = *max_nunits;
577 }
578
579 if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
580 {
581 rhs_code = CALL_EXPR;
582 if (gimple_call_internal_p (call_stmt)
583 || gimple_call_tail_p (call_stmt)
584 || gimple_call_noreturn_p (call_stmt)
585 || !gimple_call_nothrow_p (call_stmt)
586 || gimple_call_chain (call_stmt))
587 {
588 if (dump_enabled_p ())
589 {
590 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
591 "Build SLP failed: unsupported call type ");
592 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
593 call_stmt, 0);
594 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
595 }
596 /* Fatal mismatch. */
597 matches[0] = false;
598 return false;
599 }
600 }
601 else
602 rhs_code = gimple_assign_rhs_code (stmt);
603
604 /* Check the operation. */
605 if (i == 0)
606 {
607 first_stmt_code = rhs_code;
608
609 /* Shift arguments should be equal in all the packed stmts for a
610 vector shift with scalar shift operand. */
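	  /* E.g. for { a[0] << s, a[1] << s, ... } a vector/scalar shift
	     applies the single scalar amount S to every lane, so all the
	     packed stmts must use the same second operand.  */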
611 if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
612 || rhs_code == LROTATE_EXPR
613 || rhs_code == RROTATE_EXPR)
614 {
615 vec_mode = TYPE_MODE (vectype);
616
617 /* First see if we have a vector/vector shift. */
618 optab = optab_for_tree_code (rhs_code, vectype,
619 optab_vector);
620
621 if (!optab
622 || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
623 {
624 /* No vector/vector shift, try for a vector/scalar shift. */
625 optab = optab_for_tree_code (rhs_code, vectype,
626 optab_scalar);
627
628 if (!optab)
629 {
630 if (dump_enabled_p ())
631 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
632 "Build SLP failed: no optab.\n");
633 /* Fatal mismatch. */
634 matches[0] = false;
635 return false;
636 }
637 icode = (int) optab_handler (optab, vec_mode);
638 if (icode == CODE_FOR_nothing)
639 {
640 if (dump_enabled_p ())
641 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
642 "Build SLP failed: "
643 "op not supported by target.\n");
644 /* Fatal mismatch. */
645 matches[0] = false;
646 return false;
647 }
648 optab_op2_mode = insn_data[icode].operand[2].mode;
649 if (!VECTOR_MODE_P (optab_op2_mode))
650 {
651 need_same_oprnds = true;
652 first_op1 = gimple_assign_rhs2 (stmt);
653 }
654 }
655 }
656 else if (rhs_code == WIDEN_LSHIFT_EXPR)
657 {
658 need_same_oprnds = true;
659 first_op1 = gimple_assign_rhs2 (stmt);
660 }
661 }
662 else
663 {
664 if (first_stmt_code != rhs_code
665 && (first_stmt_code != IMAGPART_EXPR
666 || rhs_code != REALPART_EXPR)
667 && (first_stmt_code != REALPART_EXPR
668 || rhs_code != IMAGPART_EXPR)
669 && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
670 && (first_stmt_code == ARRAY_REF
671 || first_stmt_code == BIT_FIELD_REF
672 || first_stmt_code == INDIRECT_REF
673 || first_stmt_code == COMPONENT_REF
674 || first_stmt_code == MEM_REF)))
675 {
676 if (dump_enabled_p ())
677 {
678 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
679 "Build SLP failed: different operation "
680 "in stmt ");
681 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
682 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
683 }
684 /* Mismatch. */
685 continue;
686 }
687
688 if (need_same_oprnds
689 && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
690 {
691 if (dump_enabled_p ())
692 {
693 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
694 "Build SLP failed: different shift "
695 "arguments in ");
696 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
697 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
698 }
699 /* Mismatch. */
700 continue;
701 }
702
703 if (rhs_code == CALL_EXPR)
704 {
705 gimple first_stmt = stmts[0];
706 if (gimple_call_num_args (stmt) != nops
707 || !operand_equal_p (gimple_call_fn (first_stmt),
708 gimple_call_fn (stmt), 0)
709 || gimple_call_fntype (first_stmt)
710 != gimple_call_fntype (stmt))
711 {
712 if (dump_enabled_p ())
713 {
714 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
715 "Build SLP failed: different calls in ");
716 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
717 stmt, 0);
718 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
719 }
720 /* Mismatch. */
721 continue;
722 }
723 }
724 }
725
726 /* Grouped store or load. */
727 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
728 {
729 if (REFERENCE_CLASS_P (lhs))
730 {
731 /* Store. */
732 ;
733 }
734 else
735 {
736 /* Load. */
737 unsigned unrolling_factor
738 = least_common_multiple
739 (*max_nunits, group_size) / group_size;
740 /* FORNOW: Check that there is no gap between the loads
741 and no gap between the groups when we need to load
742 multiple groups at once.
743 ??? We should enhance this to only disallow gaps
744 inside vectors. */
745 if ((unrolling_factor > 1
746 && ((GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
747 && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
748 /* If the group is split up then GROUP_GAP
749 isn't correct here, nor is GROUP_FIRST_ELEMENT. */
750 || GROUP_SIZE (vinfo_for_stmt (stmt)) > group_size))
751 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
752 && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
753 {
754 if (dump_enabled_p ())
755 {
756 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
757 "Build SLP failed: grouped "
758 "loads have gaps ");
759 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
760 stmt, 0);
761 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
762 }
763 /* Fatal mismatch. */
764 matches[0] = false;
765 return false;
766 }
767
 768 	  /* Check that the size of the interleaved load group is not
 769 	     greater than the SLP group size.  */
770 unsigned ncopies
771 = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
772 if (loop_vinfo
773 && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
774 && ((GROUP_SIZE (vinfo_for_stmt (stmt))
775 - GROUP_GAP (vinfo_for_stmt (stmt)))
776 > ncopies * group_size))
777 {
778 if (dump_enabled_p ())
779 {
780 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
781 "Build SLP failed: the number "
782 "of interleaved loads is greater than "
783 "the SLP group size ");
784 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
785 stmt, 0);
786 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
787 }
788 /* Fatal mismatch. */
789 matches[0] = false;
790 return false;
791 }
792
793 old_first_load = first_load;
794 first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
795 if (prev_first_load)
796 {
797 /* Check that there are no loads from different interleaving
798 chains in the same node. */
799 if (prev_first_load != first_load)
800 {
801 if (dump_enabled_p ())
802 {
803 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
804 vect_location,
805 "Build SLP failed: different "
806 "interleaving chains in one node ");
807 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
808 stmt, 0);
809 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
810 }
811 /* Mismatch. */
812 continue;
813 }
814 }
815 else
816 prev_first_load = first_load;
817
818 /* In some cases a group of loads is just the same load
819 repeated N times. Only analyze its cost once. */
820 if (first_load == stmt && old_first_load != first_load)
821 {
822 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
823 if (vect_supportable_dr_alignment (first_dr, false)
824 == dr_unaligned_unsupported)
825 {
826 if (dump_enabled_p ())
827 {
828 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
829 vect_location,
830 "Build SLP failed: unsupported "
831 "unaligned load ");
832 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
833 stmt, 0);
834 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
835 }
836 /* Fatal mismatch. */
837 matches[0] = false;
838 return false;
839 }
840 }
841 }
842 } /* Grouped access. */
843 else
844 {
845 if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
846 {
 847 	      /* Non-grouped load.  */
848 if (dump_enabled_p ())
849 {
850 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
851 "Build SLP failed: not grouped load ");
852 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
853 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
854 }
855
 856 	      /* FORNOW: non-grouped loads are not supported.  */
857 /* Fatal mismatch. */
858 matches[0] = false;
859 return false;
860 }
861
 862 	  /* Not a memory operation.  */
863 if (TREE_CODE_CLASS (rhs_code) != tcc_binary
864 && TREE_CODE_CLASS (rhs_code) != tcc_unary
865 && rhs_code != COND_EXPR
866 && rhs_code != CALL_EXPR)
867 {
868 if (dump_enabled_p ())
869 {
870 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
871 "Build SLP failed: operation");
872 dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
873 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
874 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
875 }
876 /* Fatal mismatch. */
877 matches[0] = false;
878 return false;
879 }
880
881 if (rhs_code == COND_EXPR)
882 {
883 tree cond_expr = gimple_assign_rhs1 (stmt);
884
885 if (i == 0)
886 first_cond_code = TREE_CODE (cond_expr);
887 else if (first_cond_code != TREE_CODE (cond_expr))
888 {
889 if (dump_enabled_p ())
890 {
891 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
892 "Build SLP failed: different"
893 " operation");
894 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
895 stmt, 0);
896 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
897 }
898 /* Mismatch. */
899 continue;
900 }
901 }
902 }
903
904 matches[i] = true;
905 }
906
907 for (i = 0; i < group_size; ++i)
908 if (!matches[i])
909 return false;
910
911 return true;
912 }
913
 914 /* Recursively build an SLP tree starting from NODE.
 915    Fail (and return FALSE) if def-stmts are not isomorphic, require
 916    data permutation or are of unsupported types of operation;
 917    in that case MATCHES indicates which of the stmts failed to
 918    match the first one.
 919    Otherwise, return TRUE.  */
920
921 static bool
922 vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
923 slp_tree *node, unsigned int group_size,
924 unsigned int *max_nunits,
925 vec<slp_tree> *loads,
926 unsigned int vectorization_factor,
927 bool *matches, unsigned *npermutes, unsigned *tree_size,
928 unsigned max_tree_size)
929 {
930 unsigned nops, i, this_tree_size = 0;
931 gimple stmt;
932
933 matches[0] = false;
934
935 stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
936 if (is_gimple_call (stmt))
937 nops = gimple_call_num_args (stmt);
938 else if (is_gimple_assign (stmt))
939 {
940 nops = gimple_num_ops (stmt) - 1;
941 if (gimple_assign_rhs_code (stmt) == COND_EXPR)
942 nops++;
943 }
944 else
945 return false;
946
947 if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
948 SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
949 max_nunits, vectorization_factor, matches))
950 return false;
951
952 /* If the SLP node is a load, terminate the recursion. */
953 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
954 && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
955 {
956 loads->safe_push (*node);
957 return true;
958 }
959
960 /* Get at the operands, verifying they are compatible. */
961 vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
962 slp_oprnd_info oprnd_info;
963 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
964 {
965 switch (vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
966 stmt, (i == 0), &oprnds_info))
967 {
968 case 0:
969 break;
970 case -1:
971 matches[0] = false;
972 vect_free_oprnd_info (oprnds_info);
973 return false;
974 case 1:
975 matches[i] = false;
976 break;
977 }
978 }
979 for (i = 0; i < group_size; ++i)
980 if (!matches[i])
981 {
982 vect_free_oprnd_info (oprnds_info);
983 return false;
984 }
985
986 stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
987
988 /* Create SLP_TREE nodes for the definition node/s. */
989 FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
990 {
991 slp_tree child;
992 unsigned old_nloads = loads->length ();
993 unsigned old_max_nunits = *max_nunits;
994
995 if (oprnd_info->first_dt != vect_internal_def)
996 continue;
997
998 if (++this_tree_size > max_tree_size)
999 {
1000 vect_free_oprnd_info (oprnds_info);
1001 return false;
1002 }
1003
1004 child = vect_create_new_slp_node (oprnd_info->def_stmts);
1005 if (!child)
1006 {
1007 vect_free_oprnd_info (oprnds_info);
1008 return false;
1009 }
1010
1011 if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
1012 group_size, max_nunits, loads,
1013 vectorization_factor, matches,
1014 npermutes, &this_tree_size, max_tree_size))
1015 {
1016 oprnd_info->def_stmts = vNULL;
1017 SLP_TREE_CHILDREN (*node).quick_push (child);
1018 continue;
1019 }
1020
 1021       /* If the SLP build failed fatally and we analyze a basic-block,
1022 simply treat nodes we fail to build as externally defined
1023 (and thus build vectors from the scalar defs).
1024 The cost model will reject outright expensive cases.
 1025 	 ??? This doesn't treat cases where permutation ultimately
1026 fails (or we don't try permutation below). Ideally we'd
1027 even compute a permutation that will end up with the maximum
1028 SLP tree size... */
1029 if (bb_vinfo
1030 && !matches[0]
1031 /* ??? Rejecting patterns this way doesn't work. We'd have to
1032 do extra work to cancel the pattern so the uses see the
1033 scalar version. */
1034 && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))
1035 {
1036 dump_printf_loc (MSG_NOTE, vect_location,
1037 "Building vector operands from scalars\n");
1038 oprnd_info->def_stmts = vNULL;
1039 vect_free_slp_tree (child);
1040 SLP_TREE_CHILDREN (*node).quick_push (NULL);
1041 continue;
1042 }
1043
1044 /* If the SLP build for operand zero failed and operand zero
 1045 	 and one can be commuted, try that for the scalar stmts
1046 that failed the match. */
1047 if (i == 0
1048 /* A first scalar stmt mismatch signals a fatal mismatch. */
1049 && matches[0]
1050 /* ??? For COND_EXPRs we can swap the comparison operands
1051 as well as the arms under some constraints. */
1052 && nops == 2
1053 && oprnds_info[1]->first_dt == vect_internal_def
1054 && is_gimple_assign (stmt)
1055 && commutative_tree_code (gimple_assign_rhs_code (stmt))
 1056 	  /* Do so only if the number of unsuccessful permutes was not
 1057 	     more than a cut-off, as re-trying the recursive match on
1058 possibly each level of the tree would expose exponential
1059 behavior. */
1060 && *npermutes < 4)
1061 {
1062 unsigned int j;
1063 slp_tree grandchild;
1064
1065 /* Roll back. */
1066 *max_nunits = old_max_nunits;
1067 loads->truncate (old_nloads);
1068 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
1069 vect_free_slp_tree (grandchild);
1070 SLP_TREE_CHILDREN (child).truncate (0);
1071
1072 /* Swap mismatched definition stmts. */
1073 dump_printf_loc (MSG_NOTE, vect_location,
1074 "Re-trying with swapped operands of stmts ");
1075 for (j = 0; j < group_size; ++j)
1076 if (!matches[j])
1077 {
1078 gimple tem = oprnds_info[0]->def_stmts[j];
1079 oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
1080 oprnds_info[1]->def_stmts[j] = tem;
1081 dump_printf (MSG_NOTE, "%d ", j);
1082 }
1083 dump_printf (MSG_NOTE, "\n");
1084 /* And try again with scratch 'matches' ... */
1085 bool *tem = XALLOCAVEC (bool, group_size);
1086 if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
1087 group_size, max_nunits, loads,
1088 vectorization_factor,
1089 tem, npermutes, &this_tree_size,
1090 max_tree_size))
1091 {
1092 /* ... so if successful we can apply the operand swapping
1093 to the GIMPLE IL. This is necessary because for example
1094 vect_get_slp_defs uses operand indexes and thus expects
1095 canonical operand order. */
1096 for (j = 0; j < group_size; ++j)
1097 if (!matches[j])
1098 {
1099 gimple stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
1100 swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
1101 gimple_assign_rhs2_ptr (stmt));
1102 }
1103 oprnd_info->def_stmts = vNULL;
1104 SLP_TREE_CHILDREN (*node).quick_push (child);
1105 continue;
1106 }
1107
1108 ++*npermutes;
1109 }
1110
1111 oprnd_info->def_stmts = vNULL;
1112 vect_free_slp_tree (child);
1113 vect_free_oprnd_info (oprnds_info);
1114 return false;
1115 }
1116
1117 if (tree_size)
1118 *tree_size += this_tree_size;
1119
1120 vect_free_oprnd_info (oprnds_info);
1121 return true;
1122 }
1123
 1124 /* Dump an SLP tree NODE using the flags specified in DUMP_KIND.  */
1125
1126 static void
1127 vect_print_slp_tree (int dump_kind, slp_tree node)
1128 {
1129 int i;
1130 gimple stmt;
1131 slp_tree child;
1132
1133 if (!node)
1134 return;
1135
1136 dump_printf (dump_kind, "node ");
1137 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
1138 {
1139 dump_printf (dump_kind, "\n\tstmt %d ", i);
1140 dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
1141 }
1142 dump_printf (dump_kind, "\n");
1143
1144 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
1145 vect_print_slp_tree (dump_kind, child);
1146 }
1147
1148
1149 /* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
1150 If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
1151 J). Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
1152 stmts in NODE are to be marked. */
1153
1154 static void
1155 vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
1156 {
1157 int i;
1158 gimple stmt;
1159 slp_tree child;
1160
1161 if (!node)
1162 return;
1163
1164 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
1165 if (j < 0 || i == j)
1166 STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;
1167
1168 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
1169 vect_mark_slp_stmts (child, mark, j);
1170 }
1171
1172
1173 /* Mark the statements of the tree rooted at NODE as relevant (vect_used). */
1174
1175 static void
1176 vect_mark_slp_stmts_relevant (slp_tree node)
1177 {
1178 int i;
1179 gimple stmt;
1180 stmt_vec_info stmt_info;
1181 slp_tree child;
1182
1183 if (!node)
1184 return;
1185
1186 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
1187 {
1188 stmt_info = vinfo_for_stmt (stmt);
1189 gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
1190 || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
1191 STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
1192 }
1193
1194 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
1195 vect_mark_slp_stmts_relevant (child);
1196 }
1197
1198
1199 /* Rearrange the statements of NODE according to PERMUTATION. */
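/* E.g. with PERMUTATION {1, 0, 3, 2} the stmt at index I is moved to
   index PERMUTATION[I], so adjacent scalar stmts swap places in every
   node of the tree.  */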
1200
1201 static void
1202 vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
1203 vec<unsigned> permutation)
1204 {
1205 gimple stmt;
1206 vec<gimple> tmp_stmts;
1207 unsigned int i;
1208 slp_tree child;
1209
1210 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
1211 vect_slp_rearrange_stmts (child, group_size, permutation);
1212
1213 gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
1214 tmp_stmts.create (group_size);
1215 tmp_stmts.quick_grow_cleared (group_size);
1216
1217 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
1218 tmp_stmts[permutation[i]] = stmt;
1219
1220 SLP_TREE_SCALAR_STMTS (node).release ();
1221 SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
1222 }
1223
1224
1225 /* Check if the required load permutations in the SLP instance
1226 SLP_INSTN are supported. */
1227
1228 static bool
1229 vect_supported_load_permutation_p (slp_instance slp_instn)
1230 {
1231 unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
1232 unsigned int i, j, k, next;
1233 sbitmap load_index;
1234 slp_tree node;
1235 gimple stmt, load, next_load, first_load;
1236 struct data_reference *dr;
1237
1238 if (dump_enabled_p ())
1239 {
1240 dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
1241 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1242 if (node->load_permutation.exists ())
1243 FOR_EACH_VEC_ELT (node->load_permutation, j, next)
1244 dump_printf (MSG_NOTE, "%d ", next);
1245 else
1246 for (k = 0; k < group_size; ++k)
1247 dump_printf (MSG_NOTE, "%d ", k);
1248 dump_printf (MSG_NOTE, "\n");
1249 }
1250
1251 /* In case of reduction every load permutation is allowed, since the order
1252 of the reduction statements is not important (as opposed to the case of
1253 grouped stores). The only condition we need to check is that all the
1254 load nodes are of the same size and have the same permutation (and then
1255 rearrange all the nodes of the SLP instance according to this
1256 permutation). */
1257
1258 /* Check that all the load nodes are of the same size. */
1259 /* ??? Can't we assert this? */
1260 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1261 if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
1262 return false;
1263
1264 node = SLP_INSTANCE_TREE (slp_instn);
1265 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
1266
1267 /* Reduction (there are no data-refs in the root).
 1268      In a reduction chain the order of the loads is important.  */
1269 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
1270 && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
1271 {
1272 slp_tree load;
1273 unsigned int lidx;
1274
1275 /* Compare all the permutation sequences to the first one. We know
1276 that at least one load is permuted. */
1277 node = SLP_INSTANCE_LOADS (slp_instn)[0];
1278 if (!node->load_permutation.exists ())
1279 return false;
1280 for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
1281 {
1282 if (!load->load_permutation.exists ())
1283 return false;
1284 FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
1285 if (lidx != node->load_permutation[j])
1286 return false;
1287 }
1288
1289 /* Check that the loads in the first sequence are different and there
1290 are no gaps between them. */
1291 load_index = sbitmap_alloc (group_size);
1292 bitmap_clear (load_index);
1293 FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
1294 {
1295 if (bitmap_bit_p (load_index, lidx))
1296 {
1297 sbitmap_free (load_index);
1298 return false;
1299 }
1300 bitmap_set_bit (load_index, lidx);
1301 }
1302 for (i = 0; i < group_size; i++)
1303 if (!bitmap_bit_p (load_index, i))
1304 {
1305 sbitmap_free (load_index);
1306 return false;
1307 }
1308 sbitmap_free (load_index);
1309
1310 /* This permutation is valid for reduction. Since the order of the
1311 statements in the nodes is not important unless they are memory
1312 accesses, we can rearrange the statements in all the nodes
1313 according to the order of the loads. */
1314 vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
1315 node->load_permutation);
1316
1317 /* We are done, no actual permutations need to be generated. */
1318 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1319 SLP_TREE_LOAD_PERMUTATION (node).release ();
1320 return true;
1321 }
1322
1323 /* In basic block vectorization we allow any subchain of an interleaving
1324 chain.
 1325      FORNOW: not supported in loop SLP because of realignment complications.  */
1326 if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
1327 {
1328 /* Check whether the loads in an instance form a subchain and thus
1329 no permutation is necessary. */
1330 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1331 {
1332 bool subchain_p = true;
1333 next_load = NULL;
1334 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
1335 {
1336 if (j != 0 && next_load != load)
1337 {
1338 subchain_p = false;
1339 break;
1340 }
1341 next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
1342 }
1343 if (subchain_p)
1344 SLP_TREE_LOAD_PERMUTATION (node).release ();
1345 else
1346 {
1347 /* Verify the permutation can be generated. */
1348 vec<tree> tem;
1349 if (!vect_transform_slp_perm_load (node, tem, NULL,
1350 1, slp_instn, true))
1351 {
1352 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
1353 vect_location,
1354 "unsupported load permutation\n");
1355 return false;
1356 }
1357 }
1358 }
1359
1360 /* Check that the alignment of the first load in every subchain, i.e.,
1361 the first statement in every load node, is supported.
1362 ??? This belongs in alignment checking. */
1363 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1364 {
1365 first_load = SLP_TREE_SCALAR_STMTS (node)[0];
1366 if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
1367 {
1368 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
1369 if (vect_supportable_dr_alignment (dr, false)
1370 == dr_unaligned_unsupported)
1371 {
1372 if (dump_enabled_p ())
1373 {
1374 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
1375 vect_location,
1376 "unsupported unaligned load ");
1377 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
1378 first_load, 0);
1379 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
1380 }
1381 return false;
1382 }
1383 }
1384 }
1385
1386 return true;
1387 }
1388
 1389   /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
 1390      GROUP_SIZE, where each sequence of the same dr is also of GROUP_SIZE
 1391      length (unless this is a reduction).  */
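  /* E.g. for GROUP_SIZE 2 this accepts two load nodes whose permutations
     are {0, 0} and {1, 1}: each node replicates a single element of the
     chain, as happens when the real and imaginary parts of complex
     numbers are split apart.  */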
1392 if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
1393 return false;
1394 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1395 if (!node->load_permutation.exists ())
1396 return false;
1397
1398 load_index = sbitmap_alloc (group_size);
1399 bitmap_clear (load_index);
1400 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1401 {
1402 unsigned int lidx = node->load_permutation[0];
1403 if (bitmap_bit_p (load_index, lidx))
1404 {
1405 sbitmap_free (load_index);
1406 return false;
1407 }
1408 bitmap_set_bit (load_index, lidx);
1409 FOR_EACH_VEC_ELT (node->load_permutation, j, k)
1410 if (k != lidx)
1411 {
1412 sbitmap_free (load_index);
1413 return false;
1414 }
1415 }
1416 for (i = 0; i < group_size; i++)
1417 if (!bitmap_bit_p (load_index, i))
1418 {
1419 sbitmap_free (load_index);
1420 return false;
1421 }
1422 sbitmap_free (load_index);
1423
1424 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1425 if (node->load_permutation.exists ()
1426 && !vect_transform_slp_perm_load
1427 (node, vNULL, NULL,
1428 SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
1429 return false;
1430 return true;
1431 }
1432
1433
 1434 /* Find the last scalar stmt in the SLP node NODE, taking patterns into account.  */
1435
1436 static gimple
1437 vect_find_last_scalar_stmt_in_slp (slp_tree node)
1438 {
1439 gimple last = NULL, stmt;
1440
1441 for (int i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &stmt); i++)
1442 {
1443 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1444 if (is_pattern_stmt_p (stmt_vinfo))
1445 last = get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo), last);
1446 else
1447 last = get_later_stmt (stmt, last);
1448 }
1449
1450 return last;
1451 }
1452
1453 /* Compute the cost for the SLP node NODE in the SLP instance INSTANCE. */
1454
1455 static void
1456 vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1457 slp_instance instance, slp_tree node,
1458 stmt_vector_for_cost *prologue_cost_vec,
1459 unsigned ncopies_for_cost)
1460 {
1461 stmt_vector_for_cost *body_cost_vec = &SLP_INSTANCE_BODY_COST_VEC (instance);
1462
1463 unsigned i;
1464 slp_tree child;
1465 gimple stmt, s;
1466 stmt_vec_info stmt_info;
1467 tree lhs;
1468 unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
1469
1470 /* Recurse down the SLP tree. */
1471 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
1472 if (child)
1473 vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
1474 instance, child, prologue_cost_vec,
1475 ncopies_for_cost);
1476
1477 /* Look at the first scalar stmt to determine the cost. */
1478 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
1479 stmt_info = vinfo_for_stmt (stmt);
1480 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1481 {
1482 if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
1483 vect_model_store_cost (stmt_info, ncopies_for_cost, false,
1484 vect_uninitialized_def,
1485 node, prologue_cost_vec, body_cost_vec);
1486 else
1487 {
1488 int i;
1489 gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
1490 vect_model_load_cost (stmt_info, ncopies_for_cost, false,
1491 node, prologue_cost_vec, body_cost_vec);
1492 /* If the load is permuted record the cost for the permutation.
1493 ??? Loads from multiple chains are let through here only
1494 for a single special case involving complex numbers where
1495 in the end no permutation is necessary. */
1496 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
1497 if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
1498 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
1499 && vect_get_place_in_interleaving_chain
1500 (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
1501 {
1502 record_stmt_cost (body_cost_vec, group_size, vec_perm,
1503 stmt_info, 0, vect_body);
1504 break;
1505 }
1506 }
1507 }
1508 else
1509 record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
1510 stmt_info, 0, vect_body);
1511
1512 /* Scan operands and account for prologue cost of constants/externals.
1513 ??? This over-estimates cost for multiple uses and should be
1514 re-engineered. */
1515 lhs = gimple_get_lhs (stmt);
1516 for (i = 0; i < gimple_num_ops (stmt); ++i)
1517 {
1518 tree def, op = gimple_op (stmt, i);
1519 gimple def_stmt;
1520 enum vect_def_type dt;
1521 if (!op || op == lhs)
1522 continue;
1523 if (vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo,
1524 &def_stmt, &def, &dt))
1525 {
1526 /* Without looking at the actual initializer a vector of
 1527 	     constants can be implemented as a load from the constant pool.
1528 ??? We need to pass down stmt_info for a vector type
1529 even if it points to the wrong stmt. */
1530 if (dt == vect_constant_def)
1531 record_stmt_cost (prologue_cost_vec, 1, vector_load,
1532 stmt_info, 0, vect_prologue);
1533 else if (dt == vect_external_def)
1534 record_stmt_cost (prologue_cost_vec, 1, vec_construct,
1535 stmt_info, 0, vect_prologue);
1536 }
1537 }
1538 }
1539
1540 /* Compute the cost for the SLP instance INSTANCE. */
1541
1542 static void
1543 vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1544 slp_instance instance, unsigned nunits)
1545 {
1546 stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
1547 unsigned ncopies_for_cost;
1548 stmt_info_for_cost *si;
1549 unsigned i;
1550
1551 /* Calculate the number of vector stmts to create based on the unrolling
1552 factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
 1553      GROUP_SIZE / NUNITS otherwise).  */
1554 unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
1555 ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
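  /* E.g. NUNITS 4 and GROUP_SIZE 6 give least_common_multiple 12 and
     thus 3 vector stmts are needed to cover the (unrolled) group.  */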
1556
1557 prologue_cost_vec.create (10);
1558 body_cost_vec.create (10);
1559 SLP_INSTANCE_BODY_COST_VEC (instance) = body_cost_vec;
1560 vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
1561 instance, SLP_INSTANCE_TREE (instance),
1562 &prologue_cost_vec, ncopies_for_cost);
1563
1564 /* Record the prologue costs, which were delayed until we were
1565 sure that SLP was successful. Unlike the body costs, we know
1566 the final values now regardless of the loop vectorization factor. */
1567 void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
1568 : BB_VINFO_TARGET_COST_DATA (bb_vinfo));
1569 FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
1570 {
1571 struct _stmt_vec_info *stmt_info
1572 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
1573 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
1574 si->misalign, vect_prologue);
1575 }
1576
1577 prologue_cost_vec.release ();
1578 }
1579
1580 /* Analyze an SLP instance starting from a group of grouped stores. Call
1581 vect_build_slp_tree to build a tree of packed stmts if possible.
1582 Return FALSE if it's impossible to SLP any stmt in the loop. */
1583
1584 static bool
1585 vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1586 gimple stmt, unsigned max_tree_size)
1587 {
1588 slp_instance new_instance;
1589 slp_tree node;
1590 unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
1591 unsigned int unrolling_factor = 1, nunits;
1592 tree vectype, scalar_type = NULL_TREE;
1593 gimple next;
1594 unsigned int vectorization_factor = 0;
1595 int i;
1596 unsigned int max_nunits = 0;
1597 vec<slp_tree> loads;
1598 struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
1599 vec<gimple> scalar_stmts;
1600
1601 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
1602 {
1603 if (dr)
1604 {
1605 scalar_type = TREE_TYPE (DR_REF (dr));
1606 vectype = get_vectype_for_scalar_type (scalar_type);
1607 }
1608 else
1609 {
1610 gcc_assert (loop_vinfo);
1611 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1612 }
1613
1614 group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
1615 }
1616 else
1617 {
1618 gcc_assert (loop_vinfo);
1619 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1620 group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
1621 }
1622
1623 if (!vectype)
1624 {
1625 if (dump_enabled_p ())
1626 {
1627 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1628 "Build SLP failed: unsupported data-type ");
1629 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
1630 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
1631 }
1632
1633 return false;
1634 }
1635
1636 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1637 if (loop_vinfo)
1638 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1639 else
1640 vectorization_factor = nunits;
1641
1642 /* Calculate the unrolling factor. */
1643 unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
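  /* E.g. NUNITS 4 and GROUP_SIZE 6 give an unrolling factor of 2: two
     copies of the group are needed to fill whole vectors.  */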
1644 if (unrolling_factor != 1 && !loop_vinfo)
1645 {
1646 if (dump_enabled_p ())
1647 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1648 "Build SLP failed: unrolling required in basic"
1649 " block SLP\n");
1650
1651 return false;
1652 }
1653
1654 /* Create a node (a root of the SLP tree) for the packed grouped stores. */
1655 scalar_stmts.create (group_size);
1656 next = stmt;
1657 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
1658 {
1659 /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS. */
1660 while (next)
1661 {
1662 if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
1663 && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
1664 scalar_stmts.safe_push (
1665 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
1666 else
1667 scalar_stmts.safe_push (next);
1668 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
1669 }
1670 }
1671 else
1672 {
1673 /* Collect reduction statements. */
1674 vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
1675 for (i = 0; reductions.iterate (i, &next); i++)
1676 scalar_stmts.safe_push (next);
1677 }
1678
1679 node = vect_create_new_slp_node (scalar_stmts);
1680
1681 loads.create (group_size);
1682
1683 /* Build the tree for the SLP instance. */
1684 bool *matches = XALLOCAVEC (bool, group_size);
1685 unsigned npermutes = 0;
1686 if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
1687 &max_nunits, &loads,
1688 vectorization_factor, matches, &npermutes, NULL,
1689 max_tree_size))
1690 {
1691 /* Calculate the unrolling factor based on the smallest type. */
1692 if (max_nunits > nunits)
1693 unrolling_factor = least_common_multiple (max_nunits, group_size)
1694 / group_size;
1695
1696 if (unrolling_factor != 1 && !loop_vinfo)
1697 {
1698 if (dump_enabled_p ())
1699 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1700 "Build SLP failed: unrolling required in basic"
1701 " block SLP\n");
1702 vect_free_slp_tree (node);
1703 loads.release ();
1704 return false;
1705 }
1706
1707 /* Create a new SLP instance. */
1708 new_instance = XNEW (struct _slp_instance);
1709 SLP_INSTANCE_TREE (new_instance) = node;
1710 SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
1711 SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
1712 SLP_INSTANCE_BODY_COST_VEC (new_instance) = vNULL;
1713 SLP_INSTANCE_LOADS (new_instance) = loads;
1714
1715 /* Compute the load permutation. */
1716 slp_tree load_node;
1717 bool loads_permuted = false;
1718 FOR_EACH_VEC_ELT (loads, i, load_node)
1719 {
1720 vec<unsigned> load_permutation;
1721 int j;
1722 gimple load, first_stmt;
1723 bool this_load_permuted = false;
1724 load_permutation.create (group_size);
1725 first_stmt = GROUP_FIRST_ELEMENT
1726 (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
1727 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
1728 {
1729 int load_place
1730 = vect_get_place_in_interleaving_chain (load, first_stmt);
1731 gcc_assert (load_place != -1);
1732 if (load_place != j)
1733 this_load_permuted = true;
1734 load_permutation.safe_push (load_place);
1735 }
1736 if (!this_load_permuted)
1737 {
1738 load_permutation.release ();
1739 continue;
1740 }
1741 SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
1742 loads_permuted = true;
1743 }
1744
1745 if (loads_permuted)
1746 {
1747 if (!vect_supported_load_permutation_p (new_instance))
1748 {
1749 if (dump_enabled_p ())
1750 {
1751 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1752 "Build SLP failed: unsupported load "
1753 "permutation ");
1754 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
1755 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
1756 }
1757 vect_free_slp_instance (new_instance);
1758 return false;
1759 }
1760 }
1761
1762
1763 if (loop_vinfo)
1764 {
1765 /* Compute the costs of this SLP instance. Delay this for BB
1766 vectorization as we don't have vector types computed yet. */
1767 vect_analyze_slp_cost (loop_vinfo, bb_vinfo,
1768 new_instance, TYPE_VECTOR_SUBPARTS (vectype));
1769 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
1770 }
1771 else
1772 BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);
1773
1774 if (dump_enabled_p ())
1775 vect_print_slp_tree (MSG_NOTE, node);
1776
1777 return true;
1778 }
1779
1780 /* Failed to SLP. */
1781 /* Free the allocated memory. */
1782 vect_free_slp_tree (node);
1783 loads.release ();
1784
1785 return false;
1786 }
1787
1788
 1789 /* Check if there are stmts in the loop that can be vectorized using SLP.
 1790    Build SLP trees of packed scalar stmts if SLP is possible.  */
1791
1792 bool
1793 vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1794 unsigned max_tree_size)
1795 {
1796 unsigned int i;
1797 vec<gimple> grouped_stores;
1798 vec<gimple> reductions = vNULL;
1799 vec<gimple> reduc_chains = vNULL;
1800 gimple first_element;
1801 bool ok = false;
1802
1803 if (dump_enabled_p ())
1804 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");
1805
1806 if (loop_vinfo)
1807 {
1808 grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
1809 reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
1810 reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
1811 }
1812 else
1813 grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);
1814
1815 /* Find SLP sequences starting from groups of grouped stores. */
1816 FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
1817 if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
1818 max_tree_size))
1819 ok = true;
1820
1821 if (bb_vinfo && !ok)
1822 {
1823 if (dump_enabled_p ())
1824 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1825 "Failed to SLP the basic block.\n");
1826
1827 return false;
1828 }
1829
1830 if (loop_vinfo
1831 && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
1832 {
1833 /* Find SLP sequences starting from reduction chains. */
1834 FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
1835 if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
1836 max_tree_size))
1837 ok = true;
1838 else
1839 return false;
1840
 1841       /* Don't try to vectorize SLP reductions if a reduction chain was
 1842 	 detected.  */
1843 return ok;
1844 }
1845
1846 /* Find SLP sequences starting from groups of reductions. */
1847 if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
1848 && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0],
1849 max_tree_size))
1850 ok = true;
1851
1852 return true;
1853 }
1854
1855
 1856 /* For each possible SLP instance decide whether to SLP it and calculate
 1857    the overall unrolling factor needed to SLP the loop.  Return TRUE if we
 1858    decided to SLP at least one instance.  */
1859
1860 bool
1861 vect_make_slp_decision (loop_vec_info loop_vinfo)
1862 {
1863 unsigned int i, unrolling_factor = 1;
1864 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
1865 slp_instance instance;
1866 int decided_to_slp = 0;
1867
1868 if (dump_enabled_p ())
1869 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
1870 "\n");
1871
1872 FOR_EACH_VEC_ELT (slp_instances, i, instance)
1873 {
1874 /* FORNOW: SLP if you can. */
1875 if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
1876 unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);
1877
1878 /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts. Later we
1879 call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
1880 loop-based vectorization. Such stmts will be marked as HYBRID. */
1881 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
1882 decided_to_slp++;
1883 }
1884
1885 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
1886
1887 if (decided_to_slp && dump_enabled_p ())
1888 dump_printf_loc (MSG_NOTE, vect_location,
1889 "Decided to SLP %d instances. Unrolling factor %d\n",
1890 decided_to_slp, unrolling_factor);
1891
1892 return (decided_to_slp > 0);
1893 }
1894
1895
1896 /* Find stmts that must be both vectorized and SLPed (since they feed stmts that
1897 can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */
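/* For example, if x = a + b is part of an SLP subtree but x is also used
   by a stmt that is only loop-vectorized, then x's def must be produced
   both by the SLP instance and by loop-based vectorization, so its stmt
   is marked HYBRID. */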
1898
1899 static void
1900 vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
1901 {
1902 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[i];
1903 imm_use_iterator imm_iter;
1904 gimple use_stmt;
1905 stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
1906 slp_tree child;
1907 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1908 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1909 int j;
1910
1911 /* Propagate hybrid down the SLP tree. */
1912 if (stype == hybrid)
1913 ;
1914 else if (HYBRID_SLP_STMT (stmt_vinfo))
1915 stype = hybrid;
1916 else
1917 {
1918 /* Check if a pure SLP stmt has uses in non-SLP stmts. */
1919 gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
1920 if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
1921 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
1922 if (gimple_bb (use_stmt)
1923 && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
1924 && (use_vinfo = vinfo_for_stmt (use_stmt))
1925 && !STMT_SLP_TYPE (use_vinfo)
1926 && (STMT_VINFO_RELEVANT (use_vinfo)
1927 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo))
1928 || (STMT_VINFO_IN_PATTERN_P (use_vinfo)
1929 && STMT_VINFO_RELATED_STMT (use_vinfo)
1930 && !STMT_SLP_TYPE (vinfo_for_stmt
1931 (STMT_VINFO_RELATED_STMT (use_vinfo)))))
1932 && !(gimple_code (use_stmt) == GIMPLE_PHI
1933 && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
1934 stype = hybrid;
1935 }
1936
1937 if (stype == hybrid)
1938 STMT_SLP_TYPE (stmt_vinfo) = hybrid;
1939
1940 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
1941 if (child)
1942 vect_detect_hybrid_slp_stmts (child, i, stype);
1943 }
1944
1945 /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */
1946
1947 static tree
1948 vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
1949 {
1950 walk_stmt_info *wi = (walk_stmt_info *)data;
1951 struct loop *loopp = (struct loop *)wi->info;
1952
1953 if (wi->is_lhs)
1954 return NULL_TREE;
1955
1956 if (TREE_CODE (*tp) == SSA_NAME
1957 && !SSA_NAME_IS_DEFAULT_DEF (*tp))
1958 {
1959 gimple def_stmt = SSA_NAME_DEF_STMT (*tp);
1960 if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
1961 && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
1962 STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
1963 }
1964
1965 return NULL_TREE;
1966 }
1967
1968 static tree
1969 vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
1970 walk_stmt_info *)
1971 {
1972 /* If the stmt is in a SLP instance then this isn't a reason
1973 to mark use definitions in other SLP instances as hybrid. */
1974 if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
1975 *handled = true;
1976 return NULL_TREE;
1977 }
1978
1979 /* Find stmts that must be both vectorized and SLPed. */
1980
1981 void
1982 vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
1983 {
1984 unsigned int i;
1985 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
1986 slp_instance instance;
1987
1988 if (dump_enabled_p ())
1989 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
1990 "\n");
1991
1992 /* First walk all pattern stmts in the loop, marking the defs of their
1993 uses as hybrid, because immediate uses in pattern stmts are not recorded. */
1994 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
1995 {
1996 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
1997 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
1998 gsi_next (&gsi))
1999 {
2000 gimple stmt = gsi_stmt (gsi);
2001 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2002 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2003 {
2004 walk_stmt_info wi;
2005 memset (&wi, 0, sizeof (wi));
2006 wi.info = LOOP_VINFO_LOOP (loop_vinfo);
2007 gimple_stmt_iterator gsi2
2008 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
2009 walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
2010 vect_detect_hybrid_slp_1, &wi);
2011 walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
2012 vect_detect_hybrid_slp_2,
2013 vect_detect_hybrid_slp_1, &wi);
2014 }
2015 }
2016 }
2017
2018 /* Then walk the SLP instance trees marking stmts with uses in
2019 non-SLP stmts as hybrid, also propagating hybrid down the
2020 SLP tree, collecting the above info on-the-fly. */
2021 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2022 {
2023 for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
2024 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
2025 i, pure_slp);
2026 }
2027 }
2028
2029
2030 /* Create and initialize a new bb_vec_info struct for BB, as well as
2031 stmt_vec_info structs for all the stmts in it. */
2032
2033 static bb_vec_info
2034 new_bb_vec_info (basic_block bb)
2035 {
2036 bb_vec_info res = NULL;
2037 gimple_stmt_iterator gsi;
2038
2039 res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
2040 BB_VINFO_BB (res) = bb;
2041
2042 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2043 {
2044 gimple stmt = gsi_stmt (gsi);
2045 gimple_set_uid (stmt, 0);
2046 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
2047 }
2048
2049 BB_VINFO_GROUPED_STORES (res).create (10);
2050 BB_VINFO_SLP_INSTANCES (res).create (2);
2051 BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
2052
2053 bb->aux = res;
2054 return res;
2055 }
2056
2057
2058 /* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
2059 stmts in the basic block. */
2060
2061 static void
2062 destroy_bb_vec_info (bb_vec_info bb_vinfo)
2063 {
2064 vec<slp_instance> slp_instances;
2065 slp_instance instance;
2066 basic_block bb;
2067 gimple_stmt_iterator si;
2068 unsigned i;
2069
2070 if (!bb_vinfo)
2071 return;
2072
2073 bb = BB_VINFO_BB (bb_vinfo);
2074
2075 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2076 {
2077 gimple stmt = gsi_stmt (si);
2078 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2079
2080 if (stmt_info)
2081 /* Free stmt_vec_info. */
2082 free_stmt_vec_info (stmt);
2083 }
2084
2085 vect_destroy_datarefs (NULL, bb_vinfo);
2086 free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
2087 BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
2088 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2089 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2090 vect_free_slp_instance (instance);
2091 BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
2092 destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
2093 free (bb_vinfo);
2094 bb->aux = NULL;
2095 }
2096
2097
2098 /* Analyze statements contained in SLP tree NODE after recursively analyzing
2099 the subtree. Return TRUE if the operations are supported. */
2100
2101 static bool
2102 vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
2103 {
2104 bool dummy;
2105 int i;
2106 gimple stmt;
2107 slp_tree child;
2108
2109 if (!node)
2110 return true;
2111
2112 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2113 if (!vect_slp_analyze_node_operations (bb_vinfo, child))
2114 return false;
2115
2116 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2117 {
2118 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2119 gcc_assert (stmt_info);
2120 gcc_assert (PURE_SLP_STMT (stmt_info));
2121
2122 if (!vect_analyze_stmt (stmt, &dummy, node))
2123 return false;
2124 }
2125
2126 return true;
2127 }
2128
2129
2130 /* Analyze statements in SLP instances of the basic block. Return TRUE if the
2131 operations are supported. */
2132
2133 static bool
2134 vect_slp_analyze_operations (bb_vec_info bb_vinfo)
2135 {
2136 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2137 slp_instance instance;
2138 int i;
2139
2140 for (i = 0; slp_instances.iterate (i, &instance); )
2141 {
2142 if (!vect_slp_analyze_node_operations (bb_vinfo,
2143 SLP_INSTANCE_TREE (instance)))
2144 {
2145 vect_free_slp_instance (instance);
2146 slp_instances.ordered_remove (i);
2147 }
2148 else
2149 i++;
2150 }
2151
2152 if (!slp_instances.length ())
2153 return false;
2154
2155 return true;
2156 }
2157
2158
2159 /* Compute the scalar cost of the SLP node NODE and its children
2160 and return it. Do not account for defs that are marked in LIFE;
2161 update LIFE according to uses of NODE. */
2162
2163 static unsigned
2164 vect_bb_slp_scalar_cost (basic_block bb,
2165 slp_tree node, vec<bool, va_heap> *life)
2166 {
2167 unsigned scalar_cost = 0;
2168 unsigned i;
2169 gimple stmt;
2170 slp_tree child;
2171
2172 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2173 {
2174 unsigned stmt_cost;
2175 ssa_op_iter op_iter;
2176 def_operand_p def_p;
2177 stmt_vec_info stmt_info;
2178
2179 if ((*life)[i])
2180 continue;
2181
2182 /* If there is a non-vectorized use of the defs then the scalar
2183 stmt is kept live, in which case we do not account for it or any
2184 required defs in the SLP children in the scalar cost. This
2185 way we make the vectorization look more costly compared to
2186 the scalar code. */
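/* E.g., if t = x + y belongs to the SLP subtree but t is also read by
   a PHI or by a stmt outside BB, the scalar add has to stay around
   anyway, so counting it as saved would make the scalar code look
   more expensive than it really is. */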
2187 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2188 {
2189 imm_use_iterator use_iter;
2190 gimple use_stmt;
2191 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2192 if (!is_gimple_debug (use_stmt)
2193 && (gimple_code (use_stmt) == GIMPLE_PHI
2194 || gimple_bb (use_stmt) != bb
2195 || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
2196 {
2197 (*life)[i] = true;
2198 BREAK_FROM_IMM_USE_STMT (use_iter);
2199 }
2200 }
2201 if ((*life)[i])
2202 continue;
2203
2204 stmt_info = vinfo_for_stmt (stmt);
2205 if (STMT_VINFO_DATA_REF (stmt_info))
2206 {
2207 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2208 stmt_cost = vect_get_stmt_cost (scalar_load);
2209 else
2210 stmt_cost = vect_get_stmt_cost (scalar_store);
2211 }
2212 else
2213 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2214
2215 scalar_cost += stmt_cost;
2216 }
2217
2218 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2219 if (child)
2220 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2221
2222 return scalar_cost;
2223 }
2224
2225 /* Check if vectorization of the basic block is profitable. */
2226
2227 static bool
2228 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2229 {
2230 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2231 slp_instance instance;
2232 int i, j;
2233 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2234 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2235 void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
2236 stmt_vec_info stmt_info = NULL;
2237 stmt_vector_for_cost body_cost_vec;
2238 stmt_info_for_cost *ci;
2239
2240 /* Calculate vector costs. */
2241 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2242 {
2243 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2244
2245 FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
2246 {
2247 stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
2248 (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
2249 stmt_info, ci->misalign, vect_body);
2250 }
2251 }
2252
2253 /* Calculate scalar cost. */
2254 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2255 {
2256 auto_vec<bool, 20> life;
2257 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2258 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2259 SLP_INSTANCE_TREE (instance),
2260 &life);
2261 }
2262
2263 /* Complete the target-specific cost calculation. */
2264 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2265 &vec_inside_cost, &vec_epilogue_cost);
2266
2267 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2268
2269 if (dump_enabled_p ())
2270 {
2271 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2272 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2273 vec_inside_cost);
2274 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2275 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2276 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2277 }
2278
2279 /* Vectorization is profitable if its cost is less than the cost of the
2280 scalar version. */
2281 if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2282 return false;
2283
2284 return true;
2285 }
2286
2287 /* Check if the basic block can be vectorized. */
2288
2289 static bb_vec_info
2290 vect_slp_analyze_bb_1 (basic_block bb)
2291 {
2292 bb_vec_info bb_vinfo;
2293 vec<slp_instance> slp_instances;
2294 slp_instance instance;
2295 int i;
2296 int min_vf = 2;
2297 unsigned n_stmts = 0;
2298
2299 bb_vinfo = new_bb_vec_info (bb);
2300 if (!bb_vinfo)
2301 return NULL;
2302
2303 if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
2304 {
2305 if (dump_enabled_p ())
2306 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2307 "not vectorized: unhandled data-ref in basic "
2308 "block.\n");
2309
2310 destroy_bb_vec_info (bb_vinfo);
2311 return NULL;
2312 }
2313
2314 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2315 {
2316 if (dump_enabled_p ())
2317 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2318 "not vectorized: not enough data-refs in "
2319 "basic block.\n");
2320
2321 destroy_bb_vec_info (bb_vinfo);
2322 return NULL;
2323 }
2324
2325 if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2326 {
2327 if (dump_enabled_p ())
2328 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2329 "not vectorized: unhandled data access in "
2330 "basic block.\n");
2331
2332 destroy_bb_vec_info (bb_vinfo);
2333 return NULL;
2334 }
2335
2336 vect_pattern_recog (NULL, bb_vinfo);
2337
2338 if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2339 {
2340 if (dump_enabled_p ())
2341 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2342 "not vectorized: bad data alignment in basic "
2343 "block.\n");
2344
2345 destroy_bb_vec_info (bb_vinfo);
2346 return NULL;
2347 }
2348
2349 /* Check the SLP opportunities in the basic block, analyze and build SLP
2350 trees. */
2351 if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
2352 {
2353 if (dump_enabled_p ())
2354 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2355 "not vectorized: failed to find SLP opportunities "
2356 "in basic block.\n");
2357
2358 destroy_bb_vec_info (bb_vinfo);
2359 return NULL;
2360 }
2361
2362 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2363
2364 /* Mark all the statements that we want to vectorize as pure SLP and
2365 relevant. */
2366 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2367 {
2368 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2369 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2370 }
2371
2372 /* Mark all the statements that we do not want to vectorize. */
2373 for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
2374 !gsi_end_p (gsi); gsi_next (&gsi))
2375 {
2376 stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
2377 if (STMT_SLP_TYPE (vinfo) != pure_slp)
2378 STMT_VINFO_VECTORIZABLE (vinfo) = false;
2379 }
2380
2381 /* Analyze dependences. At this point all stmts not participating in
2382 vectorization have to be marked. Dependence analysis assumes
2383 that we either vectorize all SLP instances or none at all. */
2384 if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2385 {
2386 if (dump_enabled_p ())
2387 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2388 "not vectorized: unhandled data dependence "
2389 "in basic block.\n");
2390
2391 destroy_bb_vec_info (bb_vinfo);
2392 return NULL;
2393 }
2394
2395 if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2396 {
2397 if (dump_enabled_p ())
2398 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2399 "not vectorized: unsupported alignment in basic "
2400 "block.\n");
2401 destroy_bb_vec_info (bb_vinfo);
2402 return NULL;
2403 }
2404
2405 if (!vect_slp_analyze_operations (bb_vinfo))
2406 {
2407 if (dump_enabled_p ())
2408 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2409 "not vectorized: bad operation in basic block.\n");
2410
2411 destroy_bb_vec_info (bb_vinfo);
2412 return NULL;
2413 }
2414
2415 /* Compute the costs of the SLP instances. */
2416 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2417 {
2418 gimple stmt = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2419 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
2420 vect_analyze_slp_cost (NULL, bb_vinfo,
2421 instance, TYPE_VECTOR_SUBPARTS (vectype));
2422 }
2423
2424 /* Cost model: check if the vectorization is worthwhile. */
2425 if (!unlimited_cost_model (NULL)
2426 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2427 {
2428 if (dump_enabled_p ())
2429 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2430 "not vectorized: vectorization is not "
2431 "profitable.\n");
2432
2433 destroy_bb_vec_info (bb_vinfo);
2434 return NULL;
2435 }
2436
2437 if (dump_enabled_p ())
2438 dump_printf_loc (MSG_NOTE, vect_location,
2439 "Basic block will be vectorized using SLP\n");
2440
2441 return bb_vinfo;
2442 }
2443
2444
2445 bb_vec_info
2446 vect_slp_analyze_bb (basic_block bb)
2447 {
2448 bb_vec_info bb_vinfo;
2449 int insns = 0;
2450 gimple_stmt_iterator gsi;
2451 unsigned int vector_sizes;
2452
2453 if (dump_enabled_p ())
2454 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2455
2456 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2457 {
2458 gimple stmt = gsi_stmt (gsi);
2459 if (!is_gimple_debug (stmt)
2460 && !gimple_nop_p (stmt)
2461 && gimple_code (stmt) != GIMPLE_LABEL)
2462 insns++;
2463 }
2464
2465 if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2466 {
2467 if (dump_enabled_p ())
2468 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2469 "not vectorized: too many instructions in "
2470 "basic block.\n");
2471
2472 return NULL;
2473 }
2474
2475 /* Autodetect first vector size we try. */
2476 current_vector_size = 0;
2477 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2478
2479 while (1)
2480 {
2481 bb_vinfo = vect_slp_analyze_bb_1 (bb);
2482 if (bb_vinfo)
2483 return bb_vinfo;
2484
2485 destroy_bb_vec_info (bb_vinfo);
2486
2487 vector_sizes &= ~current_vector_size;
2488 if (vector_sizes == 0
2489 || current_vector_size == 0)
2490 return NULL;
2491
2492 /* Try the next biggest vector size. */
2493 current_vector_size = 1 << floor_log2 (vector_sizes);
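/* For example, on a target advertising 32- and 16-byte vectors
   VECTOR_SIZES is 0x30; once the 32-byte attempt fails, the mask above
   becomes 0x10 and 1 << floor_log2 (0x10) selects 16 bytes as the next
   (largest remaining) size to try. */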
2494 if (dump_enabled_p ())
2495 dump_printf_loc (MSG_NOTE, vect_location,
2496 "***** Re-trying analysis with "
2497 "vector size %d\n", current_vector_size);
2498 }
2499 }
2500
2501
2502 /* SLP costs are calculated according to the SLP instance unrolling factor
2503 (i.e., the number of created vector stmts depends on the unrolling factor).
2504 However, the actual number of vector stmts for every SLP node depends on
2505 VF, which is set later in vect_analyze_operations (). Hence, SLP costs
2506 should be updated. In this function we assume that the inside costs
2507 calculated in vect_model_xxx_cost are linear in ncopies. */
2508
2509 void
2510 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
2511 {
2512 unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2513 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2514 slp_instance instance;
2515 stmt_vector_for_cost body_cost_vec;
2516 stmt_info_for_cost *si;
2517 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2518
2519 if (dump_enabled_p ())
2520 dump_printf_loc (MSG_NOTE, vect_location,
2521 "=== vect_update_slp_costs_according_to_vf ===\n");
2522
2523 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2524 {
2525 /* We assume that costs are linear in ncopies. */
2526 int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
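/* For example, with VF == 8 and an instance unrolling factor of 2,
   NCOPIES is 4: every vector stmt recorded for one unrolled copy is
   emitted four times, so its recorded cost is scaled by 4 below. */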
2527
2528 /* Record the instance's instructions in the target cost model.
2529 This was delayed until here because the count of instructions
2530 isn't known beforehand. */
2531 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2532
2533 FOR_EACH_VEC_ELT (body_cost_vec, j, si)
2534 (void) add_stmt_cost (data, si->count * ncopies, si->kind,
2535 vinfo_for_stmt (si->stmt), si->misalign,
2536 vect_body);
2537 }
2538 }
2539
2540
2541 /* For constant and loop invariant defs of SLP_NODE this function returns
2542 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2543 OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2544 scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to create.
2545 REDUC_INDEX is the index of the reduction operand in the statements, unless
2546 it is -1. */
2547
2548 static void
2549 vect_get_constant_vectors (tree op, slp_tree slp_node,
2550 vec<tree> *vec_oprnds,
2551 unsigned int op_num, unsigned int number_of_vectors,
2552 int reduc_index)
2553 {
2554 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2555 gimple stmt = stmts[0];
2556 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2557 unsigned nunits;
2558 tree vec_cst;
2559 tree *elts;
2560 unsigned j, number_of_places_left_in_vector;
2561 tree vector_type;
2562 tree vop;
2563 int group_size = stmts.length ();
2564 unsigned int vec_num, i;
2565 unsigned number_of_copies = 1;
2566 vec<tree> voprnds;
2567 voprnds.create (number_of_vectors);
2568 bool constant_p, is_store;
2569 tree neutral_op = NULL;
2570 enum tree_code code = gimple_expr_code (stmt);
2571 gimple def_stmt;
2572 struct loop *loop;
2573 gimple_seq ctor_seq = NULL;
2574
2575 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2576 && reduc_index != -1)
2577 {
2578 op_num = reduc_index - 1;
2579 op = gimple_op (stmt, reduc_index);
2580 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2581 we need either neutral operands or the original operands. See
2582 get_initial_def_for_reduction() for details. */
2583 switch (code)
2584 {
2585 case WIDEN_SUM_EXPR:
2586 case DOT_PROD_EXPR:
2587 case PLUS_EXPR:
2588 case MINUS_EXPR:
2589 case BIT_IOR_EXPR:
2590 case BIT_XOR_EXPR:
2591 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2592 neutral_op = build_real (TREE_TYPE (op), dconst0);
2593 else
2594 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2595
2596 break;
2597
2598 case MULT_EXPR:
2599 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2600 neutral_op = build_real (TREE_TYPE (op), dconst1);
2601 else
2602 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2603
2604 break;
2605
2606 case BIT_AND_EXPR:
2607 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2608 break;
2609
2610 /* For MIN/MAX we don't have an easy neutral operand but
2611 the initial values can be used fine here. Only for
2612 a reduction chain do we have to force a neutral element. */
2613 case MAX_EXPR:
2614 case MIN_EXPR:
2615 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2616 neutral_op = NULL;
2617 else
2618 {
2619 def_stmt = SSA_NAME_DEF_STMT (op);
2620 loop = (gimple_bb (stmt))->loop_father;
2621 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2622 loop_preheader_edge (loop));
2623 }
2624 break;
2625
2626 default:
2627 neutral_op = NULL;
2628 }
2629 }
2630
2631 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2632 {
2633 is_store = true;
2634 op = gimple_assign_rhs1 (stmt);
2635 }
2636 else
2637 is_store = false;
2638
2639 gcc_assert (op);
2640
2641 if (CONSTANT_CLASS_P (op))
2642 constant_p = true;
2643 else
2644 constant_p = false;
2645
2646 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2647 gcc_assert (vector_type);
2648 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2649
2650 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2651 created vectors. It is greater than 1 if unrolling is performed.
2652
2653 For example, we have two scalar operands, s1 and s2 (e.g., group of
2654 strided accesses of size two), while NUNITS is four (i.e., four scalars
2655 of this type can be packed in a vector). The output vector will contain
2656 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2657 will be 2).
2658
2659 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2660 containing the operands.
2661
2662 For example, NUNITS is four as before, and the group size is 8
2663 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2664 {s5, s6, s7, s8}. */
2665
2666 number_of_copies = least_common_multiple (nunits, group_size) / group_size;
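/* E.g., for GROUP_SIZE == 6 and NUNITS == 4 the least common multiple is
   12, so NUMBER_OF_COPIES == 2 and the twelve scalars are laid out as
   three vectors: {s1,s2,s3,s4} {s5,s6,s1,s2} {s3,s4,s5,s6}. */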
2667
2668 number_of_places_left_in_vector = nunits;
2669 elts = XALLOCAVEC (tree, nunits);
2670 bool place_after_defs = false;
2671 for (j = 0; j < number_of_copies; j++)
2672 {
2673 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2674 {
2675 if (is_store)
2676 op = gimple_assign_rhs1 (stmt);
2677 else
2678 {
2679 switch (code)
2680 {
2681 case COND_EXPR:
2682 if (op_num == 0 || op_num == 1)
2683 {
2684 tree cond = gimple_assign_rhs1 (stmt);
2685 op = TREE_OPERAND (cond, op_num);
2686 }
2687 else
2688 {
2689 if (op_num == 2)
2690 op = gimple_assign_rhs2 (stmt);
2691 else
2692 op = gimple_assign_rhs3 (stmt);
2693 }
2694 break;
2695
2696 case CALL_EXPR:
2697 op = gimple_call_arg (stmt, op_num);
2698 break;
2699
2700 case LSHIFT_EXPR:
2701 case RSHIFT_EXPR:
2702 case LROTATE_EXPR:
2703 case RROTATE_EXPR:
2704 op = gimple_op (stmt, op_num + 1);
2705 /* Unlike the other binary operators, shifts/rotates take a
2706 shift count of type int instead of the same type as the
2707 lhs, so make sure the scalar is of the right type if we
2708 are dealing with vectors of
2709 long long/long/short/char. */
2710 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2711 op = fold_convert (TREE_TYPE (vector_type), op);
2712 break;
2713
2714 default:
2715 op = gimple_op (stmt, op_num + 1);
2716 break;
2717 }
2718 }
2719
2720 if (reduc_index != -1)
2721 {
2722 loop = (gimple_bb (stmt))->loop_father;
2723 def_stmt = SSA_NAME_DEF_STMT (op);
2724
2725 gcc_assert (loop);
2726
2727 /* Get the def before the loop. In a reduction chain we have only
2728 one initial value. */
2729 if ((j != (number_of_copies - 1)
2730 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2731 && i != 0))
2732 && neutral_op)
2733 op = neutral_op;
2734 else
2735 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2736 loop_preheader_edge (loop));
2737 }
2738
2739 /* Create 'vect_ = {op0,op1,...,opn}'. */
2740 number_of_places_left_in_vector--;
2741 tree orig_op = op;
2742 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2743 {
2744 if (CONSTANT_CLASS_P (op))
2745 {
2746 op = fold_unary (VIEW_CONVERT_EXPR,
2747 TREE_TYPE (vector_type), op);
2748 gcc_assert (op && CONSTANT_CLASS_P (op));
2749 }
2750 else
2751 {
2752 tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
2753 gimple init_stmt;
2754 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
2755 init_stmt
2756 = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
2757 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2758 op = new_temp;
2759 }
2760 }
2761 elts[number_of_places_left_in_vector] = op;
2762 if (!CONSTANT_CLASS_P (op))
2763 constant_p = false;
2764 if (TREE_CODE (orig_op) == SSA_NAME
2765 && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
2766 && STMT_VINFO_BB_VINFO (stmt_vinfo)
2767 && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
2768 == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
2769 place_after_defs = true;
2770
2771 if (number_of_places_left_in_vector == 0)
2772 {
2773 number_of_places_left_in_vector = nunits;
2774
2775 if (constant_p)
2776 vec_cst = build_vector (vector_type, elts);
2777 else
2778 {
2779 vec<constructor_elt, va_gc> *v;
2780 unsigned k;
2781 vec_alloc (v, nunits);
2782 for (k = 0; k < nunits; ++k)
2783 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2784 vec_cst = build_constructor (vector_type, v);
2785 }
2786 tree init;
2787 gimple_stmt_iterator gsi;
2788 if (place_after_defs)
2789 {
2790 gsi = gsi_for_stmt
2791 (vect_find_last_scalar_stmt_in_slp (slp_node));
2792 init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
2793 }
2794 else
2795 init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
2796 if (ctor_seq != NULL)
2797 {
2798 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
2799 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2800 GSI_SAME_STMT);
2801 ctor_seq = NULL;
2802 }
2803 voprnds.quick_push (init);
2804 place_after_defs = false;
2805 }
2806 }
2807 }
2808
2809 /* Since the vectors are created in reverse order, we have to reverse
2810 them here. */
2811 vec_num = voprnds.length ();
2812 for (j = vec_num; j != 0; j--)
2813 {
2814 vop = voprnds[j - 1];
2815 vec_oprnds->quick_push (vop);
2816 }
2817
2818 voprnds.release ();
2819
2820 /* If VF is greater than the unrolling factor needed for the SLP group of
2821 stmts, the NUMBER_OF_VECTORS to be created is greater than
2822 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
2823 to replicate the vectors. */
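/* E.g., if four vectors are needed but only two were created, the loop
   below pushes either two copies of the neutral vector (for reductions
   with a neutral element) or the two existing vectors again. */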
2824 while (number_of_vectors > vec_oprnds->length ())
2825 {
2826 tree neutral_vec = NULL;
2827
2828 if (neutral_op)
2829 {
2830 if (!neutral_vec)
2831 neutral_vec = build_vector_from_val (vector_type, neutral_op);
2832
2833 vec_oprnds->quick_push (neutral_vec);
2834 }
2835 else
2836 {
2837 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2838 vec_oprnds->quick_push (vop);
2839 }
2840 }
2841 }
2842
2843
2844 /* Get vectorized definitions from SLP_NODE that contains corresponding
2845 vectorized def-stmts. */
2846
2847 static void
2848 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2849 {
2850 tree vec_oprnd;
2851 gimple vec_def_stmt;
2852 unsigned int i;
2853
2854 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2855
2856 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2857 {
2858 gcc_assert (vec_def_stmt);
2859 vec_oprnd = gimple_get_lhs (vec_def_stmt);
2860 vec_oprnds->quick_push (vec_oprnd);
2861 }
2862 }
2863
2864
2865 /* Get vectorized definitions for SLP_NODE.
2866 If the scalar definitions are loop invariants or constants, collect them and
2867 call vect_get_constant_vectors() to create vector stmts.
2868 Otherwise, the def-stmts must be already vectorized and the vectorized stmts
2869 must be stored in the corresponding child of SLP_NODE, and we call
2870 vect_get_slp_vect_defs () to retrieve them. */
2871
2872 void
2873 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2874 vec<vec<tree> > *vec_oprnds, int reduc_index)
2875 {
2876 gimple first_stmt;
2877 int number_of_vects = 0, i;
2878 unsigned int child_index = 0;
2879 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2880 slp_tree child = NULL;
2881 vec<tree> vec_defs;
2882 tree oprnd;
2883 bool vectorized_defs;
2884
2885 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
2886 FOR_EACH_VEC_ELT (ops, i, oprnd)
2887 {
2888 /* For each operand we check if it has vectorized definitions in a child
2889 node or whether we need to create them (for invariants and constants).
2890 We check if the LHS of the first stmt of the next child matches OPRND.
2891 If it does, we found the correct child. Otherwise, we call
2892 vect_get_constant_vectors () and do not advance CHILD_INDEX, so that
2893 this child node is checked again for the next operand. */
2894 vectorized_defs = false;
2895 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
2896 {
2897 child = SLP_TREE_CHILDREN (slp_node)[child_index];
2898
2899 /* We have to check both pattern and original def, if available. */
2900 if (child)
2901 {
2902 gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
2903 gimple related
2904 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
2905
2906 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
2907 || (related
2908 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
2909 {
2910 /* The number of vector defs is determined by the number of
2911 vector statements in the node from which we get those
2912 statements. */
2913 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
2914 vectorized_defs = true;
2915 child_index++;
2916 }
2917 }
2918 else
2919 child_index++;
2920 }
2921
2922 if (!vectorized_defs)
2923 {
2924 if (i == 0)
2925 {
2926 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
2927 /* The number of vector stmts was calculated according to LHS in
2928 vect_schedule_slp_instance (); fix it by replacing LHS with
2929 RHS, if necessary. See vect_get_smallest_scalar_type () for
2930 details. */
2931 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
2932 &rhs_size_unit);
2933 if (rhs_size_unit != lhs_size_unit)
2934 {
2935 number_of_vects *= rhs_size_unit;
2936 number_of_vects /= lhs_size_unit;
2937 }
2938 }
2939 }
2940
2941 /* Allocate memory for vectorized defs. */
2942 vec_defs = vNULL;
2943 vec_defs.create (number_of_vects);
2944
2945 /* For reduction defs we call vect_get_constant_vectors (), since we are
2946 looking for initial loop invariant values. */
2947 if (vectorized_defs && reduc_index == -1)
2948 /* The defs are already vectorized. */
2949 vect_get_slp_vect_defs (child, &vec_defs);
2950 else
2951 /* Build vectors from scalar defs. */
2952 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
2953 number_of_vects, reduc_index);
2954
2955 vec_oprnds->quick_push (vec_defs);
2956
2957 /* For reductions, we only need initial values. */
2958 if (reduc_index != -1)
2959 return;
2960 }
2961 }
2962
2963
2964 /* Create NCOPIES permutation statements using the vector mask MASK and two
2965 input vectors placed in DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for
2966 the first copy, shifting by STRIDE elements of DR_CHAIN for every further
2967 copy.
2968 (STRIDE is the number of vectorized stmts for NODE divided by the number of
2969 copies.)
2970 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE where
2971 the created stmts must be inserted. */
2972
2973 static inline void
2974 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
2975 tree mask, int first_vec_indx, int second_vec_indx,
2976 gimple_stmt_iterator *gsi, slp_tree node,
2977 tree vectype, vec<tree> dr_chain,
2978 int ncopies, int vect_stmts_counter)
2979 {
2980 tree perm_dest;
2981 gimple perm_stmt = NULL;
2982 stmt_vec_info next_stmt_info;
2983 int i, stride;
2984 tree first_vec, second_vec, data_ref;
2985
2986 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
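/* E.g., with four vector stmts for NODE and NCOPIES == 2, STRIDE is 2:
   copy 0 fills slot VECT_STMTS_COUNTER and copy 1 fills slot
   VECT_STMTS_COUNTER + 2, with the DR_CHAIN indexes advancing by 2
   between the copies. */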
2987
2988 /* Initialize the vect stmts of NODE to properly insert the generated
2989 stmts later. */
2990 for (i = SLP_TREE_VEC_STMTS (node).length ();
2991 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
2992 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
2993
2994 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
2995 for (i = 0; i < ncopies; i++)
2996 {
2997 first_vec = dr_chain[first_vec_indx];
2998 second_vec = dr_chain[second_vec_indx];
2999
3000 /* Generate the permute statement. */
3001 perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
3002 first_vec, second_vec, mask);
3003 data_ref = make_ssa_name (perm_dest, perm_stmt);
3004 gimple_set_lhs (perm_stmt, data_ref);
3005 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3006
3007 /* Store the vector statement in NODE. */
3008 SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
3009
3010 first_vec_indx += stride;
3011 second_vec_indx += stride;
3012 }
3013
3014 /* Mark the scalar stmt as vectorized. */
3015 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
3016 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
3017 }
3018
3019
3020 /* Given FIRST_MASK_ELEMENT, the mask element in element representation,
3021 return in CURRENT_MASK_ELEMENT its equivalent in target-specific
3022 representation. Check that the mask is valid and return FALSE if not.
3023 Set NEED_NEXT_VECTOR to TRUE if the permutation requires moving to the
3024 next vector, i.e., if the current first vector is no longer needed. */
3025
3026 static bool
3027 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
3028 int mask_nunits, bool only_one_vec, int index,
3029 unsigned char *mask, int *current_mask_element,
3030 bool *need_next_vector, int *number_of_mask_fixes,
3031 bool *mask_fixed, bool *needs_first_vector)
3032 {
3033 int i;
3034
3035 /* Convert to target specific representation. */
3036 *current_mask_element = first_mask_element + m;
3037 /* Adjust the value in case it's a mask for second and third vectors. */
3038 *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
3039
3040 if (*current_mask_element < 0)
3041 {
3042 if (dump_enabled_p ())
3043 {
3044 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3045 "permutation requires past vector ");
3046 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3047 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3048 }
3049 return false;
3050 }
3051
3052 if (*current_mask_element < mask_nunits)
3053 *needs_first_vector = true;
3054
3055 /* We have only one input vector to permute but the mask accesses values in
3056 the next vector as well. */
3057 if (only_one_vec && *current_mask_element >= mask_nunits)
3058 {
3059 if (dump_enabled_p ())
3060 {
3061 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3062 "permutation requires at least two vectors ");
3063 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3064 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3065 }
3066
3067 return false;
3068 }
3069
3070 /* The mask requires the next vector. */
3071 while (*current_mask_element >= mask_nunits * 2)
3072 {
3073 if (*needs_first_vector || *mask_fixed)
3074 {
3075 /* We either need the first vector too or have already moved to the
3076 next vector. In both cases, this permutation needs three
3077 vectors. */
3078 if (dump_enabled_p ())
3079 {
3080 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3081 "permutation requires at "
3082 "least three vectors ");
3083 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3084 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3085 }
3086
3087 return false;
3088 }
3089
3090 /* We move to the next vector, dropping the first one and working with
3091 the second and the third - we need to adjust the values of the mask
3092 accordingly. */
3093 *current_mask_element -= mask_nunits * *number_of_mask_fixes;
3094
3095 for (i = 0; i < index; i++)
3096 mask[i] -= mask_nunits * *number_of_mask_fixes;
3097
3098 (*number_of_mask_fixes)++;
3099 *mask_fixed = true;
3100 }
3101
3102 *need_next_vector = *mask_fixed;
3103
3104 /* This was the last element of this mask. Start a new one. */
3105 if (index == mask_nunits - 1)
3106 {
3107 *number_of_mask_fixes = 1;
3108 *mask_fixed = false;
3109 *needs_first_vector = false;
3110 }
3111
3112 return true;
3113 }
3114
3115
3116 /* Generate vector permute statements from a list of loads in DR_CHAIN.
3117 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
3118 permute statements for the SLP node NODE of the SLP instance
3119 SLP_NODE_INSTANCE. */
3120
3121 bool
3122 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
3123 gimple_stmt_iterator *gsi, int vf,
3124 slp_instance slp_node_instance, bool analyze_only)
3125 {
3126 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3127 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3128 tree mask_element_type = NULL_TREE, mask_type;
3129 int i, j, k, nunits, vec_index = 0, scalar_index;
3130 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3131 gimple next_scalar_stmt;
3132 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
3133 int first_mask_element;
3134 int index, unroll_factor, current_mask_element, ncopies;
3135 unsigned char *mask;
3136 bool only_one_vec = false, need_next_vector = false;
3137 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
3138 int number_of_mask_fixes = 1;
3139 bool mask_fixed = false;
3140 bool needs_first_vector = false;
3141 machine_mode mode;
3142
3143 mode = TYPE_MODE (vectype);
3144
3145 if (!can_vec_perm_p (mode, false, NULL))
3146 {
3147 if (dump_enabled_p ())
3148 {
3149 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3150 "no vect permute for ");
3151 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3152 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3153 }
3154 return false;
3155 }
3156
3157 /* The generic VEC_PERM_EXPR code always uses an integral type of the
3158 same size as the vector element being permuted. */
3159 mask_element_type = lang_hooks.types.type_for_mode
3160 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
3161 mask_type = get_vectype_for_scalar_type (mask_element_type);
3162 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3163 mask = XALLOCAVEC (unsigned char, nunits);
3164 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3165
3166 /* The number of vector stmts to generate based only on SLP_NODE_INSTANCE
3167 unrolling factor. */
3168 orig_vec_stmts_num = group_size *
3169 SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
3170 if (orig_vec_stmts_num == 1)
3171 only_one_vec = true;
3172
3173 /* The number of copies is determined by the final vectorization factor
3174 relative to the SLP_NODE_INSTANCE unrolling factor. */
3175 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3176
3177 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3178 return false;
3179
3180 /* Generate permutation masks for every NODE. Number of masks for each NODE
3181 is equal to GROUP_SIZE.
3182 E.g., we have a group of three nodes with three loads from the same
3183 location in each node, and the vector size is 4. I.e., we have an
3184 a0b0c0a1b1c1... sequence and we need to create the following vectors:
3185 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3186 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3187 ...
3188
3189 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3190 The last mask is illegal since we assume two operands for the permute
3191 operation, and the mask element values can't be outside that range.
3192 Hence, the last mask must be converted into {2,5,5,5}.
3193 For the first two permutations we need the first and the second input
3194 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3195 we need the second and the third vectors: {b1,c1,a2,b2} and
3196 {c2,a3,b3,c3}. */
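/* Concretely, in the example above the node for the a's has load
   permutation {0,0,0}, GROUP_SIZE is 3 and the unrolling factor is 4,
   so the loop below produces the flat element sequence
   0,0,0,3,3,3,6,6,6,9,9,9, which is chunked into masks of NUNITS == 4
   elements and range-adjusted by vect_get_mask_element, yielding the
   masks shown. */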
3197
3198 {
3199 scalar_index = 0;
3200 index = 0;
3201 vect_stmts_counter = 0;
3202 vec_index = 0;
3203 first_vec_index = vec_index++;
3204 if (only_one_vec)
3205 second_vec_index = first_vec_index;
3206 else
3207 second_vec_index = vec_index++;
3208
3209 for (j = 0; j < unroll_factor; j++)
3210 {
3211 for (k = 0; k < group_size; k++)
3212 {
3213 i = SLP_TREE_LOAD_PERMUTATION (node)[k];
3214 first_mask_element = i + j * group_size;
3215 if (!vect_get_mask_element (stmt, first_mask_element, 0,
3216 nunits, only_one_vec, index,
3217 mask, &current_mask_element,
3218 &need_next_vector,
3219 &number_of_mask_fixes, &mask_fixed,
3220 &needs_first_vector))
3221 return false;
3222 gcc_assert (current_mask_element >= 0
3223 && current_mask_element < 2 * nunits);
3224 mask[index++] = current_mask_element;
3225
3226 if (index == nunits)
3227 {
3228 index = 0;
3229 if (!can_vec_perm_p (mode, false, mask))
3230 {
3231 if (dump_enabled_p ())
3232 {
3233 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3234 vect_location,
3235 "unsupported vect permute { ");
3236 for (i = 0; i < nunits; ++i)
3237 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
3238 mask[i]);
3239 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3240 }
3241 return false;
3242 }
3243
3244 if (!analyze_only)
3245 {
3246 int l;
3247 tree mask_vec, *mask_elts;
3248 mask_elts = XALLOCAVEC (tree, nunits);
3249 for (l = 0; l < nunits; ++l)
3250 mask_elts[l] = build_int_cst (mask_element_type,
3251 mask[l]);
3252 mask_vec = build_vector (mask_type, mask_elts);
3253
3254 if (need_next_vector)
3255 {
3256 first_vec_index = second_vec_index;
3257 second_vec_index = vec_index;
3258 }
3259
3260 next_scalar_stmt
3261 = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
3262
3263 vect_create_mask_and_perm (stmt, next_scalar_stmt,
3264 mask_vec, first_vec_index, second_vec_index,
3265 gsi, node, vectype, dr_chain,
3266 ncopies, vect_stmts_counter++);
3267 }
3268 }
3269 }
3270 }
3271 }
3272
3273 return true;
3274 }
3275
3276
3277
3278 /* Vectorize SLP instance tree in postorder. */
3279
3280 static bool
3281 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3282 unsigned int vectorization_factor)
3283 {
3284 gimple stmt;
3285 bool grouped_store, is_store;
3286 gimple_stmt_iterator si;
3287 stmt_vec_info stmt_info;
3288 unsigned int vec_stmts_size, nunits, group_size;
3289 tree vectype;
3290 int i;
3291 slp_tree child;
3292
3293 if (!node)
3294 return false;
3295
3296 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3297 vect_schedule_slp_instance (child, instance, vectorization_factor);
3298
3299 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3300 stmt_info = vinfo_for_stmt (stmt);
3301
3302 /* VECTYPE is the type of the destination. */
3303 vectype = STMT_VINFO_VECTYPE (stmt_info);
3304 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3305 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3306
3307 /* For each SLP instance calculate the number of vector stmts to be created
3308 for the scalar stmts in each node of the SLP tree. The number of vector
3309 elements processed in one vector iteration is the number of scalar
3310 elements in one scalar iteration (GROUP_SIZE) multiplied by VF, divided
3311 by the vector size. */
3312 vec_stmts_size = (vectorization_factor * group_size) / nunits;
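/* E.g., with VF == 4, GROUP_SIZE == 2 and a 4-element vector type,
   (4 * 2) / 4 == 2 vector stmts are created for this node. */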
3313
3314 if (!SLP_TREE_VEC_STMTS (node).exists ())
3315 {
3316 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3317 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3318 }
3319
3320 if (dump_enabled_p ())
3321 {
3322 dump_printf_loc (MSG_NOTE, vect_location,
3323 "------>vectorizing SLP node starting from: ");
3324 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3325 dump_printf (MSG_NOTE, "\n");
3326 }
3327
3328 /* Vectorized stmts go before the last scalar stmt, which is where
3329 all uses are ready. */
3330 si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));
3331
3332 /* Mark the first element of the reduction chain as a reduction to properly
3333 transform the node. In the analysis phase only the last element of the
3334 chain is marked as a reduction. */
3335 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3336 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3337 {
3338 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3339 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3340 }
3341
3342 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3343 return is_store;
3344 }
3345
3346 /* Replace the scalar calls in SLP node NODE with assignments of their lhs
3347 to zero. For loop vectorization this is done in vectorizable_call, but
3348 for SLP it needs to be deferred until the end of vect_schedule_slp,
3349 because multiple SLP instances may refer to the same scalar stmt. */
3350
3351 static void
3352 vect_remove_slp_scalar_calls (slp_tree node)
3353 {
3354 gimple stmt, new_stmt;
3355 gimple_stmt_iterator gsi;
3356 int i;
3357 slp_tree child;
3358 tree lhs;
3359 stmt_vec_info stmt_info;
3360
3361 if (!node)
3362 return;
3363
3364 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3365 vect_remove_slp_scalar_calls (child);
3366
3367 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3368 {
3369 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3370 continue;
3371 stmt_info = vinfo_for_stmt (stmt);
3372 if (stmt_info == NULL
3373 || is_pattern_stmt_p (stmt_info)
3374 || !PURE_SLP_STMT (stmt_info))
3375 continue;
3376 lhs = gimple_call_lhs (stmt);
3377 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3378 set_vinfo_for_stmt (new_stmt, stmt_info);
3379 set_vinfo_for_stmt (stmt, NULL);
3380 STMT_VINFO_STMT (stmt_info) = new_stmt;
3381 gsi = gsi_for_stmt (stmt);
3382 gsi_replace (&gsi, new_stmt, false);
3383 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3384 }
3385 }
3386
3387 /* Generate vector code for all SLP instances in the loop/basic block. */
3388
3389 bool
3390 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3391 {
3392 vec<slp_instance> slp_instances;
3393 slp_instance instance;
3394 unsigned int i, vf;
3395 bool is_store = false;
3396
3397 if (loop_vinfo)
3398 {
3399 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3400 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3401 }
3402 else
3403 {
3404 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3405 vf = 1;
3406 }
3407
3408 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3409 {
3410 /* Schedule the tree of INSTANCE. */
3411 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3412 instance, vf);
3413 if (dump_enabled_p ())
3414 dump_printf_loc (MSG_NOTE, vect_location,
3415 "vectorizing stmts using SLP.\n");
3416 }
3417
3418 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3419 {
3420 slp_tree root = SLP_INSTANCE_TREE (instance);
3421 gimple store;
3422 unsigned int j;
3423 gimple_stmt_iterator gsi;
3424
3425 /* Remove scalar call stmts. Do not do this for basic-block
3426 vectorization as not all uses may be vectorized.
3427 ??? Why should this be necessary? DCE should be able to
3428 remove the stmts itself.
3429 ??? For BB vectorization we can as well remove scalar
3430 stmts starting from the SLP tree root if they have no
3431 uses. */
3432 if (loop_vinfo)
3433 vect_remove_slp_scalar_calls (root);
3434
3435 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3436 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3437 {
3438 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3439 break;
3440
3441 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3442 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3443 /* Free the attached stmt_vec_info and remove the stmt. */
3444 gsi = gsi_for_stmt (store);
3445 unlink_stmt_vdef (store);
3446 gsi_remove (&gsi, true);
3447 release_defs (store);
3448 free_stmt_vec_info (store);
3449 }
3450 }
3451
3452 return is_store;
3453 }
3454
3455
3456 /* Vectorize the basic block. */
3457
3458 void
3459 vect_slp_transform_bb (basic_block bb)
3460 {
3461 bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3462 gimple_stmt_iterator si;
3463
3464 gcc_assert (bb_vinfo);
3465
3466 if (dump_enabled_p ())
3467 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3468
3469 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3470 {
3471 gimple stmt = gsi_stmt (si);
3472 stmt_vec_info stmt_info;
3473
3474 if (dump_enabled_p ())
3475 {
3476 dump_printf_loc (MSG_NOTE, vect_location,
3477 "------>SLPing statement: ");
3478 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3479 dump_printf (MSG_NOTE, "\n");
3480 }
3481
3482 stmt_info = vinfo_for_stmt (stmt);
3483 gcc_assert (stmt_info);
3484
3485 /* Schedule all the SLP instances when the first SLP stmt is reached. */
3486 if (STMT_SLP_TYPE (stmt_info))
3487 {
3488 vect_schedule_slp (NULL, bb_vinfo);
3489 break;
3490 }
3491 }
3492
3493 if (dump_enabled_p ())
3494 dump_printf_loc (MSG_NOTE, vect_location,
3495 "BASIC BLOCK VECTORIZED\n");
3496
3497 destroy_bb_vec_info (bb_vinfo);
3498 }