/* gcc/tree-vect-patterns.c  */
1 /* Analysis Utilities for Loop Vectorization.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
3 Contributed by Dorit Nuzman <dorit@il.ibm.com>
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "ggc.h"
26 #include "tree.h"
27 #include "target.h"
28 #include "basic-block.h"
29 #include "gimple-pretty-print.h"
30 #include "tree-flow.h"
31 #include "tree-dump.h"
32 #include "cfgloop.h"
33 #include "expr.h"
34 #include "optabs.h"
35 #include "params.h"
36 #include "tree-data-ref.h"
37 #include "tree-vectorizer.h"
38 #include "recog.h"
39 #include "toplev.h"
40
41 /* Function prototypes */
42 static void vect_pattern_recog_1
43 (gimple (* ) (gimple, tree *, tree *), gimple_stmt_iterator);
44 static bool widened_name_p (tree, gimple, tree *, gimple *);
45
46 /* Pattern recognition functions */
47 static gimple vect_recog_widen_sum_pattern (gimple, tree *, tree *);
48 static gimple vect_recog_widen_mult_pattern (gimple, tree *, tree *);
49 static gimple vect_recog_dot_prod_pattern (gimple, tree *, tree *);
50 static gimple vect_recog_pow_pattern (gimple, tree *, tree *);
51 static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
52 vect_recog_widen_mult_pattern,
53 vect_recog_widen_sum_pattern,
54 vect_recog_dot_prod_pattern,
55 vect_recog_pow_pattern};
56
57
58 /* Function widened_name_p
59
60 Check whether NAME, an ssa-name used in USE_STMT,
61 is a result of a type-promotion, such that:
62 DEF_STMT: NAME = NOP (name0)
63 where the type of name0 (HALF_TYPE) is smaller than the type of NAME.
64 */
65
66 static bool
67 widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt)
68 {
69 tree dummy;
70 gimple dummy_gimple;
71 loop_vec_info loop_vinfo;
72 stmt_vec_info stmt_vinfo;
73 tree type = TREE_TYPE (name);
74 tree oprnd0;
75 enum vect_def_type dt;
76 tree def;
77
78 stmt_vinfo = vinfo_for_stmt (use_stmt);
79 loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
80
81 if (!vect_is_simple_use (name, loop_vinfo, NULL, def_stmt, &def, &dt))
82 return false;
83
84 if (dt != vect_internal_def
85 && dt != vect_external_def && dt != vect_constant_def)
86 return false;
87
88 if (! *def_stmt)
89 return false;
90
91 if (!is_gimple_assign (*def_stmt))
92 return false;
93
94 if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR)
95 return false;
96
97 oprnd0 = gimple_assign_rhs1 (*def_stmt);
98
99 *half_type = TREE_TYPE (oprnd0);
100 if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type)
101 || (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type))
102 || (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2)))
103 return false;
104
105 if (!vect_is_simple_use (oprnd0, loop_vinfo, NULL, &dummy_gimple, &dummy,
106 &dt))
107 return false;
108
109 return true;
110 }
111
112 /* Helper to return a new temporary for pattern of TYPE for STMT. If STMT
113 is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var. */
114
115 static tree
116 vect_recog_temp_ssa_var (tree type, gimple stmt)
117 {
118 tree var = create_tmp_var (type, "patt");
119
120 add_referenced_var (var);
121 var = make_ssa_name (var, stmt);
122 return var;
123 }
124
125 /* Function vect_recog_dot_prod_pattern
126
127 Try to find the following pattern:
128
129 type x_t, y_t;
130 TYPE1 prod;
131 TYPE2 sum = init;
132 loop:
133 sum_0 = phi <init, sum_1>
134 S1 x_t = ...
135 S2 y_t = ...
136 S3 x_T = (TYPE1) x_t;
137 S4 y_T = (TYPE1) y_t;
138 S5 prod = x_T * y_T;
139 [S6 prod = (TYPE2) prod; #optional]
140 S7 sum_1 = prod + sum_0;
141
142 where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is the
143 same size of 'TYPE1' or bigger. This is a special case of a reduction
144 computation.
145
146 Input:
147
148 * LAST_STMT: A stmt from which the pattern search begins. In the example,
149 when this function is called with S7, the pattern {S3,S4,S5,S6,S7} will be
150 detected.
151
152 Output:
153
154 * TYPE_IN: The type of the input arguments to the pattern.
155
156 * TYPE_OUT: The type of the output of this pattern.
157
158 * Return value: A new stmt that will be used to replace the sequence of
159 stmts that constitute the pattern. In this case it will be:
160 WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>
161
162 Note: The dot-prod idiom is a widening reduction pattern that is
163 vectorized without preserving all the intermediate results. It
164 produces only N/2 (widened) results (by summing up pairs of
165 intermediate results) rather than all N results. Therefore, we
166 cannot allow this pattern when we want to get all the results and in
167 the correct order (as is the case when this computation is in an
168 inner-loop nested in an outer-loop that us being vectorized). */
169
170 static gimple
171 vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
172 {
173 gimple stmt;
174 tree oprnd0, oprnd1;
175 tree oprnd00, oprnd01;
176 stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
177 tree type, half_type;
178 gimple pattern_stmt;
179 tree prod_type;
180 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
181 struct loop *loop = LOOP_VINFO_LOOP (loop_info);
182 tree var, rhs;
183
184 if (!is_gimple_assign (last_stmt))
185 return NULL;
186
187 type = gimple_expr_type (last_stmt);
188
189 /* Look for the following pattern
190 DX = (TYPE1) X;
191 DY = (TYPE1) Y;
192 DPROD = DX * DY;
193 DDPROD = (TYPE2) DPROD;
194 sum_1 = DDPROD + sum_0;
195 In which
196 - DX is double the size of X
197 - DY is double the size of Y
198 - DX, DY, DPROD all have the same type
199 - sum is the same size of DPROD or bigger
200 - sum has been recognized as a reduction variable.
201
202 This is equivalent to:
203 DPROD = X w* Y; #widen mult
204 sum_1 = DPROD w+ sum_0; #widen summation
205 or
206 DPROD = X w* Y; #widen mult
207 sum_1 = DPROD + sum_0; #summation
208 */
209
210 /* Starting from LAST_STMT, follow the defs of its uses in search
211 of the above pattern. */
212
213 if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
214 return NULL;
215
216 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
217 {
218 /* Has been detected as widening-summation? */
219
220 stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
221 type = gimple_expr_type (stmt);
222 if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
223 return NULL;
224 oprnd0 = gimple_assign_rhs1 (stmt);
225 oprnd1 = gimple_assign_rhs2 (stmt);
226 half_type = TREE_TYPE (oprnd0);
227 }
228 else
229 {
230 gimple def_stmt;
231
232 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
233 return NULL;
234 oprnd0 = gimple_assign_rhs1 (last_stmt);
235 oprnd1 = gimple_assign_rhs2 (last_stmt);
236 if (!types_compatible_p (TREE_TYPE (oprnd0), type)
237 || !types_compatible_p (TREE_TYPE (oprnd1), type))
238 return NULL;
239 stmt = last_stmt;
240
241 if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt))
242 {
243 stmt = def_stmt;
244 oprnd0 = gimple_assign_rhs1 (stmt);
245 }
246 else
247 half_type = type;
248 }
249
250 /* So far so good. Since last_stmt was detected as a (summation) reduction,
251 we know that oprnd1 is the reduction variable (defined by a loop-header
252 phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
253 Left to check that oprnd0 is defined by a (widen_)mult_expr */
254
255 prod_type = half_type;
256 stmt = SSA_NAME_DEF_STMT (oprnd0);
257 /* FORNOW. Can continue analyzing the def-use chain when this stmt in a phi
258 inside the loop (in case we are analyzing an outer-loop). */
259 if (!is_gimple_assign (stmt))
260 return NULL;
261 stmt_vinfo = vinfo_for_stmt (stmt);
262 gcc_assert (stmt_vinfo);
263 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
264 return NULL;
265 if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
266 return NULL;
267 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
268 {
269 /* Has been detected as a widening multiplication? */
270
271 stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
272 if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
273 return NULL;
274 stmt_vinfo = vinfo_for_stmt (stmt);
275 gcc_assert (stmt_vinfo);
276 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def);
277 oprnd00 = gimple_assign_rhs1 (stmt);
278 oprnd01 = gimple_assign_rhs2 (stmt);
279 }
280 else
281 {
282 tree half_type0, half_type1;
283 gimple def_stmt;
284 tree oprnd0, oprnd1;
285
286 oprnd0 = gimple_assign_rhs1 (stmt);
287 oprnd1 = gimple_assign_rhs2 (stmt);
288 if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type)
289 || !types_compatible_p (TREE_TYPE (oprnd1), prod_type))
290 return NULL;
291 if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt))
292 return NULL;
293 oprnd00 = gimple_assign_rhs1 (def_stmt);
294 if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt))
295 return NULL;
296 oprnd01 = gimple_assign_rhs1 (def_stmt);
297 if (!types_compatible_p (half_type0, half_type1))
298 return NULL;
299 if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
300 return NULL;
301 }
302
303 half_type = TREE_TYPE (oprnd00);
304 *type_in = half_type;
305 *type_out = type;
306
307 /* Pattern detected. Create a stmt to be used to replace the pattern: */
308 var = vect_recog_temp_ssa_var (type, NULL);
309 rhs = build3 (DOT_PROD_EXPR, type, oprnd00, oprnd01, oprnd1),
310 pattern_stmt = gimple_build_assign (var, rhs);
311
312 if (vect_print_dump_info (REPORT_DETAILS))
313 {
314 fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: ");
315 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
316 }
317
318 /* We don't allow changing the order of the computation in the inner-loop
319 when doing outer-loop vectorization. */
320 gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));
321
322 return pattern_stmt;
323 }
324
325 /* Function vect_recog_widen_mult_pattern
326
327 Try to find the following pattern:
328
329 type a_t, b_t;
330 TYPE a_T, b_T, prod_T;
331
332 S1 a_t = ;
333 S2 b_t = ;
334 S3 a_T = (TYPE) a_t;
335 S4 b_T = (TYPE) b_t;
336 S5 prod_T = a_T * b_T;
337
338 where type 'TYPE' is at least double the size of type 'type'.
339
340 Input:
341
342 * LAST_STMT: A stmt from which the pattern search begins. In the example,
343 when this function is called with S5, the pattern {S3,S4,S5} is be detected.
344
345 Output:
346
347 * TYPE_IN: The type of the input arguments to the pattern.
348
349 * TYPE_OUT: The type of the output of this pattern.
350
351 * Return value: A new stmt that will be used to replace the sequence of
352 stmts that constitute the pattern. In this case it will be:
353 WIDEN_MULT <a_t, b_t>
354 */
355
356 static gimple
357 vect_recog_widen_mult_pattern (gimple last_stmt,
358 tree *type_in,
359 tree *type_out)
360 {
361 gimple def_stmt0, def_stmt1;
362 tree oprnd0, oprnd1;
363 tree type, half_type0, half_type1;
364 gimple pattern_stmt;
365 tree vectype, vectype_out;
366 tree dummy;
367 tree var;
368 enum tree_code dummy_code;
369 int dummy_int;
370 VEC (tree, heap) *dummy_vec;
371
372 if (!is_gimple_assign (last_stmt))
373 return NULL;
374
375 type = gimple_expr_type (last_stmt);
376
377 /* Starting from LAST_STMT, follow the defs of its uses in search
378 of the above pattern. */
379
380 if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
381 return NULL;
382
383 oprnd0 = gimple_assign_rhs1 (last_stmt);
384 oprnd1 = gimple_assign_rhs2 (last_stmt);
385 if (!types_compatible_p (TREE_TYPE (oprnd0), type)
386 || !types_compatible_p (TREE_TYPE (oprnd1), type))
387 return NULL;
388
389 /* Check argument 0 */
390 if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0))
391 return NULL;
392 oprnd0 = gimple_assign_rhs1 (def_stmt0);
393
394 /* Check argument 1 */
395 if (!widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1))
396 return NULL;
397 oprnd1 = gimple_assign_rhs1 (def_stmt1);
398
399 if (!types_compatible_p (half_type0, half_type1))
400 return NULL;
401
402 /* Pattern detected. */
403 if (vect_print_dump_info (REPORT_DETAILS))
404 fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");
405
406 /* Check target support */
407 vectype = get_vectype_for_scalar_type (half_type0);
408 vectype_out = get_vectype_for_scalar_type (type);
409 if (!vectype
410 || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt,
411 vectype_out, vectype,
412 &dummy, &dummy, &dummy_code,
413 &dummy_code, &dummy_int, &dummy_vec))
414 return NULL;
415
416 *type_in = vectype;
417 *type_out = vectype_out;
418
419 /* Pattern supported. Create a stmt to be used to replace the pattern: */
420 var = vect_recog_temp_ssa_var (type, NULL);
421 pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
422 oprnd1);
423 SSA_NAME_DEF_STMT (var) = pattern_stmt;
424
425 if (vect_print_dump_info (REPORT_DETAILS))
426 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
427
428 return pattern_stmt;
429 }
430
431
432 /* Function vect_recog_pow_pattern
433
434 Try to find the following pattern:
435
436 x = POW (y, N);
437
438 with POW being one of pow, powf, powi, powif and N being
439 either 2 or 0.5.
440
441 Input:
442
443 * LAST_STMT: A stmt from which the pattern search begins.
444
445 Output:
446
447 * TYPE_IN: The type of the input arguments to the pattern.
448
449 * TYPE_OUT: The type of the output of this pattern.
450
451 * Return value: A new stmt that will be used to replace the sequence of
452 stmts that constitute the pattern. In this case it will be:
453 x = x * x
454 or
455 x = sqrt (x)
456 */
457
458 static gimple
459 vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
460 {
461 tree fn, base, exp = NULL;
462 gimple stmt;
463 tree var;
464
465 if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
466 return NULL;
467
468 fn = gimple_call_fndecl (last_stmt);
469 switch (DECL_FUNCTION_CODE (fn))
470 {
471 case BUILT_IN_POWIF:
472 case BUILT_IN_POWI:
473 case BUILT_IN_POWF:
474 case BUILT_IN_POW:
475 base = gimple_call_arg (last_stmt, 0);
476 exp = gimple_call_arg (last_stmt, 1);
477 if (TREE_CODE (exp) != REAL_CST
478 && TREE_CODE (exp) != INTEGER_CST)
479 return NULL;
480 break;
481
482 default:
483 return NULL;
484 }
485
486 /* We now have a pow or powi builtin function call with a constant
487 exponent. */
488
489 *type_out = NULL_TREE;
490
491 /* Catch squaring. */
492 if ((host_integerp (exp, 0)
493 && tree_low_cst (exp, 0) == 2)
494 || (TREE_CODE (exp) == REAL_CST
495 && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
496 {
497 *type_in = TREE_TYPE (base);
498
499 var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
500 stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
501 SSA_NAME_DEF_STMT (var) = stmt;
502 return stmt;
503 }
504
505 /* Catch square root. */
506 if (TREE_CODE (exp) == REAL_CST
507 && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
508 {
509 tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
510 *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
511 if (*type_in)
512 {
513 gimple stmt = gimple_build_call (newfn, 1, base);
514 if (vectorizable_function (stmt, *type_in, *type_in)
515 != NULL_TREE)
516 {
517 var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
518 gimple_call_set_lhs (stmt, var);
519 return stmt;
520 }
521 }
522 }
523
524 return NULL;
525 }
526
527
528 /* Function vect_recog_widen_sum_pattern
529
530 Try to find the following pattern:
531
532 type x_t;
533 TYPE x_T, sum = init;
534 loop:
535 sum_0 = phi <init, sum_1>
536 S1 x_t = *p;
537 S2 x_T = (TYPE) x_t;
538 S3 sum_1 = x_T + sum_0;
539
540 where type 'TYPE' is at least double the size of type 'type', i.e - we're
541 summing elements of type 'type' into an accumulator of type 'TYPE'. This is
542 a special case of a reduction computation.
543
544 Input:
545
546 * LAST_STMT: A stmt from which the pattern search begins. In the example,
547 when this function is called with S3, the pattern {S2,S3} will be detected.
548
549 Output:
550
551 * TYPE_IN: The type of the input arguments to the pattern.
552
553 * TYPE_OUT: The type of the output of this pattern.
554
555 * Return value: A new stmt that will be used to replace the sequence of
556 stmts that constitute the pattern. In this case it will be:
557 WIDEN_SUM <x_t, sum_0>
558
559 Note: The widening-sum idiom is a widening reduction pattern that is
560 vectorized without preserving all the intermediate results. It
561 produces only N/2 (widened) results (by summing up pairs of
562 intermediate results) rather than all N results. Therefore, we
563 cannot allow this pattern when we want to get all the results and in
564 the correct order (as is the case when this computation is in an
565 inner-loop nested in an outer-loop that us being vectorized). */
566
567 static gimple
568 vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out)
569 {
570 gimple stmt;
571 tree oprnd0, oprnd1;
572 stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
573 tree type, half_type;
574 gimple pattern_stmt;
575 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
576 struct loop *loop = LOOP_VINFO_LOOP (loop_info);
577 tree var;
578
579 if (!is_gimple_assign (last_stmt))
580 return NULL;
581
582 type = gimple_expr_type (last_stmt);
583
584 /* Look for the following pattern
585 DX = (TYPE) X;
586 sum_1 = DX + sum_0;
587 In which DX is at least double the size of X, and sum_1 has been
588 recognized as a reduction variable.
589 */
590
591 /* Starting from LAST_STMT, follow the defs of its uses in search
592 of the above pattern. */
593
594 if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
595 return NULL;
596
597 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
598 return NULL;
599
600 oprnd0 = gimple_assign_rhs1 (last_stmt);
601 oprnd1 = gimple_assign_rhs2 (last_stmt);
602 if (!types_compatible_p (TREE_TYPE (oprnd0), type)
603 || !types_compatible_p (TREE_TYPE (oprnd1), type))
604 return NULL;
605
606 /* So far so good. Since last_stmt was detected as a (summation) reduction,
607 we know that oprnd1 is the reduction variable (defined by a loop-header
608 phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
609 Left to check that oprnd0 is defined by a cast from type 'type' to type
610 'TYPE'. */
611
612 if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt))
613 return NULL;
614
615 oprnd0 = gimple_assign_rhs1 (stmt);
616 *type_in = half_type;
617 *type_out = type;
618
619 /* Pattern detected. Create a stmt to be used to replace the pattern: */
620 var = vect_recog_temp_ssa_var (type, NULL);
621 pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
622 oprnd0, oprnd1);
623 SSA_NAME_DEF_STMT (var) = pattern_stmt;
624
625 if (vect_print_dump_info (REPORT_DETAILS))
626 {
627 fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: ");
628 print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
629 }
630
631 /* We don't allow changing the order of the computation in the inner-loop
632 when doing outer-loop vectorization. */
633 gcc_assert (!nested_in_vect_loop_p (loop, last_stmt));
634
635 return pattern_stmt;
636 }
637
638
/* Function vect_pattern_recog_1

   Input:
   PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
        computation pattern.
   STMT: A stmt from which the pattern search should start.

   If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
   expression that computes the same functionality and can be used to
   replace the sequence of stmts that are involved in the pattern.

   Output:
   This function checks if the expression returned by PATTERN_RECOG_FUNC is
   supported in vector form by the target.  We use 'TYPE_IN' to obtain the
   relevant vector type. If 'TYPE_IN' is already a vector type, then this
   indicates that target support had already been checked by PATTERN_RECOG_FUNC.
   If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC, we check that it fits
   to the available target pattern.

   This function also does some bookkeeping, as explained in the documentation
   for vect_recog_pattern.  */

static void
vect_pattern_recog_1 (
	gimple (* vect_recog_func) (gimple, tree *, tree *),
	gimple_stmt_iterator si)
{
  gimple stmt = gsi_stmt (si), pattern_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info pattern_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree pattern_vectype;
  tree type_in, type_out;
  enum tree_code code;
  int i;
  gimple next;

  /* Ask the recognizer for a replacement stmt; NULL means no match.  */
  pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out);
  if (!pattern_stmt)
    return;

  if (VECTOR_MODE_P (TYPE_MODE (type_in)))
    {
      /* No need to check target support (already checked by the pattern
         recognition function).  */
      if (type_out)
	gcc_assert (VECTOR_MODE_P (TYPE_MODE (type_out)));
      pattern_vectype = type_out ? type_out : type_in;
    }
  else
    {
      enum machine_mode vec_mode;
      enum insn_code icode;
      optab optab;

      /* Check target support: the recognizer returned scalar types, so
	 derive the vector types and query the optab for an insn whose
	 result mode matches TYPE_OUT.  */
      type_in = get_vectype_for_scalar_type (type_in);
      if (!type_in)
	return;
      if (type_out)
	type_out = get_vectype_for_scalar_type (type_out);
      else
	type_out = type_in;
      if (!type_out)
	return;
      pattern_vectype = type_out;

      if (is_gimple_assign (pattern_stmt))
	code = gimple_assign_rhs_code (pattern_stmt);
      else
        {
	  gcc_assert (is_gimple_call (pattern_stmt));
	  code = CALL_EXPR;
	}

      optab = optab_for_tree_code (code, type_in, optab_default);
      vec_mode = TYPE_MODE (type_in);
      if (!optab
          || (icode = optab_handler (optab, vec_mode)->insn_code) ==
              CODE_FOR_nothing
          || (insn_data[icode].operand[0].mode != TYPE_MODE (type_out)))
	return;
    }

  /* Found a vectorizable pattern.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "pattern recognized: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* Mark the stmts that are involved in the pattern: insert the new stmt
     just before the original one and link the two through their
     RELATED_STMT fields (see vect_pattern_recog's documentation).  */
  gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (pattern_stmt,
		      new_stmt_vec_info (pattern_stmt, loop_vinfo, NULL));
  pattern_stmt_info = vinfo_for_stmt (pattern_stmt);

  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info);
  STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
  STMT_VINFO_IN_PATTERN_P (stmt_info) = true;
  STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt;

  /* Patterns cannot be vectorized using SLP, because they change the order of
     computation, so drop STMT from the reduction list.  */
  for (i = 0; VEC_iterate (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i,
			   next);
       i++)
    if (next == stmt)
      VEC_ordered_remove (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i);
}
750
751
752 /* Function vect_pattern_recog
753
754 Input:
755 LOOP_VINFO - a struct_loop_info of a loop in which we want to look for
756 computation idioms.
757
758 Output - for each computation idiom that is detected we insert a new stmt
759 that provides the same functionality and that can be vectorized. We
760 also record some information in the struct_stmt_info of the relevant
761 stmts, as explained below:
762
763 At the entry to this function we have the following stmts, with the
764 following initial value in the STMT_VINFO fields:
765
766 stmt in_pattern_p related_stmt vec_stmt
767 S1: a_i = .... - - -
768 S2: a_2 = ..use(a_i).. - - -
769 S3: a_1 = ..use(a_2).. - - -
770 S4: a_0 = ..use(a_1).. - - -
771 S5: ... = ..use(a_0).. - - -
772
773 Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
774 represented by a single stmt. We then:
775 - create a new stmt S6 that will replace the pattern.
776 - insert the new stmt S6 before the last stmt in the pattern
777 - fill in the STMT_VINFO fields as follows:
778
779 in_pattern_p related_stmt vec_stmt
780 S1: a_i = .... - - -
781 S2: a_2 = ..use(a_i).. - - -
782 S3: a_1 = ..use(a_2).. - - -
783 > S6: a_new = .... - S4 -
784 S4: a_0 = ..use(a_1).. true S6 -
785 S5: ... = ..use(a_0).. - - -
786
787 (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
788 to each other through the RELATED_STMT field).
789
790 S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
791 of S4 because it will replace all its uses. Stmts {S1,S2,S3} will
792 remain irrelevant unless used by stmts other than S4.
793
794 If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
795 (because they are marked as irrelevant). It will vectorize S6, and record
796 a pointer to the new vector stmt VS6 both from S6 (as usual), and also
797 from S4. We do that so that when we get to vectorizing stmts that use the
798 def of S4 (like S5 that uses a_0), we'll know where to take the relevant
799 vector-def from. S4 will be skipped, and S5 will be vectorized as usual:
800
801 in_pattern_p related_stmt vec_stmt
802 S1: a_i = .... - - -
803 S2: a_2 = ..use(a_i).. - - -
804 S3: a_1 = ..use(a_2).. - - -
805 > VS6: va_new = .... - - -
806 S6: a_new = .... - S4 VS6
807 S4: a_0 = ..use(a_1).. true S6 VS6
808 > VS5: ... = ..vuse(va_new).. - - -
809 S5: ... = ..use(a_0).. - - -
810
811 DCE could then get rid of {S1,S2,S3,S4,S5,S6} (if their defs are not used
812 elsewhere), and we'll end up with:
813
814 VS6: va_new = ....
815 VS5: ... = ..vuse(va_new)..
816
817 If vectorization does not succeed, DCE will clean S6 away (its def is
818 not used), and we'll end up with the original sequence.
819 */
820
821 void
822 vect_pattern_recog (loop_vec_info loop_vinfo)
823 {
824 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
825 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
826 unsigned int nbbs = loop->num_nodes;
827 gimple_stmt_iterator si;
828 unsigned int i, j;
829 gimple (* vect_recog_func_ptr) (gimple, tree *, tree *);
830
831 if (vect_print_dump_info (REPORT_DETAILS))
832 fprintf (vect_dump, "=== vect_pattern_recog ===");
833
834 /* Scan through the loop stmts, applying the pattern recognition
835 functions starting at each stmt visited: */
836 for (i = 0; i < nbbs; i++)
837 {
838 basic_block bb = bbs[i];
839 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
840 {
841 /* Scan over all generic vect_recog_xxx_pattern functions. */
842 for (j = 0; j < NUM_PATTERNS; j++)
843 {
844 vect_recog_func_ptr = vect_vect_recog_func_ptrs[j];
845 vect_pattern_recog_1 (vect_recog_func_ptr, si);
846 }
847 }
848 }
849 }