gcc/tree-vect-patterns.c
/* Analysis Utilities for Loop Vectorization.
   Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Dorit Nuzman <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "expr.h"
#include "optabs.h"
#include "params.h"
#include "tree-data-ref.h"
#include "tree-vectorizer.h"
#include "recog.h"
#include "toplev.h"

/* Function prototypes */
static void vect_pattern_recog_1
  (gimple (* ) (gimple, tree *, tree *), gimple_stmt_iterator);
static bool widened_name_p (tree, gimple, tree *, gimple *);

/* Pattern recognition functions  */
static gimple vect_recog_widen_sum_pattern (gimple, tree *, tree *);
static gimple vect_recog_widen_mult_pattern (gimple, tree *, tree *);
static gimple vect_recog_dot_prod_pattern (gimple, tree *, tree *);
static gimple vect_recog_pow_pattern (gimple, tree *, tree *);
static vect_recog_func_ptr vect_vect_recog_func_ptrs[NUM_PATTERNS] = {
        vect_recog_widen_mult_pattern,
        vect_recog_widen_sum_pattern,
        vect_recog_dot_prod_pattern,
        vect_recog_pow_pattern};


/* Function widened_name_p

   Check whether NAME, an ssa-name used in USE_STMT,
   is a result of a type-promotion, such that:
     DEF_STMT: NAME = NOP (name0)
   where the type of name0 (HALF_TYPE) is smaller than the type of NAME.
*/
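
/* For example (an illustrative sketch added for clarity; the statement and
   SSA names below are hypothetical, not taken from the original sources):

     S_def:  i_2 = (int) s_1;          <-- DEF_STMT, s_1 of type short
     S_use:  sum_3 = i_2 + sum_0;      <-- USE_STMT

   widened_name_p (i_2, S_use, &half_type, &def_stmt) would return true,
   setting HALF_TYPE to short int and DEF_STMT to S_def, because int is at
   least twice as wide as short and both types have the same signedness.  */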

static bool
widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt)
{
  tree dummy;
  gimple dummy_gimple;
  loop_vec_info loop_vinfo;
  stmt_vec_info stmt_vinfo;
  tree type = TREE_TYPE (name);
  tree oprnd0;
  enum vect_def_type dt;
  tree def;

  stmt_vinfo = vinfo_for_stmt (use_stmt);
  loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (!vect_is_simple_use (name, loop_vinfo, def_stmt, &def, &dt))
    return false;

  if (dt != vect_internal_def
      && dt != vect_external_def && dt != vect_constant_def)
    return false;

  if (! *def_stmt)
    return false;

  if (!is_gimple_assign (*def_stmt))
    return false;

  if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR)
    return false;

  oprnd0 = gimple_assign_rhs1 (*def_stmt);

  *half_type = TREE_TYPE (oprnd0);
  if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type)
      || (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type))
      || (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2)))
    return false;

  if (!vect_is_simple_use (oprnd0, loop_vinfo, &dummy_gimple, &dummy, &dt))
    return false;

  return true;
}

/* Helper to return a new temporary for pattern of TYPE for STMT.  If STMT
   is NULL, the caller must set SSA_NAME_DEF_STMT for the returned SSA var. */

static tree
vect_recog_temp_ssa_var (tree type, gimple stmt)
{
  tree var = create_tmp_var (type, "patt");

  add_referenced_var (var);
  var = make_ssa_name (var, stmt);
  return var;
}

/* Function vect_recog_dot_prod_pattern

   Try to find the following pattern:

     type x_t, y_t;
     TYPE1 prod;
     TYPE2 sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = ...
     S2  y_t = ...
     S3  x_T = (TYPE1) x_t;
     S4  y_T = (TYPE1) y_t;
     S5  prod = x_T * y_T;
     [S6  prod = (TYPE2) prod;  #optional]
     S7  sum_1 = prod + sum_0;

   where 'TYPE1' is exactly double the size of type 'type', and 'TYPE2' is
   the same size as 'TYPE1' or bigger.  This is a special case of a reduction
   computation.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S7, the pattern {S3,S4,S5,S6,S7} will be
   detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_DOT_PRODUCT <x_t, y_t, sum_0>

   Note: The dot-prod idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results.  It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */
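
/* As a concrete illustration (an assumed example added for clarity, not
   taken from the original sources), the following C loop matches this
   pattern:

     short x[N], y[N];
     int sum = 0;
     for (i = 0; i < N; i++)
       sum += (int) x[i] * (int) y[i];

   Here 'type' is short, TYPE1 and TYPE2 are both int, and the loop body is
   summarized by DOT_PROD_EXPR <x[i], y[i], sum_0>, which targets with a
   dot-product instruction can implement directly.  */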

static gimple
vect_recog_dot_prod_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  tree oprnd00, oprnd01;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  tree prod_type;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var, rhs;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE1) X;
          DY = (TYPE1) Y;
          DPROD = DX * DY;
          DDPROD = (TYPE2) DPROD;
          sum_1 = DDPROD + sum_0;
     In which
     - DX is double the size of X
     - DY is double the size of Y
     - DX, DY, DPROD all have the same type
     - sum is the same size as DPROD or bigger
     - sum has been recognized as a reduction variable.

     This is equivalent to:
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD w+ sum_0;  #widen summation
     or
       DPROD = X w* Y;          #widen mult
       sum_1 = DPROD + sum_0;   #summation
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as widening-summation?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      type = gimple_expr_type (stmt);
      if (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      half_type = TREE_TYPE (oprnd0);
    }
  else
    {
      gimple def_stmt;

      if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
        return NULL;
      oprnd0 = gimple_assign_rhs1 (last_stmt);
      oprnd1 = gimple_assign_rhs2 (last_stmt);
      if (TYPE_MAIN_VARIANT (TREE_TYPE (oprnd0)) != TYPE_MAIN_VARIANT (type)
          || TYPE_MAIN_VARIANT (TREE_TYPE (oprnd1)) != TYPE_MAIN_VARIANT (type))
        return NULL;
      stmt = last_stmt;

      if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt))
        {
          stmt = def_stmt;
          oprnd0 = gimple_assign_rhs1 (stmt);
        }
      else
        half_type = type;
    }

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a (widen_)mult_expr  */

  prod_type = half_type;
  stmt = SSA_NAME_DEF_STMT (oprnd0);
  /* FORNOW.  We could continue analyzing the def-use chain when this stmt
     is a phi inside the loop (in case we are analyzing an outer-loop).  */
  if (!is_gimple_assign (stmt))
    return NULL;
  stmt_vinfo = vinfo_for_stmt (stmt);
  gcc_assert (stmt_vinfo);
  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_internal_def)
    return NULL;
  if (gimple_assign_rhs_code (stmt) != MULT_EXPR)
    return NULL;
  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
    {
      /* Has been detected as a widening multiplication?  */

      stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      if (gimple_assign_rhs_code (stmt) != WIDEN_MULT_EXPR)
        return NULL;
      stmt_vinfo = vinfo_for_stmt (stmt);
      gcc_assert (stmt_vinfo);
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_internal_def);
      oprnd00 = gimple_assign_rhs1 (stmt);
      oprnd01 = gimple_assign_rhs2 (stmt);
    }
  else
    {
      tree half_type0, half_type1;
      gimple def_stmt;
      tree oprnd0, oprnd1;

      oprnd0 = gimple_assign_rhs1 (stmt);
      oprnd1 = gimple_assign_rhs2 (stmt);
      if (TYPE_MAIN_VARIANT (TREE_TYPE (oprnd0))
          != TYPE_MAIN_VARIANT (prod_type)
          || TYPE_MAIN_VARIANT (TREE_TYPE (oprnd1))
             != TYPE_MAIN_VARIANT (prod_type))
        return NULL;
      if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt))
        return NULL;
      oprnd00 = gimple_assign_rhs1 (def_stmt);
      if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt))
        return NULL;
      oprnd01 = gimple_assign_rhs1 (def_stmt);
      if (TYPE_MAIN_VARIANT (half_type0) != TYPE_MAIN_VARIANT (half_type1))
        return NULL;
      if (TYPE_PRECISION (prod_type) != TYPE_PRECISION (half_type0) * 2)
        return NULL;
    }

  half_type = TREE_TYPE (oprnd00);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  rhs = build3 (DOT_PROD_EXPR, type, oprnd00, oprnd01, oprnd1);
  pattern_stmt = gimple_build_assign (var, rhs);

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_dot_prod_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  if (nested_in_vect_loop_p (loop, last_stmt))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "vect_recog_dot_prod_pattern: not allowed.");
      return NULL;
    }

  return pattern_stmt;
}

/* Function vect_recog_widen_mult_pattern

   Try to find the following pattern:

     type a_t, b_t;
     TYPE a_T, b_T, prod_T;

     S1  a_t = ;
     S2  b_t = ;
     S3  a_T = (TYPE) a_t;
     S4  b_T = (TYPE) b_t;
     S5  prod_T = a_T * b_T;

   where type 'TYPE' is at least double the size of type 'type'.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S5, the pattern {S3,S4,S5} will be
   detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_MULT <a_t, b_t>
*/
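
/* As a concrete illustration (an assumed example added for clarity, not
   taken from the original sources), this pattern matches a loop such as:

     short a[N], b[N];
     int c[N];
     for (i = 0; i < N; i++)
       c[i] = (int) a[i] * (int) b[i];

   where 'type' is short and TYPE is int, so S5 is replaced by
   WIDEN_MULT_EXPR <a[i], b[i]>.  */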

static gimple
vect_recog_widen_mult_pattern (gimple last_stmt,
                               tree *type_in,
                               tree *type_out)
{
  gimple def_stmt0, def_stmt1;
  tree oprnd0, oprnd1;
  tree type, half_type0, half_type1;
  gimple pattern_stmt;
  tree vectype;
  tree dummy;
  tree var;
  enum tree_code dummy_code;
  int dummy_int;
  VEC (tree, heap) *dummy_vec;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != MULT_EXPR)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (TYPE_MAIN_VARIANT (TREE_TYPE (oprnd0)) != TYPE_MAIN_VARIANT (type)
      || TYPE_MAIN_VARIANT (TREE_TYPE (oprnd1)) != TYPE_MAIN_VARIANT (type))
    return NULL;

  /* Check argument 0 */
  if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0))
    return NULL;
  oprnd0 = gimple_assign_rhs1 (def_stmt0);

  /* Check argument 1 */
  if (!widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1))
    return NULL;
  oprnd1 = gimple_assign_rhs1 (def_stmt1);

  if (TYPE_MAIN_VARIANT (half_type0) != TYPE_MAIN_VARIANT (half_type1))
    return NULL;

  /* Pattern detected.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vect_recog_widen_mult_pattern: detected: ");

  /* Check target support  */
  vectype = get_vectype_for_scalar_type (half_type0);
  if (!vectype
      || !supportable_widening_operation (WIDEN_MULT_EXPR, last_stmt, vectype,
                                          &dummy, &dummy, &dummy_code,
                                          &dummy_code, &dummy_int, &dummy_vec))
    return NULL;

  *type_in = vectype;
  *type_out = NULL_TREE;

  /* Pattern supported.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_MULT_EXPR, var, oprnd0,
                                               oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);

  return pattern_stmt;
}


/* Function vect_recog_pow_pattern

   Try to find the following pattern:

     x = POW (y, N);

   with POW being one of pow, powf, powi, powif and N being
   either 2 or 0.5.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        x = x * x
   or
        x = sqrt (x)
*/
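
/* For example (an illustrative sketch added for clarity, not part of the
   original sources):

     x = pow (y, 2.0);    is replaced by    x = y * y;
     x = pow (y, 0.5);    is replaced by    x = sqrt (y);

   the latter only if the target provides a vectorizable sqrt for y's type
   (checked via vectorizable_function below).  */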

static gimple
vect_recog_pow_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  tree type;
  tree fn, base, exp = NULL;
  gimple stmt;
  tree var;

  if (!is_gimple_call (last_stmt) || gimple_call_lhs (last_stmt) == NULL)
    return NULL;

  type = gimple_expr_type (last_stmt);

  fn = gimple_call_fndecl (last_stmt);
  switch (DECL_FUNCTION_CODE (fn))
    {
    case BUILT_IN_POWIF:
    case BUILT_IN_POWI:
    case BUILT_IN_POWF:
    case BUILT_IN_POW:
      base = gimple_call_arg (last_stmt, 0);
      exp = gimple_call_arg (last_stmt, 1);
      if (TREE_CODE (exp) != REAL_CST
          && TREE_CODE (exp) != INTEGER_CST)
        return NULL;
      break;

    default:
      return NULL;
    }

  /* We now have a pow or powi builtin function call with a constant
     exponent.  */

  *type_out = NULL_TREE;

  /* Catch squaring.  */
  if ((host_integerp (exp, 0)
       && tree_low_cst (exp, 0) == 2)
      || (TREE_CODE (exp) == REAL_CST
          && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2)))
    {
      *type_in = TREE_TYPE (base);

      var = vect_recog_temp_ssa_var (TREE_TYPE (base), NULL);
      stmt = gimple_build_assign_with_ops (MULT_EXPR, var, base, base);
      SSA_NAME_DEF_STMT (var) = stmt;
      return stmt;
    }

  /* Catch square root.  */
  if (TREE_CODE (exp) == REAL_CST
      && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconsthalf))
    {
      tree newfn = mathfn_built_in (TREE_TYPE (base), BUILT_IN_SQRT);
      *type_in = get_vectype_for_scalar_type (TREE_TYPE (base));
      if (*type_in)
        {
          gimple stmt = gimple_build_call (newfn, 1, base);
          if (vectorizable_function (stmt, *type_in, *type_in)
              != NULL_TREE)
            {
              var = vect_recog_temp_ssa_var (TREE_TYPE (base), stmt);
              gimple_call_set_lhs (stmt, var);
              return stmt;
            }
        }
    }

  return NULL;
}


/* Function vect_recog_widen_sum_pattern

   Try to find the following pattern:

     type x_t;
     TYPE x_T, sum = init;
   loop:
     sum_0 = phi <init, sum_1>
     S1  x_t = *p;
     S2  x_T = (TYPE) x_t;
     S3  sum_1 = x_T + sum_0;

   where type 'TYPE' is at least double the size of type 'type', i.e. we're
   summing elements of type 'type' into an accumulator of type 'TYPE'.  This
   is a special case of a reduction computation.

   Input:

   * LAST_STMT: A stmt from which the pattern search begins.  In the example,
   when this function is called with S3, the pattern {S2,S3} will be detected.

   Output:

   * TYPE_IN: The type of the input arguments to the pattern.

   * TYPE_OUT: The type of the output of this pattern.

   * Return value: A new stmt that will be used to replace the sequence of
   stmts that constitute the pattern.  In this case it will be:
        WIDEN_SUM <x_t, sum_0>

   Note: The widening-sum idiom is a widening reduction pattern that is
         vectorized without preserving all the intermediate results.  It
         produces only N/2 (widened) results (by summing up pairs of
         intermediate results) rather than all N results.  Therefore, we
         cannot allow this pattern when we want to get all the results and in
         the correct order (as is the case when this computation is in an
         inner-loop nested in an outer-loop that is being vectorized).  */
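
/* As a concrete illustration (an assumed example added for clarity, not
   taken from the original sources), this pattern matches a loop such as:

     unsigned char x[N];
     unsigned int sum = 0;
     for (i = 0; i < N; i++)
       sum += (unsigned int) x[i];

   where 'type' is unsigned char and TYPE is unsigned int, so S3 is replaced
   by WIDEN_SUM_EXPR <x[i], sum_0>.  */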

static gimple
vect_recog_widen_sum_pattern (gimple last_stmt, tree *type_in, tree *type_out)
{
  gimple stmt;
  tree oprnd0, oprnd1;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
  tree type, half_type;
  gimple pattern_stmt;
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_info);
  tree var;

  if (!is_gimple_assign (last_stmt))
    return NULL;

  type = gimple_expr_type (last_stmt);

  /* Look for the following pattern
          DX = (TYPE) X;
          sum_1 = DX + sum_0;
     In which DX is at least double the size of X, and sum_1 has been
     recognized as a reduction variable.
   */

  /* Starting from LAST_STMT, follow the defs of its uses in search
     of the above pattern.  */

  if (gimple_assign_rhs_code (last_stmt) != PLUS_EXPR)
    return NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def)
    return NULL;

  oprnd0 = gimple_assign_rhs1 (last_stmt);
  oprnd1 = gimple_assign_rhs2 (last_stmt);
  if (TYPE_MAIN_VARIANT (TREE_TYPE (oprnd0)) != TYPE_MAIN_VARIANT (type)
      || TYPE_MAIN_VARIANT (TREE_TYPE (oprnd1)) != TYPE_MAIN_VARIANT (type))
    return NULL;

  /* So far so good.  Since last_stmt was detected as a (summation) reduction,
     we know that oprnd1 is the reduction variable (defined by a loop-header
     phi), and oprnd0 is an ssa-name defined by a stmt in the loop body.
     Left to check that oprnd0 is defined by a cast from type 'type' to type
     'TYPE'.  */

  if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt))
    return NULL;

  oprnd0 = gimple_assign_rhs1 (stmt);
  *type_in = half_type;
  *type_out = type;

  /* Pattern detected.  Create a stmt to be used to replace the pattern: */
  var = vect_recog_temp_ssa_var (type, NULL);
  pattern_stmt = gimple_build_assign_with_ops (WIDEN_SUM_EXPR, var,
                                               oprnd0, oprnd1);
  SSA_NAME_DEF_STMT (var) = pattern_stmt;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_recog_widen_sum_pattern: detected: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* We don't allow changing the order of the computation in the inner-loop
     when doing outer-loop vectorization.  */
  if (nested_in_vect_loop_p (loop, last_stmt))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "vect_recog_widen_sum_pattern: not allowed.");
      return NULL;
    }

  return pattern_stmt;
}


/* Function vect_pattern_recog_1

   Input:
   PATTERN_RECOG_FUNC: A pointer to a function that detects a certain
        computation pattern.
   STMT: A stmt from which the pattern search should start.

   If PATTERN_RECOG_FUNC successfully detected the pattern, it creates an
   expression that computes the same functionality and can be used to
   replace the sequence of stmts that are involved in the pattern.

   Output:
   This function checks if the expression returned by PATTERN_RECOG_FUNC is
   supported in vector form by the target.  We use 'TYPE_IN' to obtain the
   relevant vector type.  If 'TYPE_IN' is already a vector type, then this
   indicates that target support has already been checked by
   PATTERN_RECOG_FUNC.  If 'TYPE_OUT' is also returned by PATTERN_RECOG_FUNC,
   we check that it fits the available target pattern.

   This function also does some bookkeeping, as explained in the documentation
   for vect_pattern_recog.  */
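
/* For instance (an illustrative walk-through added for clarity, not from
   the original sources): if vect_recog_widen_sum_pattern returns a
   WIDEN_SUM_EXPR stmt with TYPE_IN == short int, we look up the vector type
   for short int (e.g. V8HI on a 128-bit target), query optab_for_tree_code
   for WIDEN_SUM_EXPR on that mode, and give up unless the target provides
   an instruction for it.  */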

static void
vect_pattern_recog_1 (
        gimple (* vect_recog_func) (gimple, tree *, tree *),
        gimple_stmt_iterator si)
{
  gimple stmt = gsi_stmt (si), pattern_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info pattern_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree pattern_vectype;
  tree type_in, type_out;
  enum tree_code code;

  pattern_stmt = (* vect_recog_func) (stmt, &type_in, &type_out);
  if (!pattern_stmt)
    return;

  if (VECTOR_MODE_P (TYPE_MODE (type_in)))
    {
      /* No need to check target support (already checked by the pattern
         recognition function).  */
      pattern_vectype = type_in;
    }
  else
    {
      enum machine_mode vec_mode;
      enum insn_code icode;
      optab optab;

      /* Check target support  */
      pattern_vectype = get_vectype_for_scalar_type (type_in);
      if (!pattern_vectype)
        return;

      if (is_gimple_assign (pattern_stmt))
        code = gimple_assign_rhs_code (pattern_stmt);
      else
        {
          gcc_assert (is_gimple_call (pattern_stmt));
          code = CALL_EXPR;
        }

      optab = optab_for_tree_code (code, pattern_vectype, optab_default);
      vec_mode = TYPE_MODE (pattern_vectype);
      if (!optab
          || (icode = optab_handler (optab, vec_mode)->insn_code) ==
              CODE_FOR_nothing
          || (type_out
              && (!get_vectype_for_scalar_type (type_out)
                  || (insn_data[icode].operand[0].mode !=
                      TYPE_MODE (get_vectype_for_scalar_type (type_out))))))
        return;
    }

  /* Found a vectorizable pattern.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "pattern recognized: ");
      print_gimple_stmt (vect_dump, pattern_stmt, 0, TDF_SLIM);
    }

  /* Mark the stmts that are involved in the pattern.  */
  gsi_insert_before (&si, pattern_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (pattern_stmt,
                      new_stmt_vec_info (pattern_stmt, loop_vinfo));
  pattern_stmt_info = vinfo_for_stmt (pattern_stmt);

  STMT_VINFO_RELATED_STMT (pattern_stmt_info) = stmt;
  STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (stmt_info);
  STMT_VINFO_VECTYPE (pattern_stmt_info) = pattern_vectype;
  STMT_VINFO_IN_PATTERN_P (stmt_info) = true;
  STMT_VINFO_RELATED_STMT (stmt_info) = pattern_stmt;

  return;
}


/* Function vect_pattern_recog

   Input:
   LOOP_VINFO - the loop_vec_info struct of a loop in which we want to look
        for computation idioms.

   Output - for each computation idiom that is detected we insert a new stmt
        that provides the same functionality and that can be vectorized.  We
        also record some information in the stmt_vec_info of the relevant
        stmts, as explained below:

   At the entry to this function we have the following stmts, with the
   following initial value in the STMT_VINFO fields:

         stmt                     in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
         S4: a_0 = ..use(a_1)..         -       -               -
         S5: ... = ..use(a_0)..         -       -               -

   Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
   represented by a single stmt.  We then:
   - create a new stmt S6 that will replace the pattern.
   - insert the new stmt S6 before the last stmt in the pattern
   - fill in the STMT_VINFO fields as follows:

                                  in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
       > S6: a_new = ....               -       S4              -
         S4: a_0 = ..use(a_1)..         true    S6              -
         S5: ... = ..use(a_0)..         -       -               -

   (the last stmt in the pattern (S4) and the new pattern stmt (S6) point
   to each other through the RELATED_STMT field).

   S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
   of S4 because it will replace all its uses.  Stmts {S1,S2,S3} will
   remain irrelevant unless used by stmts other than S4.

   If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
   (because they are marked as irrelevant).  It will vectorize S6, and record
   a pointer to the new vector stmt VS6 both from S6 (as usual), and also
   from S4.  We do that so that when we get to vectorizing stmts that use the
   def of S4 (like S5 that uses a_0), we'll know where to take the relevant
   vector-def from.  S4 will be skipped, and S5 will be vectorized as usual:

                                  in_pattern_p  related_stmt    vec_stmt
         S1: a_i = ....                 -       -               -
         S2: a_2 = ..use(a_i)..         -       -               -
         S3: a_1 = ..use(a_2)..         -       -               -
       > VS6: va_new = ....             -       -               -
         S6: a_new = ....               -       S4              VS6
         S4: a_0 = ..use(a_1)..         true    S6              VS6
       > VS5: ... = ..vuse(va_new)..    -       -               -
         S5: ... = ..use(a_0)..         -       -               -

   DCE could then get rid of {S1,S2,S3,S4,S5,S6} (if their defs are not used
   elsewhere), and we'll end up with:

        VS6: va_new = ....
        VS5: ... = ..vuse(va_new)..

   If vectorization does not succeed, DCE will clean S6 away (its def is
   not used), and we'll end up with the original sequence.
*/
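
/* As a concrete (illustrative) instantiation of the tables above, using the
   widen-sum example earlier in this file: S4 would be
   sum_1 = x_T + sum_0 and the new pattern stmt S6 would be
   patt_new = WIDEN_SUM_EXPR <x_t, sum_0>, with S4 and S6 pointing at each
   other through their RELATED_STMT fields.  ('patt_new' is a hypothetical
   name; the temporaries are actually created by vect_recog_temp_ssa_var.)  */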

void
vect_pattern_recog (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i, j;
  gimple (* vect_recog_func_ptr) (gimple, tree *, tree *);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_pattern_recog ===");

  /* Scan through the loop stmts, applying the pattern recognition
     functions starting at each stmt visited:  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);

          /* Scan over all generic vect_recog_xxx_pattern functions.  */
          for (j = 0; j < NUM_PATTERNS; j++)
            {
              vect_recog_func_ptr = vect_vect_recog_func_ptrs[j];
              vect_pattern_recog_1 (vect_recog_func_ptr, si);
            }
        }
    }
}