Vector shuffling patch from Artem Shinkarov.
gcc/tree-vect-generic.c
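(Context, as an illustrative sketch that is not part of the original
file: this pass lowers the generic two-operand shuffles introduced by
the patch's __builtin_shuffle builtin, e.g.

  typedef int v4si __attribute__ ((vector_size (16)));
  v4si a = {1, 2, 3, 4}, b = {5, 6, 7, 8};
  v4si m = {0, 5, 2, 7};
  v4si r = __builtin_shuffle (a, b, m);  /* r is {1, 6, 3, 8}.  */

either to a target permutation instruction when one exists, or to the
piecewise element-by-element construction implemented below.)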
/* Lower vector operations to scalar operations.
   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "tm.h"
#include "langhooks.h"
#include "tree-flow.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-pass.h"
#include "flags.h"
#include "ggc.h"
#include "diagnostic.h"

/* Need to include rtl.h, expr.h, etc. for optabs.  */
#include "expr.h"
#include "optabs.h"


static void expand_vector_operations_1 (gimple_stmt_iterator *);


/* Build a constant of type TYPE, made of VALUE's bits replicated
   every TYPE_SIZE (INNER_TYPE) bits to fit TYPE's precision.  */
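/* For instance (an illustrative example, not from the original source):
   with a 32-bit TYPE, an 8-bit INNER_TYPE and VALUE 0x7f, the result
   is the constant 0x7f7f7f7f.  */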
static tree
build_replicated_const (tree type, tree inner_type, HOST_WIDE_INT value)
{
  int width = tree_low_cst (TYPE_SIZE (inner_type), 1);
  int n = HOST_BITS_PER_WIDE_INT / width;
  unsigned HOST_WIDE_INT low, high, mask;
  tree ret;

  gcc_assert (n);

  if (width == HOST_BITS_PER_WIDE_INT)
    low = value;
  else
    {
      mask = ((HOST_WIDE_INT)1 << width) - 1;
      low = (unsigned HOST_WIDE_INT) ~0 / mask * (value & mask);
    }

  if (TYPE_PRECISION (type) < HOST_BITS_PER_WIDE_INT)
    low &= ((HOST_WIDE_INT)1 << TYPE_PRECISION (type)) - 1, high = 0;
  else if (TYPE_PRECISION (type) == HOST_BITS_PER_WIDE_INT)
    high = 0;
  else if (TYPE_PRECISION (type) == 2 * HOST_BITS_PER_WIDE_INT)
    high = low;
  else
    gcc_unreachable ();

  ret = build_int_cst_wide (type, low, high);
  return ret;
}

static GTY(()) tree vector_inner_type;
static GTY(()) tree vector_last_type;
static GTY(()) int vector_last_nunits;

/* Return a suitable vector type made of NUNITS units each of mode
   "word_mode" (the global variable).  */
static tree
build_word_mode_vector_type (int nunits)
{
  if (!vector_inner_type)
    vector_inner_type = lang_hooks.types.type_for_mode (word_mode, 1);
  else if (vector_last_nunits == nunits)
    {
      gcc_assert (TREE_CODE (vector_last_type) == VECTOR_TYPE);
      return vector_last_type;
    }

  /* We build a new type, but we canonicalize it nevertheless,
     because it still saves some memory.  */
  vector_last_nunits = nunits;
  vector_last_type = type_hash_canon (nunits,
                                      build_vector_type (vector_inner_type,
                                                         nunits));
  return vector_last_type;
}

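/* Callback type used by expand_vector_piecewise: given the iterator,
   the type to compute in, the two operands (the second may be unused),
   the bit position and bit size of the piece being processed, and the
   operation code, return a tree computing that piece.  */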
typedef tree (*elem_op_func) (gimple_stmt_iterator *,
                              tree, tree, tree, tree, tree, enum tree_code);

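/* Extract from vector T the piece of type TYPE that is BITSIZE wide
   and starts at BITPOS; if BITPOS is NULL, reinterpret all of T as
   TYPE instead.  */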
static inline tree
tree_vec_extract (gimple_stmt_iterator *gsi, tree type,
                  tree t, tree bitsize, tree bitpos)
{
  if (bitpos)
    return gimplify_build3 (gsi, BIT_FIELD_REF, type, t, bitsize, bitpos);
  else
    return gimplify_build1 (gsi, VIEW_CONVERT_EXPR, type, t);
}

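/* Element-wise helper: extract the piece of A at BITPOS/BITSIZE and
   apply the unary operation CODE to it in INNER_TYPE; B is unused.  */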
static tree
do_unop (gimple_stmt_iterator *gsi, tree inner_type, tree a,
         tree b ATTRIBUTE_UNUSED, tree bitpos, tree bitsize,
         enum tree_code code)
{
  a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
  return gimplify_build1 (gsi, code, inner_type, a);
}

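/* Element-wise helper: extract the pieces of A and B at BITPOS/BITSIZE
   and combine them with the binary operation CODE in INNER_TYPE.  */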
static tree
do_binop (gimple_stmt_iterator *gsi, tree inner_type, tree a, tree b,
          tree bitpos, tree bitsize, enum tree_code code)
{
  a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
  b = tree_vec_extract (gsi, inner_type, b, bitsize, bitpos);
  return gimplify_build2 (gsi, code, inner_type, a, b);
}

/* Construct the expression (A[BITPOS] CODE B[BITPOS]) ? -1 : 0.

   INNER_TYPE is the type of the elements of A and B.

   The returned expression is of a signed integer type with the
   same size as INNER_TYPE.  */
static tree
do_compare (gimple_stmt_iterator *gsi, tree inner_type, tree a, tree b,
            tree bitpos, tree bitsize, enum tree_code code)
{
  tree comp_type;

  a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
  b = tree_vec_extract (gsi, inner_type, b, bitsize, bitpos);

  comp_type = build_nonstandard_integer_type
                (GET_MODE_BITSIZE (TYPE_MODE (inner_type)), 0);

  return gimplify_build3 (gsi, COND_EXPR, comp_type,
                          fold_build2 (code, boolean_type_node, a, b),
                          build_int_cst (comp_type, -1),
                          build_int_cst (comp_type, 0));
}

/* Expand vector addition to scalars.  This does bit twiddling
   in order to increase parallelism:

   a + b = (((int) a & 0x7f7f7f7f) + ((int) b & 0x7f7f7f7f)) ^
           (a ^ b) & 0x80808080

   a - b = (((int) a | 0x80808080) - ((int) b & 0x7f7f7f7f)) ^
           (a ^ ~b) & 0x80808080

   -b = (0x80808080 - ((int) b & 0x7f7f7f7f)) ^ (~b & 0x80808080)

   This optimization should be done only if 4 vector items or more
   fit into a word.  */
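/* A worked example (illustrative, not from the original source): take
   four QImode elements per SImode word, a = 0x81807f05, b = 0x01010101.
   Masking clears the per-byte sign bits, so the word addition
   0x01007f05 + 0x01010101 = 0x02018006 cannot carry across byte
   boundaries; XORing in (a ^ b) & 0x80808080 = 0x80800000 then
   restores the discarded sign information, giving 0x82818006 -- exactly
   the four independent byte sums {0x82, 0x81, 0x80, 0x06}.  */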
static tree
do_plus_minus (gimple_stmt_iterator *gsi, tree word_type, tree a, tree b,
               tree bitpos, tree bitsize, enum tree_code code)
{
  tree inner_type = TREE_TYPE (TREE_TYPE (a));
  unsigned HOST_WIDE_INT max;
  tree low_bits, high_bits, a_low, b_low, result_low, signs;

  max = GET_MODE_MASK (TYPE_MODE (inner_type));
  low_bits = build_replicated_const (word_type, inner_type, max >> 1);
  high_bits = build_replicated_const (word_type, inner_type, max & ~(max >> 1));

  a = tree_vec_extract (gsi, word_type, a, bitsize, bitpos);
  b = tree_vec_extract (gsi, word_type, b, bitsize, bitpos);

  signs = gimplify_build2 (gsi, BIT_XOR_EXPR, word_type, a, b);
  b_low = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, b, low_bits);
  if (code == PLUS_EXPR)
    a_low = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, a, low_bits);
  else
    {
      a_low = gimplify_build2 (gsi, BIT_IOR_EXPR, word_type, a, high_bits);
      signs = gimplify_build1 (gsi, BIT_NOT_EXPR, word_type, signs);
    }

  signs = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, signs, high_bits);
  result_low = gimplify_build2 (gsi, code, word_type, a_low, b_low);
  return gimplify_build2 (gsi, BIT_XOR_EXPR, word_type, result_low, signs);
}

static tree
do_negate (gimple_stmt_iterator *gsi, tree word_type, tree b,
           tree unused ATTRIBUTE_UNUSED, tree bitpos, tree bitsize,
           enum tree_code code ATTRIBUTE_UNUSED)
{
  tree inner_type = TREE_TYPE (TREE_TYPE (b));
  unsigned HOST_WIDE_INT max;
  tree low_bits, high_bits, b_low, result_low, signs;

  max = GET_MODE_MASK (TYPE_MODE (inner_type));
  low_bits = build_replicated_const (word_type, inner_type, max >> 1);
  high_bits = build_replicated_const (word_type, inner_type, max & ~(max >> 1));

  b = tree_vec_extract (gsi, word_type, b, bitsize, bitpos);

  b_low = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, b, low_bits);
  signs = gimplify_build1 (gsi, BIT_NOT_EXPR, word_type, b);
  signs = gimplify_build2 (gsi, BIT_AND_EXPR, word_type, signs, high_bits);
  result_low = gimplify_build2 (gsi, MINUS_EXPR, word_type, high_bits, b_low);
  return gimplify_build2 (gsi, BIT_XOR_EXPR, word_type, result_low, signs);
}

/* Expand a vector operation to scalars, by using many operations
   whose type is the vector type's inner type.  */
static tree
expand_vector_piecewise (gimple_stmt_iterator *gsi, elem_op_func f,
                         tree type, tree inner_type,
                         tree a, tree b, enum tree_code code)
{
  VEC(constructor_elt,gc) *v;
  tree part_width = TYPE_SIZE (inner_type);
  tree index = bitsize_int (0);
  int nunits = TYPE_VECTOR_SUBPARTS (type);
  int delta = tree_low_cst (part_width, 1)
              / tree_low_cst (TYPE_SIZE (TREE_TYPE (type)), 1);
  int i;

  v = VEC_alloc (constructor_elt, gc, (nunits + delta - 1) / delta);
  for (i = 0; i < nunits;
       i += delta, index = int_const_binop (PLUS_EXPR, index, part_width))
    {
      tree result = f (gsi, inner_type, a, b, index, part_width, code);
      constructor_elt *ce = VEC_quick_push (constructor_elt, v, NULL);
      ce->index = NULL_TREE;
      ce->value = result;
    }

  return build_constructor (type, v);
}

/* Expand a vector operation to scalars with the freedom to use
   a scalar integer type, or to use a different size for the items
   in the vector type.  */
static tree
expand_vector_parallel (gimple_stmt_iterator *gsi, elem_op_func f, tree type,
                        tree a, tree b,
                        enum tree_code code)
{
  tree result, compute_type;
  enum machine_mode mode;
  int n_words = tree_low_cst (TYPE_SIZE_UNIT (type), 1) / UNITS_PER_WORD;

  /* We have three strategies.  If the type's elements are already
     word-sized, just do the operation an element at a time.  Else, if
     the vector is wider than one word, do it a word at a time; finally,
     if the vector is smaller than one word, do it as a scalar.  */
  if (TYPE_MODE (TREE_TYPE (type)) == word_mode)
    return expand_vector_piecewise (gsi, f,
                                    type, TREE_TYPE (type),
                                    a, b, code);
  else if (n_words > 1)
    {
      tree word_type = build_word_mode_vector_type (n_words);
      result = expand_vector_piecewise (gsi, f,
                                        word_type, TREE_TYPE (word_type),
                                        a, b, code);
      result = force_gimple_operand_gsi (gsi, result, true, NULL, true,
                                         GSI_SAME_STMT);
    }
  else
    {
      /* Use a single scalar operation with a mode no wider than word_mode.  */
      mode = mode_for_size (tree_low_cst (TYPE_SIZE (type), 1), MODE_INT, 0);
      compute_type = lang_hooks.types.type_for_mode (mode, 1);
      result = f (gsi, compute_type, a, b, NULL_TREE, NULL_TREE, code);
    }

  return result;
}

/* Expand a vector operation to scalars; for integer types we can use
   special bit twiddling tricks to do the sums a word at a time, using
   function F_PARALLEL instead of F.  These tricks are done only if
   they can process at least four items, that is, only if the vector
   holds at least four items and if a word can hold four items.  */
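/* For example (illustrative, not from the original source): on a
   32-bit target a V8QI addition qualifies, since four QImode parts fit
   in a word and the vector has eight elements, whereas a V2SI addition
   falls back to the plain piecewise expansion.  */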
static tree
expand_vector_addition (gimple_stmt_iterator *gsi,
                        elem_op_func f, elem_op_func f_parallel,
                        tree type, tree a, tree b, enum tree_code code)
{
  int parts_per_word = UNITS_PER_WORD
                       / tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (type)), 1);

  if (INTEGRAL_TYPE_P (TREE_TYPE (type))
      && parts_per_word >= 4
      && TYPE_VECTOR_SUBPARTS (type) >= 4)
    return expand_vector_parallel (gsi, f_parallel,
                                   type, a, b, code);
  else
    return expand_vector_piecewise (gsi, f,
                                    type, TREE_TYPE (type),
                                    a, b, code);
}

/* Check whether vector VEC consists entirely of equal elements and
   whether the number of elements matches the type of VEC.  Return the
   first element of the vector, or NULL_TREE if the vector is not
   uniform.  */
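/* E.g. (illustrative, not from the original source): {3, 3, 3, 3}
   yields the element 3, while {3, 1, 3, 3} yields NULL_TREE.  */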
static tree
uniform_vector_p (tree vec)
{
  tree first, t, els;
  unsigned i;

  if (vec == NULL_TREE)
    return NULL_TREE;

  if (TREE_CODE (vec) == VECTOR_CST)
    {
      els = TREE_VECTOR_CST_ELTS (vec);
      first = TREE_VALUE (els);
      els = TREE_CHAIN (els);

      for (t = els; t; t = TREE_CHAIN (t))
        if (!operand_equal_p (first, TREE_VALUE (t), 0))
          return NULL_TREE;

      return first;
    }

  else if (TREE_CODE (vec) == CONSTRUCTOR)
    {
      first = error_mark_node;

      FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (vec), i, t)
        {
          if (i == 0)
            {
              first = t;
              continue;
            }
          if (!operand_equal_p (first, t, 0))
            return NULL_TREE;
        }
      if (i != TYPE_VECTOR_SUBPARTS (TREE_TYPE (vec)))
        return NULL_TREE;

      return first;
    }

  return NULL_TREE;
}

/* Try to expand the vector comparison expression OP0 CODE OP1 by
   querying the optab to see whether the expression
   VEC_COND_EXPR <OP0 CODE OP1, {-1,...}, {0,...}>
   can be expanded.  If it cannot, lower the comparison piecewise;
   otherwise return NULL_TREE and leave the expression for later
   expansion.  */
static tree
expand_vector_comparison (gimple_stmt_iterator *gsi, tree type, tree op0,
                          tree op1, enum tree_code code)
{
  tree t;
  if (!expand_vec_cond_expr_p (type, TREE_TYPE (op0)))
    t = expand_vector_piecewise (gsi, do_compare, type,
                                 TREE_TYPE (TREE_TYPE (op0)), op0, op1, code);
  else
    t = NULL_TREE;

  return t;
}

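/* Expand the vector operation CODE applied to the operands of ASSIGN,
   producing a replacement expression of vector type TYPE, computed
   piece by piece in COMPUTE_TYPE.  Return NULL_TREE if the statement
   should be left untouched for later expansion.  */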
static tree
expand_vector_operation (gimple_stmt_iterator *gsi, tree type, tree compute_type,
                         gimple assign, enum tree_code code)
{
  enum machine_mode compute_mode = TYPE_MODE (compute_type);

  /* If the compute mode is not a vector mode (hence we are not decomposing
     a BLKmode vector to smaller, hardware-supported vectors), we may want
     to expand the operations in parallel.  */
  if (GET_MODE_CLASS (compute_mode) != MODE_VECTOR_INT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_FLOAT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_FRACT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_UFRACT
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_ACCUM
      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_UACCUM)
    switch (code)
      {
      case PLUS_EXPR:
      case MINUS_EXPR:
        if (!TYPE_OVERFLOW_TRAPS (type))
          return expand_vector_addition (gsi, do_binop, do_plus_minus, type,
                                         gimple_assign_rhs1 (assign),
                                         gimple_assign_rhs2 (assign), code);
        break;

      case NEGATE_EXPR:
        if (!TYPE_OVERFLOW_TRAPS (type))
          return expand_vector_addition (gsi, do_unop, do_negate, type,
                                         gimple_assign_rhs1 (assign),
                                         NULL_TREE, code);
        break;

      case BIT_AND_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
        return expand_vector_parallel (gsi, do_binop, type,
                                       gimple_assign_rhs1 (assign),
                                       gimple_assign_rhs2 (assign), code);

      case BIT_NOT_EXPR:
        return expand_vector_parallel (gsi, do_unop, type,
                                       gimple_assign_rhs1 (assign),
                                       NULL_TREE, code);

      case EQ_EXPR:
      case NE_EXPR:
      case GT_EXPR:
      case LT_EXPR:
      case GE_EXPR:
      case LE_EXPR:
      case UNEQ_EXPR:
      case UNGT_EXPR:
      case UNLT_EXPR:
      case UNGE_EXPR:
      case UNLE_EXPR:
      case LTGT_EXPR:
      case ORDERED_EXPR:
      case UNORDERED_EXPR:
        {
          tree rhs1 = gimple_assign_rhs1 (assign);
          tree rhs2 = gimple_assign_rhs2 (assign);

          return expand_vector_comparison (gsi, type, rhs1, rhs2, code);
        }

      default:
        break;
      }

  if (TREE_CODE_CLASS (code) == tcc_unary)
    return expand_vector_piecewise (gsi, do_unop, type, compute_type,
                                    gimple_assign_rhs1 (assign),
                                    NULL_TREE, code);
  else
    return expand_vector_piecewise (gsi, do_binop, type, compute_type,
                                    gimple_assign_rhs1 (assign),
                                    gimple_assign_rhs2 (assign), code);
}
\f
/* Return a type for the widest vector mode whose components are of mode
   INNER_MODE and which is supported by optab OP, or NULL_TREE if none
   is found.  SATP is true for saturating fixed-point types.  */

static tree
type_for_widest_vector_mode (enum machine_mode inner_mode, optab op, int satp)
{
  enum machine_mode best_mode = VOIDmode, mode;
  int best_nunits = 0;

  if (SCALAR_FLOAT_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (inner_mode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
    if (GET_MODE_INNER (mode) == inner_mode
        && GET_MODE_NUNITS (mode) > best_nunits
        && optab_handler (op, mode) != CODE_FOR_nothing)
      best_mode = mode, best_nunits = GET_MODE_NUNITS (mode);

  if (best_mode == VOIDmode)
    return NULL_TREE;
  else
    {
      /* For fixed-point modes, we need to pass satp as the 2nd parameter.  */
      if (ALL_FIXED_POINT_MODE_P (best_mode))
        return lang_hooks.types.type_for_mode (best_mode, satp);

      return lang_hooks.types.type_for_mode (best_mode, 1);
    }
}


/* Build a reference to the element of the vector VECT.  Function
   returns either the element itself, a BIT_FIELD_REF, or an
   ARRAY_REF expression.

   GSI is required to insert temporary variables while building a
   reference to the element of the vector VECT.

   PTMPVEC is a pointer to a temporary variable for caching
   purposes.  If PTMPVEC is NULL, a new temporary variable will
   be created.  */
static tree
vector_element (gimple_stmt_iterator *gsi, tree vect, tree idx, tree *ptmpvec)
{
  tree type;
  gimple asgn;
  tree tmpvec;
  tree arraytype;
  bool need_asgn = true;

  gcc_assert (TREE_CODE (TREE_TYPE (vect)) == VECTOR_TYPE);

  type = TREE_TYPE (vect);
  if (TREE_CODE (idx) == INTEGER_CST)
    {
      unsigned HOST_WIDE_INT index;

      if (!host_integerp (idx, 1)
          || (index = tree_low_cst (idx, 1)) > TYPE_VECTOR_SUBPARTS (type) - 1)
        return error_mark_node;

      if (TREE_CODE (vect) == VECTOR_CST)
        {
          unsigned i;
          tree vals = TREE_VECTOR_CST_ELTS (vect);
          for (i = 0; vals; vals = TREE_CHAIN (vals), ++i)
            if (i == index)
              return TREE_VALUE (vals);
          return error_mark_node;
        }
      else if (TREE_CODE (vect) == CONSTRUCTOR)
        {
          unsigned i;
          VEC (constructor_elt, gc) *vals = CONSTRUCTOR_ELTS (vect);
          constructor_elt *elt;

          for (i = 0; VEC_iterate (constructor_elt, vals, i, elt); i++)
            if (operand_equal_p (elt->index, idx, 0))
              return elt->value;
          return fold_convert (TREE_TYPE (type), integer_zero_node);
        }
      else if (TREE_CODE (vect) == SSA_NAME)
        {
          tree size = TYPE_SIZE (TREE_TYPE (type));
          tree pos = fold_build2 (MULT_EXPR, TREE_TYPE (idx), idx, size);
          return fold_build3 (BIT_FIELD_REF, TREE_TYPE (type), vect, size, pos);
        }
      else
        return error_mark_node;
    }

  if (!ptmpvec)
    tmpvec = create_tmp_var (TREE_TYPE (vect), "vectmp");
  else if (!*ptmpvec)
    tmpvec = *ptmpvec = create_tmp_var (TREE_TYPE (vect), "vectmp");
  else
    {
      tmpvec = *ptmpvec;
      need_asgn = false;
    }

  if (need_asgn)
    {
      TREE_ADDRESSABLE (tmpvec) = 1;
      asgn = gimple_build_assign (tmpvec, vect);
      gsi_insert_before (gsi, asgn, GSI_SAME_STMT);
    }

  arraytype = build_array_type_nelts (TREE_TYPE (type),
                                      TYPE_VECTOR_SUBPARTS (TREE_TYPE (vect)));

  return build4 (ARRAY_REF, TREE_TYPE (type),
                 build1 (VIEW_CONVERT_EXPR, arraytype, tmpvec),
                 idx, NULL_TREE, NULL_TREE);
}

/* Check if VEC_SHUFFLE_EXPR within the given setting is supported
   by hardware, or lower it piecewise.  Return false when the
   expression must be replaced with a trap (the caller emits a call
   to __builtin_trap), true otherwise.

   When VEC_SHUFFLE_EXPR has the same first and second operands,
   VEC_SHUFFLE_EXPR <v0, v0, mask>, the lowered version is
     {v0[mask[0]], v0[mask[1]], ...}
   MASK and V0 must have the same number of elements.

   Otherwise VEC_SHUFFLE_EXPR <v0, v1, mask> is lowered to
     {mask[0] < len(v0) ? v0[mask[0]] : v1[mask[0] - len(v0)], ...}
   V0 and V1 must have the same type.  MASK, V0 and V1 must have the
   same number of elements.  */
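/* For instance (illustrative, not from the original source):
   VEC_SHUFFLE_EXPR <{1, 2, 3, 4}, {5, 6, 7, 8}, {0, 5, 2, 7}>
   selects v0[0], v1[1], v0[2] and v1[3], yielding {1, 6, 3, 8}.  */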
static bool
lower_vec_shuffle (gimple_stmt_iterator *gsi, location_t loc)
{
  gimple stmt = gsi_stmt (*gsi);
  tree mask = gimple_assign_rhs3 (stmt);
  tree vec0 = gimple_assign_rhs1 (stmt);
  tree vec1 = gimple_assign_rhs2 (stmt);
  unsigned els = TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask));
  tree type0 = TREE_TYPE (TREE_TYPE (vec0));
  VEC(constructor_elt,gc) *v = NULL;
  tree vectype, constr;
  tree vec0tmp = NULL_TREE, masktmp = NULL_TREE;

  if (expand_vec_shuffle_expr_p (TYPE_MODE (TREE_TYPE (vec0)), vec0, vec1,
                                 mask))
    {
      tree t;

      t = gimplify_build3 (gsi, VEC_SHUFFLE_EXPR, TREE_TYPE (vec0),
                           vec0, vec1, mask);
      gimple_assign_set_rhs_from_tree (gsi, t);
      /* Statement should be updated by callee.  */
      return true;
    }

  if (operand_equal_p (vec0, vec1, 0))
    {
      unsigned i;

      v = VEC_alloc (constructor_elt, gc, els);
      for (i = 0; i < els; i++)
        {
          tree idxval, vecel, t;

          idxval = vector_element (gsi, mask, size_int (i), &masktmp);
          if (idxval == error_mark_node)
            {
              if (warning_at (loc, 0, "Invalid shuffling mask index %i", i))
                inform (loc, "if this code is reached the program will abort");
              return false;
            }

          vecel = vector_element (gsi, vec0, idxval, &vec0tmp);
          if (vecel == error_mark_node)
            {
              if (warning_at (loc, 0, "Invalid shuffling arguments"))
                inform (loc, "if this code is reached the program will abort");
              return false;
            }

          t = force_gimple_operand_gsi (gsi, vecel, true,
                                        NULL_TREE, true, GSI_SAME_STMT);
          CONSTRUCTOR_APPEND_ELT (v, size_int (i), t);
        }
    }
  else
    {
      unsigned i;
      tree var = create_tmp_var (type0, "vecel");
      tree vec1tmp = NULL_TREE;

      v = VEC_alloc (constructor_elt, gc, els);
      for (i = 0; i < els; i++)
        {
          tree idxval, idx1val, cond, elval0, elval1, condexpr, t, ssatmp;
          tree vec0el, vec1el;
          gimple asgn;

          idxval = vector_element (gsi, mask, size_int (i), &masktmp);
          if (idxval == error_mark_node)
            {
              if (warning_at (loc, 0, "Invalid shuffling mask index %i", i))
                inform (loc, "if this code is reached the program will abort");
              return false;
            }

          if (TREE_CODE (idxval) == INTEGER_CST)
            {
              if (tree_int_cst_lt (idxval, size_int (els)))
                {
                  vec0el = vector_element (gsi, vec0, idxval, &vec0tmp);
                  t = force_gimple_operand_gsi (gsi, vec0el,
                                                true, NULL_TREE, true,
                                                GSI_SAME_STMT);
                }
              else if (tree_int_cst_lt (idxval, size_int (2 * els)))
                {
                  idx1val = fold_build2 (MINUS_EXPR, TREE_TYPE (idxval),
                                         idxval,
                                         build_int_cst (TREE_TYPE (idxval),
                                                        els));

                  vec1el = vector_element (gsi, vec1, idx1val, &vec1tmp);
                  t = force_gimple_operand_gsi (gsi, vec1el, true,
                                                NULL_TREE, true,
                                                GSI_SAME_STMT);
                }
              else
                {
                  if (warning_at (loc, 0, "Invalid shuffling mask index %i",
                                  i))
                    inform (loc, "if this code is reached the "
                            "program will abort");
                  return false;
                }
            }
          else
            {
              idx1val = fold_build2 (MINUS_EXPR, TREE_TYPE (idxval),
                                     idxval,
                                     build_int_cst (TREE_TYPE (idxval), els));
              idx1val = force_gimple_operand_gsi (gsi, idx1val,
                                                  true, NULL_TREE, true,
                                                  GSI_SAME_STMT);
              cond = fold_build2 (GT_EXPR, boolean_type_node,
                                  idxval, fold_convert (type0,
                                                        size_int (els - 1)));

              vec0el = vector_element (gsi, vec0, idxval, &vec0tmp);
              if (vec0el == error_mark_node)
                {
                  if (warning_at (loc, 0, "Invalid shuffling arguments"))
                    inform (loc, "if this code is reached the "
                            "program will abort");
                  return false;
                }

              elval0 = force_gimple_operand_gsi (gsi, vec0el,
                                                 true, NULL_TREE, true,
                                                 GSI_SAME_STMT);

              vec1el = vector_element (gsi, vec1, idx1val, &vec1tmp);
              if (vec1el == error_mark_node)
                {
                  if (warning_at (loc, 0, "Invalid shuffling arguments"))
                    inform (loc, "if this code is reached the "
                            "program will abort");
                  return false;
                }

              elval1 = force_gimple_operand_gsi (gsi, vec1el,
                                                 true, NULL_TREE, true,
                                                 GSI_SAME_STMT);

              condexpr = fold_build3 (COND_EXPR, type0, cond,
                                      elval1, elval0);

              t = force_gimple_operand_gsi (gsi, condexpr, true,
                                            NULL_TREE, true, GSI_SAME_STMT);
            }

          asgn = gimple_build_assign (var, t);
          ssatmp = make_ssa_name (var, asgn);
          gimple_assign_set_lhs (asgn, ssatmp);
          gsi_insert_before (gsi, asgn, GSI_SAME_STMT);
          CONSTRUCTOR_APPEND_ELT (v, size_int (i), ssatmp);
        }
    }

  vectype = build_vector_type (type0, els);
  constr = build_constructor (vectype, v);
  gimple_assign_set_rhs_from_tree (gsi, constr);
  /* Statement should be updated by callee.  */
  return true;
}

/* Process one statement.  If we identify a vector operation, expand it.  */

static void
expand_vector_operations_1 (gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree lhs, rhs1, rhs2 = NULL, type, compute_type;
  enum tree_code code;
  enum machine_mode compute_mode;
  optab op = NULL;
  enum gimple_rhs_class rhs_class;
  tree new_rhs;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  code = gimple_assign_rhs_code (stmt);
  rhs_class = get_gimple_rhs_class (code);
  lhs = gimple_assign_lhs (stmt);

  if (code == VEC_SHUFFLE_EXPR)
    {
      if (!lower_vec_shuffle (gsi, gimple_location (stmt)))
        {
          gimple new_stmt;
          tree vec0;

          vec0 = gimple_assign_rhs1 (stmt);
          new_stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
          gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
          split_block (gimple_bb (new_stmt), new_stmt);
          new_stmt = gimple_build_assign (gimple_assign_lhs (stmt), vec0);
          gsi_replace (gsi, new_stmt, false);
        }

      gimple_set_modified (gsi_stmt (*gsi), true);
      update_stmt (gsi_stmt (*gsi));
    }

  if (rhs_class != GIMPLE_UNARY_RHS && rhs_class != GIMPLE_BINARY_RHS)
    return;

  rhs1 = gimple_assign_rhs1 (stmt);
  type = gimple_expr_type (stmt);
  if (rhs_class == GIMPLE_BINARY_RHS)
    rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (type) != VECTOR_TYPE)
    return;

  if (code == NOP_EXPR
      || code == FLOAT_EXPR
      || code == FIX_TRUNC_EXPR
      || code == VIEW_CONVERT_EXPR)
    return;

  gcc_assert (code != CONVERT_EXPR);

  /* The signedness is determined from the input argument.  */
  if (code == VEC_UNPACK_FLOAT_HI_EXPR
      || code == VEC_UNPACK_FLOAT_LO_EXPR)
    type = TREE_TYPE (rhs1);

  /* Choose between a vector shift/rotate by vector and a vector
     shift/rotate by scalar.  */
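  /* E.g. (illustrative, not from the original source): "v >> 2" can use
     a vector/scalar shift pattern when the target has one, whereas
     "v >> {1, 2, 3, 4}" needs a vector/vector shift pattern.  */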
  if (code == LSHIFT_EXPR
      || code == RSHIFT_EXPR
      || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    {
      bool vector_scalar_shift;
      op = optab_for_tree_code (code, type, optab_scalar);

      /* Vector/scalar shift is supported.  */
      vector_scalar_shift = (op && (optab_handler (op, TYPE_MODE (type))
                                    != CODE_FOR_nothing));

      /* If the 2nd argument is a vector, we need a vector/vector shift,
         unless all the elements in the second vector are the same.  */
      if (VECTOR_MODE_P (TYPE_MODE (TREE_TYPE (rhs2))))
        {
          tree first;
          gimple def_stmt;

          /* Check whether we have vector <op> {x,x,x,x} where x
             could be a scalar variable or a constant.  Transform
             vector <op> {x,x,x,x} ==> vector <op> scalar.  */
          if (vector_scalar_shift
              && ((TREE_CODE (rhs2) == VECTOR_CST
                   && (first = uniform_vector_p (rhs2)) != NULL_TREE)
                  || (TREE_CODE (rhs2) == SSA_NAME
                      && (def_stmt = SSA_NAME_DEF_STMT (rhs2))
                      && gimple_assign_single_p (def_stmt)
                      && (first = uniform_vector_p
                          (gimple_assign_rhs1 (def_stmt))) != NULL_TREE)))
            {
              gimple_assign_set_rhs2 (stmt, first);
              update_stmt (stmt);
              rhs2 = first;
            }
          else
            op = optab_for_tree_code (code, type, optab_vector);
        }

      /* The 2nd argument is a scalar.  If we do not have a vector/scalar
         shift, see if we have a vector/vector shift and, if so, replicate
         the scalar into a vector.  */
      else if (!vector_scalar_shift)
        {
          op = optab_for_tree_code (code, type, optab_vector);

          if (op && (optab_handler (op, TYPE_MODE (type))
                     != CODE_FOR_nothing))
            {
              /* Transform vector <op> scalar => vector <op> {x,x,x,x}.  */
              int n_parts = TYPE_VECTOR_SUBPARTS (type);
              int part_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (type)), 1);
              tree part_type = lang_hooks.types.type_for_size (part_size, 1);
              tree vect_type = build_vector_type (part_type, n_parts);

              rhs2 = fold_convert (part_type, rhs2);
              rhs2 = build_vector_from_val (vect_type, rhs2);
              gimple_assign_set_rhs2 (stmt, rhs2);
              update_stmt (stmt);
            }
        }
    }
  else
    op = optab_for_tree_code (code, type, optab_default);

  /* For widening/narrowing vector operations, the relevant type is that
     of the arguments, not of the widened result.  VEC_UNPACK_FLOAT_*_EXPR
     was handled the same way above.  */
  if (code == WIDEN_SUM_EXPR
      || code == VEC_WIDEN_MULT_HI_EXPR
      || code == VEC_WIDEN_MULT_LO_EXPR
      || code == VEC_UNPACK_HI_EXPR
      || code == VEC_UNPACK_LO_EXPR
      || code == VEC_PACK_TRUNC_EXPR
      || code == VEC_PACK_SAT_EXPR
      || code == VEC_PACK_FIX_TRUNC_EXPR)
    type = TREE_TYPE (rhs1);

  /* Optabs will try converting a negation into a subtraction, so
     look for it as well.  TODO: negation of floating-point vectors
     might be turned into an exclusive OR toggling the sign bit.  */
  if (op == NULL
      && code == NEGATE_EXPR
      && INTEGRAL_TYPE_P (TREE_TYPE (type)))
    op = optab_for_tree_code (MINUS_EXPR, type, optab_default);

  /* For very wide vectors, try using a smaller vector mode.  */
  compute_type = type;
  if (TYPE_MODE (type) == BLKmode && op)
    {
      tree vector_compute_type
        = type_for_widest_vector_mode (TYPE_MODE (TREE_TYPE (type)), op,
                                       TYPE_SATURATING (TREE_TYPE (type)));
      if (vector_compute_type != NULL_TREE
          && (TYPE_VECTOR_SUBPARTS (vector_compute_type)
              < TYPE_VECTOR_SUBPARTS (compute_type)))
        compute_type = vector_compute_type;
    }

  /* If we are breaking a BLKmode vector into smaller pieces,
     type_for_widest_vector_mode has already looked into the optab,
     so skip these checks.  */
  if (compute_type == type)
    {
      compute_mode = TYPE_MODE (compute_type);
      if ((GET_MODE_CLASS (compute_mode) == MODE_VECTOR_INT
           || GET_MODE_CLASS (compute_mode) == MODE_VECTOR_FLOAT
           || GET_MODE_CLASS (compute_mode) == MODE_VECTOR_FRACT
           || GET_MODE_CLASS (compute_mode) == MODE_VECTOR_UFRACT
           || GET_MODE_CLASS (compute_mode) == MODE_VECTOR_ACCUM
           || GET_MODE_CLASS (compute_mode) == MODE_VECTOR_UACCUM)
          && op != NULL
          && optab_handler (op, compute_mode) != CODE_FOR_nothing)
        return;
      else
        /* There is no operation in hardware, so fall back to scalars.  */
        compute_type = TREE_TYPE (type);
    }

  gcc_assert (code != VEC_LSHIFT_EXPR && code != VEC_RSHIFT_EXPR);
  new_rhs = expand_vector_operation (gsi, type, compute_type, stmt, code);

  /* Leave expression untouched for later expansion.  */
  if (new_rhs == NULL_TREE)
    return;

  if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (new_rhs)))
    new_rhs = gimplify_build1 (gsi, VIEW_CONVERT_EXPR, TREE_TYPE (lhs),
                               new_rhs);

  /* NOTE: We should avoid using gimple_assign_set_rhs_from_tree.  One
     way to do it is change expand_vector_operation and its callees to
     return a tree_code, RHS1 and RHS2 instead of a tree.  */
  gimple_assign_set_rhs_from_tree (gsi, new_rhs);
  update_stmt (gsi_stmt (*gsi));
}
\f
/* Use this to lower vector operations introduced by the vectorizer,
   if it may need the bit-twiddling tricks implemented in this file.  */

static bool
gate_expand_vector_operations_ssa (void)
{
  return optimize == 0;
}

static unsigned int
expand_vector_operations (void)
{
  gimple_stmt_iterator gsi;
  basic_block bb;
  bool cfg_changed = false;

  FOR_EACH_BB (bb)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          expand_vector_operations_1 (&gsi);
          /* ??? If we do not cleanup EH then we will ICE in
             verification.  But in reality we have created wrong-code
             as we did not properly transition EH info and edges to
             the piecewise computations.  */
          if (maybe_clean_eh_stmt (gsi_stmt (gsi))
              && gimple_purge_dead_eh_edges (bb))
            cfg_changed = true;
        }
    }

  return cfg_changed ? TODO_cleanup_cfg : 0;
}

struct gimple_opt_pass pass_lower_vector =
{
 {
  GIMPLE_PASS,
  "veclower",                           /* name */
  gate_expand_vector_operations_ssa,    /* gate */
  expand_vector_operations,             /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_cfg,                             /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_update_ssa                       /* todo_flags_finish */
    | TODO_verify_ssa
    | TODO_verify_stmts | TODO_verify_flow
    | TODO_cleanup_cfg
 }
};

struct gimple_opt_pass pass_lower_vector_ssa =
{
 {
  GIMPLE_PASS,
  "veclower2",                          /* name */
  0,                                    /* gate */
  expand_vector_operations,             /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_cfg,                             /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_update_ssa                       /* todo_flags_finish */
    | TODO_verify_ssa
    | TODO_verify_stmts | TODO_verify_flow
    | TODO_cleanup_cfg
 }
};

#include "gt-tree-vect-generic.h"