gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "backend.h"
27 #include "tree.h"
28 #include "gimple.h"
29 #include "rtl.h"
30 #include "ssa.h"
31 #include "alias.h"
32 #include "fold-const.h"
33 #include "stor-layout.h"
34 #include "target.h"
35 #include "gimple-pretty-print.h"
36 #include "internal-fn.h"
37 #include "tree-eh.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-cfg.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "cfgloop.h"
44 #include "tree-ssa-loop.h"
45 #include "tree-scalar-evolution.h"
46 #include "flags.h"
47 #include "insn-config.h"
48 #include "expmed.h"
49 #include "dojump.h"
50 #include "explow.h"
51 #include "calls.h"
52 #include "emit-rtl.h"
53 #include "varasm.h"
54 #include "stmt.h"
55 #include "expr.h"
56 #include "recog.h" /* FIXME: for insn_data */
57 #include "insn-codes.h"
58 #include "optabs.h"
59 #include "diagnostic-core.h"
60 #include "tree-vectorizer.h"
61 #include "cgraph.h"
62 #include "builtins.h"
63
64 /* For lang_hooks.types.type_for_mode. */
65 #include "langhooks.h"
66
67 /* Return the vectorized type for the given statement. */
68
69 tree
70 stmt_vectype (struct _stmt_vec_info *stmt_info)
71 {
72 return STMT_VINFO_VECTYPE (stmt_info);
73 }
74
75 /* Return TRUE iff the given statement is in an inner loop relative to
76 the loop being vectorized. */
77 bool
78 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
79 {
80 gimple stmt = STMT_VINFO_STMT (stmt_info);
81 basic_block bb = gimple_bb (stmt);
82 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
83 struct loop* loop;
84
85 if (!loop_vinfo)
86 return false;
87
88 loop = LOOP_VINFO_LOOP (loop_vinfo);
89
90 return (bb->loop_father == loop->inner);
91 }
92
93 /* Record the cost of a statement, either by directly informing the
94 target model or by saving it in a vector for later processing.
95 Return a preliminary estimate of the statement's cost. */
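/* As a rough illustration (numbers invented, not taken from any real target):
   with COUNT == 2 and builtin_vectorization_cost reporting 1 per statement,
   the deferred path below returns a preliminary estimate of 2, while the
   immediate path returns whatever the target's add_stmt_cost hook decides.  */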
96
97 unsigned
98 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
99 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
100 int misalign, enum vect_cost_model_location where)
101 {
102 if (body_cost_vec)
103 {
104 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
105 add_stmt_info_to_vec (body_cost_vec, count, kind,
106 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
107 misalign);
108 return (unsigned)
109 (builtin_vectorization_cost (kind, vectype, misalign) * count);
110
111 }
112 else
113 {
114 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
115 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
116 void *target_cost_data;
117
118 if (loop_vinfo)
119 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
120 else
121 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
122
123 return add_stmt_cost (target_cost_data, count, kind, stmt_info,
124 misalign, where);
125 }
126 }
127
128 /* Return a variable of type ELEM_TYPE[NELEMS]. */
129
130 static tree
131 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
132 {
133 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
134 "vect_array");
135 }
136
137 /* ARRAY is an array of vectors created by create_vector_array.
138 Return an SSA_NAME for the vector in index N. The reference
139 is part of the vectorization of STMT and the vector is associated
140 with scalar destination SCALAR_DEST. */
141
142 static tree
143 read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
144 tree array, unsigned HOST_WIDE_INT n)
145 {
146 tree vect_type, vect, vect_name, array_ref;
147 gimple new_stmt;
148
149 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
150 vect_type = TREE_TYPE (TREE_TYPE (array));
151 vect = vect_create_destination_var (scalar_dest, vect_type);
152 array_ref = build4 (ARRAY_REF, vect_type, array,
153 build_int_cst (size_type_node, n),
154 NULL_TREE, NULL_TREE);
155
156 new_stmt = gimple_build_assign (vect, array_ref);
157 vect_name = make_ssa_name (vect, new_stmt);
158 gimple_assign_set_lhs (new_stmt, vect_name);
159 vect_finish_stmt_generation (stmt, new_stmt, gsi);
160
161 return vect_name;
162 }
163
164 /* ARRAY is an array of vectors created by create_vector_array.
165 Emit code to store SSA_NAME VECT in index N of the array.
166 The store is part of the vectorization of STMT. */
167
168 static void
169 write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
170 tree array, unsigned HOST_WIDE_INT n)
171 {
172 tree array_ref;
173 gimple new_stmt;
174
175 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
176 build_int_cst (size_type_node, n),
177 NULL_TREE, NULL_TREE);
178
179 new_stmt = gimple_build_assign (array_ref, vect);
180 vect_finish_stmt_generation (stmt, new_stmt, gsi);
181 }
182
183 /* PTR is a pointer to an array of type TYPE. Return a representation
184 of *PTR. The memory reference replaces those in FIRST_DR
185 (and its group). */
186
187 static tree
188 create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
189 {
190 tree mem_ref, alias_ptr_type;
191
192 alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
193 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
194 /* Arrays have the same alignment as their type. */
195 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
196 return mem_ref;
197 }
198
199 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
200
201 /* Function vect_mark_relevant.
202
203 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
204
205 static void
206 vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
207 enum vect_relevant relevant, bool live_p,
208 bool used_in_pattern)
209 {
210 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
211 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
212 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
213 gimple pattern_stmt;
214
215 if (dump_enabled_p ())
216 dump_printf_loc (MSG_NOTE, vect_location,
217 "mark relevant %d, live %d.\n", relevant, live_p);
218
219 /* If this stmt is an original stmt in a pattern, we might need to mark its
220 related pattern stmt instead of the original stmt. However, such stmts
221 may have their own uses that are not in any pattern; in such cases the
222 stmt itself should be marked. */
223 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
224 {
225 bool found = false;
226 if (!used_in_pattern)
227 {
228 imm_use_iterator imm_iter;
229 use_operand_p use_p;
230 gimple use_stmt;
231 tree lhs;
232 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
233 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
234
235 if (is_gimple_assign (stmt))
236 lhs = gimple_assign_lhs (stmt);
237 else
238 lhs = gimple_call_lhs (stmt);
239
240 /* This use is outside the pattern. If LHS has other uses that are
241 pattern uses, we should mark the stmt itself, and not the pattern
242 stmt. */
243 if (lhs && TREE_CODE (lhs) == SSA_NAME)
244 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
245 {
246 if (is_gimple_debug (USE_STMT (use_p)))
247 continue;
248 use_stmt = USE_STMT (use_p);
249
250 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
251 continue;
252
253 if (vinfo_for_stmt (use_stmt)
254 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
255 {
256 found = true;
257 break;
258 }
259 }
260 }
261
262 if (!found)
263 {
264 /* This is the last stmt in a sequence that was detected as a
265 pattern that can potentially be vectorized. Don't mark the stmt
266 as relevant/live because it's not going to be vectorized.
267 Instead mark the pattern-stmt that replaces it. */
268
269 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
270
271 if (dump_enabled_p ())
272 dump_printf_loc (MSG_NOTE, vect_location,
273 "last stmt in pattern. don't mark"
274 " relevant/live.\n");
275 stmt_info = vinfo_for_stmt (pattern_stmt);
276 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
277 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
278 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
279 stmt = pattern_stmt;
280 }
281 }
282
283 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
284 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
285 STMT_VINFO_RELEVANT (stmt_info) = relevant;
286
287 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
288 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
289 {
290 if (dump_enabled_p ())
291 dump_printf_loc (MSG_NOTE, vect_location,
292 "already marked relevant/live.\n");
293 return;
294 }
295
296 worklist->safe_push (stmt);
297 }
298
299
300 /* Function vect_stmt_relevant_p.
301
302 Return true if STMT, in the loop represented by LOOP_VINFO, is
303 "relevant for vectorization".
304
305 A stmt is considered "relevant for vectorization" if:
306 - it has uses outside the loop.
307 - it has vdefs (it alters memory).
308 - it is a control stmt in the loop (other than the exit condition).
309
310 CHECKME: what other side effects would the vectorizer allow? */
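/* Illustrative example (not from the sources): in

       for (i = 0; i < n; i++)
         a[i] = b[i] + x;

   the store to a[i] has a vdef and is marked relevant here; the computation
   of b[i] + x only becomes relevant later, transitively, when the worklist
   built by vect_mark_stmts_to_be_vectorized is processed.  */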
311
312 static bool
313 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
314 enum vect_relevant *relevant, bool *live_p)
315 {
316 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
317 ssa_op_iter op_iter;
318 imm_use_iterator imm_iter;
319 use_operand_p use_p;
320 def_operand_p def_p;
321
322 *relevant = vect_unused_in_scope;
323 *live_p = false;
324
325 /* cond stmt other than loop exit cond. */
326 if (is_ctrl_stmt (stmt)
327 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
328 != loop_exit_ctrl_vec_info_type)
329 *relevant = vect_used_in_scope;
330
331 /* changing memory. */
332 if (gimple_code (stmt) != GIMPLE_PHI)
333 if (gimple_vdef (stmt)
334 && !gimple_clobber_p (stmt))
335 {
336 if (dump_enabled_p ())
337 dump_printf_loc (MSG_NOTE, vect_location,
338 "vec_stmt_relevant_p: stmt has vdefs.\n");
339 *relevant = vect_used_in_scope;
340 }
341
342 /* uses outside the loop. */
343 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
344 {
345 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
346 {
347 basic_block bb = gimple_bb (USE_STMT (use_p));
348 if (!flow_bb_inside_loop_p (loop, bb))
349 {
350 if (dump_enabled_p ())
351 dump_printf_loc (MSG_NOTE, vect_location,
352 "vec_stmt_relevant_p: used out of loop.\n");
353
354 if (is_gimple_debug (USE_STMT (use_p)))
355 continue;
356
357 /* We expect all such uses to be in the loop exit phis
358 (because of loop-closed SSA form).  */
359 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
360 gcc_assert (bb == single_exit (loop)->dest);
361
362 *live_p = true;
363 }
364 }
365 }
366
367 return (*live_p || *relevant);
368 }
369
370
371 /* Function exist_non_indexing_operands_for_use_p
372
373 USE is one of the uses attached to STMT. Check if USE is
374 used in STMT for anything other than indexing an array. */
375
376 static bool
377 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
378 {
379 tree operand;
380 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
381
382 /* USE corresponds to some operand in STMT. If there is no data
383 reference in STMT, then any operand that corresponds to USE
384 is not indexing an array. */
385 if (!STMT_VINFO_DATA_REF (stmt_info))
386 return true;
387
388 /* STMT has a data_ref. FORNOW this means that it is one of
389 the following forms:
390 -1- ARRAY_REF = var
391 -2- var = ARRAY_REF
392 (This should have been verified in analyze_data_refs).
393
394 'var' in the second case corresponds to a def, not a use,
395 so USE cannot correspond to any operands that are not used
396 for array indexing.
397
398 Therefore, all we need to check is if STMT falls into the
399 first case, and whether var corresponds to USE. */
400
401 if (!gimple_assign_copy_p (stmt))
402 {
403 if (is_gimple_call (stmt)
404 && gimple_call_internal_p (stmt))
405 switch (gimple_call_internal_fn (stmt))
406 {
407 case IFN_MASK_STORE:
408 operand = gimple_call_arg (stmt, 3);
409 if (operand == use)
410 return true;
411 /* FALLTHRU */
412 case IFN_MASK_LOAD:
413 operand = gimple_call_arg (stmt, 2);
414 if (operand == use)
415 return true;
416 break;
417 default:
418 break;
419 }
420 return false;
421 }
422
423 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
424 return false;
425 operand = gimple_assign_rhs1 (stmt);
426 if (TREE_CODE (operand) != SSA_NAME)
427 return false;
428
429 if (operand == use)
430 return true;
431
432 return false;
433 }
434
435
436 /*
437 Function process_use.
438
439 Inputs:
440 - a USE in STMT in a loop represented by LOOP_VINFO
441 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
442 that defined USE. This is done by calling mark_relevant and passing it
443 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
444 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
445 be performed.
446
447 Outputs:
448 Generally, LIVE_P and RELEVANT are used to define the liveness and
449 relevance info of the DEF_STMT of this USE:
450 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
451 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
452 Exceptions:
453 - case 1: If USE is used only for address computations (e.g. array indexing),
454 which does not need to be directly vectorized, then the liveness/relevance
455 of the respective DEF_STMT is left unchanged.
456 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
457 skip DEF_STMT because it has already been processed.
458 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
459 be modified accordingly.
460
461 Return true if everything is as expected. Return false otherwise. */
462
463 static bool
464 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
465 enum vect_relevant relevant, vec<gimple> *worklist,
466 bool force)
467 {
468 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
469 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
470 stmt_vec_info dstmt_vinfo;
471 basic_block bb, def_bb;
472 tree def;
473 gimple def_stmt;
474 enum vect_def_type dt;
475
476 /* case 1: we are only interested in uses that need to be vectorized. Uses
477 that are used for address computation are not considered relevant. */
478 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
479 return true;
480
481 if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
482 {
483 if (dump_enabled_p ())
484 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
485 "not vectorized: unsupported use in stmt.\n");
486 return false;
487 }
488
489 if (!def_stmt || gimple_nop_p (def_stmt))
490 return true;
491
492 def_bb = gimple_bb (def_stmt);
493 if (!flow_bb_inside_loop_p (loop, def_bb))
494 {
495 if (dump_enabled_p ())
496 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
497 return true;
498 }
499
500 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
501 DEF_STMT must have already been processed, because this should be the
502 only way that STMT, which is a reduction-phi, was put in the worklist,
503 as there should be no other uses for DEF_STMT in the loop. So we just
504 check that everything is as expected, and we are done. */
505 dstmt_vinfo = vinfo_for_stmt (def_stmt);
506 bb = gimple_bb (stmt);
507 if (gimple_code (stmt) == GIMPLE_PHI
508 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
509 && gimple_code (def_stmt) != GIMPLE_PHI
510 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
511 && bb->loop_father == def_bb->loop_father)
512 {
513 if (dump_enabled_p ())
514 dump_printf_loc (MSG_NOTE, vect_location,
515 "reduc-stmt defining reduc-phi in the same nest.\n");
516 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
517 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
518 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
519 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
520 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
521 return true;
522 }
523
524 /* case 3a: outer-loop stmt defining an inner-loop stmt:
525 outer-loop-header-bb:
526 d = def_stmt
527 inner-loop:
528 stmt # use (d)
529 outer-loop-tail-bb:
530 ... */
531 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
532 {
533 if (dump_enabled_p ())
534 dump_printf_loc (MSG_NOTE, vect_location,
535 "outer-loop def-stmt defining inner-loop stmt.\n");
536
537 switch (relevant)
538 {
539 case vect_unused_in_scope:
540 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
541 vect_used_in_scope : vect_unused_in_scope;
542 break;
543
544 case vect_used_in_outer_by_reduction:
545 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
546 relevant = vect_used_by_reduction;
547 break;
548
549 case vect_used_in_outer:
550 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
551 relevant = vect_used_in_scope;
552 break;
553
554 case vect_used_in_scope:
555 break;
556
557 default:
558 gcc_unreachable ();
559 }
560 }
561
562 /* case 3b: inner-loop stmt defining an outer-loop stmt:
563 outer-loop-header-bb:
564 ...
565 inner-loop:
566 d = def_stmt
567 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
568 stmt # use (d) */
569 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
570 {
571 if (dump_enabled_p ())
572 dump_printf_loc (MSG_NOTE, vect_location,
573 "inner-loop def-stmt defining outer-loop stmt.\n");
574
575 switch (relevant)
576 {
577 case vect_unused_in_scope:
578 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
579 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
580 vect_used_in_outer_by_reduction : vect_unused_in_scope;
581 break;
582
583 case vect_used_by_reduction:
584 relevant = vect_used_in_outer_by_reduction;
585 break;
586
587 case vect_used_in_scope:
588 relevant = vect_used_in_outer;
589 break;
590
591 default:
592 gcc_unreachable ();
593 }
594 }
595
596 vect_mark_relevant (worklist, def_stmt, relevant, live_p,
597 is_pattern_stmt_p (stmt_vinfo));
598 return true;
599 }
600
601
602 /* Function vect_mark_stmts_to_be_vectorized.
603
604 Not all stmts in the loop need to be vectorized. For example:
605
606 for i...
607 for j...
608 1. T0 = i + j
609 2. T1 = a[T0]
610
611 3. j = j + 1
612
613 Stmts 1 and 3 do not need to be vectorized, because loop control and
614 addressing of vectorized data-refs are handled differently.
615
616 This pass detects such stmts. */
617
618 bool
619 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
620 {
621 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
622 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
623 unsigned int nbbs = loop->num_nodes;
624 gimple_stmt_iterator si;
625 gimple stmt;
626 unsigned int i;
627 stmt_vec_info stmt_vinfo;
628 basic_block bb;
629 gimple phi;
630 bool live_p;
631 enum vect_relevant relevant, tmp_relevant;
632 enum vect_def_type def_type;
633
634 if (dump_enabled_p ())
635 dump_printf_loc (MSG_NOTE, vect_location,
636 "=== vect_mark_stmts_to_be_vectorized ===\n");
637
638 auto_vec<gimple, 64> worklist;
639
640 /* 1. Init worklist. */
641 for (i = 0; i < nbbs; i++)
642 {
643 bb = bbs[i];
644 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
645 {
646 phi = gsi_stmt (si);
647 if (dump_enabled_p ())
648 {
649 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
650 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
651 }
652
653 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
654 vect_mark_relevant (&worklist, phi, relevant, live_p, false);
655 }
656 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
657 {
658 stmt = gsi_stmt (si);
659 if (dump_enabled_p ())
660 {
661 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
662 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
663 }
664
665 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
666 vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
667 }
668 }
669
670 /* 2. Process_worklist */
671 while (worklist.length () > 0)
672 {
673 use_operand_p use_p;
674 ssa_op_iter iter;
675
676 stmt = worklist.pop ();
677 if (dump_enabled_p ())
678 {
679 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
680 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
681 }
682
683 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
684 (DEF_STMT) as relevant/irrelevant and live/dead according to the
685 liveness and relevance properties of STMT. */
686 stmt_vinfo = vinfo_for_stmt (stmt);
687 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
688 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
689
690 /* Generally, the liveness and relevance properties of STMT are
691 propagated as is to the DEF_STMTs of its USEs:
692 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
693 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
694
695 One exception is when STMT has been identified as defining a reduction
696 variable; in this case we set the liveness/relevance as follows:
697 live_p = false
698 relevant = vect_used_by_reduction
699 This is because we distinguish between two kinds of relevant stmts -
700 those that are used by a reduction computation, and those that are
701 (also) used by a regular computation. This allows us later on to
702 identify stmts that are used solely by a reduction, and therefore the
703 order of the results that they produce does not have to be kept. */
704
705 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
706 tmp_relevant = relevant;
707 switch (def_type)
708 {
709 case vect_reduction_def:
710 switch (tmp_relevant)
711 {
712 case vect_unused_in_scope:
713 relevant = vect_used_by_reduction;
714 break;
715
716 case vect_used_by_reduction:
717 if (gimple_code (stmt) == GIMPLE_PHI)
718 break;
719 /* fall through */
720
721 default:
722 if (dump_enabled_p ())
723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
724 "unsupported use of reduction.\n");
725 return false;
726 }
727
728 live_p = false;
729 break;
730
731 case vect_nested_cycle:
732 if (tmp_relevant != vect_unused_in_scope
733 && tmp_relevant != vect_used_in_outer_by_reduction
734 && tmp_relevant != vect_used_in_outer)
735 {
736 if (dump_enabled_p ())
737 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
738 "unsupported use of nested cycle.\n");
739
740 return false;
741 }
742
743 live_p = false;
744 break;
745
746 case vect_double_reduction_def:
747 if (tmp_relevant != vect_unused_in_scope
748 && tmp_relevant != vect_used_by_reduction)
749 {
750 if (dump_enabled_p ())
751 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
752 "unsupported use of double reduction.\n");
753
754 return false;
755 }
756
757 live_p = false;
758 break;
759
760 default:
761 break;
762 }
763
764 if (is_pattern_stmt_p (stmt_vinfo))
765 {
766 /* Pattern statements are not inserted into the code, so
767 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
768 have to scan the RHS or function arguments instead. */
769 if (is_gimple_assign (stmt))
770 {
771 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
772 tree op = gimple_assign_rhs1 (stmt);
773
774 i = 1;
775 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
776 {
777 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
778 live_p, relevant, &worklist, false)
779 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
780 live_p, relevant, &worklist, false))
781 return false;
782 i = 2;
783 }
784 for (; i < gimple_num_ops (stmt); i++)
785 {
786 op = gimple_op (stmt, i);
787 if (TREE_CODE (op) == SSA_NAME
788 && !process_use (stmt, op, loop_vinfo, live_p, relevant,
789 &worklist, false))
790 return false;
791 }
792 }
793 else if (is_gimple_call (stmt))
794 {
795 for (i = 0; i < gimple_call_num_args (stmt); i++)
796 {
797 tree arg = gimple_call_arg (stmt, i);
798 if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
799 &worklist, false))
800 return false;
801 }
802 }
803 }
804 else
805 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
806 {
807 tree op = USE_FROM_PTR (use_p);
808 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
809 &worklist, false))
810 return false;
811 }
812
813 if (STMT_VINFO_GATHER_P (stmt_vinfo))
814 {
815 tree off;
816 tree decl = vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
817 gcc_assert (decl);
818 if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
819 &worklist, true))
820 return false;
821 }
822 } /* while worklist */
823
824 return true;
825 }
826
827
828 /* Function vect_model_simple_cost.
829
830 Models cost for simple operations, i.e. those that only emit ncopies of a
831 single op. Right now, this does not account for multiple insns that could
832 be generated for the single vector op. We will handle that shortly. */
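/* Small worked example (illustrative numbers): with NCOPIES == 2 and one
   operand of type vect_constant_def, the code below records one vector_stmt
   in the prologue cost vector and two vector_stmt entries in the body cost
   vector.  */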
833
834 void
835 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
836 enum vect_def_type *dt,
837 stmt_vector_for_cost *prologue_cost_vec,
838 stmt_vector_for_cost *body_cost_vec)
839 {
840 int i;
841 int inside_cost = 0, prologue_cost = 0;
842
843 /* The SLP costs were already calculated during SLP tree build. */
844 if (PURE_SLP_STMT (stmt_info))
845 return;
846
847 /* FORNOW: Assuming a maximum of 2 args per stmt. */
848 for (i = 0; i < 2; i++)
849 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
850 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
851 stmt_info, 0, vect_prologue);
852
853 /* Pass the inside-of-loop statements to the target-specific cost model. */
854 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
855 stmt_info, 0, vect_body);
856
857 if (dump_enabled_p ())
858 dump_printf_loc (MSG_NOTE, vect_location,
859 "vect_model_simple_cost: inside_cost = %d, "
860 "prologue_cost = %d .\n", inside_cost, prologue_cost);
861 }
862
863
864 /* Model cost for type demotion and promotion operations. PWR is normally
865 zero for single-step promotions and demotions. It will be one if
866 two-step promotion/demotion is required, and so on. Each additional
867 step doubles the number of instructions required. */
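/* Worked example for the loop below (illustrative): a two-step promotion
   (PWR == 1) uses TMP values 1 and 2, so vect_pow2 yields 2 + 4 = 6
   vec_promote_demote statements; the corresponding two-step demotion uses
   TMP values 0 and 1, i.e. 1 + 2 = 3 statements.  */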
868
869 static void
870 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
871 enum vect_def_type *dt, int pwr)
872 {
873 int i, tmp;
874 int inside_cost = 0, prologue_cost = 0;
875 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
876 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
877 void *target_cost_data;
878
879 /* The SLP costs were already calculated during SLP tree build. */
880 if (PURE_SLP_STMT (stmt_info))
881 return;
882
883 if (loop_vinfo)
884 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
885 else
886 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
887
888 for (i = 0; i < pwr + 1; i++)
889 {
890 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
891 (i + 1) : i;
892 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
893 vec_promote_demote, stmt_info, 0,
894 vect_body);
895 }
896
897 /* FORNOW: Assuming a maximum of 2 args per stmt. */
898 for (i = 0; i < 2; i++)
899 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
900 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
901 stmt_info, 0, vect_prologue);
902
903 if (dump_enabled_p ())
904 dump_printf_loc (MSG_NOTE, vect_location,
905 "vect_model_promotion_demotion_cost: inside_cost = %d, "
906 "prologue_cost = %d .\n", inside_cost, prologue_cost);
907 }
908
909 /* Function vect_cost_group_size
910
911 For grouped load or store, return the group_size only if it is the first
912 load or store of a group, else return 1. This ensures that group size is
913 only returned once per group. */
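/* For example (illustrative): in a group of four stores the first element
   reports 4 and the remaining three report 1 each, so the interleaving
   overhead is attributed to the group exactly once.  */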
914
915 static int
916 vect_cost_group_size (stmt_vec_info stmt_info)
917 {
918 gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
919
920 if (first_stmt == STMT_VINFO_STMT (stmt_info))
921 return GROUP_SIZE (stmt_info);
922
923 return 1;
924 }
925
926
927 /* Function vect_model_store_cost
928
929 Models cost for stores. In the case of grouped accesses, one access
930 has the overhead of the grouped access attributed to it. */
931
932 void
933 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
934 bool store_lanes_p, enum vect_def_type dt,
935 slp_tree slp_node,
936 stmt_vector_for_cost *prologue_cost_vec,
937 stmt_vector_for_cost *body_cost_vec)
938 {
939 int group_size;
940 unsigned int inside_cost = 0, prologue_cost = 0;
941 struct data_reference *first_dr;
942 gimple first_stmt;
943
944 if (dt == vect_constant_def || dt == vect_external_def)
945 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
946 stmt_info, 0, vect_prologue);
947
948 /* Grouped access? */
949 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
950 {
951 if (slp_node)
952 {
953 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
954 group_size = 1;
955 }
956 else
957 {
958 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
959 group_size = vect_cost_group_size (stmt_info);
960 }
961
962 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
963 }
964 /* Not a grouped access. */
965 else
966 {
967 group_size = 1;
968 first_dr = STMT_VINFO_DATA_REF (stmt_info);
969 }
970
971 /* We assume that the cost of a single store-lanes instruction is
972 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
973 access is instead being provided by a permute-and-store operation,
974 include the cost of the permutes. */
975 if (!store_lanes_p && group_size > 1
976 && !STMT_VINFO_STRIDED_P (stmt_info))
977 {
978 /* Uses high and low interleave or shuffle operations for each
979 needed permute. */
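/* Worked example (illustrative numbers): with GROUP_SIZE == 4 and
   NCOPIES == 2, ceil_log2 (4) == 2, so 2 * 2 * 4 == 16 vec_perm
   statements are costed below.  */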
980 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
981 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
982 stmt_info, 0, vect_body);
983
984 if (dump_enabled_p ())
985 dump_printf_loc (MSG_NOTE, vect_location,
986 "vect_model_store_cost: strided group_size = %d .\n",
987 group_size);
988 }
989
990 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
991 /* Costs of the stores. */
992 if (STMT_VINFO_STRIDED_P (stmt_info)
993 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
994 {
995 /* N scalar stores plus extracting the elements. */
996 inside_cost += record_stmt_cost (body_cost_vec,
997 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
998 scalar_store, stmt_info, 0, vect_body);
999 }
1000 else
1001 vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
1002
1003 if (STMT_VINFO_STRIDED_P (stmt_info))
1004 inside_cost += record_stmt_cost (body_cost_vec,
1005 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1006 vec_to_scalar, stmt_info, 0, vect_body);
1007
1008 if (dump_enabled_p ())
1009 dump_printf_loc (MSG_NOTE, vect_location,
1010 "vect_model_store_cost: inside_cost = %d, "
1011 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1012 }
1013
1014
1015 /* Calculate cost of DR's memory access. */
1016 void
1017 vect_get_store_cost (struct data_reference *dr, int ncopies,
1018 unsigned int *inside_cost,
1019 stmt_vector_for_cost *body_cost_vec)
1020 {
1021 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1022 gimple stmt = DR_STMT (dr);
1023 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1024
1025 switch (alignment_support_scheme)
1026 {
1027 case dr_aligned:
1028 {
1029 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1030 vector_store, stmt_info, 0,
1031 vect_body);
1032
1033 if (dump_enabled_p ())
1034 dump_printf_loc (MSG_NOTE, vect_location,
1035 "vect_model_store_cost: aligned.\n");
1036 break;
1037 }
1038
1039 case dr_unaligned_supported:
1040 {
1041 /* Here, we assign an additional cost for the unaligned store. */
1042 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1043 unaligned_store, stmt_info,
1044 DR_MISALIGNMENT (dr), vect_body);
1045 if (dump_enabled_p ())
1046 dump_printf_loc (MSG_NOTE, vect_location,
1047 "vect_model_store_cost: unaligned supported by "
1048 "hardware.\n");
1049 break;
1050 }
1051
1052 case dr_unaligned_unsupported:
1053 {
1054 *inside_cost = VECT_MAX_COST;
1055
1056 if (dump_enabled_p ())
1057 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1058 "vect_model_store_cost: unsupported access.\n");
1059 break;
1060 }
1061
1062 default:
1063 gcc_unreachable ();
1064 }
1065 }
1066
1067
1068 /* Function vect_model_load_cost
1069
1070 Models cost for loads. In the case of grouped accesses, the last access
1071 has the overhead of the grouped access attributed to it. Since unaligned
1072 accesses are supported for loads, we also account for the costs of the
1073 access scheme chosen. */
1074
1075 void
1076 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1077 bool load_lanes_p, slp_tree slp_node,
1078 stmt_vector_for_cost *prologue_cost_vec,
1079 stmt_vector_for_cost *body_cost_vec)
1080 {
1081 int group_size;
1082 gimple first_stmt;
1083 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
1084 unsigned int inside_cost = 0, prologue_cost = 0;
1085
1086 /* Grouped accesses? */
1087 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1088 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
1089 {
1090 group_size = vect_cost_group_size (stmt_info);
1091 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1092 }
1093 /* Not a grouped access. */
1094 else
1095 {
1096 group_size = 1;
1097 first_dr = dr;
1098 }
1099
1100 /* We assume that the cost of a single load-lanes instruction is
1101 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1102 access is instead being provided by a load-and-permute operation,
1103 include the cost of the permutes. */
1104 if (!load_lanes_p && group_size > 1
1105 && !STMT_VINFO_STRIDED_P (stmt_info))
1106 {
1107 /* Uses even and odd extract operations or shuffle operations
1108 for each needed permute. */
1109 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1110 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1111 stmt_info, 0, vect_body);
1112
1113 if (dump_enabled_p ())
1114 dump_printf_loc (MSG_NOTE, vect_location,
1115 "vect_model_load_cost: strided group_size = %d .\n",
1116 group_size);
1117 }
1118
1119 /* The loads themselves. */
1120 if (STMT_VINFO_STRIDED_P (stmt_info)
1121 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1122 {
1123 /* N scalar loads plus gathering them into a vector. */
1124 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1125 inside_cost += record_stmt_cost (body_cost_vec,
1126 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1127 scalar_load, stmt_info, 0, vect_body);
1128 }
1129 else
1130 vect_get_load_cost (first_dr, ncopies,
1131 ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1132 || group_size > 1 || slp_node),
1133 &inside_cost, &prologue_cost,
1134 prologue_cost_vec, body_cost_vec, true);
1135 if (STMT_VINFO_STRIDED_P (stmt_info))
1136 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1137 stmt_info, 0, vect_body);
1138
1139 if (dump_enabled_p ())
1140 dump_printf_loc (MSG_NOTE, vect_location,
1141 "vect_model_load_cost: inside_cost = %d, "
1142 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1143 }
1144
1145
1146 /* Calculate cost of DR's memory access. */
1147 void
1148 vect_get_load_cost (struct data_reference *dr, int ncopies,
1149 bool add_realign_cost, unsigned int *inside_cost,
1150 unsigned int *prologue_cost,
1151 stmt_vector_for_cost *prologue_cost_vec,
1152 stmt_vector_for_cost *body_cost_vec,
1153 bool record_prologue_costs)
1154 {
1155 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1156 gimple stmt = DR_STMT (dr);
1157 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1158
1159 switch (alignment_support_scheme)
1160 {
1161 case dr_aligned:
1162 {
1163 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1164 stmt_info, 0, vect_body);
1165
1166 if (dump_enabled_p ())
1167 dump_printf_loc (MSG_NOTE, vect_location,
1168 "vect_model_load_cost: aligned.\n");
1169
1170 break;
1171 }
1172 case dr_unaligned_supported:
1173 {
1174 /* Here, we assign an additional cost for the unaligned load. */
1175 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1176 unaligned_load, stmt_info,
1177 DR_MISALIGNMENT (dr), vect_body);
1178
1179 if (dump_enabled_p ())
1180 dump_printf_loc (MSG_NOTE, vect_location,
1181 "vect_model_load_cost: unaligned supported by "
1182 "hardware.\n");
1183
1184 break;
1185 }
1186 case dr_explicit_realign:
1187 {
1188 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1189 vector_load, stmt_info, 0, vect_body);
1190 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1191 vec_perm, stmt_info, 0, vect_body);
1192
1193 /* FIXME: If the misalignment remains fixed across the iterations of
1194 the containing loop, the following cost should be added to the
1195 prologue costs. */
1196 if (targetm.vectorize.builtin_mask_for_load)
1197 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1198 stmt_info, 0, vect_body);
1199
1200 if (dump_enabled_p ())
1201 dump_printf_loc (MSG_NOTE, vect_location,
1202 "vect_model_load_cost: explicit realign\n");
1203
1204 break;
1205 }
1206 case dr_explicit_realign_optimized:
1207 {
1208 if (dump_enabled_p ())
1209 dump_printf_loc (MSG_NOTE, vect_location,
1210 "vect_model_load_cost: unaligned software "
1211 "pipelined.\n");
1212
1213 /* Unaligned software pipeline has a load of an address, an initial
1214 load, and possibly a mask operation to "prime" the loop. However,
1215 if this is an access in a group of loads, which provide grouped
1216 access, then the above cost should only be considered for one
1217 access in the group. Inside the loop, there is a load op
1218 and a realignment op. */
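/* Worked example (illustrative): with NCOPIES == 2 this case costs
   2 vector_load + 2 vec_perm statements in the loop body, plus, when this
   access carries the realignment overhead, two prologue vector_stmt entries
   (three if builtin_mask_for_load is available).  */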
1219
1220 if (add_realign_cost && record_prologue_costs)
1221 {
1222 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1223 vector_stmt, stmt_info,
1224 0, vect_prologue);
1225 if (targetm.vectorize.builtin_mask_for_load)
1226 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1227 vector_stmt, stmt_info,
1228 0, vect_prologue);
1229 }
1230
1231 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1232 stmt_info, 0, vect_body);
1233 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1234 stmt_info, 0, vect_body);
1235
1236 if (dump_enabled_p ())
1237 dump_printf_loc (MSG_NOTE, vect_location,
1238 "vect_model_load_cost: explicit realign optimized"
1239 "\n");
1240
1241 break;
1242 }
1243
1244 case dr_unaligned_unsupported:
1245 {
1246 *inside_cost = VECT_MAX_COST;
1247
1248 if (dump_enabled_p ())
1249 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1250 "vect_model_load_cost: unsupported access.\n");
1251 break;
1252 }
1253
1254 default:
1255 gcc_unreachable ();
1256 }
1257 }
1258
1259 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1260 the loop preheader for the vectorized stmt STMT. */
1261
1262 static void
1263 vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
1264 {
1265 if (gsi)
1266 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1267 else
1268 {
1269 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1270 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1271
1272 if (loop_vinfo)
1273 {
1274 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1275 basic_block new_bb;
1276 edge pe;
1277
1278 if (nested_in_vect_loop_p (loop, stmt))
1279 loop = loop->inner;
1280
1281 pe = loop_preheader_edge (loop);
1282 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1283 gcc_assert (!new_bb);
1284 }
1285 else
1286 {
1287 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1288 basic_block bb;
1289 gimple_stmt_iterator gsi_bb_start;
1290
1291 gcc_assert (bb_vinfo);
1292 bb = BB_VINFO_BB (bb_vinfo);
1293 gsi_bb_start = gsi_after_labels (bb);
1294 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1295 }
1296 }
1297
1298 if (dump_enabled_p ())
1299 {
1300 dump_printf_loc (MSG_NOTE, vect_location,
1301 "created new init_stmt: ");
1302 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1303 }
1304 }
1305
1306 /* Function vect_init_vector.
1307
1308 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1309 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1310 a vector type, a vector with all elements equal to VAL is created first.
1311 Place the initialization at GSI if it is not NULL. Otherwise, place the
1312 initialization at the loop preheader.
1313 Return the DEF of INIT_STMT.
1314 It will be used in the vectorization of STMT. */
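/* For example (illustrative): called with VAL == 5 and a four-element
   integer vector TYPE, this builds

       vect_cst_.N = { 5, 5, 5, 5 };

   via build_vector_from_val and returns the SSA name defined by that
   INIT_STMT (the .N suffix is whatever temporary number the compiler
   assigns, not a fixed name).  */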
1315
1316 tree
1317 vect_init_vector (gimple stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1318 {
1319 tree new_var;
1320 gimple init_stmt;
1321 tree vec_oprnd;
1322 tree new_temp;
1323
1324 if (TREE_CODE (type) == VECTOR_TYPE
1325 && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
1326 {
1327 if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1328 {
1329 if (CONSTANT_CLASS_P (val))
1330 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
1331 else
1332 {
1333 new_temp = make_ssa_name (TREE_TYPE (type));
1334 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1335 vect_init_vector_1 (stmt, init_stmt, gsi);
1336 val = new_temp;
1337 }
1338 }
1339 val = build_vector_from_val (type, val);
1340 }
1341
1342 new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
1343 init_stmt = gimple_build_assign (new_var, val);
1344 new_temp = make_ssa_name (new_var, init_stmt);
1345 gimple_assign_set_lhs (init_stmt, new_temp);
1346 vect_init_vector_1 (stmt, init_stmt, gsi);
1347 vec_oprnd = gimple_assign_lhs (init_stmt);
1348 return vec_oprnd;
1349 }
1350
1351
1352 /* Function vect_get_vec_def_for_operand.
1353
1354 OP is an operand in STMT. This function returns a (vector) def that will be
1355 used in the vectorized stmt for STMT.
1356
1357 In the case that OP is an SSA_NAME which is defined in the loop, then
1358 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1359
1360 In case OP is an invariant or constant, a new stmt that creates a vector def
1361 needs to be introduced. */
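/* For example (illustrative): if OP is the constant 3 and its scalar type
   maps to a four-element integer vector, case 1 below returns the def of a
   new 'vect_cst_ = { 3, 3, 3, 3 }' created by vect_init_vector; if OP is an
   SSA_NAME whose defining statement has already been vectorized, case 3
   simply returns the result of that statement's STMT_VINFO_VEC_STMT.  */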
1362
1363 tree
1364 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
1365 {
1366 tree vec_oprnd;
1367 gimple vec_stmt;
1368 gimple def_stmt;
1369 stmt_vec_info def_stmt_info = NULL;
1370 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1371 unsigned int nunits;
1372 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1373 tree def;
1374 enum vect_def_type dt;
1375 bool is_simple_use;
1376 tree vector_type;
1377
1378 if (dump_enabled_p ())
1379 {
1380 dump_printf_loc (MSG_NOTE, vect_location,
1381 "vect_get_vec_def_for_operand: ");
1382 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1383 dump_printf (MSG_NOTE, "\n");
1384 }
1385
1386 is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
1387 &def_stmt, &def, &dt);
1388 gcc_assert (is_simple_use);
1389 if (dump_enabled_p ())
1390 {
1391 int loc_printed = 0;
1392 if (def)
1393 {
1394 dump_printf_loc (MSG_NOTE, vect_location, "def = ");
1395 loc_printed = 1;
1396 dump_generic_expr (MSG_NOTE, TDF_SLIM, def);
1397 dump_printf (MSG_NOTE, "\n");
1398 }
1399 if (def_stmt)
1400 {
1401 if (loc_printed)
1402 dump_printf (MSG_NOTE, " def_stmt = ");
1403 else
1404 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1405 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1406 }
1407 }
1408
1409 switch (dt)
1410 {
1411 /* Case 1: operand is a constant. */
1412 case vect_constant_def:
1413 {
1414 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1415 gcc_assert (vector_type);
1416 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1417
1418 if (scalar_def)
1419 *scalar_def = op;
1420
1421 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1422 if (dump_enabled_p ())
1423 dump_printf_loc (MSG_NOTE, vect_location,
1424 "Create vector_cst. nunits = %d\n", nunits);
1425
1426 return vect_init_vector (stmt, op, vector_type, NULL);
1427 }
1428
1429 /* Case 2: operand is defined outside the loop - loop invariant. */
1430 case vect_external_def:
1431 {
1432 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1433 gcc_assert (vector_type);
1434
1435 if (scalar_def)
1436 *scalar_def = def;
1437
1438 /* Create 'vec_inv = {inv,inv,..,inv}' */
1439 if (dump_enabled_p ())
1440 dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.\n");
1441
1442 return vect_init_vector (stmt, def, vector_type, NULL);
1443 }
1444
1445 /* Case 3: operand is defined inside the loop. */
1446 case vect_internal_def:
1447 {
1448 if (scalar_def)
1449 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1450
1451 /* Get the def from the vectorized stmt. */
1452 def_stmt_info = vinfo_for_stmt (def_stmt);
1453
1454 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1455 /* Get vectorized pattern statement. */
1456 if (!vec_stmt
1457 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1458 && !STMT_VINFO_RELEVANT (def_stmt_info))
1459 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1460 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1461 gcc_assert (vec_stmt);
1462 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1463 vec_oprnd = PHI_RESULT (vec_stmt);
1464 else if (is_gimple_call (vec_stmt))
1465 vec_oprnd = gimple_call_lhs (vec_stmt);
1466 else
1467 vec_oprnd = gimple_assign_lhs (vec_stmt);
1468 return vec_oprnd;
1469 }
1470
1471 /* Case 4: operand is defined by a loop header phi - reduction */
1472 case vect_reduction_def:
1473 case vect_double_reduction_def:
1474 case vect_nested_cycle:
1475 {
1476 struct loop *loop;
1477
1478 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1479 loop = (gimple_bb (def_stmt))->loop_father;
1480
1481 /* Get the def before the loop */
1482 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1483 return get_initial_def_for_reduction (stmt, op, scalar_def);
1484 }
1485
1486 /* Case 5: operand is defined by loop-header phi - induction. */
1487 case vect_induction_def:
1488 {
1489 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1490
1491 /* Get the def from the vectorized stmt. */
1492 def_stmt_info = vinfo_for_stmt (def_stmt);
1493 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1494 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1495 vec_oprnd = PHI_RESULT (vec_stmt);
1496 else
1497 vec_oprnd = gimple_get_lhs (vec_stmt);
1498 return vec_oprnd;
1499 }
1500
1501 default:
1502 gcc_unreachable ();
1503 }
1504 }
1505
1506
1507 /* Function vect_get_vec_def_for_stmt_copy
1508
1509 Return a vector-def for an operand. This function is used when the
1510 vectorized stmt to be created (by the caller to this function) is a "copy"
1511 created in case the vectorized result cannot fit in one vector, and several
1512 copies of the vector-stmt are required. In this case the vector-def is
1513 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1514 of the stmt that defines VEC_OPRND.
1515 DT is the type of the vector def VEC_OPRND.
1516
1517 Context:
1518 In case the vectorization factor (VF) is bigger than the number
1519 of elements that can fit in a vectype (nunits), we have to generate
1520 more than one vector stmt to vectorize the scalar stmt. This situation
1521 arises when there are multiple data-types operated upon in the loop; the
1522 smallest data-type determines the VF, and as a result, when vectorizing
1523 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1524 vector stmt (each computing a vector of 'nunits' results, and together
1525 computing 'VF' results in each iteration). This function is called when
1526 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1527 which VF=16 and nunits=4, so the number of copies required is 4):
1528
1529 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1530
1531 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1532 VS1.1: vx.1 = memref1 VS1.2
1533 VS1.2: vx.2 = memref2 VS1.3
1534 VS1.3: vx.3 = memref3
1535
1536 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1537 VSnew.1: vz1 = vx.1 + ... VSnew.2
1538 VSnew.2: vz2 = vx.2 + ... VSnew.3
1539 VSnew.3: vz3 = vx.3 + ...
1540
1541 The vectorization of S1 is explained in vectorizable_load.
1542 The vectorization of S2:
1543 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1544 the function 'vect_get_vec_def_for_operand' is called to
1545 get the relevant vector-def for each operand of S2. For operand x it
1546 returns the vector-def 'vx.0'.
1547
1548 To create the remaining copies of the vector-stmt (VSnew.j), this
1549 function is called to get the relevant vector-def for each operand. It is
1550 obtained from the respective VS1.j stmt, which is recorded in the
1551 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1552
1553 For example, to obtain the vector-def 'vx.1' in order to create the
1554 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1555 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1556 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1557 and return its def ('vx.1').
1558 Overall, to create the above sequence this function will be called 3 times:
1559 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1560 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1561 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1562
1563 tree
1564 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1565 {
1566 gimple vec_stmt_for_operand;
1567 stmt_vec_info def_stmt_info;
1568
1569 /* Do nothing; can reuse same def. */
1570 if (dt == vect_external_def || dt == vect_constant_def)
1571 return vec_oprnd;
1572
1573 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1574 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1575 gcc_assert (def_stmt_info);
1576 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1577 gcc_assert (vec_stmt_for_operand);
1579 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1580 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1581 else
1582 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1583 return vec_oprnd;
1584 }
1585
1586
1587 /* Get vectorized definitions for the operands to create a copy of an original
1588 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1589
1590 static void
1591 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1592 vec<tree> *vec_oprnds0,
1593 vec<tree> *vec_oprnds1)
1594 {
1595 tree vec_oprnd = vec_oprnds0->pop ();
1596
1597 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1598 vec_oprnds0->quick_push (vec_oprnd);
1599
1600 if (vec_oprnds1 && vec_oprnds1->length ())
1601 {
1602 vec_oprnd = vec_oprnds1->pop ();
1603 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1604 vec_oprnds1->quick_push (vec_oprnd);
1605 }
1606 }
1607
1608
1609 /* Get vectorized definitions for OP0 and OP1.
1610 REDUC_INDEX is the index of reduction operand in case of reduction,
1611 and -1 otherwise. */
1612
1613 void
1614 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1615 vec<tree> *vec_oprnds0,
1616 vec<tree> *vec_oprnds1,
1617 slp_tree slp_node, int reduc_index)
1618 {
1619 if (slp_node)
1620 {
1621 int nops = (op1 == NULL_TREE) ? 1 : 2;
1622 auto_vec<tree> ops (nops);
1623 auto_vec<vec<tree> > vec_defs (nops);
1624
1625 ops.quick_push (op0);
1626 if (op1)
1627 ops.quick_push (op1);
1628
1629 vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);
1630
1631 *vec_oprnds0 = vec_defs[0];
1632 if (op1)
1633 *vec_oprnds1 = vec_defs[1];
1634 }
1635 else
1636 {
1637 tree vec_oprnd;
1638
1639 vec_oprnds0->create (1);
1640 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1641 vec_oprnds0->quick_push (vec_oprnd);
1642
1643 if (op1)
1644 {
1645 vec_oprnds1->create (1);
1646 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1647 vec_oprnds1->quick_push (vec_oprnd);
1648 }
1649 }
1650 }
1651
1652
1653 /* Function vect_finish_stmt_generation.
1654
1655 Insert a new stmt. */
1656
1657 void
1658 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1659 gimple_stmt_iterator *gsi)
1660 {
1661 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1662 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1663 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1664
1665 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1666
1667 if (!gsi_end_p (*gsi)
1668 && gimple_has_mem_ops (vec_stmt))
1669 {
1670 gimple at_stmt = gsi_stmt (*gsi);
1671 tree vuse = gimple_vuse (at_stmt);
1672 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1673 {
1674 tree vdef = gimple_vdef (at_stmt);
1675 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1676 /* If we have an SSA vuse and insert a store, update virtual
1677 SSA form to avoid triggering the renamer. Do so only
1678 if we can easily see all uses - which is what almost always
1679 happens with the way vectorized stmts are inserted. */
1680 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1681 && ((is_gimple_assign (vec_stmt)
1682 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1683 || (is_gimple_call (vec_stmt)
1684 && !(gimple_call_flags (vec_stmt)
1685 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1686 {
1687 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1688 gimple_set_vdef (vec_stmt, new_vdef);
1689 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1690 }
1691 }
1692 }
1693 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1694
1695 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1696 bb_vinfo));
1697
1698 if (dump_enabled_p ())
1699 {
1700 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1701 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1702 }
1703
1704 gimple_set_location (vec_stmt, gimple_location (stmt));
1705
1706 /* While EH edges will generally prevent vectorization, stmt might
1707 e.g. be in a must-not-throw region. Ensure newly created stmts
1708 that could throw are part of the same region. */
1709 int lp_nr = lookup_stmt_eh_lp (stmt);
1710 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1711 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1712 }
1713
1714 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1715 a function declaration if the target has a vectorized version
1716 of the function, or NULL_TREE if the function cannot be vectorized. */
1717
1718 tree
1719 vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
1720 {
1721 tree fndecl = gimple_call_fndecl (call);
1722
1723 /* We only handle functions that do not read or clobber memory -- i.e.
1724 const or novops ones. */
1725 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1726 return NULL_TREE;
1727
1728 if (!fndecl
1729 || TREE_CODE (fndecl) != FUNCTION_DECL
1730 || !DECL_BUILT_IN (fndecl))
1731 return NULL_TREE;
1732
1733 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1734 vectype_in);
1735 }
1736
1737
1738 static tree permute_vec_elements (tree, tree, tree, gimple,
1739 gimple_stmt_iterator *);
1740
1741
1742 /* Function vectorizable_mask_load_store.
1743
1744 Check if STMT performs a conditional load or store that can be vectorized.
1745 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1746 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1747 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
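/* Sketch of the internal-function calls handled here (argument positions
   taken from the gimple_call_arg uses below, other operands elided):

       lhs = IFN_MASK_LOAD (..., ..., mask);
       IFN_MASK_STORE (..., ..., mask, rhs);

   i.e. the mask is argument 2 in both cases and the stored value is
   argument 3.  */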
1748
1749 static bool
1750 vectorizable_mask_load_store (gimple stmt, gimple_stmt_iterator *gsi,
1751 gimple *vec_stmt, slp_tree slp_node)
1752 {
1753 tree vec_dest = NULL;
1754 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1755 stmt_vec_info prev_stmt_info;
1756 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1757 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1758 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
1759 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1760 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1761 tree elem_type;
1762 gimple new_stmt;
1763 tree dummy;
1764 tree dataref_ptr = NULL_TREE;
1765 gimple ptr_incr;
1766 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1767 int ncopies;
1768 int i, j;
1769 bool inv_p;
1770 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
1771 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
1772 int gather_scale = 1;
1773 enum vect_def_type gather_dt = vect_unknown_def_type;
1774 bool is_store;
1775 tree mask;
1776 gimple def_stmt;
1777 tree def;
1778 enum vect_def_type dt;
1779
1780 if (slp_node != NULL)
1781 return false;
1782
1783 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1784 gcc_assert (ncopies >= 1);
1785
1786 is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
1787 mask = gimple_call_arg (stmt, 2);
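  /* The scalar mask must have exactly as many bits of precision as a vector
     element; other mask widths are not handled here.  */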
1788 if (TYPE_PRECISION (TREE_TYPE (mask))
1789 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))))
1790 return false;
1791
1792 /* FORNOW. This restriction should be relaxed. */
1793 if (nested_in_vect_loop && ncopies > 1)
1794 {
1795 if (dump_enabled_p ())
1796 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1797 "multiple types in nested loop.");
1798 return false;
1799 }
1800
1801 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1802 return false;
1803
1804 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1805 return false;
1806
1807 if (!STMT_VINFO_DATA_REF (stmt_info))
1808 return false;
1809
1810 elem_type = TREE_TYPE (vectype);
1811
1812 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1813 return false;
1814
1815 if (STMT_VINFO_STRIDED_P (stmt_info))
1816 return false;
1817
1818 if (STMT_VINFO_GATHER_P (stmt_info))
1819 {
1820 gimple def_stmt;
1821 tree def;
1822 gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
1823 &gather_off, &gather_scale);
1824 gcc_assert (gather_decl);
1825 if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, NULL,
1826 &def_stmt, &def, &gather_dt,
1827 &gather_off_vectype))
1828 {
1829 if (dump_enabled_p ())
1830 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1831 "gather index use not simple.");
1832 return false;
1833 }
1834
1835 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1836 tree masktype
1837 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1838 if (TREE_CODE (masktype) == INTEGER_TYPE)
1839 {
1840 if (dump_enabled_p ())
1841 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1842 "masked gather with integer mask not supported.");
1843 return false;
1844 }
1845 }
1846 else if (tree_int_cst_compare (nested_in_vect_loop
1847 ? STMT_VINFO_DR_STEP (stmt_info)
1848 : DR_STEP (dr), size_zero_node) <= 0)
1849 return false;
1850 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1851 || !can_vec_mask_load_store_p (TYPE_MODE (vectype), !is_store))
1852 return false;
1853
1854 if (TREE_CODE (mask) != SSA_NAME)
1855 return false;
1856
1857 if (!vect_is_simple_use (mask, stmt, loop_vinfo, NULL,
1858 &def_stmt, &def, &dt))
1859 return false;
1860
1861 if (is_store)
1862 {
1863 tree rhs = gimple_call_arg (stmt, 3);
1864 if (!vect_is_simple_use (rhs, stmt, loop_vinfo, NULL,
1865 &def_stmt, &def, &dt))
1866 return false;
1867 }
1868
1869 if (!vec_stmt) /* transformation not required. */
1870 {
1871 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1872 if (is_store)
1873 vect_model_store_cost (stmt_info, ncopies, false, dt,
1874 NULL, NULL, NULL);
1875 else
1876 vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
1877 return true;
1878 }
1879
1880 /** Transform. **/
1881
1882 if (STMT_VINFO_GATHER_P (stmt_info))
1883 {
1884 tree vec_oprnd0 = NULL_TREE, op;
1885 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1886 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
1887 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
1888 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
1889 tree mask_perm_mask = NULL_TREE;
1890 edge pe = loop_preheader_edge (loop);
1891 gimple_seq seq;
1892 basic_block new_bb;
1893 enum { NARROW, NONE, WIDEN } modifier;
1894 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
1895
1896 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
1897 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1898 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1899 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1900 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1901 scaletype = TREE_VALUE (arglist);
1902 gcc_checking_assert (types_compatible_p (srctype, rettype)
1903 && types_compatible_p (srctype, masktype));
1904
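      /* Decide how the lane count of the data vector relates to the lane count
         of the gather offset vector, and precompute the permutation masks that
         widen or narrow between the two shapes.  */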
1905 if (nunits == gather_off_nunits)
1906 modifier = NONE;
1907 else if (nunits == gather_off_nunits / 2)
1908 {
1909 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1910 modifier = WIDEN;
1911
1912 for (i = 0; i < gather_off_nunits; ++i)
1913 sel[i] = i | nunits;
1914
1915 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
1916 }
1917 else if (nunits == gather_off_nunits * 2)
1918 {
1919 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1920 modifier = NARROW;
1921
1922 for (i = 0; i < nunits; ++i)
1923 sel[i] = i < gather_off_nunits
1924 ? i : i + nunits - gather_off_nunits;
1925
1926 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
1927 ncopies *= 2;
1928 for (i = 0; i < nunits; ++i)
1929 sel[i] = i | gather_off_nunits;
1930 mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
1931 }
1932 else
1933 gcc_unreachable ();
1934
1935 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1936
1937 ptr = fold_convert (ptrtype, gather_base);
1938 if (!is_gimple_min_invariant (ptr))
1939 {
1940 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1941 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1942 gcc_assert (!new_bb);
1943 }
1944
1945 scale = build_int_cst (scaletype, gather_scale);
1946
1947 prev_stmt_info = NULL;
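      /* Emit one gather call per copy; for the WIDEN and NARROW cases the
         offset, mask and result vectors are paired up or split using the
         permutation masks computed above.  */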
1948 for (j = 0; j < ncopies; ++j)
1949 {
1950 if (modifier == WIDEN && (j & 1))
1951 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1952 perm_mask, stmt, gsi);
1953 else if (j == 0)
1954 op = vec_oprnd0
1955 = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
1956 else
1957 op = vec_oprnd0
1958 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1959
1960 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1961 {
1962 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1963 == TYPE_VECTOR_SUBPARTS (idxtype));
1964 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
1965 var = make_ssa_name (var);
1966 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1967 new_stmt
1968 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1969 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1970 op = var;
1971 }
1972
1973 if (mask_perm_mask && (j & 1))
1974 mask_op = permute_vec_elements (mask_op, mask_op,
1975 mask_perm_mask, stmt, gsi);
1976 else
1977 {
1978 if (j == 0)
1979 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
1980 else
1981 {
1982 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL,
1983 &def_stmt, &def, &dt);
1984 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1985 }
1986
1987 mask_op = vec_mask;
1988 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
1989 {
1990 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
1991 == TYPE_VECTOR_SUBPARTS (masktype));
1992 var = vect_get_new_vect_var (masktype, vect_simple_var,
1993 NULL);
1994 var = make_ssa_name (var);
1995 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
1996 new_stmt
1997 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
1998 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1999 mask_op = var;
2000 }
2001 }
2002
2003 new_stmt
2004 = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
2005 scale);
2006
2007 if (!useless_type_conversion_p (vectype, rettype))
2008 {
2009 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
2010 == TYPE_VECTOR_SUBPARTS (rettype));
2011 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
2012 op = make_ssa_name (var, new_stmt);
2013 gimple_call_set_lhs (new_stmt, op);
2014 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2015 var = make_ssa_name (vec_dest);
2016 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2017 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2018 }
2019 else
2020 {
2021 var = make_ssa_name (vec_dest, new_stmt);
2022 gimple_call_set_lhs (new_stmt, var);
2023 }
2024
2025 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2026
2027 if (modifier == NARROW)
2028 {
2029 if ((j & 1) == 0)
2030 {
2031 prev_res = var;
2032 continue;
2033 }
2034 var = permute_vec_elements (prev_res, var,
2035 perm_mask, stmt, gsi);
2036 new_stmt = SSA_NAME_DEF_STMT (var);
2037 }
2038
2039 if (prev_stmt_info == NULL)
2040 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2041 else
2042 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2043 prev_stmt_info = vinfo_for_stmt (new_stmt);
2044 }
2045
2046 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2047 from the IL. */
2048 tree lhs = gimple_call_lhs (stmt);
2049 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2050 set_vinfo_for_stmt (new_stmt, stmt_info);
2051 set_vinfo_for_stmt (stmt, NULL);
2052 STMT_VINFO_STMT (stmt_info) = new_stmt;
2053 gsi_replace (gsi, new_stmt, true);
2054 return true;
2055 }
2056 else if (is_store)
2057 {
2058 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2059 prev_stmt_info = NULL;
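      /* Masked store: for each copy obtain the next rhs and mask defs, bump
         the data reference pointer and emit an IFN_MASK_STORE call with the
         computed alignment.  */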
2060 for (i = 0; i < ncopies; i++)
2061 {
2062 unsigned align, misalign;
2063
2064 if (i == 0)
2065 {
2066 tree rhs = gimple_call_arg (stmt, 3);
2067 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt, NULL);
2068 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2069 	      /* We should have caught mismatched types earlier.  */
2070 gcc_assert (useless_type_conversion_p (vectype,
2071 TREE_TYPE (vec_rhs)));
2072 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2073 NULL_TREE, &dummy, gsi,
2074 &ptr_incr, false, &inv_p);
2075 gcc_assert (!inv_p);
2076 }
2077 else
2078 {
2079 vect_is_simple_use (vec_rhs, NULL, loop_vinfo, NULL, &def_stmt,
2080 &def, &dt);
2081 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2082 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2083 &def, &dt);
2084 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2085 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2086 TYPE_SIZE_UNIT (vectype));
2087 }
2088
2089 align = TYPE_ALIGN_UNIT (vectype);
2090 if (aligned_access_p (dr))
2091 misalign = 0;
2092 else if (DR_MISALIGNMENT (dr) == -1)
2093 {
2094 align = TYPE_ALIGN_UNIT (elem_type);
2095 misalign = 0;
2096 }
2097 else
2098 misalign = DR_MISALIGNMENT (dr);
2099 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2100 misalign);
2101 new_stmt
2102 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2103 gimple_call_arg (stmt, 1),
2104 vec_mask, vec_rhs);
2105 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2106 if (i == 0)
2107 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2108 else
2109 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2110 prev_stmt_info = vinfo_for_stmt (new_stmt);
2111 }
2112 }
2113 else
2114 {
2115 tree vec_mask = NULL_TREE;
2116 prev_stmt_info = NULL;
2117 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
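      /* Masked load: emit one IFN_MASK_LOAD per copy, each reading through
         the (bumped) data reference pointer under the current mask def.  */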
2118 for (i = 0; i < ncopies; i++)
2119 {
2120 unsigned align, misalign;
2121
2122 if (i == 0)
2123 {
2124 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2125 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2126 NULL_TREE, &dummy, gsi,
2127 &ptr_incr, false, &inv_p);
2128 gcc_assert (!inv_p);
2129 }
2130 else
2131 {
2132 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2133 &def, &dt);
2134 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2135 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2136 TYPE_SIZE_UNIT (vectype));
2137 }
2138
2139 align = TYPE_ALIGN_UNIT (vectype);
2140 if (aligned_access_p (dr))
2141 misalign = 0;
2142 else if (DR_MISALIGNMENT (dr) == -1)
2143 {
2144 align = TYPE_ALIGN_UNIT (elem_type);
2145 misalign = 0;
2146 }
2147 else
2148 misalign = DR_MISALIGNMENT (dr);
2149 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2150 misalign);
2151 new_stmt
2152 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2153 gimple_call_arg (stmt, 1),
2154 vec_mask);
2155 gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
2156 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2157 if (i == 0)
2158 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2159 else
2160 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2161 prev_stmt_info = vinfo_for_stmt (new_stmt);
2162 }
2163 }
2164
2165 if (!is_store)
2166 {
2167 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2168 from the IL. */
2169 tree lhs = gimple_call_lhs (stmt);
2170 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2171 set_vinfo_for_stmt (new_stmt, stmt_info);
2172 set_vinfo_for_stmt (stmt, NULL);
2173 STMT_VINFO_STMT (stmt_info) = new_stmt;
2174 gsi_replace (gsi, new_stmt, true);
2175 }
2176
2177 return true;
2178 }
2179
2180
2181 /* Function vectorizable_call.
2182
2183 Check if GS performs a function call that can be vectorized.
2184    If VEC_STMT is also passed, vectorize GS: create a vectorized
2185    stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2186    Return FALSE if GS is not a vectorizable call, TRUE otherwise. */
2187
2188 static bool
2189 vectorizable_call (gimple gs, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2190 slp_tree slp_node)
2191 {
2192 gcall *stmt;
2193 tree vec_dest;
2194 tree scalar_dest;
2195 tree op, type;
2196 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2197 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2198 tree vectype_out, vectype_in;
2199 int nunits_in;
2200 int nunits_out;
2201 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2202 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2203 tree fndecl, new_temp, def, rhs_type;
2204 gimple def_stmt;
2205 enum vect_def_type dt[3]
2206 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2207 gimple new_stmt = NULL;
2208 int ncopies, j;
2209 vec<tree> vargs = vNULL;
2210 enum { NARROW, NONE, WIDEN } modifier;
2211 size_t i, nargs;
2212 tree lhs;
2213
2214 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2215 return false;
2216
2217 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2218 return false;
2219
2220 /* Is GS a vectorizable call? */
2221 stmt = dyn_cast <gcall *> (gs);
2222 if (!stmt)
2223 return false;
2224
2225 if (gimple_call_internal_p (stmt)
2226 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2227 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2228 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2229 slp_node);
2230
2231 if (gimple_call_lhs (stmt) == NULL_TREE
2232 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2233 return false;
2234
2235 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2236
2237 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2238
2239 /* Process function arguments. */
2240 rhs_type = NULL_TREE;
2241 vectype_in = NULL_TREE;
2242 nargs = gimple_call_num_args (stmt);
2243
2244   /* Bail out if the function has more than three arguments; we do not have
2245      interesting builtin functions to vectorize with more than two arguments
2246      except for fma.  Having no arguments is not useful either. */
2247 if (nargs == 0 || nargs > 3)
2248 return false;
2249
2250 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
2251 if (gimple_call_internal_p (stmt)
2252 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2253 {
2254 nargs = 0;
2255 rhs_type = unsigned_type_node;
2256 }
2257
2258 for (i = 0; i < nargs; i++)
2259 {
2260 tree opvectype;
2261
2262 op = gimple_call_arg (stmt, i);
2263
2264 /* We can only handle calls with arguments of the same type. */
2265 if (rhs_type
2266 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2267 {
2268 if (dump_enabled_p ())
2269 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2270 "argument types differ.\n");
2271 return false;
2272 }
2273 if (!rhs_type)
2274 rhs_type = TREE_TYPE (op);
2275
2276 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2277 &def_stmt, &def, &dt[i], &opvectype))
2278 {
2279 if (dump_enabled_p ())
2280 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2281 "use not simple.\n");
2282 return false;
2283 }
2284
2285 if (!vectype_in)
2286 vectype_in = opvectype;
2287 else if (opvectype
2288 && opvectype != vectype_in)
2289 {
2290 if (dump_enabled_p ())
2291 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2292 "argument vector types differ.\n");
2293 return false;
2294 }
2295 }
2296   /* If all arguments are external or constant defs, use a vector type with
2297      the same size as the output vector type. */
2298 if (!vectype_in)
2299 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2300 if (vec_stmt)
2301 gcc_assert (vectype_in);
2302 if (!vectype_in)
2303 {
2304 if (dump_enabled_p ())
2305 {
2306 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2307 "no vectype for scalar type ");
2308 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2309 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2310 }
2311
2312 return false;
2313 }
2314
2315 /* FORNOW */
2316 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2317 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
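  /* Classify the call by the ratio of output to input lanes: twice as many
     output lanes is a narrowing call, equal lane counts need no modifier,
     and half as many output lanes is a widening call.  */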
2318 if (nunits_in == nunits_out / 2)
2319 modifier = NARROW;
2320 else if (nunits_out == nunits_in)
2321 modifier = NONE;
2322 else if (nunits_out == nunits_in / 2)
2323 modifier = WIDEN;
2324 else
2325 return false;
2326
2327 /* For now, we only vectorize functions if a target specific builtin
2328 is available. TODO -- in some cases, it might be profitable to
2329 insert the calls for pieces of the vector, in order to be able
2330 to vectorize other operations in the loop. */
2331 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
2332 if (fndecl == NULL_TREE)
2333 {
2334 if (gimple_call_internal_p (stmt)
2335 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
2336 && !slp_node
2337 && loop_vinfo
2338 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2339 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2340 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2341 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2342 {
2343 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2344 { 0, 1, 2, ... vf - 1 } vector. */
2345 gcc_assert (nargs == 0);
2346 }
2347 else
2348 {
2349 if (dump_enabled_p ())
2350 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2351 "function is not vectorizable.\n");
2352 return false;
2353 }
2354 }
2355
2356 gcc_assert (!gimple_vuse (stmt));
2357
2358 if (slp_node || PURE_SLP_STMT (stmt_info))
2359 ncopies = 1;
2360 else if (modifier == NARROW)
2361 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2362 else
2363 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2364
2365 /* Sanity check: make sure that at least one copy of the vectorized stmt
2366 needs to be generated. */
2367 gcc_assert (ncopies >= 1);
2368
2369 if (!vec_stmt) /* transformation not required. */
2370 {
2371 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2372 if (dump_enabled_p ())
2373 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2374 "\n");
2375 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2376 return true;
2377 }
2378
2379 /** Transform. **/
2380
2381 if (dump_enabled_p ())
2382 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2383
2384 /* Handle def. */
2385 scalar_dest = gimple_call_lhs (stmt);
2386 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2387
2388 prev_stmt_info = NULL;
2389 switch (modifier)
2390 {
2391 case NONE:
2392 for (j = 0; j < ncopies; ++j)
2393 {
2394 /* Build argument list for the vectorized call. */
2395 if (j == 0)
2396 vargs.create (nargs);
2397 else
2398 vargs.truncate (0);
2399
2400 if (slp_node)
2401 {
2402 auto_vec<vec<tree> > vec_defs (nargs);
2403 vec<tree> vec_oprnds0;
2404
2405 for (i = 0; i < nargs; i++)
2406 vargs.quick_push (gimple_call_arg (stmt, i));
2407 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2408 vec_oprnds0 = vec_defs[0];
2409
2410 /* Arguments are ready. Create the new vector stmt. */
2411 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2412 {
2413 size_t k;
2414 for (k = 0; k < nargs; k++)
2415 {
2416 vec<tree> vec_oprndsk = vec_defs[k];
2417 vargs[k] = vec_oprndsk[i];
2418 }
2419 new_stmt = gimple_build_call_vec (fndecl, vargs);
2420 new_temp = make_ssa_name (vec_dest, new_stmt);
2421 gimple_call_set_lhs (new_stmt, new_temp);
2422 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2423 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2424 }
2425
2426 for (i = 0; i < nargs; i++)
2427 {
2428 vec<tree> vec_oprndsi = vec_defs[i];
2429 vec_oprndsi.release ();
2430 }
2431 continue;
2432 }
2433
2434 for (i = 0; i < nargs; i++)
2435 {
2436 op = gimple_call_arg (stmt, i);
2437 if (j == 0)
2438 vec_oprnd0
2439 = vect_get_vec_def_for_operand (op, stmt, NULL);
2440 else
2441 {
2442 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2443 vec_oprnd0
2444 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2445 }
2446
2447 vargs.quick_push (vec_oprnd0);
2448 }
2449
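	  /* IFN_GOMP_SIMD_LANE is not a real call: materialize it as the
	     constant lane-index vector { j * nunits_out, ...,
	     j * nunits_out + nunits_out - 1 } for this copy.  */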
2450 if (gimple_call_internal_p (stmt)
2451 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2452 {
2453 tree *v = XALLOCAVEC (tree, nunits_out);
2454 int k;
2455 for (k = 0; k < nunits_out; ++k)
2456 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2457 tree cst = build_vector (vectype_out, v);
2458 tree new_var
2459 = vect_get_new_vect_var (vectype_out, vect_simple_var, "cst_");
2460 gimple init_stmt = gimple_build_assign (new_var, cst);
2461 new_temp = make_ssa_name (new_var, init_stmt);
2462 gimple_assign_set_lhs (init_stmt, new_temp);
2463 vect_init_vector_1 (stmt, init_stmt, NULL);
2464 new_temp = make_ssa_name (vec_dest);
2465 new_stmt = gimple_build_assign (new_temp,
2466 gimple_assign_lhs (init_stmt));
2467 }
2468 else
2469 {
2470 new_stmt = gimple_build_call_vec (fndecl, vargs);
2471 new_temp = make_ssa_name (vec_dest, new_stmt);
2472 gimple_call_set_lhs (new_stmt, new_temp);
2473 }
2474 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2475
2476 if (j == 0)
2477 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2478 else
2479 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2480
2481 prev_stmt_info = vinfo_for_stmt (new_stmt);
2482 }
2483
2484 break;
2485
2486 case NARROW:
2487 for (j = 0; j < ncopies; ++j)
2488 {
2489 /* Build argument list for the vectorized call. */
2490 if (j == 0)
2491 vargs.create (nargs * 2);
2492 else
2493 vargs.truncate (0);
2494
2495 if (slp_node)
2496 {
2497 auto_vec<vec<tree> > vec_defs (nargs);
2498 vec<tree> vec_oprnds0;
2499
2500 for (i = 0; i < nargs; i++)
2501 vargs.quick_push (gimple_call_arg (stmt, i));
2502 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2503 vec_oprnds0 = vec_defs[0];
2504
2505 /* Arguments are ready. Create the new vector stmt. */
2506 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2507 {
2508 size_t k;
2509 vargs.truncate (0);
2510 for (k = 0; k < nargs; k++)
2511 {
2512 vec<tree> vec_oprndsk = vec_defs[k];
2513 vargs.quick_push (vec_oprndsk[i]);
2514 vargs.quick_push (vec_oprndsk[i + 1]);
2515 }
2516 new_stmt = gimple_build_call_vec (fndecl, vargs);
2517 new_temp = make_ssa_name (vec_dest, new_stmt);
2518 gimple_call_set_lhs (new_stmt, new_temp);
2519 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2520 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2521 }
2522
2523 for (i = 0; i < nargs; i++)
2524 {
2525 vec<tree> vec_oprndsi = vec_defs[i];
2526 vec_oprndsi.release ();
2527 }
2528 continue;
2529 }
2530
2531 for (i = 0; i < nargs; i++)
2532 {
2533 op = gimple_call_arg (stmt, i);
2534 if (j == 0)
2535 {
2536 vec_oprnd0
2537 = vect_get_vec_def_for_operand (op, stmt, NULL);
2538 vec_oprnd1
2539 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2540 }
2541 else
2542 {
2543 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2544 vec_oprnd0
2545 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2546 vec_oprnd1
2547 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2548 }
2549
2550 vargs.quick_push (vec_oprnd0);
2551 vargs.quick_push (vec_oprnd1);
2552 }
2553
2554 new_stmt = gimple_build_call_vec (fndecl, vargs);
2555 new_temp = make_ssa_name (vec_dest, new_stmt);
2556 gimple_call_set_lhs (new_stmt, new_temp);
2557 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2558
2559 if (j == 0)
2560 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2561 else
2562 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2563
2564 prev_stmt_info = vinfo_for_stmt (new_stmt);
2565 }
2566
2567 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2568
2569 break;
2570
2571 case WIDEN:
2572 /* No current target implements this case. */
2573 return false;
2574 }
2575
2576 vargs.release ();
2577
2578   /* The call in STMT might prevent it from being removed in dce.
2579      We however cannot remove it here, because of the way the SSA name
2580      it defines is mapped to the new definition.  So just replace the
2581      rhs of the statement with something harmless.  */
2582
2583 if (slp_node)
2584 return true;
2585
2586 type = TREE_TYPE (scalar_dest);
2587 if (is_pattern_stmt_p (stmt_info))
2588 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2589 else
2590 lhs = gimple_call_lhs (stmt);
2591
2592 if (gimple_call_internal_p (stmt)
2593 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2594 {
2595       /* Replace uses of the lhs of the GOMP_SIMD_LANE call outside the loop
2596	 with vf - 1 rather than 0, i.e. the last iteration of the
2597	 vectorized loop.  */
2598 imm_use_iterator iter;
2599 use_operand_p use_p;
2600 gimple use_stmt;
2601 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2602 {
2603 basic_block use_bb = gimple_bb (use_stmt);
2604 if (use_bb
2605 && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
2606 {
2607 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2608 SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
2609 ncopies * nunits_out - 1));
2610 update_stmt (use_stmt);
2611 }
2612 }
2613 }
2614
2615 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2616 set_vinfo_for_stmt (new_stmt, stmt_info);
2617 set_vinfo_for_stmt (stmt, NULL);
2618 STMT_VINFO_STMT (stmt_info) = new_stmt;
2619 gsi_replace (gsi, new_stmt, false);
2620
2621 return true;
2622 }
2623
2624
2625 struct simd_call_arg_info
2626 {
2627 tree vectype;
2628 tree op;
2629 enum vect_def_type dt;
2630 HOST_WIDE_INT linear_step;
2631 unsigned int align;
2632 };
2633
2634 /* Function vectorizable_simd_clone_call.
2635
2636 Check if STMT performs a function call that can be vectorized
2637 by calling a simd clone of the function.
2638 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2639    stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2640 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2641
2642 static bool
2643 vectorizable_simd_clone_call (gimple stmt, gimple_stmt_iterator *gsi,
2644 gimple *vec_stmt, slp_tree slp_node)
2645 {
2646 tree vec_dest;
2647 tree scalar_dest;
2648 tree op, type;
2649 tree vec_oprnd0 = NULL_TREE;
2650 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2651 tree vectype;
2652 unsigned int nunits;
2653 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2654 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2655 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2656 tree fndecl, new_temp, def;
2657 gimple def_stmt;
2658 gimple new_stmt = NULL;
2659 int ncopies, j;
2660 vec<simd_call_arg_info> arginfo = vNULL;
2661 vec<tree> vargs = vNULL;
2662 size_t i, nargs;
2663 tree lhs, rtype, ratype;
2664 vec<constructor_elt, va_gc> *ret_ctor_elts;
2665
2666 /* Is STMT a vectorizable call? */
2667 if (!is_gimple_call (stmt))
2668 return false;
2669
2670 fndecl = gimple_call_fndecl (stmt);
2671 if (fndecl == NULL_TREE)
2672 return false;
2673
2674 struct cgraph_node *node = cgraph_node::get (fndecl);
2675 if (node == NULL || node->simd_clones == NULL)
2676 return false;
2677
2678 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2679 return false;
2680
2681 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2682 return false;
2683
2684 if (gimple_call_lhs (stmt)
2685 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2686 return false;
2687
2688 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2689
2690 vectype = STMT_VINFO_VECTYPE (stmt_info);
2691
2692 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2693 return false;
2694
2695 /* FORNOW */
2696 if (slp_node || PURE_SLP_STMT (stmt_info))
2697 return false;
2698
2699 /* Process function arguments. */
2700 nargs = gimple_call_num_args (stmt);
2701
2702 /* Bail out if the function has zero arguments. */
2703 if (nargs == 0)
2704 return false;
2705
2706 arginfo.create (nargs);
2707
2708 for (i = 0; i < nargs; i++)
2709 {
2710 simd_call_arg_info thisarginfo;
2711 affine_iv iv;
2712
2713 thisarginfo.linear_step = 0;
2714 thisarginfo.align = 0;
2715 thisarginfo.op = NULL_TREE;
2716
2717 op = gimple_call_arg (stmt, i);
2718 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2719 &def_stmt, &def, &thisarginfo.dt,
2720 &thisarginfo.vectype)
2721 || thisarginfo.dt == vect_uninitialized_def)
2722 {
2723 if (dump_enabled_p ())
2724 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2725 "use not simple.\n");
2726 arginfo.release ();
2727 return false;
2728 }
2729
2730 if (thisarginfo.dt == vect_constant_def
2731 || thisarginfo.dt == vect_external_def)
2732 gcc_assert (thisarginfo.vectype == NULL_TREE);
2733 else
2734 gcc_assert (thisarginfo.vectype != NULL_TREE);
2735
2736 /* For linear arguments, the analyze phase should have saved
2737 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
2738 if (i * 2 + 3 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
2739 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 2 + 2])
2740 {
2741 gcc_assert (vec_stmt);
2742 thisarginfo.linear_step
2743 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 2 + 2]);
2744 thisarginfo.op
2745 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 2 + 1];
2746 /* If loop has been peeled for alignment, we need to adjust it. */
2747 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
2748 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
2749 if (n1 != n2)
2750 {
2751 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
2752 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 2 + 2];
2753 tree opt = TREE_TYPE (thisarginfo.op);
2754 bias = fold_convert (TREE_TYPE (step), bias);
2755 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
2756 thisarginfo.op
2757 = fold_build2 (POINTER_TYPE_P (opt)
2758 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
2759 thisarginfo.op, bias);
2760 }
2761 }
2762 else if (!vec_stmt
2763 && thisarginfo.dt != vect_constant_def
2764 && thisarginfo.dt != vect_external_def
2765 && loop_vinfo
2766 && TREE_CODE (op) == SSA_NAME
2767 && simple_iv (loop, loop_containing_stmt (stmt), op,
2768 &iv, false)
2769 && tree_fits_shwi_p (iv.step))
2770 {
2771 thisarginfo.linear_step = tree_to_shwi (iv.step);
2772 thisarginfo.op = iv.base;
2773 }
2774 else if ((thisarginfo.dt == vect_constant_def
2775 || thisarginfo.dt == vect_external_def)
2776 && POINTER_TYPE_P (TREE_TYPE (op)))
2777 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2778
2779 arginfo.quick_push (thisarginfo);
2780 }
2781
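  /* Choose the best simd clone for this call: skip clones whose simdlen
     exceeds the vectorization factor or whose arguments cannot be matched,
     and score the rest, penalizing shorter simdlens and mismatched argument
     kinds; in-branch clones are skipped for now.  */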
2782 unsigned int badness = 0;
2783 struct cgraph_node *bestn = NULL;
2784 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
2785 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
2786 else
2787 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2788 n = n->simdclone->next_clone)
2789 {
2790 unsigned int this_badness = 0;
2791 if (n->simdclone->simdlen
2792 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2793 || n->simdclone->nargs != nargs)
2794 continue;
2795 if (n->simdclone->simdlen
2796 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2797 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2798 - exact_log2 (n->simdclone->simdlen)) * 1024;
2799 if (n->simdclone->inbranch)
2800 this_badness += 2048;
2801 int target_badness = targetm.simd_clone.usable (n);
2802 if (target_badness < 0)
2803 continue;
2804 this_badness += target_badness * 512;
2805 /* FORNOW: Have to add code to add the mask argument. */
2806 if (n->simdclone->inbranch)
2807 continue;
2808 for (i = 0; i < nargs; i++)
2809 {
2810 switch (n->simdclone->args[i].arg_type)
2811 {
2812 case SIMD_CLONE_ARG_TYPE_VECTOR:
2813 if (!useless_type_conversion_p
2814 (n->simdclone->args[i].orig_type,
2815 TREE_TYPE (gimple_call_arg (stmt, i))))
2816 i = -1;
2817 else if (arginfo[i].dt == vect_constant_def
2818 || arginfo[i].dt == vect_external_def
2819 || arginfo[i].linear_step)
2820 this_badness += 64;
2821 break;
2822 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2823 if (arginfo[i].dt != vect_constant_def
2824 && arginfo[i].dt != vect_external_def)
2825 i = -1;
2826 break;
2827 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2828 if (arginfo[i].dt == vect_constant_def
2829 || arginfo[i].dt == vect_external_def
2830 || (arginfo[i].linear_step
2831 != n->simdclone->args[i].linear_step))
2832 i = -1;
2833 break;
2834 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2835 /* FORNOW */
2836 i = -1;
2837 break;
2838 case SIMD_CLONE_ARG_TYPE_MASK:
2839 gcc_unreachable ();
2840 }
2841 if (i == (size_t) -1)
2842 break;
2843 if (n->simdclone->args[i].alignment > arginfo[i].align)
2844 {
2845 i = -1;
2846 break;
2847 }
2848 if (arginfo[i].align)
2849 this_badness += (exact_log2 (arginfo[i].align)
2850 - exact_log2 (n->simdclone->args[i].alignment));
2851 }
2852 if (i == (size_t) -1)
2853 continue;
2854 if (bestn == NULL || this_badness < badness)
2855 {
2856 bestn = n;
2857 badness = this_badness;
2858 }
2859 }
2860
2861 if (bestn == NULL)
2862 {
2863 arginfo.release ();
2864 return false;
2865 }
2866
2867 for (i = 0; i < nargs; i++)
2868 if ((arginfo[i].dt == vect_constant_def
2869 || arginfo[i].dt == vect_external_def)
2870 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2871 {
2872 arginfo[i].vectype
2873 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2874 i)));
2875 if (arginfo[i].vectype == NULL
2876 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2877 > bestn->simdclone->simdlen))
2878 {
2879 arginfo.release ();
2880 return false;
2881 }
2882 }
2883
2884 fndecl = bestn->decl;
2885 nunits = bestn->simdclone->simdlen;
2886 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2887
2888   /* If the function isn't const, only allow it in simd loops where the user
2889      has asserted that at least nunits consecutive iterations can be
2890      performed using SIMD instructions. */
2891 if ((loop == NULL || (unsigned) loop->safelen < nunits)
2892 && gimple_vuse (stmt))
2893 {
2894 arginfo.release ();
2895 return false;
2896 }
2897
2898 /* Sanity check: make sure that at least one copy of the vectorized stmt
2899 needs to be generated. */
2900 gcc_assert (ncopies >= 1);
2901
2902 if (!vec_stmt) /* transformation not required. */
2903 {
2904 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
2905 for (i = 0; i < nargs; i++)
2906 if (bestn->simdclone->args[i].arg_type
2907 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
2908 {
2909 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 2
2910 + 1);
2911 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
2912 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
2913 ? size_type_node : TREE_TYPE (arginfo[i].op);
2914 tree ls = build_int_cst (lst, arginfo[i].linear_step);
2915 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
2916 }
2917 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
2918 if (dump_enabled_p ())
2919 dump_printf_loc (MSG_NOTE, vect_location,
2920 "=== vectorizable_simd_clone_call ===\n");
2921 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2922 arginfo.release ();
2923 return true;
2924 }
2925
2926 /** Transform. **/
2927
2928 if (dump_enabled_p ())
2929 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2930
2931 /* Handle def. */
2932 scalar_dest = gimple_call_lhs (stmt);
2933 vec_dest = NULL_TREE;
2934 rtype = NULL_TREE;
2935 ratype = NULL_TREE;
2936 if (scalar_dest)
2937 {
2938 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2939 rtype = TREE_TYPE (TREE_TYPE (fndecl));
2940 if (TREE_CODE (rtype) == ARRAY_TYPE)
2941 {
2942 ratype = rtype;
2943 rtype = TREE_TYPE (ratype);
2944 }
2945 }
2946
2947 prev_stmt_info = NULL;
2948 for (j = 0; j < ncopies; ++j)
2949 {
2950 /* Build argument list for the vectorized call. */
2951 if (j == 0)
2952 vargs.create (nargs);
2953 else
2954 vargs.truncate (0);
2955
2956 for (i = 0; i < nargs; i++)
2957 {
2958 unsigned int k, l, m, o;
2959 tree atype;
2960 op = gimple_call_arg (stmt, i);
2961 switch (bestn->simdclone->args[i].arg_type)
2962 {
2963 case SIMD_CLONE_ARG_TYPE_VECTOR:
2964 atype = bestn->simdclone->args[i].vector_type;
2965 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
2966 for (m = j * o; m < (j + 1) * o; m++)
2967 {
2968 if (TYPE_VECTOR_SUBPARTS (atype)
2969 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
2970 {
2971 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
2972 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2973 / TYPE_VECTOR_SUBPARTS (atype));
2974 gcc_assert ((k & (k - 1)) == 0);
2975 if (m == 0)
2976 vec_oprnd0
2977 = vect_get_vec_def_for_operand (op, stmt, NULL);
2978 else
2979 {
2980 vec_oprnd0 = arginfo[i].op;
2981 if ((m & (k - 1)) == 0)
2982 vec_oprnd0
2983 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2984 vec_oprnd0);
2985 }
2986 arginfo[i].op = vec_oprnd0;
2987 vec_oprnd0
2988 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
2989 size_int (prec),
2990 bitsize_int ((m & (k - 1)) * prec));
2991 new_stmt
2992 = gimple_build_assign (make_ssa_name (atype),
2993 vec_oprnd0);
2994 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2995 vargs.safe_push (gimple_assign_lhs (new_stmt));
2996 }
2997 else
2998 {
2999 k = (TYPE_VECTOR_SUBPARTS (atype)
3000 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3001 gcc_assert ((k & (k - 1)) == 0);
3002 vec<constructor_elt, va_gc> *ctor_elts;
3003 if (k != 1)
3004 vec_alloc (ctor_elts, k);
3005 else
3006 ctor_elts = NULL;
3007 for (l = 0; l < k; l++)
3008 {
3009 if (m == 0 && l == 0)
3010 vec_oprnd0
3011 = vect_get_vec_def_for_operand (op, stmt, NULL);
3012 else
3013 vec_oprnd0
3014 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3015 arginfo[i].op);
3016 arginfo[i].op = vec_oprnd0;
3017 if (k == 1)
3018 break;
3019 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3020 vec_oprnd0);
3021 }
3022 if (k == 1)
3023 vargs.safe_push (vec_oprnd0);
3024 else
3025 {
3026 vec_oprnd0 = build_constructor (atype, ctor_elts);
3027 new_stmt
3028 = gimple_build_assign (make_ssa_name (atype),
3029 vec_oprnd0);
3030 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3031 vargs.safe_push (gimple_assign_lhs (new_stmt));
3032 }
3033 }
3034 }
3035 break;
3036 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3037 vargs.safe_push (op);
3038 break;
3039 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
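	      /* A linear argument becomes an induction: on the first copy
		 create a loop-header PHI that starts at the base value and is
		 stepped by linear_step * ncopies * nunits each iteration;
		 later copies just add the per-copy multiple of the step.  */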
3040 if (j == 0)
3041 {
3042 gimple_seq stmts;
3043 arginfo[i].op
3044 = force_gimple_operand (arginfo[i].op, &stmts, true,
3045 NULL_TREE);
3046 if (stmts != NULL)
3047 {
3048 basic_block new_bb;
3049 edge pe = loop_preheader_edge (loop);
3050 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3051 gcc_assert (!new_bb);
3052 }
3053 tree phi_res = copy_ssa_name (op);
3054 gphi *new_phi = create_phi_node (phi_res, loop->header);
3055 set_vinfo_for_stmt (new_phi,
3056 new_stmt_vec_info (new_phi, loop_vinfo,
3057 NULL));
3058 add_phi_arg (new_phi, arginfo[i].op,
3059 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3060 enum tree_code code
3061 = POINTER_TYPE_P (TREE_TYPE (op))
3062 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3063 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3064 ? sizetype : TREE_TYPE (op);
3065 widest_int cst
3066 = wi::mul (bestn->simdclone->args[i].linear_step,
3067 ncopies * nunits);
3068 tree tcst = wide_int_to_tree (type, cst);
3069 tree phi_arg = copy_ssa_name (op);
3070 new_stmt
3071 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3072 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3073 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3074 set_vinfo_for_stmt (new_stmt,
3075 new_stmt_vec_info (new_stmt, loop_vinfo,
3076 NULL));
3077 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3078 UNKNOWN_LOCATION);
3079 arginfo[i].op = phi_res;
3080 vargs.safe_push (phi_res);
3081 }
3082 else
3083 {
3084 enum tree_code code
3085 = POINTER_TYPE_P (TREE_TYPE (op))
3086 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3087 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3088 ? sizetype : TREE_TYPE (op);
3089 widest_int cst
3090 = wi::mul (bestn->simdclone->args[i].linear_step,
3091 j * nunits);
3092 tree tcst = wide_int_to_tree (type, cst);
3093 new_temp = make_ssa_name (TREE_TYPE (op));
3094 new_stmt = gimple_build_assign (new_temp, code,
3095 arginfo[i].op, tcst);
3096 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3097 vargs.safe_push (new_temp);
3098 }
3099 break;
3100 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3101 default:
3102 gcc_unreachable ();
3103 }
3104 }
3105
3106 new_stmt = gimple_build_call_vec (fndecl, vargs);
3107 if (vec_dest)
3108 {
3109 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3110 if (ratype)
3111 new_temp = create_tmp_var (ratype);
3112 else if (TYPE_VECTOR_SUBPARTS (vectype)
3113 == TYPE_VECTOR_SUBPARTS (rtype))
3114 new_temp = make_ssa_name (vec_dest, new_stmt);
3115 else
3116 new_temp = make_ssa_name (rtype, new_stmt);
3117 gimple_call_set_lhs (new_stmt, new_temp);
3118 }
3119 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3120
3121 if (vec_dest)
3122 {
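	  /* The clone may return its result in a vector of different width or
	     in an array; extract or assemble the pieces so that each copy of
	     the original stmt gets a value of VECTYPE.  */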
3123 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3124 {
3125 unsigned int k, l;
3126 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3127 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3128 gcc_assert ((k & (k - 1)) == 0);
3129 for (l = 0; l < k; l++)
3130 {
3131 tree t;
3132 if (ratype)
3133 {
3134 t = build_fold_addr_expr (new_temp);
3135 t = build2 (MEM_REF, vectype, t,
3136 build_int_cst (TREE_TYPE (t),
3137 l * prec / BITS_PER_UNIT));
3138 }
3139 else
3140 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3141 size_int (prec), bitsize_int (l * prec));
3142 new_stmt
3143 = gimple_build_assign (make_ssa_name (vectype), t);
3144 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3145 if (j == 0 && l == 0)
3146 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3147 else
3148 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3149
3150 prev_stmt_info = vinfo_for_stmt (new_stmt);
3151 }
3152
3153 if (ratype)
3154 {
3155 tree clobber = build_constructor (ratype, NULL);
3156 TREE_THIS_VOLATILE (clobber) = 1;
3157 new_stmt = gimple_build_assign (new_temp, clobber);
3158 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3159 }
3160 continue;
3161 }
3162 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3163 {
3164 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3165 / TYPE_VECTOR_SUBPARTS (rtype));
3166 gcc_assert ((k & (k - 1)) == 0);
3167 if ((j & (k - 1)) == 0)
3168 vec_alloc (ret_ctor_elts, k);
3169 if (ratype)
3170 {
3171 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3172 for (m = 0; m < o; m++)
3173 {
3174 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3175 size_int (m), NULL_TREE, NULL_TREE);
3176 new_stmt
3177 = gimple_build_assign (make_ssa_name (rtype), tem);
3178 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3179 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3180 gimple_assign_lhs (new_stmt));
3181 }
3182 tree clobber = build_constructor (ratype, NULL);
3183 TREE_THIS_VOLATILE (clobber) = 1;
3184 new_stmt = gimple_build_assign (new_temp, clobber);
3185 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3186 }
3187 else
3188 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3189 if ((j & (k - 1)) != k - 1)
3190 continue;
3191 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3192 new_stmt
3193 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3194 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3195
3196 if ((unsigned) j == k - 1)
3197 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3198 else
3199 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3200
3201 prev_stmt_info = vinfo_for_stmt (new_stmt);
3202 continue;
3203 }
3204 else if (ratype)
3205 {
3206 tree t = build_fold_addr_expr (new_temp);
3207 t = build2 (MEM_REF, vectype, t,
3208 build_int_cst (TREE_TYPE (t), 0));
3209 new_stmt
3210 = gimple_build_assign (make_ssa_name (vec_dest), t);
3211 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3212 tree clobber = build_constructor (ratype, NULL);
3213 TREE_THIS_VOLATILE (clobber) = 1;
3214 vect_finish_stmt_generation (stmt,
3215 gimple_build_assign (new_temp,
3216 clobber), gsi);
3217 }
3218 }
3219
3220 if (j == 0)
3221 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3222 else
3223 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3224
3225 prev_stmt_info = vinfo_for_stmt (new_stmt);
3226 }
3227
3228 vargs.release ();
3229
3230   /* The call in STMT might prevent it from being removed in dce.
3231      We however cannot remove it here, because of the way the SSA name
3232      it defines is mapped to the new definition.  So just replace the
3233      rhs of the statement with something harmless.  */
3234
3235 if (slp_node)
3236 return true;
3237
3238 if (scalar_dest)
3239 {
3240 type = TREE_TYPE (scalar_dest);
3241 if (is_pattern_stmt_p (stmt_info))
3242 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3243 else
3244 lhs = gimple_call_lhs (stmt);
3245 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3246 }
3247 else
3248 new_stmt = gimple_build_nop ();
3249 set_vinfo_for_stmt (new_stmt, stmt_info);
3250 set_vinfo_for_stmt (stmt, NULL);
3251 STMT_VINFO_STMT (stmt_info) = new_stmt;
3252 gsi_replace (gsi, new_stmt, true);
3253 unlink_stmt_vdef (stmt);
3254
3255 return true;
3256 }
3257
3258
3259 /* Function vect_gen_widened_results_half
3260
3261    Create a vector stmt whose code and number of arguments are CODE and
3262    OP_TYPE, whose result variable is VEC_DEST, and whose arguments are
3263    VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at GSI.
3264 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3265 needs to be created (DECL is a function-decl of a target-builtin).
3266 STMT is the original scalar stmt that we are vectorizing. */
3267
3268 static gimple
3269 vect_gen_widened_results_half (enum tree_code code,
3270 tree decl,
3271 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3272 tree vec_dest, gimple_stmt_iterator *gsi,
3273 gimple stmt)
3274 {
3275 gimple new_stmt;
3276 tree new_temp;
3277
3278 /* Generate half of the widened result: */
3279 if (code == CALL_EXPR)
3280 {
3281 /* Target specific support */
3282 if (op_type == binary_op)
3283 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3284 else
3285 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3286 new_temp = make_ssa_name (vec_dest, new_stmt);
3287 gimple_call_set_lhs (new_stmt, new_temp);
3288 }
3289 else
3290 {
3291 /* Generic support */
3292 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3293 if (op_type != binary_op)
3294 vec_oprnd1 = NULL;
3295 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3296 new_temp = make_ssa_name (vec_dest, new_stmt);
3297 gimple_assign_set_lhs (new_stmt, new_temp);
3298 }
3299 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3300
3301 return new_stmt;
3302 }
3303
3304
3305 /* Get vectorized definitions for loop-based vectorization. For the first
3306    operand we call vect_get_vec_def_for_operand() (with OPRND containing
3307    the scalar operand), and for the rest we get a copy with
3308 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3309 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3310 The vectors are collected into VEC_OPRNDS. */
3311
3312 static void
3313 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
3314 vec<tree> *vec_oprnds, int multi_step_cvt)
3315 {
3316 tree vec_oprnd;
3317
3318   /* Get the first vector operand.  */
3319   /* All the vector operands except the very first one (which is the scalar
3320      operand) are stmt copies.  */
3321 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3322 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
3323 else
3324 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3325
3326 vec_oprnds->quick_push (vec_oprnd);
3327
3328 /* Get second vector operand. */
3329 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3330 vec_oprnds->quick_push (vec_oprnd);
3331
3332 *oprnd = vec_oprnd;
3333
3334 /* For conversion in multiple steps, continue to get operands
3335 recursively. */
3336 if (multi_step_cvt)
3337 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3338 }
3339
3340
3341 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3342 For multi-step conversions store the resulting vectors and call the function
3343 recursively. */
3344
3345 static void
3346 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3347 int multi_step_cvt, gimple stmt,
3348 vec<tree> vec_dsts,
3349 gimple_stmt_iterator *gsi,
3350 slp_tree slp_node, enum tree_code code,
3351 stmt_vec_info *prev_stmt_info)
3352 {
3353 unsigned int i;
3354 tree vop0, vop1, new_tmp, vec_dest;
3355 gimple new_stmt;
3356 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3357
3358 vec_dest = vec_dsts.pop ();
3359
3360 for (i = 0; i < vec_oprnds->length (); i += 2)
3361 {
3362 /* Create demotion operation. */
3363 vop0 = (*vec_oprnds)[i];
3364 vop1 = (*vec_oprnds)[i + 1];
3365 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3366 new_tmp = make_ssa_name (vec_dest, new_stmt);
3367 gimple_assign_set_lhs (new_stmt, new_tmp);
3368 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3369
3370 if (multi_step_cvt)
3371 /* Store the resulting vector for next recursive call. */
3372 (*vec_oprnds)[i/2] = new_tmp;
3373 else
3374 {
3375 /* This is the last step of the conversion sequence. Store the
3376 vectors in SLP_NODE or in vector info of the scalar statement
3377 (or in STMT_VINFO_RELATED_STMT chain). */
3378 if (slp_node)
3379 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3380 else
3381 {
3382 if (!*prev_stmt_info)
3383 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3384 else
3385 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3386
3387 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3388 }
3389 }
3390 }
3391
3392   /* For multi-step demotion operations we first generate demotion operations
3393      from the source type to the intermediate types, and then combine the
3394      results (stored in VEC_OPRNDS) with a demotion operation to the
3395      destination type.  */
3396 if (multi_step_cvt)
3397 {
3398 /* At each level of recursion we have half of the operands we had at the
3399 previous level. */
3400 vec_oprnds->truncate ((i+1)/2);
3401 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3402 stmt, vec_dsts, gsi, slp_node,
3403 VEC_PACK_TRUNC_EXPR,
3404 prev_stmt_info);
3405 }
3406
3407 vec_dsts.quick_push (vec_dest);
3408 }
3409
3410
3411 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3412 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3413 the resulting vectors and call the function recursively. */
3414
3415 static void
3416 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3417 vec<tree> *vec_oprnds1,
3418 gimple stmt, tree vec_dest,
3419 gimple_stmt_iterator *gsi,
3420 enum tree_code code1,
3421 enum tree_code code2, tree decl1,
3422 tree decl2, int op_type)
3423 {
3424 int i;
3425 tree vop0, vop1, new_tmp1, new_tmp2;
3426 gimple new_stmt1, new_stmt2;
3427 vec<tree> vec_tmp = vNULL;
3428
3429 vec_tmp.create (vec_oprnds0->length () * 2);
3430 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3431 {
3432 if (op_type == binary_op)
3433 vop1 = (*vec_oprnds1)[i];
3434 else
3435 vop1 = NULL_TREE;
3436
3437 /* Generate the two halves of promotion operation. */
3438 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3439 op_type, vec_dest, gsi, stmt);
3440 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3441 op_type, vec_dest, gsi, stmt);
3442 if (is_gimple_call (new_stmt1))
3443 {
3444 new_tmp1 = gimple_call_lhs (new_stmt1);
3445 new_tmp2 = gimple_call_lhs (new_stmt2);
3446 }
3447 else
3448 {
3449 new_tmp1 = gimple_assign_lhs (new_stmt1);
3450 new_tmp2 = gimple_assign_lhs (new_stmt2);
3451 }
3452
3453 /* Store the results for the next step. */
3454 vec_tmp.quick_push (new_tmp1);
3455 vec_tmp.quick_push (new_tmp2);
3456 }
3457
3458 vec_oprnds0->release ();
3459 *vec_oprnds0 = vec_tmp;
3460 }
3461
3462
3463 /* Check if STMT performs a conversion operation that can be vectorized.
3464 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3465 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3466 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3467
3468 static bool
3469 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
3470 gimple *vec_stmt, slp_tree slp_node)
3471 {
3472 tree vec_dest;
3473 tree scalar_dest;
3474 tree op0, op1 = NULL_TREE;
3475 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3476 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3477 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3478 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3479 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3480 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3481 tree new_temp;
3482 tree def;
3483 gimple def_stmt;
3484 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3485 gimple new_stmt = NULL;
3486 stmt_vec_info prev_stmt_info;
3487 int nunits_in;
3488 int nunits_out;
3489 tree vectype_out, vectype_in;
3490 int ncopies, i, j;
3491 tree lhs_type, rhs_type;
3492 enum { NARROW, NONE, WIDEN } modifier;
3493 vec<tree> vec_oprnds0 = vNULL;
3494 vec<tree> vec_oprnds1 = vNULL;
3495 tree vop0;
3496 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3497 int multi_step_cvt = 0;
3498 vec<tree> vec_dsts = vNULL;
3499 vec<tree> interm_types = vNULL;
3500 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3501 int op_type;
3502 machine_mode rhs_mode;
3503 unsigned short fltsz;
3504
3505 /* Is STMT a vectorizable conversion? */
3506
3507 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3508 return false;
3509
3510 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3511 return false;
3512
3513 if (!is_gimple_assign (stmt))
3514 return false;
3515
3516 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3517 return false;
3518
3519 code = gimple_assign_rhs_code (stmt);
3520 if (!CONVERT_EXPR_CODE_P (code)
3521 && code != FIX_TRUNC_EXPR
3522 && code != FLOAT_EXPR
3523 && code != WIDEN_MULT_EXPR
3524 && code != WIDEN_LSHIFT_EXPR)
3525 return false;
3526
3527 op_type = TREE_CODE_LENGTH (code);
3528
3529 /* Check types of lhs and rhs. */
3530 scalar_dest = gimple_assign_lhs (stmt);
3531 lhs_type = TREE_TYPE (scalar_dest);
3532 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3533
3534 op0 = gimple_assign_rhs1 (stmt);
3535 rhs_type = TREE_TYPE (op0);
3536
3537 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3538 && !((INTEGRAL_TYPE_P (lhs_type)
3539 && INTEGRAL_TYPE_P (rhs_type))
3540 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3541 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3542 return false;
3543
3544 if ((INTEGRAL_TYPE_P (lhs_type)
3545 && (TYPE_PRECISION (lhs_type)
3546 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3547 || (INTEGRAL_TYPE_P (rhs_type)
3548 && (TYPE_PRECISION (rhs_type)
3549 != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
3550 {
3551 if (dump_enabled_p ())
3552 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3553 "type conversion to/from bit-precision unsupported."
3554 "\n");
3555 return false;
3556 }
3557
3558 /* Check the operands of the operation. */
3559 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
3560 &def_stmt, &def, &dt[0], &vectype_in))
3561 {
3562 if (dump_enabled_p ())
3563 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3564 "use not simple.\n");
3565 return false;
3566 }
3567 if (op_type == binary_op)
3568 {
3569 bool ok;
3570
3571 op1 = gimple_assign_rhs2 (stmt);
3572 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3573 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3574 OP1. */
3575 if (CONSTANT_CLASS_P (op0))
3576 ok = vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo,
3577 &def_stmt, &def, &dt[1], &vectype_in);
3578 else
3579 ok = vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
3580 &def, &dt[1]);
3581
3582 if (!ok)
3583 {
3584 if (dump_enabled_p ())
3585 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3586 "use not simple.\n");
3587 return false;
3588 }
3589 }
3590
3591 /* If op0 is an external or constant def, use a vector type of
3592 the same size as the output vector type. */
3593 if (!vectype_in)
3594 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3595 if (vec_stmt)
3596 gcc_assert (vectype_in);
3597 if (!vectype_in)
3598 {
3599 if (dump_enabled_p ())
3600 {
3601 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3602 "no vectype for scalar type ");
3603 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3604 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3605 }
3606
3607 return false;
3608 }
3609
3610 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3611 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3612 if (nunits_in < nunits_out)
3613 modifier = NARROW;
3614 else if (nunits_out == nunits_in)
3615 modifier = NONE;
3616 else
3617 modifier = WIDEN;
3618
3619 /* Multiple types in SLP are handled by creating the appropriate number of
3620 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3621 case of SLP. */
3622 if (slp_node || PURE_SLP_STMT (stmt_info))
3623 ncopies = 1;
3624 else if (modifier == NARROW)
3625 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3626 else
3627 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3628
3629 /* Sanity check: make sure that at least one copy of the vectorized stmt
3630 needs to be generated. */
3631 gcc_assert (ncopies >= 1);
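 /* As a concrete example of the computation above: with a vectorization
 factor of 8 and vectors holding 4 elements, NCOPIES = 8 / 4 = 2, i.e.
 each scalar conversion stmt gives rise to two vector stmts.  */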
3632
3633 /* Supportable by target? */
3634 switch (modifier)
3635 {
3636 case NONE:
3637 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3638 return false;
3639 if (supportable_convert_operation (code, vectype_out, vectype_in,
3640 &decl1, &code1))
3641 break;
3642 /* FALLTHRU */
3643 unsupported:
3644 if (dump_enabled_p ())
3645 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3646 "conversion not supported by target.\n");
3647 return false;
3648
3649 case WIDEN:
3650 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3651 &code1, &code2, &multi_step_cvt,
3652 &interm_types))
3653 {
3654 /* Binary widening operations can only be supported directly by the
3655 architecture. */
3656 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3657 break;
3658 }
3659
3660 if (code != FLOAT_EXPR
3661 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3662 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3663 goto unsupported;
3664
3665 rhs_mode = TYPE_MODE (rhs_type);
3666 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3667 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3668 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3669 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3670 {
3671 cvt_type
3672 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3673 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3674 if (cvt_type == NULL_TREE)
3675 goto unsupported;
3676
3677 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3678 {
3679 if (!supportable_convert_operation (code, vectype_out,
3680 cvt_type, &decl1, &codecvt1))
3681 goto unsupported;
3682 }
3683 else if (!supportable_widening_operation (code, stmt, vectype_out,
3684 cvt_type, &codecvt1,
3685 &codecvt2, &multi_step_cvt,
3686 &interm_types))
3687 continue;
3688 else
3689 gcc_assert (multi_step_cvt == 0);
3690
3691 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3692 vectype_in, &code1, &code2,
3693 &multi_step_cvt, &interm_types))
3694 break;
3695 }
3696
3697 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3698 goto unsupported;
3699
3700 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3701 codecvt2 = ERROR_MARK;
3702 else
3703 {
3704 multi_step_cvt++;
3705 interm_types.safe_push (cvt_type);
3706 cvt_type = NULL_TREE;
3707 }
3708 break;
3709
3710 case NARROW:
3711 gcc_assert (op_type == unary_op);
3712 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3713 &code1, &multi_step_cvt,
3714 &interm_types))
3715 break;
3716
3717 if (code != FIX_TRUNC_EXPR
3718 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3719 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3720 goto unsupported;
3721
3722 rhs_mode = TYPE_MODE (rhs_type);
3723 cvt_type
3724 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3725 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3726 if (cvt_type == NULL_TREE)
3727 goto unsupported;
3728 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3729 &decl1, &codecvt1))
3730 goto unsupported;
3731 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3732 &code1, &multi_step_cvt,
3733 &interm_types))
3734 break;
3735 goto unsupported;
3736
3737 default:
3738 gcc_unreachable ();
3739 }
3740
3741 if (!vec_stmt) /* transformation not required. */
3742 {
3743 if (dump_enabled_p ())
3744 dump_printf_loc (MSG_NOTE, vect_location,
3745 "=== vectorizable_conversion ===\n");
3746 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3747 {
3748 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3749 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3750 }
3751 else if (modifier == NARROW)
3752 {
3753 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3754 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3755 }
3756 else
3757 {
3758 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3759 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3760 }
3761 interm_types.release ();
3762 return true;
3763 }
3764
3765 /** Transform. **/
3766 if (dump_enabled_p ())
3767 dump_printf_loc (MSG_NOTE, vect_location,
3768 "transform conversion. ncopies = %d.\n", ncopies);
3769
3770 if (op_type == binary_op)
3771 {
3772 if (CONSTANT_CLASS_P (op0))
3773 op0 = fold_convert (TREE_TYPE (op1), op0);
3774 else if (CONSTANT_CLASS_P (op1))
3775 op1 = fold_convert (TREE_TYPE (op0), op1);
3776 }
3777
3778 /* In case of multi-step conversion, we first generate conversion operations
3779 to the intermediate types, and then from those types to the final one.
3780 We create vector destinations for the intermediate types (TYPES) received
3781 from supportable_*_operation, and store them in the correct order
3782 for future use in vect_create_vectorized_*_stmts (). */
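 /* For example, on a target with 128-bit vectors a char -> int widening
 conversion cannot be done in one step; it goes V16QI -> V8HI -> V4SI,
 so the V8HI intermediate type ends up in INTERM_TYPES and
 MULTI_STEP_CVT counts the extra step.  */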
3783 vec_dsts.create (multi_step_cvt + 1);
3784 vec_dest = vect_create_destination_var (scalar_dest,
3785 (cvt_type && modifier == WIDEN)
3786 ? cvt_type : vectype_out);
3787 vec_dsts.quick_push (vec_dest);
3788
3789 if (multi_step_cvt)
3790 {
3791 for (i = interm_types.length () - 1;
3792 interm_types.iterate (i, &intermediate_type); i--)
3793 {
3794 vec_dest = vect_create_destination_var (scalar_dest,
3795 intermediate_type);
3796 vec_dsts.quick_push (vec_dest);
3797 }
3798 }
3799
3800 if (cvt_type)
3801 vec_dest = vect_create_destination_var (scalar_dest,
3802 modifier == WIDEN
3803 ? vectype_out : cvt_type);
3804
3805 if (!slp_node)
3806 {
3807 if (modifier == WIDEN)
3808 {
3809 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3810 if (op_type == binary_op)
3811 vec_oprnds1.create (1);
3812 }
3813 else if (modifier == NARROW)
3814 vec_oprnds0.create (
3815 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3816 }
3817 else if (code == WIDEN_LSHIFT_EXPR)
3818 vec_oprnds1.create (slp_node->vec_stmts_size);
3819
3820 last_oprnd = op0;
3821 prev_stmt_info = NULL;
3822 switch (modifier)
3823 {
3824 case NONE:
3825 for (j = 0; j < ncopies; j++)
3826 {
3827 if (j == 0)
3828 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3829 -1);
3830 else
3831 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3832
3833 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3834 {
3835 /* Arguments are ready. Create the new vector stmt. */
3836 if (code1 == CALL_EXPR)
3837 {
3838 new_stmt = gimple_build_call (decl1, 1, vop0);
3839 new_temp = make_ssa_name (vec_dest, new_stmt);
3840 gimple_call_set_lhs (new_stmt, new_temp);
3841 }
3842 else
3843 {
3844 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3845 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
3846 new_temp = make_ssa_name (vec_dest, new_stmt);
3847 gimple_assign_set_lhs (new_stmt, new_temp);
3848 }
3849
3850 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3851 if (slp_node)
3852 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3853 else
3854 {
3855 if (!prev_stmt_info)
3856 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3857 else
3858 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3859 prev_stmt_info = vinfo_for_stmt (new_stmt);
3860 }
3861 }
3862 }
3863 break;
3864
3865 case WIDEN:
3866 /* In case the vectorization factor (VF) is bigger than the number
3867 of elements that we can fit in a vectype (nunits), we have to
3868 generate more than one vector stmt - i.e., we need to "unroll"
3869 the vector stmt by a factor of VF/nunits. */
3870 for (j = 0; j < ncopies; j++)
3871 {
3872 /* Handle uses. */
3873 if (j == 0)
3874 {
3875 if (slp_node)
3876 {
3877 if (code == WIDEN_LSHIFT_EXPR)
3878 {
3879 unsigned int k;
3880
3881 vec_oprnd1 = op1;
3882 /* Store vec_oprnd1 for every vector stmt to be created
3883 for SLP_NODE. We check during the analysis that all
3884 the shift arguments are the same. */
3885 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3886 vec_oprnds1.quick_push (vec_oprnd1);
3887
3888 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3889 slp_node, -1);
3890 }
3891 else
3892 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3893 &vec_oprnds1, slp_node, -1);
3894 }
3895 else
3896 {
3897 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3898 vec_oprnds0.quick_push (vec_oprnd0);
3899 if (op_type == binary_op)
3900 {
3901 if (code == WIDEN_LSHIFT_EXPR)
3902 vec_oprnd1 = op1;
3903 else
3904 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt,
3905 NULL);
3906 vec_oprnds1.quick_push (vec_oprnd1);
3907 }
3908 }
3909 }
3910 else
3911 {
3912 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3913 vec_oprnds0.truncate (0);
3914 vec_oprnds0.quick_push (vec_oprnd0);
3915 if (op_type == binary_op)
3916 {
3917 if (code == WIDEN_LSHIFT_EXPR)
3918 vec_oprnd1 = op1;
3919 else
3920 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
3921 vec_oprnd1);
3922 vec_oprnds1.truncate (0);
3923 vec_oprnds1.quick_push (vec_oprnd1);
3924 }
3925 }
3926
3927 /* Arguments are ready. Create the new vector stmts. */
3928 for (i = multi_step_cvt; i >= 0; i--)
3929 {
3930 tree this_dest = vec_dsts[i];
3931 enum tree_code c1 = code1, c2 = code2;
3932 if (i == 0 && codecvt2 != ERROR_MARK)
3933 {
3934 c1 = codecvt1;
3935 c2 = codecvt2;
3936 }
3937 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
3938 &vec_oprnds1,
3939 stmt, this_dest, gsi,
3940 c1, c2, decl1, decl2,
3941 op_type);
3942 }
3943
3944 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3945 {
3946 if (cvt_type)
3947 {
3948 if (codecvt1 == CALL_EXPR)
3949 {
3950 new_stmt = gimple_build_call (decl1, 1, vop0);
3951 new_temp = make_ssa_name (vec_dest, new_stmt);
3952 gimple_call_set_lhs (new_stmt, new_temp);
3953 }
3954 else
3955 {
3956 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3957 new_temp = make_ssa_name (vec_dest);
3958 new_stmt = gimple_build_assign (new_temp, codecvt1,
3959 vop0);
3960 }
3961
3962 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3963 }
3964 else
3965 new_stmt = SSA_NAME_DEF_STMT (vop0);
3966
3967 if (slp_node)
3968 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3969 else
3970 {
3971 if (!prev_stmt_info)
3972 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3973 else
3974 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3975 prev_stmt_info = vinfo_for_stmt (new_stmt);
3976 }
3977 }
3978 }
3979
3980 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3981 break;
3982
3983 case NARROW:
3984 /* In case the vectorization factor (VF) is bigger than the number
3985 of elements that we can fit in a vectype (nunits), we have to
3986 generate more than one vector stmt - i.e., we need to "unroll"
3987 the vector stmt by a factor of VF/nunits. */
3988 for (j = 0; j < ncopies; j++)
3989 {
3990 /* Handle uses. */
3991 if (slp_node)
3992 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3993 slp_node, -1);
3994 else
3995 {
3996 vec_oprnds0.truncate (0);
3997 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
3998 vect_pow2 (multi_step_cvt) - 1);
3999 }
4000
4001 /* Arguments are ready. Create the new vector stmts. */
4002 if (cvt_type)
4003 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4004 {
4005 if (codecvt1 == CALL_EXPR)
4006 {
4007 new_stmt = gimple_build_call (decl1, 1, vop0);
4008 new_temp = make_ssa_name (vec_dest, new_stmt);
4009 gimple_call_set_lhs (new_stmt, new_temp);
4010 }
4011 else
4012 {
4013 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4014 new_temp = make_ssa_name (vec_dest);
4015 new_stmt = gimple_build_assign (new_temp, codecvt1,
4016 vop0);
4017 }
4018
4019 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4020 vec_oprnds0[i] = new_temp;
4021 }
4022
4023 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4024 stmt, vec_dsts, gsi,
4025 slp_node, code1,
4026 &prev_stmt_info);
4027 }
4028
4029 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4030 break;
4031 }
4032
4033 vec_oprnds0.release ();
4034 vec_oprnds1.release ();
4035 vec_dsts.release ();
4036 interm_types.release ();
4037
4038 return true;
4039 }
4040
4041
4042 /* Function vectorizable_assignment.
4043
4044 Check if STMT performs an assignment (copy) that can be vectorized.
4045 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4046 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4047 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
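 /* For instance, a scalar copy "x_1 = y_2", a PAREN_EXPR, or a conversion
 that merely reinterprets the bits of an operand of the same vector size
 (see the checks below) is handled here as a simple vector copy.  */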
4048
4049 static bool
4050 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
4051 gimple *vec_stmt, slp_tree slp_node)
4052 {
4053 tree vec_dest;
4054 tree scalar_dest;
4055 tree op;
4056 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4057 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4058 tree new_temp;
4059 tree def;
4060 gimple def_stmt;
4061 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4062 int ncopies;
4063 int i, j;
4064 vec<tree> vec_oprnds = vNULL;
4065 tree vop;
4066 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4067 gimple new_stmt = NULL;
4068 stmt_vec_info prev_stmt_info = NULL;
4069 enum tree_code code;
4070 tree vectype_in;
4071
4072 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4073 return false;
4074
4075 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4076 return false;
4077
4078 /* Is vectorizable assignment? */
4079 if (!is_gimple_assign (stmt))
4080 return false;
4081
4082 scalar_dest = gimple_assign_lhs (stmt);
4083 if (TREE_CODE (scalar_dest) != SSA_NAME)
4084 return false;
4085
4086 code = gimple_assign_rhs_code (stmt);
4087 if (gimple_assign_single_p (stmt)
4088 || code == PAREN_EXPR
4089 || CONVERT_EXPR_CODE_P (code))
4090 op = gimple_assign_rhs1 (stmt);
4091 else
4092 return false;
4093
4094 if (code == VIEW_CONVERT_EXPR)
4095 op = TREE_OPERAND (op, 0);
4096
4097 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4098 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4099
4100 /* Multiple types in SLP are handled by creating the appropriate number of
4101 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4102 case of SLP. */
4103 if (slp_node || PURE_SLP_STMT (stmt_info))
4104 ncopies = 1;
4105 else
4106 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4107
4108 gcc_assert (ncopies >= 1);
4109
4110 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
4111 &def_stmt, &def, &dt[0], &vectype_in))
4112 {
4113 if (dump_enabled_p ())
4114 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4115 "use not simple.\n");
4116 return false;
4117 }
4118
4119 /* We can handle NOP_EXPR and VIEW_CONVERT_EXPR conversions that do not
4120 change the number of elements or the vector size. */
4121 if ((CONVERT_EXPR_CODE_P (code)
4122 || code == VIEW_CONVERT_EXPR)
4123 && (!vectype_in
4124 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4125 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4126 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4127 return false;
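 /* E.g. a conversion between "int" and "unsigned int" elements, or a
 VIEW_CONVERT_EXPR between V4SI and V4SF, satisfies this; an
 int -> short conversion does not, since it changes the number of
 elements per vector.  */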
4128
4129 /* We do not handle bit-precision changes. */
4130 if ((CONVERT_EXPR_CODE_P (code)
4131 || code == VIEW_CONVERT_EXPR)
4132 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4133 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4134 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4135 || ((TYPE_PRECISION (TREE_TYPE (op))
4136 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4137 /* But a conversion that does not change the bit-pattern is ok. */
4138 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4139 > TYPE_PRECISION (TREE_TYPE (op)))
4140 && TYPE_UNSIGNED (TREE_TYPE (op))))
4141 {
4142 if (dump_enabled_p ())
4143 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4144 "type conversion to/from bit-precision "
4145 "unsupported.\n");
4146 return false;
4147 }
4148
4149 if (!vec_stmt) /* transformation not required. */
4150 {
4151 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4152 if (dump_enabled_p ())
4153 dump_printf_loc (MSG_NOTE, vect_location,
4154 "=== vectorizable_assignment ===\n");
4155 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4156 return true;
4157 }
4158
4159 /** Transform. **/
4160 if (dump_enabled_p ())
4161 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4162
4163 /* Handle def. */
4164 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4165
4166 /* Handle use. */
4167 for (j = 0; j < ncopies; j++)
4168 {
4169 /* Handle uses. */
4170 if (j == 0)
4171 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4172 else
4173 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4174
4175 /* Arguments are ready. Create the new vector stmt. */
4176 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4177 {
4178 if (CONVERT_EXPR_CODE_P (code)
4179 || code == VIEW_CONVERT_EXPR)
4180 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4181 new_stmt = gimple_build_assign (vec_dest, vop);
4182 new_temp = make_ssa_name (vec_dest, new_stmt);
4183 gimple_assign_set_lhs (new_stmt, new_temp);
4184 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4185 if (slp_node)
4186 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4187 }
4188
4189 if (slp_node)
4190 continue;
4191
4192 if (j == 0)
4193 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4194 else
4195 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4196
4197 prev_stmt_info = vinfo_for_stmt (new_stmt);
4198 }
4199
4200 vec_oprnds.release ();
4201 return true;
4202 }
4203
4204
4205 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4206 either as shift by a scalar or by a vector. */
4207
4208 bool
4209 vect_supportable_shift (enum tree_code code, tree scalar_type)
4210 {
4211
4212 machine_mode vec_mode;
4213 optab optab;
4214 int icode;
4215 tree vectype;
4216
4217 vectype = get_vectype_for_scalar_type (scalar_type);
4218 if (!vectype)
4219 return false;
4220
4221 optab = optab_for_tree_code (code, vectype, optab_scalar);
4222 if (!optab
4223 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4224 {
4225 optab = optab_for_tree_code (code, vectype, optab_vector);
4226 if (!optab
4227 || (optab_handler (optab, TYPE_MODE (vectype))
4228 == CODE_FOR_nothing))
4229 return false;
4230 }
4231
4232 vec_mode = TYPE_MODE (vectype);
4233 icode = (int) optab_handler (optab, vec_mode);
4234 if (icode == CODE_FOR_nothing)
4235 return false;
4236
4237 return true;
4238 }
4239
4240
4241 /* Function vectorizable_shift.
4242
4243 Check if STMT performs a shift operation that can be vectorized.
4244 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4245 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4246 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4247
4248 static bool
4249 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
4250 gimple *vec_stmt, slp_tree slp_node)
4251 {
4252 tree vec_dest;
4253 tree scalar_dest;
4254 tree op0, op1 = NULL;
4255 tree vec_oprnd1 = NULL_TREE;
4256 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4257 tree vectype;
4258 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4259 enum tree_code code;
4260 machine_mode vec_mode;
4261 tree new_temp;
4262 optab optab;
4263 int icode;
4264 machine_mode optab_op2_mode;
4265 tree def;
4266 gimple def_stmt;
4267 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4268 gimple new_stmt = NULL;
4269 stmt_vec_info prev_stmt_info;
4270 int nunits_in;
4271 int nunits_out;
4272 tree vectype_out;
4273 tree op1_vectype;
4274 int ncopies;
4275 int j, i;
4276 vec<tree> vec_oprnds0 = vNULL;
4277 vec<tree> vec_oprnds1 = vNULL;
4278 tree vop0, vop1;
4279 unsigned int k;
4280 bool scalar_shift_arg = true;
4281 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4282 int vf;
4283
4284 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4285 return false;
4286
4287 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4288 return false;
4289
4290 /* Is STMT a vectorizable binary/unary operation? */
4291 if (!is_gimple_assign (stmt))
4292 return false;
4293
4294 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4295 return false;
4296
4297 code = gimple_assign_rhs_code (stmt);
4298
4299 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4300 || code == RROTATE_EXPR))
4301 return false;
4302
4303 scalar_dest = gimple_assign_lhs (stmt);
4304 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4305 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4306 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4307 {
4308 if (dump_enabled_p ())
4309 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4310 "bit-precision shifts not supported.\n");
4311 return false;
4312 }
4313
4314 op0 = gimple_assign_rhs1 (stmt);
4315 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
4316 &def_stmt, &def, &dt[0], &vectype))
4317 {
4318 if (dump_enabled_p ())
4319 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4320 "use not simple.\n");
4321 return false;
4322 }
4323 /* If op0 is an external or constant def, use a vector type with
4324 the same size as the output vector type. */
4325 if (!vectype)
4326 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4327 if (vec_stmt)
4328 gcc_assert (vectype);
4329 if (!vectype)
4330 {
4331 if (dump_enabled_p ())
4332 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4333 "no vectype for scalar type\n");
4334 return false;
4335 }
4336
4337 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4338 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4339 if (nunits_out != nunits_in)
4340 return false;
4341
4342 op1 = gimple_assign_rhs2 (stmt);
4343 if (!vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4344 &def, &dt[1], &op1_vectype))
4345 {
4346 if (dump_enabled_p ())
4347 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4348 "use not simple.\n");
4349 return false;
4350 }
4351
4352 if (loop_vinfo)
4353 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4354 else
4355 vf = 1;
4356
4357 /* Multiple types in SLP are handled by creating the appropriate number of
4358 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4359 case of SLP. */
4360 if (slp_node || PURE_SLP_STMT (stmt_info))
4361 ncopies = 1;
4362 else
4363 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4364
4365 gcc_assert (ncopies >= 1);
4366
4367 /* Determine whether the shift amount is a vector, or scalar. If the
4368 shift/rotate amount is a vector, use the vector/vector shift optabs. */
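 /* For example:

 for (i = 0; i < n; i++)
 a[i] = b[i] << c;       <-- invariant amount: vector/scalar shift

 for (i = 0; i < n; i++)
 a[i] = b[i] << d[i];    <-- per-element amount: vector/vector shift  */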
4369
4370 if (dt[1] == vect_internal_def && !slp_node)
4371 scalar_shift_arg = false;
4372 else if (dt[1] == vect_constant_def
4373 || dt[1] == vect_external_def
4374 || dt[1] == vect_internal_def)
4375 {
4376 /* In SLP, we need to check whether the shift count is the same in
4377 all the stmts of the node; in loops, if it is a constant or
4378 invariant, it is always a scalar shift. */
4379 if (slp_node)
4380 {
4381 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4382 gimple slpstmt;
4383
4384 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4385 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4386 scalar_shift_arg = false;
4387 }
4388 }
4389 else
4390 {
4391 if (dump_enabled_p ())
4392 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4393 "operand mode requires invariant argument.\n");
4394 return false;
4395 }
4396
4397 /* Vector shifted by vector. */
4398 if (!scalar_shift_arg)
4399 {
4400 optab = optab_for_tree_code (code, vectype, optab_vector);
4401 if (dump_enabled_p ())
4402 dump_printf_loc (MSG_NOTE, vect_location,
4403 "vector/vector shift/rotate found.\n");
4404
4405 if (!op1_vectype)
4406 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4407 if (op1_vectype == NULL_TREE
4408 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4409 {
4410 if (dump_enabled_p ())
4411 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4412 "unusable type for last operand in"
4413 " vector/vector shift/rotate.\n");
4414 return false;
4415 }
4416 }
4417 /* See if the machine has a vector shifted by scalar insn and if not
4418 then see if it has a vector shifted by vector insn. */
4419 else
4420 {
4421 optab = optab_for_tree_code (code, vectype, optab_scalar);
4422 if (optab
4423 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4424 {
4425 if (dump_enabled_p ())
4426 dump_printf_loc (MSG_NOTE, vect_location,
4427 "vector/scalar shift/rotate found.\n");
4428 }
4429 else
4430 {
4431 optab = optab_for_tree_code (code, vectype, optab_vector);
4432 if (optab
4433 && (optab_handler (optab, TYPE_MODE (vectype))
4434 != CODE_FOR_nothing))
4435 {
4436 scalar_shift_arg = false;
4437
4438 if (dump_enabled_p ())
4439 dump_printf_loc (MSG_NOTE, vect_location,
4440 "vector/vector shift/rotate found.\n");
4441
4442 /* Unlike the other binary operators, shifts/rotates have
4443 the rhs being int, instead of the same type as the lhs,
4444 so make sure the scalar is the right type if we are
4445 dealing with vectors of long long/long/short/char. */
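 /* E.g. for a vector of long long shifted left by the int constant 3,
 the constant is converted to (long long) 3 below so that it matches
 the element type of the shift's first operand.  */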
4446 if (dt[1] == vect_constant_def)
4447 op1 = fold_convert (TREE_TYPE (vectype), op1);
4448 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4449 TREE_TYPE (op1)))
4450 {
4451 if (slp_node
4452 && TYPE_MODE (TREE_TYPE (vectype))
4453 != TYPE_MODE (TREE_TYPE (op1)))
4454 {
4455 if (dump_enabled_p ())
4456 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4457 "unusable type for last operand in"
4458 " vector/vector shift/rotate.\n");
4459 return false;
4460 }
4461 if (vec_stmt && !slp_node)
4462 {
4463 op1 = fold_convert (TREE_TYPE (vectype), op1);
4464 op1 = vect_init_vector (stmt, op1,
4465 TREE_TYPE (vectype), NULL);
4466 }
4467 }
4468 }
4469 }
4470 }
4471
4472 /* Supportable by target? */
4473 if (!optab)
4474 {
4475 if (dump_enabled_p ())
4476 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4477 "no optab.\n");
4478 return false;
4479 }
4480 vec_mode = TYPE_MODE (vectype);
4481 icode = (int) optab_handler (optab, vec_mode);
4482 if (icode == CODE_FOR_nothing)
4483 {
4484 if (dump_enabled_p ())
4485 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4486 "op not supported by target.\n");
4487 /* Check only during analysis. */
4488 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4489 || (vf < vect_min_worthwhile_factor (code)
4490 && !vec_stmt))
4491 return false;
4492 if (dump_enabled_p ())
4493 dump_printf_loc (MSG_NOTE, vect_location,
4494 "proceeding using word mode.\n");
4495 }
4496
4497 /* Worthwhile without SIMD support? Check only during analysis. */
4498 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4499 && vf < vect_min_worthwhile_factor (code)
4500 && !vec_stmt)
4501 {
4502 if (dump_enabled_p ())
4503 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4504 "not worthwhile without SIMD support.\n");
4505 return false;
4506 }
4507
4508 if (!vec_stmt) /* transformation not required. */
4509 {
4510 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4511 if (dump_enabled_p ())
4512 dump_printf_loc (MSG_NOTE, vect_location,
4513 "=== vectorizable_shift ===\n");
4514 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4515 return true;
4516 }
4517
4518 /** Transform. **/
4519
4520 if (dump_enabled_p ())
4521 dump_printf_loc (MSG_NOTE, vect_location,
4522 "transform binary/unary operation.\n");
4523
4524 /* Handle def. */
4525 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4526
4527 prev_stmt_info = NULL;
4528 for (j = 0; j < ncopies; j++)
4529 {
4530 /* Handle uses. */
4531 if (j == 0)
4532 {
4533 if (scalar_shift_arg)
4534 {
4535 /* Vector shl and shr insn patterns can be defined with scalar
4536 operand 2 (shift operand). In this case, use constant or loop
4537 invariant op1 directly, without extending it to vector mode
4538 first. */
4539 optab_op2_mode = insn_data[icode].operand[2].mode;
4540 if (!VECTOR_MODE_P (optab_op2_mode))
4541 {
4542 if (dump_enabled_p ())
4543 dump_printf_loc (MSG_NOTE, vect_location,
4544 "operand 1 using scalar mode.\n");
4545 vec_oprnd1 = op1;
4546 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4547 vec_oprnds1.quick_push (vec_oprnd1);
4548 if (slp_node)
4549 {
4550 /* Store vec_oprnd1 for every vector stmt to be created
4551 for SLP_NODE. We check during the analysis that all
4552 the shift arguments are the same.
4553 TODO: Allow different constants for different vector
4554 stmts generated for an SLP instance. */
4555 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4556 vec_oprnds1.quick_push (vec_oprnd1);
4557 }
4558 }
4559 }
4560
4561 /* vec_oprnd1 is available if operand 1 should be of a scalar type
4562 (a special case for certain kinds of vector shifts); otherwise,
4563 operand 1 should be of a vector type (the usual case). */
4564 if (vec_oprnd1)
4565 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4566 slp_node, -1);
4567 else
4568 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4569 slp_node, -1);
4570 }
4571 else
4572 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4573
4574 /* Arguments are ready. Create the new vector stmt. */
4575 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4576 {
4577 vop1 = vec_oprnds1[i];
4578 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4579 new_temp = make_ssa_name (vec_dest, new_stmt);
4580 gimple_assign_set_lhs (new_stmt, new_temp);
4581 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4582 if (slp_node)
4583 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4584 }
4585
4586 if (slp_node)
4587 continue;
4588
4589 if (j == 0)
4590 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4591 else
4592 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4593 prev_stmt_info = vinfo_for_stmt (new_stmt);
4594 }
4595
4596 vec_oprnds0.release ();
4597 vec_oprnds1.release ();
4598
4599 return true;
4600 }
4601
4602
4603 /* Function vectorizable_operation.
4604
4605 Check if STMT performs a binary, unary or ternary operation that can
4606 be vectorized.
4607 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4608 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4609 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4610
4611 static bool
4612 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
4613 gimple *vec_stmt, slp_tree slp_node)
4614 {
4615 tree vec_dest;
4616 tree scalar_dest;
4617 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4618 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4619 tree vectype;
4620 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4621 enum tree_code code;
4622 machine_mode vec_mode;
4623 tree new_temp;
4624 int op_type;
4625 optab optab;
4626 int icode;
4627 tree def;
4628 gimple def_stmt;
4629 enum vect_def_type dt[3]
4630 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4631 gimple new_stmt = NULL;
4632 stmt_vec_info prev_stmt_info;
4633 int nunits_in;
4634 int nunits_out;
4635 tree vectype_out;
4636 int ncopies;
4637 int j, i;
4638 vec<tree> vec_oprnds0 = vNULL;
4639 vec<tree> vec_oprnds1 = vNULL;
4640 vec<tree> vec_oprnds2 = vNULL;
4641 tree vop0, vop1, vop2;
4642 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4643 int vf;
4644
4645 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4646 return false;
4647
4648 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4649 return false;
4650
4651 /* Is STMT a vectorizable binary/unary operation? */
4652 if (!is_gimple_assign (stmt))
4653 return false;
4654
4655 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4656 return false;
4657
4658 code = gimple_assign_rhs_code (stmt);
4659
4660 /* For pointer addition, we should use the normal plus for
4661 the vector addition. */
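 /* E.g. a scalar "q_1 = p_2 + 16" (POINTER_PLUS_EXPR) is vectorized
 below as an ordinary element-wise PLUS_EXPR on the vectorized
 operands.  */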
4662 if (code == POINTER_PLUS_EXPR)
4663 code = PLUS_EXPR;
4664
4665 /* Support only unary, binary and ternary operations. */
4666 op_type = TREE_CODE_LENGTH (code);
4667 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
4668 {
4669 if (dump_enabled_p ())
4670 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4671 "num. args = %d (not unary/binary/ternary op).\n",
4672 op_type);
4673 return false;
4674 }
4675
4676 scalar_dest = gimple_assign_lhs (stmt);
4677 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4678
4679 /* Most operations cannot handle bit-precision types without extra
4680 truncations. */
4681 if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4682 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4683 /* Exceptions are the bitwise binary operations. */
4684 && code != BIT_IOR_EXPR
4685 && code != BIT_XOR_EXPR
4686 && code != BIT_AND_EXPR)
4687 {
4688 if (dump_enabled_p ())
4689 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4690 "bit-precision arithmetic not supported.\n");
4691 return false;
4692 }
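 /* The check above rejects, for instance, arithmetic on a C bit-field
 type such as "int x : 3": its precision (3) differs from the precision
 of its machine mode, so every vector operation would need an extra
 truncation.  The bitwise IOR/XOR/AND exceptions are safe because they
 never produce bits outside the operands' precision.  */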
4693
4694 op0 = gimple_assign_rhs1 (stmt);
4695 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
4696 &def_stmt, &def, &dt[0], &vectype))
4697 {
4698 if (dump_enabled_p ())
4699 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4700 "use not simple.\n");
4701 return false;
4702 }
4703 /* If op0 is an external or constant def, use a vector type with
4704 the same size as the output vector type. */
4705 if (!vectype)
4706 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4707 if (vec_stmt)
4708 gcc_assert (vectype);
4709 if (!vectype)
4710 {
4711 if (dump_enabled_p ())
4712 {
4713 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4714 "no vectype for scalar type ");
4715 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4716 TREE_TYPE (op0));
4717 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4718 }
4719
4720 return false;
4721 }
4722
4723 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4724 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4725 if (nunits_out != nunits_in)
4726 return false;
4727
4728 if (op_type == binary_op || op_type == ternary_op)
4729 {
4730 op1 = gimple_assign_rhs2 (stmt);
4731 if (!vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4732 &def, &dt[1]))
4733 {
4734 if (dump_enabled_p ())
4735 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4736 "use not simple.\n");
4737 return false;
4738 }
4739 }
4740 if (op_type == ternary_op)
4741 {
4742 op2 = gimple_assign_rhs3 (stmt);
4743 if (!vect_is_simple_use (op2, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4744 &def, &dt[2]))
4745 {
4746 if (dump_enabled_p ())
4747 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4748 "use not simple.\n");
4749 return false;
4750 }
4751 }
4752
4753 if (loop_vinfo)
4754 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4755 else
4756 vf = 1;
4757
4758 /* Multiple types in SLP are handled by creating the appropriate number of
4759 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4760 case of SLP. */
4761 if (slp_node || PURE_SLP_STMT (stmt_info))
4762 ncopies = 1;
4763 else
4764 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4765
4766 gcc_assert (ncopies >= 1);
4767
4768 /* Shifts are handled in vectorizable_shift (). */
4769 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4770 || code == RROTATE_EXPR)
4771 return false;
4772
4773 /* Supportable by target? */
4774
4775 vec_mode = TYPE_MODE (vectype);
4776 if (code == MULT_HIGHPART_EXPR)
4777 {
4778 if (can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype)))
4779 icode = LAST_INSN_CODE;
4780 else
4781 icode = CODE_FOR_nothing;
4782 }
4783 else
4784 {
4785 optab = optab_for_tree_code (code, vectype, optab_default);
4786 if (!optab)
4787 {
4788 if (dump_enabled_p ())
4789 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4790 "no optab.\n");
4791 return false;
4792 }
4793 icode = (int) optab_handler (optab, vec_mode);
4794 }
4795
4796 if (icode == CODE_FOR_nothing)
4797 {
4798 if (dump_enabled_p ())
4799 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4800 "op not supported by target.\n");
4801 /* Check only during analysis. */
4802 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4803 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
4804 return false;
4805 if (dump_enabled_p ())
4806 dump_printf_loc (MSG_NOTE, vect_location,
4807 "proceeding using word mode.\n");
4808 }
4809
4810 /* Worthwhile without SIMD support? Check only during analysis. */
4811 if (!VECTOR_MODE_P (vec_mode)
4812 && !vec_stmt
4813 && vf < vect_min_worthwhile_factor (code))
4814 {
4815 if (dump_enabled_p ())
4816 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4817 "not worthwhile without SIMD support.\n");
4818 return false;
4819 }
4820
4821 if (!vec_stmt) /* transformation not required. */
4822 {
4823 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
4824 if (dump_enabled_p ())
4825 dump_printf_loc (MSG_NOTE, vect_location,
4826 "=== vectorizable_operation ===\n");
4827 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4828 return true;
4829 }
4830
4831 /** Transform. **/
4832
4833 if (dump_enabled_p ())
4834 dump_printf_loc (MSG_NOTE, vect_location,
4835 "transform binary/unary operation.\n");
4836
4837 /* Handle def. */
4838 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4839
4840 /* In case the vectorization factor (VF) is bigger than the number
4841 of elements that we can fit in a vectype (nunits), we have to generate
4842 more than one vector stmt - i.e., we need to "unroll" the
4843 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4844 from one copy of the vector stmt to the next, in the field
4845 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4846 stages to find the correct vector defs to be used when vectorizing
4847 stmts that use the defs of the current stmt. The example below
4848 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4849 we need to create 4 vectorized stmts):
4850
4851 before vectorization:
4852 RELATED_STMT VEC_STMT
4853 S1: x = memref - -
4854 S2: z = x + 1 - -
4855
4856 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4857 there):
4858 RELATED_STMT VEC_STMT
4859 VS1_0: vx0 = memref0 VS1_1 -
4860 VS1_1: vx1 = memref1 VS1_2 -
4861 VS1_2: vx2 = memref2 VS1_3 -
4862 VS1_3: vx3 = memref3 - -
4863 S1: x = load - VS1_0
4864 S2: z = x + 1 - -
4865
4866 step2: vectorize stmt S2 (done here):
4867 To vectorize stmt S2 we first need to find the relevant vector
4868 def for the first operand 'x'. This is, as usual, obtained from
4869 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4870 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4871 relevant vector def 'vx0'. Having found 'vx0' we can generate
4872 the vector stmt VS2_0, and as usual, record it in the
4873 STMT_VINFO_VEC_STMT of stmt S2.
4874 When creating the second copy (VS2_1), we obtain the relevant vector
4875 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4876 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4877 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4878 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4879 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4880 chain of stmts and pointers:
4881 RELATED_STMT VEC_STMT
4882 VS1_0: vx0 = memref0 VS1_1 -
4883 VS1_1: vx1 = memref1 VS1_2 -
4884 VS1_2: vx2 = memref2 VS1_3 -
4885 VS1_3: vx3 = memref3 - -
4886 S1: x = load - VS1_0
4887 VS2_0: vz0 = vx0 + v1 VS2_1 -
4888 VS2_1: vz1 = vx1 + v1 VS2_2 -
4889 VS2_2: vz2 = vx2 + v1 VS2_3 -
4890 VS2_3: vz3 = vx3 + v1 - -
4891 S2: z = x + 1 - VS2_0 */
4892
4893 prev_stmt_info = NULL;
4894 for (j = 0; j < ncopies; j++)
4895 {
4896 /* Handle uses. */
4897 if (j == 0)
4898 {
4899 if (op_type == binary_op || op_type == ternary_op)
4900 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4901 slp_node, -1);
4902 else
4903 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4904 slp_node, -1);
4905 if (op_type == ternary_op)
4906 {
4907 vec_oprnds2.create (1);
4908 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
4909 stmt,
4910 NULL));
4911 }
4912 }
4913 else
4914 {
4915 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4916 if (op_type == ternary_op)
4917 {
4918 tree vec_oprnd = vec_oprnds2.pop ();
4919 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
4920 vec_oprnd));
4921 }
4922 }
4923
4924 /* Arguments are ready. Create the new vector stmt. */
4925 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4926 {
4927 vop1 = ((op_type == binary_op || op_type == ternary_op)
4928 ? vec_oprnds1[i] : NULL_TREE);
4929 vop2 = ((op_type == ternary_op)
4930 ? vec_oprnds2[i] : NULL_TREE);
4931 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
4932 new_temp = make_ssa_name (vec_dest, new_stmt);
4933 gimple_assign_set_lhs (new_stmt, new_temp);
4934 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4935 if (slp_node)
4936 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4937 }
4938
4939 if (slp_node)
4940 continue;
4941
4942 if (j == 0)
4943 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4944 else
4945 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4946 prev_stmt_info = vinfo_for_stmt (new_stmt);
4947 }
4948
4949 vec_oprnds0.release ();
4950 vec_oprnds1.release ();
4951 vec_oprnds2.release ();
4952
4953 return true;
4954 }
4955
4956 /* A helper function to ensure data reference DR's base alignment
4957 for STMT_INFO. */
4958
4959 static void
4960 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
4961 {
4962 if (!dr->aux)
4963 return;
4964
4965 if (((dataref_aux *)dr->aux)->base_misaligned)
4966 {
4967 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4968 tree base_decl = ((dataref_aux *)dr->aux)->base_decl;
4969
4970 if (decl_in_symtab_p (base_decl))
4971 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
4972 else
4973 {
4974 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
4975 DECL_USER_ALIGN (base_decl) = 1;
4976 }
4977 ((dataref_aux *)dr->aux)->base_misaligned = false;
4978 }
4979 }
4980
4981
4982 /* Given a vector type VECTYPE, returns the VECTOR_CST mask that implements
4983 reversal of the vector elements. If that is impossible to do,
4984 returns NULL. */
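 /* For example, for a 4-element vector the mask built below is
 { 3, 2, 1, 0 }, i.e. it selects the elements in reverse order.  */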
4985
4986 static tree
4987 perm_mask_for_reverse (tree vectype)
4988 {
4989 int i, nunits;
4990 unsigned char *sel;
4991
4992 nunits = TYPE_VECTOR_SUBPARTS (vectype);
4993 sel = XALLOCAVEC (unsigned char, nunits);
4994
4995 for (i = 0; i < nunits; ++i)
4996 sel[i] = nunits - 1 - i;
4997
4998 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
4999 return NULL_TREE;
5000 return vect_gen_perm_mask_checked (vectype, sel);
5001 }
5002
5003 /* Function vectorizable_store.
5004
5005 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
5006 can be vectorized.
5007 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5008 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5009 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5010
5011 static bool
5012 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
5013 slp_tree slp_node)
5014 {
5015 tree scalar_dest;
5016 tree data_ref;
5017 tree op;
5018 tree vec_oprnd = NULL_TREE;
5019 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5020 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5021 tree elem_type;
5022 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5023 struct loop *loop = NULL;
5024 machine_mode vec_mode;
5025 tree dummy;
5026 enum dr_alignment_support alignment_support_scheme;
5027 tree def;
5028 gimple def_stmt;
5029 enum vect_def_type dt;
5030 stmt_vec_info prev_stmt_info = NULL;
5031 tree dataref_ptr = NULL_TREE;
5032 tree dataref_offset = NULL_TREE;
5033 gimple ptr_incr = NULL;
5034 int ncopies;
5035 int j;
5036 gimple next_stmt, first_stmt = NULL;
5037 bool grouped_store = false;
5038 bool store_lanes_p = false;
5039 unsigned int group_size, i;
5040 vec<tree> dr_chain = vNULL;
5041 vec<tree> oprnds = vNULL;
5042 vec<tree> result_chain = vNULL;
5043 bool inv_p;
5044 bool negative = false;
5045 tree offset = NULL_TREE;
5046 vec<tree> vec_oprnds = vNULL;
5047 bool slp = (slp_node != NULL);
5048 unsigned int vec_num;
5049 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5050 tree aggr_type;
5051
5052 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5053 return false;
5054
5055 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5056 return false;
5057
5058 /* Is vectorizable store? */
5059
5060 if (!is_gimple_assign (stmt))
5061 return false;
5062
5063 scalar_dest = gimple_assign_lhs (stmt);
5064 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5065 && is_pattern_stmt_p (stmt_info))
5066 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5067 if (TREE_CODE (scalar_dest) != ARRAY_REF
5068 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5069 && TREE_CODE (scalar_dest) != INDIRECT_REF
5070 && TREE_CODE (scalar_dest) != COMPONENT_REF
5071 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5072 && TREE_CODE (scalar_dest) != REALPART_EXPR
5073 && TREE_CODE (scalar_dest) != MEM_REF)
5074 return false;
5075
5076 gcc_assert (gimple_assign_single_p (stmt));
5077
5078 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5079 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5080
5081 if (loop_vinfo)
5082 loop = LOOP_VINFO_LOOP (loop_vinfo);
5083
5084 /* Multiple types in SLP are handled by creating the appropriate number of
5085 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5086 case of SLP. */
5087 if (slp || PURE_SLP_STMT (stmt_info))
5088 ncopies = 1;
5089 else
5090 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5091
5092 gcc_assert (ncopies >= 1);
5093
5094 /* FORNOW. This restriction should be relaxed. */
5095 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5096 {
5097 if (dump_enabled_p ())
5098 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5099 "multiple types in nested loop.\n");
5100 return false;
5101 }
5102
5103 op = gimple_assign_rhs1 (stmt);
5104 if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo, &def_stmt,
5105 &def, &dt))
5106 {
5107 if (dump_enabled_p ())
5108 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5109 "use not simple.\n");
5110 return false;
5111 }
5112
5113 elem_type = TREE_TYPE (vectype);
5114 vec_mode = TYPE_MODE (vectype);
5115
5116 /* FORNOW. In some cases we can vectorize even if the data-type is not
5117 supported (e.g. array initialization with 0). */
5118 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5119 return false;
5120
5121 if (!STMT_VINFO_DATA_REF (stmt_info))
5122 return false;
5123
5124 if (!STMT_VINFO_STRIDED_P (stmt_info))
5125 {
5126 negative =
5127 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5128 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5129 size_zero_node) < 0;
5130 if (negative && ncopies > 1)
5131 {
5132 if (dump_enabled_p ())
5133 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5134 "multiple types with negative step.\n");
5135 return false;
5136 }
5137 if (negative)
5138 {
5139 gcc_assert (!grouped_store);
5140 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5141 if (alignment_support_scheme != dr_aligned
5142 && alignment_support_scheme != dr_unaligned_supported)
5143 {
5144 if (dump_enabled_p ())
5145 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5146 "negative step but alignment required.\n");
5147 return false;
5148 }
5149 if (dt != vect_constant_def
5150 && dt != vect_external_def
5151 && !perm_mask_for_reverse (vectype))
5152 {
5153 if (dump_enabled_p ())
5154 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5155 "negative step and reversing not supported.\n");
5156 return false;
5157 }
5158 }
5159 }
5160
5161 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5162 {
5163 grouped_store = true;
5164 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5165 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5166 if (!slp
5167 && !PURE_SLP_STMT (stmt_info)
5168 && !STMT_VINFO_STRIDED_P (stmt_info))
5169 {
5170 if (vect_store_lanes_supported (vectype, group_size))
5171 store_lanes_p = true;
5172 else if (!vect_grouped_store_supported (vectype, group_size))
5173 return false;
5174 }
5175
5176 if (STMT_VINFO_STRIDED_P (stmt_info)
5177 && (slp || PURE_SLP_STMT (stmt_info))
5178 && (group_size > nunits
5179 || nunits % group_size != 0))
5180 {
5181 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5182 "unhandled strided group store\n");
5183 return false;
5184 }
5185
5186 if (first_stmt == stmt)
5187 {
5188 /* STMT is the leader of the group. Check the operands of all the
5189 stmts of the group. */
5190 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5191 while (next_stmt)
5192 {
5193 gcc_assert (gimple_assign_single_p (next_stmt));
5194 op = gimple_assign_rhs1 (next_stmt);
5195 if (!vect_is_simple_use (op, next_stmt, loop_vinfo, bb_vinfo,
5196 &def_stmt, &def, &dt))
5197 {
5198 if (dump_enabled_p ())
5199 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5200 "use not simple.\n");
5201 return false;
5202 }
5203 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5204 }
5205 }
5206 }
5207
5208 if (!vec_stmt) /* transformation not required. */
5209 {
5210 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5211 /* The SLP costs are calculated during SLP analysis. */
5212 if (!PURE_SLP_STMT (stmt_info))
5213 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5214 NULL, NULL, NULL);
5215 return true;
5216 }
5217
5218 /** Transform. **/
5219
5220 ensure_base_align (stmt_info, dr);
5221
5222 if (grouped_store)
5223 {
5224 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5225 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5226
5227 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5228
5229 /* FORNOW */
5230 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5231
5232 /* We vectorize all the stmts of the interleaving group when we
5233 reach the last stmt in the group. */
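 /* E.g. for the group  a[4*i] = x0; a[4*i+1] = x1; a[4*i+2] = x2;
 a[4*i+3] = x3;  the earlier stores only bump GROUP_STORE_COUNT and
 return; all the vector stores are emitted once the last scalar store
 of the group is reached.  */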
5234 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5235 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5236 && !slp)
5237 {
5238 *vec_stmt = NULL;
5239 return true;
5240 }
5241
5242 if (slp)
5243 {
5244 grouped_store = false;
5245 /* VEC_NUM is the number of vect stmts to be created for this
5246 group. */
5247 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5248 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5249 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5250 op = gimple_assign_rhs1 (first_stmt);
5251 }
5252 else
5253 /* VEC_NUM is the number of vect stmts to be created for this
5254 group. */
5255 vec_num = group_size;
5256 }
5257 else
5258 {
5259 first_stmt = stmt;
5260 first_dr = dr;
5261 group_size = vec_num = 1;
5262 }
5263
5264 if (dump_enabled_p ())
5265 dump_printf_loc (MSG_NOTE, vect_location,
5266 "transform store. ncopies = %d\n", ncopies);
5267
5268 if (STMT_VINFO_STRIDED_P (stmt_info))
5269 {
5270 gimple_stmt_iterator incr_gsi;
5271 bool insert_after;
5272 gimple incr;
5273 tree offvar;
5274 tree ivstep;
5275 tree running_off;
5276 gimple_seq stmts = NULL;
5277 tree stride_base, stride_step, alias_off;
5278 tree vec_oprnd;
5279 unsigned int g;
5280
5281 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5282
5283 stride_base
5284 = fold_build_pointer_plus
5285 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
5286 size_binop (PLUS_EXPR,
5287 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
5288 convert_to_ptrofftype (DR_INIT (first_dr))));
5289 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
5290
5291 /* For a store with loop-invariant (but other than power-of-2)
5292 stride (i.e. not a grouped access) like so:
5293
5294 for (i = 0; i < n; i += stride)
5295 array[i] = ...;
5296
5297 we generate a new induction variable and new stores from
5298 the components of the (vectorized) rhs:
5299
5300 for (j = 0; ; j += VF*stride)
5301 vectemp = ...;
5302 tmp1 = vectemp[0];
5303 array[j] = tmp1;
5304 tmp2 = vectemp[1];
5305 array[j + stride] = tmp2;
5306 ...
5307 */
5308
5309 unsigned nstores = nunits;
5310 tree ltype = elem_type;
5311 if (slp)
5312 {
5313 nstores = nunits / group_size;
5314 if (group_size < nunits)
5315 ltype = build_vector_type (elem_type, group_size);
5316 else
5317 ltype = vectype;
5318 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
5319 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5320 group_size = 1;
5321 }
5322
5323 ivstep = stride_step;
5324 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
5325 build_int_cst (TREE_TYPE (ivstep),
5326 ncopies * nstores));
5327
5328 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5329
5330 create_iv (stride_base, ivstep, NULL,
5331 loop, &incr_gsi, insert_after,
5332 &offvar, NULL);
5333 incr = gsi_stmt (incr_gsi);
5334 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
5335
5336 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
5337 if (stmts)
5338 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
5339
5340 prev_stmt_info = NULL;
5341 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
5342 next_stmt = first_stmt;
5343 for (g = 0; g < group_size; g++)
5344 {
5345 running_off = offvar;
5346 if (g)
5347 {
5348 tree size = TYPE_SIZE_UNIT (ltype);
5349 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
5350 size);
5351 tree newoff = copy_ssa_name (running_off, NULL);
5352 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5353 running_off, pos);
5354 vect_finish_stmt_generation (stmt, incr, gsi);
5355 running_off = newoff;
5356 }
5357 for (j = 0; j < ncopies; j++)
5358 {
5359 /* We've set op and dt above, from gimple_assign_rhs1 (stmt),
5360 and first_stmt == stmt. */
5361 if (j == 0)
5362 {
5363 if (slp)
5364 {
5365 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
5366 slp_node, -1);
5367 vec_oprnd = vec_oprnds[0];
5368 }
5369 else
5370 {
5371 gcc_assert (gimple_assign_single_p (next_stmt));
5372 op = gimple_assign_rhs1 (next_stmt);
5373 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
5374 NULL);
5375 }
5376 }
5377 else
5378 {
5379 if (slp)
5380 vec_oprnd = vec_oprnds[j];
5381 else
5382 {
5383 vect_is_simple_use (vec_oprnd, NULL, loop_vinfo,
5384 bb_vinfo, &def_stmt, &def, &dt);
5385 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
5386 }
5387 }
5388
5389 for (i = 0; i < nstores; i++)
5390 {
5391 tree newref, newoff;
5392 gimple incr, assign;
5393 tree size = TYPE_SIZE (ltype);
5394 /* Extract the i'th component. */
5395 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
5396 bitsize_int (i), size);
5397 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
5398 size, pos);
5399
5400 elem = force_gimple_operand_gsi (gsi, elem, true,
5401 NULL_TREE, true,
5402 GSI_SAME_STMT);
5403
5404 newref = build2 (MEM_REF, ltype,
5405 running_off, alias_off);
5406
5407 /* And store it to *running_off. */
5408 assign = gimple_build_assign (newref, elem);
5409 vect_finish_stmt_generation (stmt, assign, gsi);
5410
5411 newoff = copy_ssa_name (running_off, NULL);
5412 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5413 running_off, stride_step);
5414 vect_finish_stmt_generation (stmt, incr, gsi);
5415
5416 running_off = newoff;
5417 if (g == group_size - 1
5418 && !slp)
5419 {
5420 if (j == 0 && i == 0)
5421 STMT_VINFO_VEC_STMT (stmt_info)
5422 = *vec_stmt = assign;
5423 else
5424 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
5425 prev_stmt_info = vinfo_for_stmt (assign);
5426 }
5427 }
5428 }
5429 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5430 }
5431 return true;
5432 }
5433
5434 dr_chain.create (group_size);
5435 oprnds.create (group_size);
5436
5437 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
5438 gcc_assert (alignment_support_scheme);
5439 /* Targets with store-lane instructions must not require explicit
5440 realignment. */
5441 gcc_assert (!store_lanes_p
5442 || alignment_support_scheme == dr_aligned
5443 || alignment_support_scheme == dr_unaligned_supported);
5444
5445 if (negative)
5446 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
5447
5448 if (store_lanes_p)
5449 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5450 else
5451 aggr_type = vectype;
5452
5453 /* In case the vectorization factor (VF) is bigger than the number
5454 of elements that we can fit in a vectype (nunits), we have to generate
5455 more than one vector stmt - i.e., we need to "unroll" the
5456 vector stmt by a factor of VF/nunits. For more details see the
5457 documentation in vect_get_vec_def_for_stmt_copy. */
5458
5459 /* In case of interleaving (non-unit grouped access):
5460
5461 S1: &base + 2 = x2
5462 S2: &base = x0
5463 S3: &base + 1 = x1
5464 S4: &base + 3 = x3
5465
5466 We create vectorized stores starting from the base address (the access of
5467 the first stmt in the chain - S2 in the above example) when the last store
5468 stmt of the chain (S4) is reached:
5469
5470 VS1: &base = vx2
5471 VS2: &base + vec_size*1 = vx0
5472 VS3: &base + vec_size*2 = vx1
5473 VS4: &base + vec_size*3 = vx3
5474
5475 Then permutation statements are generated:
5476
5477 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5478 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5479 ...
5480
5481 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5482 (the order of the data-refs in the output of vect_permute_store_chain
5483 corresponds to the order of scalar stmts in the interleaving chain - see
5484 the documentation of vect_permute_store_chain()).
5485
5486 In case of both multiple types and interleaving, above vector stores and
5487 permutation stmts are created for every copy. The result vector stmts are
5488 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5489 STMT_VINFO_RELATED_STMT for the next copies.
5490 */
5491
5492 prev_stmt_info = NULL;
5493 for (j = 0; j < ncopies; j++)
5494 {
5495 gimple new_stmt;
5496
5497 if (j == 0)
5498 {
5499 if (slp)
5500 {
5501 /* Get vectorized arguments for SLP_NODE. */
5502 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5503 NULL, slp_node, -1);
5504
5505 vec_oprnd = vec_oprnds[0];
5506 }
5507 else
5508 {
5509 /* For interleaved stores we collect vectorized defs for all the
5510 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5511 used as an input to vect_permute_store_chain(), and OPRNDS as
5512 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5513
5514 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5515 OPRNDS are of size 1. */
5516 next_stmt = first_stmt;
5517 for (i = 0; i < group_size; i++)
5518 {
5519 /* Since gaps are not supported for interleaved stores,
5520 GROUP_SIZE is the exact number of stmts in the chain.
5521 Therefore, NEXT_STMT can't be NULL_TREE. In case there
5522 is no interleaving, GROUP_SIZE is 1, and only one
5523 iteration of the loop will be executed.
5524 gcc_assert (next_stmt
5525 && gimple_assign_single_p (next_stmt));
5526 op = gimple_assign_rhs1 (next_stmt);
5527
5528 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
5529 NULL);
5530 dr_chain.quick_push (vec_oprnd);
5531 oprnds.quick_push (vec_oprnd);
5532 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5533 }
5534 }
5535
5536 /* We should have caught mismatched types earlier. */
5537 gcc_assert (useless_type_conversion_p (vectype,
5538 TREE_TYPE (vec_oprnd)));
5539 bool simd_lane_access_p
5540 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5541 if (simd_lane_access_p
5542 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5543 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5544 && integer_zerop (DR_OFFSET (first_dr))
5545 && integer_zerop (DR_INIT (first_dr))
5546 && alias_sets_conflict_p (get_alias_set (aggr_type),
5547 get_alias_set (DR_REF (first_dr))))
5548 {
5549 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5550 dataref_offset = build_int_cst (reference_alias_ptr_type
5551 (DR_REF (first_dr)), 0);
5552 inv_p = false;
5553 }
5554 else
5555 dataref_ptr
5556 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5557 simd_lane_access_p ? loop : NULL,
5558 offset, &dummy, gsi, &ptr_incr,
5559 simd_lane_access_p, &inv_p);
5560 gcc_assert (bb_vinfo || !inv_p);
5561 }
5562 else
5563 {
5564 /* For interleaved stores we created vectorized defs for all the
5565 defs stored in OPRNDS in the previous iteration (previous copy).
5566 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5567 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5568 next copy.
5569 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5570 OPRNDS are of size 1. */
5571 for (i = 0; i < group_size; i++)
5572 {
5573 op = oprnds[i];
5574 vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo, &def_stmt,
5575 &def, &dt);
5576 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5577 dr_chain[i] = vec_oprnd;
5578 oprnds[i] = vec_oprnd;
5579 }
5580 if (dataref_offset)
5581 dataref_offset
5582 = int_const_binop (PLUS_EXPR, dataref_offset,
5583 TYPE_SIZE_UNIT (aggr_type));
5584 else
5585 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5586 TYPE_SIZE_UNIT (aggr_type));
5587 }
5588
5589 if (store_lanes_p)
5590 {
5591 tree vec_array;
5592
5593 /* Combine all the vectors into an array. */
5594 vec_array = create_vector_array (vectype, vec_num);
5595 for (i = 0; i < vec_num; i++)
5596 {
5597 vec_oprnd = dr_chain[i];
5598 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
5599 }
5600
5601 /* Emit:
5602 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5603 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5604 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5605 gimple_call_set_lhs (new_stmt, data_ref);
5606 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5607 }
5608 else
5609 {
5610 new_stmt = NULL;
5611 if (grouped_store)
5612 {
5613 if (j == 0)
5614 result_chain.create (group_size);
5615 /* Permute. */
5616 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5617 &result_chain);
5618 }
5619
5620 next_stmt = first_stmt;
5621 for (i = 0; i < vec_num; i++)
5622 {
5623 unsigned align, misalign;
5624
5625 if (i > 0)
5626 /* Bump the vector pointer. */
5627 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5628 stmt, NULL_TREE);
5629
5630 if (slp)
5631 vec_oprnd = vec_oprnds[i];
5632 else if (grouped_store)
5633 /* For grouped stores vectorized defs are interleaved in
5634 vect_permute_store_chain(). */
5635 vec_oprnd = result_chain[i];
5636
5637 data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
5638 dataref_ptr,
5639 dataref_offset
5640 ? dataref_offset
5641 : build_int_cst (reference_alias_ptr_type
5642 (DR_REF (first_dr)), 0));
5643 align = TYPE_ALIGN_UNIT (vectype);
5644 if (aligned_access_p (first_dr))
5645 misalign = 0;
5646 else if (DR_MISALIGNMENT (first_dr) == -1)
5647 {
5648 TREE_TYPE (data_ref)
5649 = build_aligned_type (TREE_TYPE (data_ref),
5650 TYPE_ALIGN (elem_type));
5651 align = TYPE_ALIGN_UNIT (elem_type);
5652 misalign = 0;
5653 }
5654 else
5655 {
5656 TREE_TYPE (data_ref)
5657 = build_aligned_type (TREE_TYPE (data_ref),
5658 TYPE_ALIGN (elem_type));
5659 misalign = DR_MISALIGNMENT (first_dr);
5660 }
5661 if (dataref_offset == NULL_TREE
5662 && TREE_CODE (dataref_ptr) == SSA_NAME)
5663 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
5664 misalign);
5665
5666 if (negative
5667 && dt != vect_constant_def
5668 && dt != vect_external_def)
5669 {
5670 tree perm_mask = perm_mask_for_reverse (vectype);
5671 tree perm_dest
5672 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
5673 vectype);
5674 tree new_temp = make_ssa_name (perm_dest);
5675
5676 /* Generate the permute statement. */
5677 gimple perm_stmt
5678 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
5679 vec_oprnd, perm_mask);
5680 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5681
5682 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
5683 vec_oprnd = new_temp;
5684 }
5685
5686 /* Arguments are ready. Create the new vector stmt. */
5687 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5688 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5689
5690 if (slp)
5691 continue;
5692
5693 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5694 if (!next_stmt)
5695 break;
5696 }
5697 }
5698 if (!slp)
5699 {
5700 if (j == 0)
5701 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5702 else
5703 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5704 prev_stmt_info = vinfo_for_stmt (new_stmt);
5705 }
5706 }
5707
5708 dr_chain.release ();
5709 oprnds.release ();
5710 result_chain.release ();
5711 vec_oprnds.release ();
5712
5713 return true;
5714 }
5715
5716 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
5717 VECTOR_CST mask. No checks are made that the target platform supports the
5718 mask, so callers may wish to test can_vec_perm_p separately, or use
5719 vect_gen_perm_mask_checked. */
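/* A small sketch (the element count is illustrative): for a 4-element
   VECTYPE and SEL == { 3, 2, 1, 0 } - the kind of selector used to
   reverse a vector, cf. perm_mask_for_reverse - the result is the
   VECTOR_CST { 3, 2, 1, 0 } whose elements have an unsigned integer
   type of the same width as VECTYPE's element type.  */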
5720
5721 tree
5722 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
5723 {
5724 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
5725 int i, nunits;
5726
5727 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5728
5729 mask_elt_type = lang_hooks.types.type_for_mode
5730 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
5731 mask_type = get_vectype_for_scalar_type (mask_elt_type);
5732
5733 mask_elts = XALLOCAVEC (tree, nunits);
5734 for (i = nunits - 1; i >= 0; i--)
5735 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5736 mask_vec = build_vector (mask_type, mask_elts);
5737
5738 return mask_vec;
5739 }
5740
5741 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
5742 i.e. that the target supports the pattern _for arbitrary input vectors_. */
5743
5744 tree
5745 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
5746 {
5747 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
5748 return vect_gen_perm_mask_any (vectype, sel);
5749 }
5750
5751 /* Given vector variables X and Y that were generated for the scalar
5752 STMT, generate instructions to permute the vector elements of X and Y
5753 using permutation mask MASK_VEC, insert them at *GSI, and return the
5754 permuted vector variable. */
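/* The statement emitted below has the form

     data_ref = VEC_PERM_EXPR <X, Y, MASK_VEC>;

   where data_ref is a fresh SSA name derived from STMT's lhs (the
   actual name is whatever vect_create_destination_var produces).  */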
5755
5756 static tree
5757 permute_vec_elements (tree x, tree y, tree mask_vec, gimple stmt,
5758 gimple_stmt_iterator *gsi)
5759 {
5760 tree vectype = TREE_TYPE (x);
5761 tree perm_dest, data_ref;
5762 gimple perm_stmt;
5763
5764 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
5765 data_ref = make_ssa_name (perm_dest);
5766
5767 /* Generate the permute statement. */
5768 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
5769 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5770
5771 return data_ref;
5772 }
5773
5774 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5775 inserting them on the loop's preheader edge. Returns true if we
5776 were successful in doing so (and thus STMT can then be moved),
5777 otherwise returns false. */
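/* A small sketch (not taken from a real testcase): if STMT is

     _5 = *p_2;

   and p_2 is defined inside LOOP by "p_2 = q_1 + 16" where q_1 is
   defined outside LOOP, then the definition of p_2 is moved to the
   preheader edge and STMT can be moved after it.  If p_2 were instead
   defined by a PHI node, or by a statement that itself uses values
   defined in LOOP, we give up rather than recurse.  */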
5778
5779 static bool
5780 hoist_defs_of_uses (gimple stmt, struct loop *loop)
5781 {
5782 ssa_op_iter i;
5783 tree op;
5784 bool any = false;
5785
5786 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5787 {
5788 gimple def_stmt = SSA_NAME_DEF_STMT (op);
5789 if (!gimple_nop_p (def_stmt)
5790 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5791 {
5792 /* Make sure we don't need to recurse. While we could do
5793 so in simple cases, for more complex use webs we don't
5794 have an easy way to preserve stmt order to fulfil
5795 dependencies within them. */
5796 tree op2;
5797 ssa_op_iter i2;
5798 if (gimple_code (def_stmt) == GIMPLE_PHI)
5799 return false;
5800 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
5801 {
5802 gimple def_stmt2 = SSA_NAME_DEF_STMT (op2);
5803 if (!gimple_nop_p (def_stmt2)
5804 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
5805 return false;
5806 }
5807 any = true;
5808 }
5809 }
5810
5811 if (!any)
5812 return true;
5813
5814 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5815 {
5816 gimple def_stmt = SSA_NAME_DEF_STMT (op);
5817 if (!gimple_nop_p (def_stmt)
5818 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5819 {
5820 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5821 gsi_remove (&gsi, false);
5822 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
5823 }
5824 }
5825
5826 return true;
5827 }
5828
5829 /* vectorizable_load.
5830
5831 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
5832 can be vectorized.
5833 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5834 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5835 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5836
5837 static bool
5838 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
5839 slp_tree slp_node, slp_instance slp_node_instance)
5840 {
5841 tree scalar_dest;
5842 tree vec_dest = NULL;
5843 tree data_ref = NULL;
5844 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5845 stmt_vec_info prev_stmt_info;
5846 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5847 struct loop *loop = NULL;
5848 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
5849 bool nested_in_vect_loop = false;
5850 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5851 tree elem_type;
5852 tree new_temp;
5853 machine_mode mode;
5854 gimple new_stmt = NULL;
5855 tree dummy;
5856 enum dr_alignment_support alignment_support_scheme;
5857 tree dataref_ptr = NULL_TREE;
5858 tree dataref_offset = NULL_TREE;
5859 gimple ptr_incr = NULL;
5860 int ncopies;
5861 int i, j, group_size = -1, group_gap_adj;
5862 tree msq = NULL_TREE, lsq;
5863 tree offset = NULL_TREE;
5864 tree byte_offset = NULL_TREE;
5865 tree realignment_token = NULL_TREE;
5866 gphi *phi = NULL;
5867 vec<tree> dr_chain = vNULL;
5868 bool grouped_load = false;
5869 bool load_lanes_p = false;
5870 gimple first_stmt;
5871 bool inv_p;
5872 bool negative = false;
5873 bool compute_in_loop = false;
5874 struct loop *at_loop;
5875 int vec_num;
5876 bool slp = (slp_node != NULL);
5877 bool slp_perm = false;
5878 enum tree_code code;
5879 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5880 int vf;
5881 tree aggr_type;
5882 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
5883 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
5884 int gather_scale = 1;
5885 enum vect_def_type gather_dt = vect_unknown_def_type;
5886
5887 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5888 return false;
5889
5890 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5891 return false;
5892
5893 /* Is vectorizable load? */
5894 if (!is_gimple_assign (stmt))
5895 return false;
5896
5897 scalar_dest = gimple_assign_lhs (stmt);
5898 if (TREE_CODE (scalar_dest) != SSA_NAME)
5899 return false;
5900
5901 code = gimple_assign_rhs_code (stmt);
5902 if (code != ARRAY_REF
5903 && code != BIT_FIELD_REF
5904 && code != INDIRECT_REF
5905 && code != COMPONENT_REF
5906 && code != IMAGPART_EXPR
5907 && code != REALPART_EXPR
5908 && code != MEM_REF
5909 && TREE_CODE_CLASS (code) != tcc_declaration)
5910 return false;
5911
5912 if (!STMT_VINFO_DATA_REF (stmt_info))
5913 return false;
5914
5915 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5916 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5917
5918 if (loop_vinfo)
5919 {
5920 loop = LOOP_VINFO_LOOP (loop_vinfo);
5921 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
5922 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5923 }
5924 else
5925 vf = 1;
5926
5927 /* Multiple types in SLP are handled by creating the appropriate number of
5928 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5929 case of SLP. */
5930 if (slp || PURE_SLP_STMT (stmt_info))
5931 ncopies = 1;
5932 else
5933 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5934
5935 gcc_assert (ncopies >= 1);
5936
5937 /* FORNOW. This restriction should be relaxed. */
5938 if (nested_in_vect_loop && ncopies > 1)
5939 {
5940 if (dump_enabled_p ())
5941 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5942 "multiple types in nested loop.\n");
5943 return false;
5944 }
5945
5946 /* Invalidate assumptions made by dependence analysis when vectorization
5947 on the unrolled body effectively re-orders stmts. */
5948 if (ncopies > 1
5949 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
5950 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5951 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
5952 {
5953 if (dump_enabled_p ())
5954 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5955 "cannot perform implicit CSE when unrolling "
5956 "with negative dependence distance\n");
5957 return false;
5958 }
5959
5960 elem_type = TREE_TYPE (vectype);
5961 mode = TYPE_MODE (vectype);
5962
5963 /* FORNOW. In some cases can vectorize even if data-type not supported
5964 (e.g. - data copies). */
5965 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
5966 {
5967 if (dump_enabled_p ())
5968 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5969 "Aligned load, but unsupported type.\n");
5970 return false;
5971 }
5972
5973 /* Check if the load is a part of an interleaving chain. */
5974 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5975 {
5976 grouped_load = true;
5977 /* FORNOW */
5978 gcc_assert (! nested_in_vect_loop && !STMT_VINFO_GATHER_P (stmt_info));
5979
5980 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5981
5982 /* If this is single-element interleaving with an element distance
5983 that leaves unused vector loads around, punt - we would at least
5984 create very sub-optimal code in that case (and blow up memory,
5985 see PR65518). */
5986 if (first_stmt == stmt
5987 && !GROUP_NEXT_ELEMENT (stmt_info)
5988 && GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
5989 {
5990 if (dump_enabled_p ())
5991 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5992 "single-element interleaving not supported "
5993 "for not adjacent vector loads\n");
5994 return false;
5995 }
5996
5997 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
5998 slp_perm = true;
5999
6000 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6001 if (!slp
6002 && !PURE_SLP_STMT (stmt_info)
6003 && !STMT_VINFO_STRIDED_P (stmt_info))
6004 {
6005 if (vect_load_lanes_supported (vectype, group_size))
6006 load_lanes_p = true;
6007 else if (!vect_grouped_load_supported (vectype, group_size))
6008 return false;
6009 }
6010
6011 /* Invalidate assumptions made by dependence analysis when vectorization
6012 on the unrolled body effectively re-orders stmts. */
6013 if (!PURE_SLP_STMT (stmt_info)
6014 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6015 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6016 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6017 {
6018 if (dump_enabled_p ())
6019 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6020 "cannot perform implicit CSE when performing "
6021 "group loads with negative dependence distance\n");
6022 return false;
6023 }
6024
6025 /* Similarly when the stmt is a load that is both part of a SLP
6026 instance and a loop vectorized stmt via the same-dr mechanism
6027 we have to give up. */
6028 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6029 && (STMT_SLP_TYPE (stmt_info)
6030 != STMT_SLP_TYPE (vinfo_for_stmt
6031 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6032 {
6033 if (dump_enabled_p ())
6034 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6035 "conflicting SLP types for CSEd load\n");
6036 return false;
6037 }
6038 }
6039
6040
6041 if (STMT_VINFO_GATHER_P (stmt_info))
6042 {
6043 gimple def_stmt;
6044 tree def;
6045 gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
6046 &gather_off, &gather_scale);
6047 gcc_assert (gather_decl);
6048 if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, bb_vinfo,
6049 &def_stmt, &def, &gather_dt,
6050 &gather_off_vectype))
6051 {
6052 if (dump_enabled_p ())
6053 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6054 "gather index use not simple.\n");
6055 return false;
6056 }
6057 }
6058 else if (STMT_VINFO_STRIDED_P (stmt_info))
6059 {
6060 if ((grouped_load
6061 && (slp || PURE_SLP_STMT (stmt_info)))
6062 && (group_size > nunits
6063 || nunits % group_size != 0))
6064 {
6065 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6066 "unhandled strided group load\n");
6067 return false;
6068 }
6069 }
6070 else
6071 {
6072 negative = tree_int_cst_compare (nested_in_vect_loop
6073 ? STMT_VINFO_DR_STEP (stmt_info)
6074 : DR_STEP (dr),
6075 size_zero_node) < 0;
6076 if (negative && ncopies > 1)
6077 {
6078 if (dump_enabled_p ())
6079 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6080 "multiple types with negative step.\n");
6081 return false;
6082 }
6083
6084 if (negative)
6085 {
6086 if (grouped_load)
6087 {
6088 if (dump_enabled_p ())
6089 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6090 "negative step for group load not supported"
6091 "\n");
6092 return false;
6093 }
6094 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
6095 if (alignment_support_scheme != dr_aligned
6096 && alignment_support_scheme != dr_unaligned_supported)
6097 {
6098 if (dump_enabled_p ())
6099 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6100 "negative step but alignment required.\n");
6101 return false;
6102 }
6103 if (!perm_mask_for_reverse (vectype))
6104 {
6105 if (dump_enabled_p ())
6106 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6107 "negative step and reversing not supported."
6108 "\n");
6109 return false;
6110 }
6111 }
6112 }
6113
6114 if (!vec_stmt) /* transformation not required. */
6115 {
6116 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6117 /* The SLP costs are calculated during SLP analysis. */
6118 if (!PURE_SLP_STMT (stmt_info))
6119 vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
6120 NULL, NULL, NULL);
6121 return true;
6122 }
6123
6124 if (dump_enabled_p ())
6125 dump_printf_loc (MSG_NOTE, vect_location,
6126 "transform load. ncopies = %d\n", ncopies);
6127
6128 /** Transform. **/
6129
6130 ensure_base_align (stmt_info, dr);
6131
6132 if (STMT_VINFO_GATHER_P (stmt_info))
6133 {
6134 tree vec_oprnd0 = NULL_TREE, op;
6135 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
6136 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6137 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6138 edge pe = loop_preheader_edge (loop);
6139 gimple_seq seq;
6140 basic_block new_bb;
6141 enum { NARROW, NONE, WIDEN } modifier;
6142 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
6143
6144 if (nunits == gather_off_nunits)
6145 modifier = NONE;
6146 else if (nunits == gather_off_nunits / 2)
6147 {
6148 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
6149 modifier = WIDEN;
6150
6151 for (i = 0; i < gather_off_nunits; ++i)
6152 sel[i] = i | nunits;
6153
6154 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
6155 }
6156 else if (nunits == gather_off_nunits * 2)
6157 {
6158 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
6159 modifier = NARROW;
6160
6161 for (i = 0; i < nunits; ++i)
6162 sel[i] = i < gather_off_nunits
6163 ? i : i + nunits - gather_off_nunits;
6164
6165 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
6166 ncopies *= 2;
6167 }
6168 else
6169 gcc_unreachable ();
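/* An illustration with made-up modes: gathering a V4DF vector using a
   V8SI offset vector gives nunits == 4 and gather_off_nunits == 8, so
   modifier == WIDEN and SEL == { 4, 5, 6, 7, 4, 5, 6, 7 }; the
   odd-numbered copies below use PERM_MASK to move the upper half of
   the offset vector into place before issuing the gather.  Roughly the
   mirror happens for NARROW: each gather produces only half of a
   VECTYPE's worth of useful elements, and PERM_MASK glues two
   consecutive results together.  */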
6170
6171 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
6172 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6173 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6174 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6175 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6176 scaletype = TREE_VALUE (arglist);
6177 gcc_checking_assert (types_compatible_p (srctype, rettype));
6178
6179 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6180
6181 ptr = fold_convert (ptrtype, gather_base);
6182 if (!is_gimple_min_invariant (ptr))
6183 {
6184 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6185 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6186 gcc_assert (!new_bb);
6187 }
6188
6189 /* Currently we support only unconditional gather loads,
6190 so mask should be all ones. */
6191 if (TREE_CODE (masktype) == INTEGER_TYPE)
6192 mask = build_int_cst (masktype, -1);
6193 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6194 {
6195 mask = build_int_cst (TREE_TYPE (masktype), -1);
6196 mask = build_vector_from_val (masktype, mask);
6197 mask = vect_init_vector (stmt, mask, masktype, NULL);
6198 }
6199 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6200 {
6201 REAL_VALUE_TYPE r;
6202 long tmp[6];
6203 for (j = 0; j < 6; ++j)
6204 tmp[j] = -1;
6205 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6206 mask = build_real (TREE_TYPE (masktype), r);
6207 mask = build_vector_from_val (masktype, mask);
6208 mask = vect_init_vector (stmt, mask, masktype, NULL);
6209 }
6210 else
6211 gcc_unreachable ();
6212
6213 scale = build_int_cst (scaletype, gather_scale);
6214
6215 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6216 merge = build_int_cst (TREE_TYPE (rettype), 0);
6217 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6218 {
6219 REAL_VALUE_TYPE r;
6220 long tmp[6];
6221 for (j = 0; j < 6; ++j)
6222 tmp[j] = 0;
6223 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6224 merge = build_real (TREE_TYPE (rettype), r);
6225 }
6226 else
6227 gcc_unreachable ();
6228 merge = build_vector_from_val (rettype, merge);
6229 merge = vect_init_vector (stmt, merge, rettype, NULL);
6230
6231 prev_stmt_info = NULL;
6232 for (j = 0; j < ncopies; ++j)
6233 {
6234 if (modifier == WIDEN && (j & 1))
6235 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6236 perm_mask, stmt, gsi);
6237 else if (j == 0)
6238 op = vec_oprnd0
6239 = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
6240 else
6241 op = vec_oprnd0
6242 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
6243
6244 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6245 {
6246 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6247 == TYPE_VECTOR_SUBPARTS (idxtype));
6248 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
6249 var = make_ssa_name (var);
6250 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6251 new_stmt
6252 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6253 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6254 op = var;
6255 }
6256
6257 new_stmt
6258 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
6259
6260 if (!useless_type_conversion_p (vectype, rettype))
6261 {
6262 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6263 == TYPE_VECTOR_SUBPARTS (rettype));
6264 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
6265 op = make_ssa_name (var, new_stmt);
6266 gimple_call_set_lhs (new_stmt, op);
6267 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6268 var = make_ssa_name (vec_dest);
6269 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6270 new_stmt
6271 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6272 }
6273 else
6274 {
6275 var = make_ssa_name (vec_dest, new_stmt);
6276 gimple_call_set_lhs (new_stmt, var);
6277 }
6278
6279 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6280
6281 if (modifier == NARROW)
6282 {
6283 if ((j & 1) == 0)
6284 {
6285 prev_res = var;
6286 continue;
6287 }
6288 var = permute_vec_elements (prev_res, var,
6289 perm_mask, stmt, gsi);
6290 new_stmt = SSA_NAME_DEF_STMT (var);
6291 }
6292
6293 if (prev_stmt_info == NULL)
6294 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6295 else
6296 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6297 prev_stmt_info = vinfo_for_stmt (new_stmt);
6298 }
6299 return true;
6300 }
6301 else if (STMT_VINFO_STRIDED_P (stmt_info))
6302 {
6303 gimple_stmt_iterator incr_gsi;
6304 bool insert_after;
6305 gimple incr;
6306 tree offvar;
6307 tree ivstep;
6308 tree running_off;
6309 vec<constructor_elt, va_gc> *v = NULL;
6310 gimple_seq stmts = NULL;
6311 tree stride_base, stride_step, alias_off;
6312
6313 gcc_assert (!nested_in_vect_loop);
6314
6315 if (slp && grouped_load)
6316 first_dr = STMT_VINFO_DATA_REF
6317 (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
6318 else
6319 first_dr = dr;
6320
6321 stride_base
6322 = fold_build_pointer_plus
6323 (DR_BASE_ADDRESS (first_dr),
6324 size_binop (PLUS_EXPR,
6325 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6326 convert_to_ptrofftype (DR_INIT (first_dr))));
6327 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
6328
6329 /* For a load with loop-invariant (but other than power-of-2)
6330 stride (i.e. not a grouped access) like so:
6331
6332 for (i = 0; i < n; i += stride)
6333 ... = array[i];
6334
6335 we generate a new induction variable and new accesses to
6336 form a new vector (or vectors, depending on ncopies):
6337
6338 for (j = 0; ; j += VF*stride)
6339 tmp1 = array[j];
6340 tmp2 = array[j + stride];
6341 ...
6342 vectemp = {tmp1, tmp2, ...}
6343 */
6344
6345 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
6346 build_int_cst (TREE_TYPE (stride_step), vf));
6347
6348 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6349
6350 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
6351 loop, &incr_gsi, insert_after,
6352 &offvar, NULL);
6353 incr = gsi_stmt (incr_gsi);
6354 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
6355
6356 stride_step = force_gimple_operand (unshare_expr (stride_step),
6357 &stmts, true, NULL_TREE);
6358 if (stmts)
6359 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6360
6361 prev_stmt_info = NULL;
6362 running_off = offvar;
6363 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
6364 int nloads = nunits;
6365 tree ltype = TREE_TYPE (vectype);
6366 auto_vec<tree> dr_chain;
6367 if (slp)
6368 {
6369 nloads = nunits / group_size;
6370 if (group_size < nunits)
6371 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
6372 else
6373 ltype = vectype;
6374 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
6375 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6376 if (slp_perm)
6377 dr_chain.create (ncopies);
6378 }
6379 for (j = 0; j < ncopies; j++)
6380 {
6381 tree vec_inv;
6382
6383 if (nloads > 1)
6384 {
6385 vec_alloc (v, nloads);
6386 for (i = 0; i < nloads; i++)
6387 {
6388 tree newref, newoff;
6389 gimple incr;
6390 newref = build2 (MEM_REF, ltype, running_off, alias_off);
6391
6392 newref = force_gimple_operand_gsi (gsi, newref, true,
6393 NULL_TREE, true,
6394 GSI_SAME_STMT);
6395 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6396 newoff = copy_ssa_name (running_off);
6397 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6398 running_off, stride_step);
6399 vect_finish_stmt_generation (stmt, incr, gsi);
6400
6401 running_off = newoff;
6402 }
6403
6404 vec_inv = build_constructor (vectype, v);
6405 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6406 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6407 }
6408 else
6409 {
6410 new_stmt = gimple_build_assign (make_ssa_name (ltype),
6411 build2 (MEM_REF, ltype,
6412 running_off, alias_off));
6413 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6414
6415 tree newoff = copy_ssa_name (running_off);
6416 gimple incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6417 running_off, stride_step);
6418 vect_finish_stmt_generation (stmt, incr, gsi);
6419
6420 running_off = newoff;
6421 }
6422
6423 if (slp)
6424 {
6425 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6426 if (slp_perm)
6427 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
6428 }
6429 else
6430 {
6431 if (j == 0)
6432 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6433 else
6434 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6435 prev_stmt_info = vinfo_for_stmt (new_stmt);
6436 }
6437 }
6438 if (slp_perm)
6439 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6440 slp_node_instance, false);
6441 return true;
6442 }
6443
6444 if (grouped_load)
6445 {
6446 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6447 if (slp
6448 && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
6449 && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
6450 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6451
6452 /* Check if the chain of loads is already vectorized. */
6453 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6454 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6455 ??? But we can only do so if there is exactly one
6456 as we have no way to get at the rest. Leave the CSE
6457 opportunity alone.
6458 ??? With the group load eventually participating
6459 in multiple different permutations (having multiple
6460 slp nodes which refer to the same group) the CSE
6461 is even wrong code. See PR56270. */
6462 && !slp)
6463 {
6464 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6465 return true;
6466 }
6467 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6468 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6469 group_gap_adj = 0;
6470
6471 /* VEC_NUM is the number of vect stmts to be created for this group. */
6472 if (slp)
6473 {
6474 grouped_load = false;
6475 /* For SLP permutation support we need to load the whole group,
6476 not only the number of vector stmts the permutation result
6477 fits in. */
6478 if (slp_perm)
6479 vec_num = (group_size * vf + nunits - 1) / nunits;
6480 else
6481 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6482 group_gap_adj = vf * group_size - nunits * vec_num;
6483 }
6484 else
6485 vec_num = group_size;
6486 }
6487 else
6488 {
6489 first_stmt = stmt;
6490 first_dr = dr;
6491 group_size = vec_num = 1;
6492 group_gap_adj = 0;
6493 }
6494
6495 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6496 gcc_assert (alignment_support_scheme);
6497 /* Targets with load-lane instructions must not require explicit
6498 realignment. */
6499 gcc_assert (!load_lanes_p
6500 || alignment_support_scheme == dr_aligned
6501 || alignment_support_scheme == dr_unaligned_supported);
6502
6503 /* In case the vectorization factor (VF) is bigger than the number
6504 of elements that we can fit in a vectype (nunits), we have to generate
6505 more than one vector stmt - i.e., we need to "unroll" the
6506 vector stmt by a factor of VF/nunits. In doing so, we record a pointer
6507 from one copy of the vector stmt to the next, in the field
6508 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6509 stages to find the correct vector defs to be used when vectorizing
6510 stmts that use the defs of the current stmt. The example below
6511 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6512 need to create 4 vectorized stmts):
6513
6514 before vectorization:
6515 RELATED_STMT VEC_STMT
6516 S1: x = memref - -
6517 S2: z = x + 1 - -
6518
6519 step 1: vectorize stmt S1:
6520 We first create the vector stmt VS1_0, and, as usual, record a
6521 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6522 Next, we create the vector stmt VS1_1, and record a pointer to
6523 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6524 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6525 stmts and pointers:
6526 RELATED_STMT VEC_STMT
6527 VS1_0: vx0 = memref0 VS1_1 -
6528 VS1_1: vx1 = memref1 VS1_2 -
6529 VS1_2: vx2 = memref2 VS1_3 -
6530 VS1_3: vx3 = memref3 - -
6531 S1: x = load - VS1_0
6532 S2: z = x + 1 - -
6533
6534 See the documentation of vect_get_vec_def_for_stmt_copy for how the
6535 information we recorded in the RELATED_STMT field is used to vectorize
6536 stmt S2. */
6537
6538 /* In case of interleaving (non-unit grouped access):
6539
6540 S1: x2 = &base + 2
6541 S2: x0 = &base
6542 S3: x1 = &base + 1
6543 S4: x3 = &base + 3
6544
6545 Vectorized loads are created in the order of memory accesses
6546 starting from the access of the first stmt of the chain:
6547
6548 VS1: vx0 = &base
6549 VS2: vx1 = &base + vec_size*1
6550 VS3: vx3 = &base + vec_size*2
6551 VS4: vx4 = &base + vec_size*3
6552
6553 Then permutation statements are generated:
6554
6555 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6556 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6557 ...
6558
6559 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6560 (the order of the data-refs in the output of vect_permute_load_chain
6561 corresponds to the order of scalar stmts in the interleaving chain - see
6562 the documentation of vect_permute_load_chain()).
6563 The generation of permutation stmts and recording them in
6564 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6565
6566 In case of both multiple types and interleaving, the vector loads and
6567 permutation stmts above are created for every copy. The result vector
6568 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6569 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
6570
6571 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6572 on a target that supports unaligned accesses (dr_unaligned_supported)
6573 we generate the following code:
6574 p = initial_addr;
6575 indx = 0;
6576 loop {
6577 p = p + indx * vectype_size;
6578 vec_dest = *(p);
6579 indx = indx + 1;
6580 }
6581
6582 Otherwise, the data reference is potentially unaligned on a target that
6583 does not support unaligned accesses (dr_explicit_realign_optimized) -
6584 then generate the following code, in which the data in each iteration is
6585 obtained by two vector loads, one from the previous iteration, and one
6586 from the current iteration:
6587 p1 = initial_addr;
6588 msq_init = *(floor(p1))
6589 p2 = initial_addr + VS - 1;
6590 realignment_token = call target_builtin;
6591 indx = 0;
6592 loop {
6593 p2 = p2 + indx * vectype_size
6594 lsq = *(floor(p2))
6595 vec_dest = realign_load (msq, lsq, realignment_token)
6596 indx = indx + 1;
6597 msq = lsq;
6598 } */
6599
6600 /* If the misalignment remains the same throughout the execution of the
6601 loop, we can create the init_addr and permutation mask at the loop
6602 preheader. Otherwise, it needs to be created inside the loop.
6603 This can only occur when vectorizing memory accesses in the inner-loop
6604 nested within an outer-loop that is being vectorized. */
6605
6606 if (nested_in_vect_loop
6607 && (TREE_INT_CST_LOW (DR_STEP (dr))
6608 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6609 {
6610 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6611 compute_in_loop = true;
6612 }
6613
6614 if ((alignment_support_scheme == dr_explicit_realign_optimized
6615 || alignment_support_scheme == dr_explicit_realign)
6616 && !compute_in_loop)
6617 {
6618 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6619 alignment_support_scheme, NULL_TREE,
6620 &at_loop);
6621 if (alignment_support_scheme == dr_explicit_realign_optimized)
6622 {
6623 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
6624 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6625 size_one_node);
6626 }
6627 }
6628 else
6629 at_loop = loop;
6630
6631 if (negative)
6632 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6633
6634 if (load_lanes_p)
6635 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6636 else
6637 aggr_type = vectype;
6638
6639 prev_stmt_info = NULL;
6640 for (j = 0; j < ncopies; j++)
6641 {
6642 /* 1. Create the vector or array pointer update chain. */
6643 if (j == 0)
6644 {
6645 bool simd_lane_access_p
6646 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6647 if (simd_lane_access_p
6648 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6649 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6650 && integer_zerop (DR_OFFSET (first_dr))
6651 && integer_zerop (DR_INIT (first_dr))
6652 && alias_sets_conflict_p (get_alias_set (aggr_type),
6653 get_alias_set (DR_REF (first_dr)))
6654 && (alignment_support_scheme == dr_aligned
6655 || alignment_support_scheme == dr_unaligned_supported))
6656 {
6657 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6658 dataref_offset = build_int_cst (reference_alias_ptr_type
6659 (DR_REF (first_dr)), 0);
6660 inv_p = false;
6661 }
6662 else
6663 dataref_ptr
6664 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6665 offset, &dummy, gsi, &ptr_incr,
6666 simd_lane_access_p, &inv_p,
6667 byte_offset);
6668 }
6669 else if (dataref_offset)
6670 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6671 TYPE_SIZE_UNIT (aggr_type));
6672 else
6673 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6674 TYPE_SIZE_UNIT (aggr_type));
6675
6676 if (grouped_load || slp_perm)
6677 dr_chain.create (vec_num);
6678
6679 if (load_lanes_p)
6680 {
6681 tree vec_array;
6682
6683 vec_array = create_vector_array (vectype, vec_num);
6684
6685 /* Emit:
6686 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6687 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6688 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6689 gimple_call_set_lhs (new_stmt, vec_array);
6690 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6691
6692 /* Extract each vector into an SSA_NAME. */
6693 for (i = 0; i < vec_num; i++)
6694 {
6695 new_temp = read_vector_array (stmt, gsi, scalar_dest,
6696 vec_array, i);
6697 dr_chain.quick_push (new_temp);
6698 }
6699
6700 /* Record the mapping between SSA_NAMEs and statements. */
6701 vect_record_grouped_load_vectors (stmt, dr_chain);
6702 }
6703 else
6704 {
6705 for (i = 0; i < vec_num; i++)
6706 {
6707 if (i > 0)
6708 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6709 stmt, NULL_TREE);
6710
6711 /* 2. Create the vector-load in the loop. */
6712 switch (alignment_support_scheme)
6713 {
6714 case dr_aligned:
6715 case dr_unaligned_supported:
6716 {
6717 unsigned int align, misalign;
6718
6719 data_ref
6720 = fold_build2 (MEM_REF, vectype, dataref_ptr,
6721 dataref_offset
6722 ? dataref_offset
6723 : build_int_cst (reference_alias_ptr_type
6724 (DR_REF (first_dr)), 0));
6725 align = TYPE_ALIGN_UNIT (vectype);
6726 if (alignment_support_scheme == dr_aligned)
6727 {
6728 gcc_assert (aligned_access_p (first_dr));
6729 misalign = 0;
6730 }
6731 else if (DR_MISALIGNMENT (first_dr) == -1)
6732 {
6733 TREE_TYPE (data_ref)
6734 = build_aligned_type (TREE_TYPE (data_ref),
6735 TYPE_ALIGN (elem_type));
6736 align = TYPE_ALIGN_UNIT (elem_type);
6737 misalign = 0;
6738 }
6739 else
6740 {
6741 TREE_TYPE (data_ref)
6742 = build_aligned_type (TREE_TYPE (data_ref),
6743 TYPE_ALIGN (elem_type));
6744 misalign = DR_MISALIGNMENT (first_dr);
6745 }
6746 if (dataref_offset == NULL_TREE
6747 && TREE_CODE (dataref_ptr) == SSA_NAME)
6748 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6749 align, misalign);
6750 break;
6751 }
6752 case dr_explicit_realign:
6753 {
6754 tree ptr, bump;
6755
6756 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
6757
6758 if (compute_in_loop)
6759 msq = vect_setup_realignment (first_stmt, gsi,
6760 &realignment_token,
6761 dr_explicit_realign,
6762 dataref_ptr, NULL);
6763
6764 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6765 ptr = copy_ssa_name (dataref_ptr);
6766 else
6767 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
6768 new_stmt = gimple_build_assign
6769 (ptr, BIT_AND_EXPR, dataref_ptr,
6770 build_int_cst
6771 (TREE_TYPE (dataref_ptr),
6772 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6773 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6774 data_ref
6775 = build2 (MEM_REF, vectype, ptr,
6776 build_int_cst (reference_alias_ptr_type
6777 (DR_REF (first_dr)), 0));
6778 vec_dest = vect_create_destination_var (scalar_dest,
6779 vectype);
6780 new_stmt = gimple_build_assign (vec_dest, data_ref);
6781 new_temp = make_ssa_name (vec_dest, new_stmt);
6782 gimple_assign_set_lhs (new_stmt, new_temp);
6783 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
6784 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
6785 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6786 msq = new_temp;
6787
6788 bump = size_binop (MULT_EXPR, vs,
6789 TYPE_SIZE_UNIT (elem_type));
6790 bump = size_binop (MINUS_EXPR, bump, size_one_node);
6791 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
6792 new_stmt = gimple_build_assign
6793 (NULL_TREE, BIT_AND_EXPR, ptr,
6794 build_int_cst
6795 (TREE_TYPE (ptr),
6796 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6797 ptr = copy_ssa_name (ptr, new_stmt);
6798 gimple_assign_set_lhs (new_stmt, ptr);
6799 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6800 data_ref
6801 = build2 (MEM_REF, vectype, ptr,
6802 build_int_cst (reference_alias_ptr_type
6803 (DR_REF (first_dr)), 0));
6804 break;
6805 }
6806 case dr_explicit_realign_optimized:
6807 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6808 new_temp = copy_ssa_name (dataref_ptr);
6809 else
6810 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
6811 new_stmt = gimple_build_assign
6812 (new_temp, BIT_AND_EXPR, dataref_ptr,
6813 build_int_cst
6814 (TREE_TYPE (dataref_ptr),
6815 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6816 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6817 data_ref
6818 = build2 (MEM_REF, vectype, new_temp,
6819 build_int_cst (reference_alias_ptr_type
6820 (DR_REF (first_dr)), 0));
6821 break;
6822 default:
6823 gcc_unreachable ();
6824 }
6825 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6826 new_stmt = gimple_build_assign (vec_dest, data_ref);
6827 new_temp = make_ssa_name (vec_dest, new_stmt);
6828 gimple_assign_set_lhs (new_stmt, new_temp);
6829 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6830
6831 /* 3. Handle explicit realignment if necessary/supported.
6832 Create in loop:
6833 vec_dest = realign_load (msq, lsq, realignment_token) */
6834 if (alignment_support_scheme == dr_explicit_realign_optimized
6835 || alignment_support_scheme == dr_explicit_realign)
6836 {
6837 lsq = gimple_assign_lhs (new_stmt);
6838 if (!realignment_token)
6839 realignment_token = dataref_ptr;
6840 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6841 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
6842 msq, lsq, realignment_token);
6843 new_temp = make_ssa_name (vec_dest, new_stmt);
6844 gimple_assign_set_lhs (new_stmt, new_temp);
6845 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6846
6847 if (alignment_support_scheme == dr_explicit_realign_optimized)
6848 {
6849 gcc_assert (phi);
6850 if (i == vec_num - 1 && j == ncopies - 1)
6851 add_phi_arg (phi, lsq,
6852 loop_latch_edge (containing_loop),
6853 UNKNOWN_LOCATION);
6854 msq = lsq;
6855 }
6856 }
6857
6858 /* 4. Handle invariant-load. */
6859 if (inv_p && !bb_vinfo)
6860 {
6861 gcc_assert (!grouped_load);
6862 /* If we have versioned for aliasing or the loop doesn't
6863 have any data dependencies that would preclude this,
6864 then we are sure this is a loop invariant load and
6865 thus we can insert it on the preheader edge. */
6866 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
6867 && !nested_in_vect_loop
6868 && hoist_defs_of_uses (stmt, loop))
6869 {
6870 if (dump_enabled_p ())
6871 {
6872 dump_printf_loc (MSG_NOTE, vect_location,
6873 "hoisting out of the vectorized "
6874 "loop: ");
6875 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
6876 }
6877 tree tem = copy_ssa_name (scalar_dest);
6878 gsi_insert_on_edge_immediate
6879 (loop_preheader_edge (loop),
6880 gimple_build_assign (tem,
6881 unshare_expr
6882 (gimple_assign_rhs1 (stmt))));
6883 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
6884 }
6885 else
6886 {
6887 gimple_stmt_iterator gsi2 = *gsi;
6888 gsi_next (&gsi2);
6889 new_temp = vect_init_vector (stmt, scalar_dest,
6890 vectype, &gsi2);
6891 }
6892 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6893 set_vinfo_for_stmt (new_stmt,
6894 new_stmt_vec_info (new_stmt, loop_vinfo,
6895 bb_vinfo));
6896 }
6897
6898 if (negative)
6899 {
6900 tree perm_mask = perm_mask_for_reverse (vectype);
6901 new_temp = permute_vec_elements (new_temp, new_temp,
6902 perm_mask, stmt, gsi);
6903 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6904 }
6905
6906 /* Collect vector loads and later create their permutation in
6907 vect_transform_grouped_load (). */
6908 if (grouped_load || slp_perm)
6909 dr_chain.quick_push (new_temp);
6910
6911 /* Store vector loads in the corresponding SLP_NODE. */
6912 if (slp && !slp_perm)
6913 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6914 }
6915 /* Bump the vector pointer to account for a gap or for excess
6916 elements loaded for a permuted SLP load. */
6917 if (group_gap_adj != 0)
6918 {
6919 bool ovf;
6920 tree bump
6921 = wide_int_to_tree (sizetype,
6922 wi::smul (TYPE_SIZE_UNIT (elem_type),
6923 group_gap_adj, &ovf));
6924 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6925 stmt, bump);
6926 }
6927 }
6928
6929 if (slp && !slp_perm)
6930 continue;
6931
6932 if (slp_perm)
6933 {
6934 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6935 slp_node_instance, false))
6936 {
6937 dr_chain.release ();
6938 return false;
6939 }
6940 }
6941 else
6942 {
6943 if (grouped_load)
6944 {
6945 if (!load_lanes_p)
6946 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
6947 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6948 }
6949 else
6950 {
6951 if (j == 0)
6952 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6953 else
6954 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6955 prev_stmt_info = vinfo_for_stmt (new_stmt);
6956 }
6957 }
6958 dr_chain.release ();
6959 }
6960
6961 return true;
6962 }
6963
6964 /* Function vect_is_simple_cond.
6965
6966 Input:
6967 LOOP - the loop that is being vectorized.
6968 COND - Condition that is checked for simple use.
6969
6970 Output:
6971 *COMP_VECTYPE - the vector type for the comparison.
6972
6973 Returns whether COND can be vectorized. Checks whether the
6974 condition operands are supportable using vect_is_simple_use. */
6975
6976 static bool
6977 vect_is_simple_cond (tree cond, gimple stmt, loop_vec_info loop_vinfo,
6978 bb_vec_info bb_vinfo, tree *comp_vectype)
6979 {
6980 tree lhs, rhs;
6981 tree def;
6982 enum vect_def_type dt;
6983 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
6984
6985 if (!COMPARISON_CLASS_P (cond))
6986 return false;
6987
6988 lhs = TREE_OPERAND (cond, 0);
6989 rhs = TREE_OPERAND (cond, 1);
6990
6991 if (TREE_CODE (lhs) == SSA_NAME)
6992 {
6993 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
6994 if (!vect_is_simple_use_1 (lhs, stmt, loop_vinfo, bb_vinfo,
6995 &lhs_def_stmt, &def, &dt, &vectype1))
6996 return false;
6997 }
6998 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
6999 && TREE_CODE (lhs) != FIXED_CST)
7000 return false;
7001
7002 if (TREE_CODE (rhs) == SSA_NAME)
7003 {
7004 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7005 if (!vect_is_simple_use_1 (rhs, stmt, loop_vinfo, bb_vinfo,
7006 &rhs_def_stmt, &def, &dt, &vectype2))
7007 return false;
7008 }
7009 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
7010 && TREE_CODE (rhs) != FIXED_CST)
7011 return false;
7012
7013 *comp_vectype = vectype1 ? vectype1 : vectype2;
7014 return true;
7015 }
7016
7017 /* vectorizable_condition.
7018
7019 Check if STMT is a conditional modify expression that can be vectorized.
7020 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7021 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7022 at GSI.
7023
7024 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
7025 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
7026 the else clause if it is 2).
7027
7028 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
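/* As a rough sketch of the transformation below: a scalar statement

     x = a < b ? c : d;

   becomes, for each vector copy,

     vx = VEC_COND_EXPR <va < vb, vc, vd>;

   where va, vb, vc and vd stand for the vectorized definitions of the
   four operands (the names are purely illustrative).  */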
7029
7030 bool
7031 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
7032 gimple *vec_stmt, tree reduc_def, int reduc_index,
7033 slp_tree slp_node)
7034 {
7035 tree scalar_dest = NULL_TREE;
7036 tree vec_dest = NULL_TREE;
7037 tree cond_expr, then_clause, else_clause;
7038 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7039 tree comp_vectype = NULL_TREE;
7040 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7041 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7042 tree vec_compare, vec_cond_expr;
7043 tree new_temp;
7044 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7045 tree def;
7046 enum vect_def_type dt, dts[4];
7047 int ncopies;
7048 enum tree_code code;
7049 stmt_vec_info prev_stmt_info = NULL;
7050 int i, j;
7051 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7052 vec<tree> vec_oprnds0 = vNULL;
7053 vec<tree> vec_oprnds1 = vNULL;
7054 vec<tree> vec_oprnds2 = vNULL;
7055 vec<tree> vec_oprnds3 = vNULL;
7056 tree vec_cmp_type;
7057
7058 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7059 return false;
7060
7061 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7062 return false;
7063
7064 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7065 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7066 && reduc_def))
7067 return false;
7068
7069 /* FORNOW: not yet supported. */
7070 if (STMT_VINFO_LIVE_P (stmt_info))
7071 {
7072 if (dump_enabled_p ())
7073 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7074 "value used after loop.\n");
7075 return false;
7076 }
7077
7078 /* Is vectorizable conditional operation? */
7079 if (!is_gimple_assign (stmt))
7080 return false;
7081
7082 code = gimple_assign_rhs_code (stmt);
7083
7084 if (code != COND_EXPR)
7085 return false;
7086
7087 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7088 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
7089
7090 if (slp_node || PURE_SLP_STMT (stmt_info))
7091 ncopies = 1;
7092 else
7093 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7094
7095 gcc_assert (ncopies >= 1);
7096 if (reduc_index && ncopies > 1)
7097 return false; /* FORNOW */
7098
7099 cond_expr = gimple_assign_rhs1 (stmt);
7100 then_clause = gimple_assign_rhs2 (stmt);
7101 else_clause = gimple_assign_rhs3 (stmt);
7102
7103 if (!vect_is_simple_cond (cond_expr, stmt, loop_vinfo, bb_vinfo,
7104 &comp_vectype)
7105 || !comp_vectype)
7106 return false;
7107
7108 if (TREE_CODE (then_clause) == SSA_NAME)
7109 {
7110 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
7111 if (!vect_is_simple_use (then_clause, stmt, loop_vinfo, bb_vinfo,
7112 &then_def_stmt, &def, &dt))
7113 return false;
7114 }
7115 else if (TREE_CODE (then_clause) != INTEGER_CST
7116 && TREE_CODE (then_clause) != REAL_CST
7117 && TREE_CODE (then_clause) != FIXED_CST)
7118 return false;
7119
7120 if (TREE_CODE (else_clause) == SSA_NAME)
7121 {
7122 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
7123 if (!vect_is_simple_use (else_clause, stmt, loop_vinfo, bb_vinfo,
7124 &else_def_stmt, &def, &dt))
7125 return false;
7126 }
7127 else if (TREE_CODE (else_clause) != INTEGER_CST
7128 && TREE_CODE (else_clause) != REAL_CST
7129 && TREE_CODE (else_clause) != FIXED_CST)
7130 return false;
7131
7132 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype)));
7133 /* The result of a vector comparison should be a signed type. */
7134 tree cmp_type = build_nonstandard_integer_type (prec, 0);
7135 vec_cmp_type = get_same_sized_vectype (cmp_type, vectype);
7136 if (vec_cmp_type == NULL_TREE)
7137 return false;
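/* For instance (illustrative only): with a V4SF VECTYPE, PREC is 32,
   CMP_TYPE is a signed 32-bit integer type, and VEC_CMP_TYPE is the
   corresponding four-element signed integer vector type that holds
   the comparison result.  */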
7138
7139 if (!vec_stmt)
7140 {
7141 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
7142 return expand_vec_cond_expr_p (vectype, comp_vectype);
7143 }
7144
7145 /* Transform. */
7146
7147 if (!slp_node)
7148 {
7149 vec_oprnds0.create (1);
7150 vec_oprnds1.create (1);
7151 vec_oprnds2.create (1);
7152 vec_oprnds3.create (1);
7153 }
7154
7155 /* Handle def. */
7156 scalar_dest = gimple_assign_lhs (stmt);
7157 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7158
7159 /* Handle cond expr. */
7160 for (j = 0; j < ncopies; j++)
7161 {
7162 gassign *new_stmt = NULL;
7163 if (j == 0)
7164 {
7165 if (slp_node)
7166 {
7167 auto_vec<tree, 4> ops;
7168 auto_vec<vec<tree>, 4> vec_defs;
7169
7170 ops.safe_push (TREE_OPERAND (cond_expr, 0));
7171 ops.safe_push (TREE_OPERAND (cond_expr, 1));
7172 ops.safe_push (then_clause);
7173 ops.safe_push (else_clause);
7174 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7175 vec_oprnds3 = vec_defs.pop ();
7176 vec_oprnds2 = vec_defs.pop ();
7177 vec_oprnds1 = vec_defs.pop ();
7178 vec_oprnds0 = vec_defs.pop ();
7179
7180 ops.release ();
7181 vec_defs.release ();
7182 }
7183 else
7184 {
7185 gimple gtemp;
7186 vec_cond_lhs =
7187 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
7188 stmt, NULL);
7189 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), stmt,
7190 loop_vinfo, NULL, &gtemp, &def, &dts[0]);
7191
7192 vec_cond_rhs =
7193 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
7194 stmt, NULL);
7195 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), stmt,
7196 loop_vinfo, NULL, &gtemp, &def, &dts[1]);
7197 if (reduc_index == 1)
7198 vec_then_clause = reduc_def;
7199 else
7200 {
7201 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
7202 stmt, NULL);
7203 vect_is_simple_use (then_clause, stmt, loop_vinfo,
7204 NULL, &gtemp, &def, &dts[2]);
7205 }
7206 if (reduc_index == 2)
7207 vec_else_clause = reduc_def;
7208 else
7209 {
7210 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
7211 stmt, NULL);
7212 vect_is_simple_use (else_clause, stmt, loop_vinfo,
7213 NULL, &gtemp, &def, &dts[3]);
7214 }
7215 }
7216 }
7217 else
7218 {
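	    /* For the second and subsequent copies (ncopies > 1), derive the
	       vector defs for this copy from the defs created for the
	       previous copy.  */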
7219 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
7220 vec_oprnds0.pop ());
7221 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
7222 vec_oprnds1.pop ());
7223 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
7224 vec_oprnds2.pop ());
7225 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
7226 vec_oprnds3.pop ());
7227 }
7228
7229 if (!slp_node)
7230 {
7231 vec_oprnds0.quick_push (vec_cond_lhs);
7232 vec_oprnds1.quick_push (vec_cond_rhs);
7233 vec_oprnds2.quick_push (vec_then_clause);
7234 vec_oprnds3.quick_push (vec_else_clause);
7235 }
7236
7237 /* Arguments are ready. Create the new vector stmt. */
7238 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
7239 {
7240 vec_cond_rhs = vec_oprnds1[i];
7241 vec_then_clause = vec_oprnds2[i];
7242 vec_else_clause = vec_oprnds3[i];
7243
7244 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
7245 vec_cond_lhs, vec_cond_rhs);
7246 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
7247 vec_compare, vec_then_clause, vec_else_clause);
7248
7249 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
7250 new_temp = make_ssa_name (vec_dest, new_stmt);
7251 gimple_assign_set_lhs (new_stmt, new_temp);
7252 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7253 if (slp_node)
7254 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7255 }
7256
7257 if (slp_node)
7258 continue;
7259
7260 if (j == 0)
7261 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7262 else
7263 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7264
7265 prev_stmt_info = vinfo_for_stmt (new_stmt);
7266 }
7267
7268 vec_oprnds0.release ();
7269 vec_oprnds1.release ();
7270 vec_oprnds2.release ();
7271 vec_oprnds3.release ();
7272
7273 return true;
7274 }
7275
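/* Illustrative sketch (kept in a comment, not compiled; the *_v names are
   made up for the example): vectorizable_condition above turns a scalar
   conditional assignment such as

       r = a < b ? x : y;

   into a mask-driven vector statement of the form

       mask = a_v < b_v;                        built in VEC_CMP_TYPE
       r_v  = VEC_COND_EXPR <mask, x_v, y_v>;   built in VECTYPE

   where a_v, b_v, x_v and y_v stand for the vector defs of the scalar
   operands, obtained via vect_get_vec_def_for_operand or the SLP defs.  */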
7276
7277 /* Make sure the statement is vectorizable. */
7278
7279 bool
7280 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
7281 {
7282 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7283 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7284 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
7285 bool ok;
7286 tree scalar_type, vectype;
7287 gimple pattern_stmt;
7288 gimple_seq pattern_def_seq;
7289
7290 if (dump_enabled_p ())
7291 {
7292 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
7293 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7294 }
7295
7296 if (gimple_has_volatile_ops (stmt))
7297 {
7298 if (dump_enabled_p ())
7299 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7300 "not vectorized: stmt has volatile operands\n");
7301
7302 return false;
7303 }
7304
7305 /* Skip stmts that do not need to be vectorized. In loops this is expected
7306 to include:
7307 - the COND_EXPR which is the loop exit condition
7308 - any LABEL_EXPRs in the loop
7309 - computations that are used only for array indexing or loop control.
7310 In basic blocks we only analyze statements that are a part of some SLP
7311 instance, therefore, all the statements are relevant.
7312
7313    A pattern statement needs to be analyzed instead of the original statement
7314    if the original statement is not relevant.  Otherwise, we analyze both
7315    statements.  In basic blocks we are called from some SLP instance
7316    traversal; don't analyze pattern stmts separately, as the pattern stmts
7317    will already be part of the SLP instance.  */
7318
7319 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7320 if (!STMT_VINFO_RELEVANT_P (stmt_info)
7321 && !STMT_VINFO_LIVE_P (stmt_info))
7322 {
7323 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7324 && pattern_stmt
7325 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7326 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7327 {
7328 /* Analyze PATTERN_STMT instead of the original stmt. */
7329 stmt = pattern_stmt;
7330 stmt_info = vinfo_for_stmt (pattern_stmt);
7331 if (dump_enabled_p ())
7332 {
7333 dump_printf_loc (MSG_NOTE, vect_location,
7334 "==> examining pattern statement: ");
7335 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7336 }
7337 }
7338 else
7339 {
7340 if (dump_enabled_p ())
7341 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
7342
7343 return true;
7344 }
7345 }
7346 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7347 && node == NULL
7348 && pattern_stmt
7349 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7350 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7351 {
7352 /* Analyze PATTERN_STMT too. */
7353 if (dump_enabled_p ())
7354 {
7355 dump_printf_loc (MSG_NOTE, vect_location,
7356 "==> examining pattern statement: ");
7357 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7358 }
7359
7360 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7361 return false;
7362 }
7363
7364 if (is_pattern_stmt_p (stmt_info)
7365 && node == NULL
7366 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
7367 {
7368 gimple_stmt_iterator si;
7369
7370 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
7371 {
7372 gimple pattern_def_stmt = gsi_stmt (si);
7373 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
7374 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
7375 {
7376 /* Analyze def stmt of STMT if it's a pattern stmt. */
7377 if (dump_enabled_p ())
7378 {
7379 dump_printf_loc (MSG_NOTE, vect_location,
7380 "==> examining pattern def statement: ");
7381 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
7382 }
7383
7384 if (!vect_analyze_stmt (pattern_def_stmt,
7385 need_to_vectorize, node))
7386 return false;
7387 }
7388 }
7389 }
7390
7391 switch (STMT_VINFO_DEF_TYPE (stmt_info))
7392 {
7393 case vect_internal_def:
7394 break;
7395
7396 case vect_reduction_def:
7397 case vect_nested_cycle:
7398 gcc_assert (!bb_vinfo
7399 && (relevance == vect_used_in_outer
7400 || relevance == vect_used_in_outer_by_reduction
7401 || relevance == vect_used_by_reduction
7402 || relevance == vect_unused_in_scope));
7403 break;
7404
7405 case vect_induction_def:
7406 case vect_constant_def:
7407 case vect_external_def:
7408 case vect_unknown_def_type:
7409 default:
7410 gcc_unreachable ();
7411 }
7412
7413 if (bb_vinfo)
7414 {
7415 gcc_assert (PURE_SLP_STMT (stmt_info));
7416
7417 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
7418 if (dump_enabled_p ())
7419 {
7420 dump_printf_loc (MSG_NOTE, vect_location,
7421 "get vectype for scalar type: ");
7422 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
7423 dump_printf (MSG_NOTE, "\n");
7424 }
7425
7426 vectype = get_vectype_for_scalar_type (scalar_type);
7427 if (!vectype)
7428 {
7429 if (dump_enabled_p ())
7430 {
7431 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7432 "not SLPed: unsupported data-type ");
7433 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
7434 scalar_type);
7435 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7436 }
7437 return false;
7438 }
7439
7440 if (dump_enabled_p ())
7441 {
7442 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
7443 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
7444 dump_printf (MSG_NOTE, "\n");
7445 }
7446
7447 STMT_VINFO_VECTYPE (stmt_info) = vectype;
7448 }
7449
7450 if (STMT_VINFO_RELEVANT_P (stmt_info))
7451 {
7452 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
7453 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
7454 || (is_gimple_call (stmt)
7455 && gimple_call_lhs (stmt) == NULL_TREE));
7456 *need_to_vectorize = true;
7457 }
7458
7459 if (PURE_SLP_STMT (stmt_info) && !node)
7460 {
7461 dump_printf_loc (MSG_NOTE, vect_location,
7462 "handled only by SLP analysis\n");
7463 return true;
7464 }
7465
7466 ok = true;
7467 if (!bb_vinfo
7468 && (STMT_VINFO_RELEVANT_P (stmt_info)
7469 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
7470 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7471 || vectorizable_conversion (stmt, NULL, NULL, node)
7472 || vectorizable_shift (stmt, NULL, NULL, node)
7473 || vectorizable_operation (stmt, NULL, NULL, node)
7474 || vectorizable_assignment (stmt, NULL, NULL, node)
7475 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7476 || vectorizable_call (stmt, NULL, NULL, node)
7477 || vectorizable_store (stmt, NULL, NULL, node)
7478 || vectorizable_reduction (stmt, NULL, NULL, node)
7479 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7480 else
7481 {
7482 if (bb_vinfo)
7483 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7484 || vectorizable_conversion (stmt, NULL, NULL, node)
7485 || vectorizable_shift (stmt, NULL, NULL, node)
7486 || vectorizable_operation (stmt, NULL, NULL, node)
7487 || vectorizable_assignment (stmt, NULL, NULL, node)
7488 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7489 || vectorizable_call (stmt, NULL, NULL, node)
7490 || vectorizable_store (stmt, NULL, NULL, node)
7491 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7492 }
7493
7494 if (!ok)
7495 {
7496 if (dump_enabled_p ())
7497 {
7498 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7499 "not vectorized: relevant stmt not ");
7500 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7501 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7502 }
7503
7504 return false;
7505 }
7506
7507 if (bb_vinfo)
7508 return true;
7509
7510   /* Stmts that are (also) "live" (i.e., used outside the loop)
7511 need extra handling, except for vectorizable reductions. */
7512 if (STMT_VINFO_LIVE_P (stmt_info)
7513 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7514 ok = vectorizable_live_operation (stmt, NULL, NULL);
7515
7516 if (!ok)
7517 {
7518 if (dump_enabled_p ())
7519 {
7520 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7521 "not vectorized: live stmt not ");
7522 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7523 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7524 }
7525
7526 return false;
7527 }
7528
7529 return true;
7530 }
7531
7532
7533 /* Function vect_transform_stmt.
7534
7535 Create a vectorized stmt to replace STMT, and insert it at BSI. */
7536
7537 bool
7538 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
7539 bool *grouped_store, slp_tree slp_node,
7540 slp_instance slp_node_instance)
7541 {
7542 bool is_store = false;
7543 gimple vec_stmt = NULL;
7544 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7545 bool done;
7546
7547 gimple old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7548
7549 switch (STMT_VINFO_TYPE (stmt_info))
7550 {
7551 case type_demotion_vec_info_type:
7552 case type_promotion_vec_info_type:
7553 case type_conversion_vec_info_type:
7554 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7555 gcc_assert (done);
7556 break;
7557
7558 case induc_vec_info_type:
7559 gcc_assert (!slp_node);
7560 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7561 gcc_assert (done);
7562 break;
7563
7564 case shift_vec_info_type:
7565 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
7566 gcc_assert (done);
7567 break;
7568
7569 case op_vec_info_type:
7570 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7571 gcc_assert (done);
7572 break;
7573
7574 case assignment_vec_info_type:
7575 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7576 gcc_assert (done);
7577 break;
7578
7579 case load_vec_info_type:
7580 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
7581 slp_node_instance);
7582 gcc_assert (done);
7583 break;
7584
7585 case store_vec_info_type:
7586 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
7587 gcc_assert (done);
7588 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
7589 {
7590 /* In case of interleaving, the whole chain is vectorized when the
7591 last store in the chain is reached. Store stmts before the last
7592 	     one are skipped, and their stmt_vec_info shouldn't be freed
7593 meanwhile. */
7594 *grouped_store = true;
7595 if (STMT_VINFO_VEC_STMT (stmt_info))
7596 is_store = true;
7597 }
7598 else
7599 is_store = true;
7600 break;
7601
7602 case condition_vec_info_type:
7603 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
7604 gcc_assert (done);
7605 break;
7606
7607 case call_vec_info_type:
7608 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
7609 stmt = gsi_stmt (*gsi);
7610 if (is_gimple_call (stmt)
7611 && gimple_call_internal_p (stmt)
7612 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
7613 is_store = true;
7614 break;
7615
7616 case call_simd_clone_vec_info_type:
7617 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
7618 stmt = gsi_stmt (*gsi);
7619 break;
7620
7621 case reduc_vec_info_type:
7622 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
7623 gcc_assert (done);
7624 break;
7625
7626 default:
7627 if (!STMT_VINFO_LIVE_P (stmt_info))
7628 {
7629 if (dump_enabled_p ())
7630 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7631 "stmt not supported.\n");
7632 gcc_unreachable ();
7633 }
7634 }
7635
7636 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
7637 This would break hybrid SLP vectorization. */
7638 if (slp_node)
7639 gcc_assert (!vec_stmt
7640 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
7641
7642 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
7643 is being vectorized, but outside the immediately enclosing loop. */
7644 if (vec_stmt
7645 && STMT_VINFO_LOOP_VINFO (stmt_info)
7646 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
7647 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
7648 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
7649 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
7650 || STMT_VINFO_RELEVANT (stmt_info) ==
7651 vect_used_in_outer_by_reduction))
7652 {
7653 struct loop *innerloop = LOOP_VINFO_LOOP (
7654 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
7655 imm_use_iterator imm_iter;
7656 use_operand_p use_p;
7657 tree scalar_dest;
7658 gimple exit_phi;
7659
7660 if (dump_enabled_p ())
7661 dump_printf_loc (MSG_NOTE, vect_location,
7662 "Record the vdef for outer-loop vectorization.\n");
7663
7664       /* Find the relevant loop-exit phi-node, and record the vec_stmt there
7665 (to be used when vectorizing outer-loop stmts that use the DEF of
7666 STMT). */
7667 if (gimple_code (stmt) == GIMPLE_PHI)
7668 scalar_dest = PHI_RESULT (stmt);
7669 else
7670 scalar_dest = gimple_assign_lhs (stmt);
7671
7672 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
7673 {
7674 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
7675 {
7676 exit_phi = USE_STMT (use_p);
7677 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
7678 }
7679 }
7680 }
7681
7682 /* Handle stmts whose DEF is used outside the loop-nest that is
7683 being vectorized. */
7684 if (STMT_VINFO_LIVE_P (stmt_info)
7685 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7686 {
7687 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
7688 gcc_assert (done);
7689 }
7690
7691 if (vec_stmt)
7692 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
7693
7694 return is_store;
7695 }
7696
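/* Illustrative note: vect_analyze_stmt above records the chosen kind in
   STMT_VINFO_TYPE (e.g. condition_vec_info_type for a COND_EXPR), and
   vect_transform_stmt dispatches on that field, this time passing a
   non-NULL VEC_STMT so that the matching vectorizable_* routine emits
   the vector code instead of only checking that it is supported.  */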
7697
7698 /* Remove a group of stores (for SLP or interleaving), free their
7699 stmt_vec_info. */
7700
7701 void
7702 vect_remove_stores (gimple first_stmt)
7703 {
7704 gimple next = first_stmt;
7705 gimple tmp;
7706 gimple_stmt_iterator next_si;
7707
7708 while (next)
7709 {
7710 stmt_vec_info stmt_info = vinfo_for_stmt (next);
7711
7712 tmp = GROUP_NEXT_ELEMENT (stmt_info);
7713 if (is_pattern_stmt_p (stmt_info))
7714 next = STMT_VINFO_RELATED_STMT (stmt_info);
7715 /* Free the attached stmt_vec_info and remove the stmt. */
7716 next_si = gsi_for_stmt (next);
7717 unlink_stmt_vdef (next);
7718 gsi_remove (&next_si, true);
7719 release_defs (next);
7720 free_stmt_vec_info (next);
7721 next = tmp;
7722 }
7723 }
7724
7725
7726 /* Function new_stmt_vec_info.
7727
7728 Create and initialize a new stmt_vec_info struct for STMT. */
7729
7730 stmt_vec_info
7731 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
7732 bb_vec_info bb_vinfo)
7733 {
7734 stmt_vec_info res;
7735 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
7736
7737 STMT_VINFO_TYPE (res) = undef_vec_info_type;
7738 STMT_VINFO_STMT (res) = stmt;
7739 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
7740 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
7741 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
7742 STMT_VINFO_LIVE_P (res) = false;
7743 STMT_VINFO_VECTYPE (res) = NULL;
7744 STMT_VINFO_VEC_STMT (res) = NULL;
7745 STMT_VINFO_VECTORIZABLE (res) = true;
7746 STMT_VINFO_IN_PATTERN_P (res) = false;
7747 STMT_VINFO_RELATED_STMT (res) = NULL;
7748 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
7749 STMT_VINFO_DATA_REF (res) = NULL;
7750
7751 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
7752 STMT_VINFO_DR_OFFSET (res) = NULL;
7753 STMT_VINFO_DR_INIT (res) = NULL;
7754 STMT_VINFO_DR_STEP (res) = NULL;
7755 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
7756
7757 if (gimple_code (stmt) == GIMPLE_PHI
7758 && is_loop_header_bb_p (gimple_bb (stmt)))
7759 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
7760 else
7761 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
7762
7763 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
7764 STMT_SLP_TYPE (res) = loop_vect;
7765 GROUP_FIRST_ELEMENT (res) = NULL;
7766 GROUP_NEXT_ELEMENT (res) = NULL;
7767 GROUP_SIZE (res) = 0;
7768 GROUP_STORE_COUNT (res) = 0;
7769 GROUP_GAP (res) = 0;
7770 GROUP_SAME_DR_STMT (res) = NULL;
7771
7772 return res;
7773 }
7774
7775
7776 /* Create the vector that holds the stmt_vec_info structs.  */
7777
7778 void
7779 init_stmt_vec_info_vec (void)
7780 {
7781 gcc_assert (!stmt_vec_info_vec.exists ());
7782 stmt_vec_info_vec.create (50);
7783 }
7784
7785
7786 /* Free the vector that holds the stmt_vec_info structs.  */
7787
7788 void
7789 free_stmt_vec_info_vec (void)
7790 {
7791 unsigned int i;
7792 vec_void_p info;
7793 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
7794 if (info != NULL)
7795 free_stmt_vec_info (STMT_VINFO_STMT ((stmt_vec_info) info));
7796 gcc_assert (stmt_vec_info_vec.exists ());
7797 stmt_vec_info_vec.release ();
7798 }
7799
7800
7801 /* Free stmt vectorization related info. */
7802
7803 void
7804 free_stmt_vec_info (gimple stmt)
7805 {
7806 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7807
7808 if (!stmt_info)
7809 return;
7810
7811 /* Check if this statement has a related "pattern stmt"
7812 (introduced by the vectorizer during the pattern recognition
7813 pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
7814 too. */
7815 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
7816 {
7817 stmt_vec_info patt_info
7818 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7819 if (patt_info)
7820 {
7821 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
7822 gimple patt_stmt = STMT_VINFO_STMT (patt_info);
7823 gimple_set_bb (patt_stmt, NULL);
7824 tree lhs = gimple_get_lhs (patt_stmt);
7825 if (TREE_CODE (lhs) == SSA_NAME)
7826 release_ssa_name (lhs);
7827 if (seq)
7828 {
7829 gimple_stmt_iterator si;
7830 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
7831 {
7832 gimple seq_stmt = gsi_stmt (si);
7833 gimple_set_bb (seq_stmt, NULL);
7834 		  lhs = gimple_get_lhs (seq_stmt);
7835 if (TREE_CODE (lhs) == SSA_NAME)
7836 release_ssa_name (lhs);
7837 free_stmt_vec_info (seq_stmt);
7838 }
7839 }
7840 free_stmt_vec_info (patt_stmt);
7841 }
7842 }
7843
7844 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
7845 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
7846 set_vinfo_for_stmt (stmt, NULL);
7847 free (stmt_info);
7848 }
7849
7850
7851 /* Function get_vectype_for_scalar_type_and_size.
7852
7853 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
7854 by the target. */
7855
7856 static tree
7857 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
7858 {
7859 machine_mode inner_mode = TYPE_MODE (scalar_type);
7860 machine_mode simd_mode;
7861 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
7862 int nunits;
7863 tree vectype;
7864
7865 if (nbytes == 0)
7866 return NULL_TREE;
7867
7868 if (GET_MODE_CLASS (inner_mode) != MODE_INT
7869 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
7870 return NULL_TREE;
7871
7872 /* For vector types of elements whose mode precision doesn't
7873      match their type's precision we use an element type of mode
7874 precision. The vectorization routines will have to make sure
7875 they support the proper result truncation/extension.
7876 We also make sure to build vector types with INTEGER_TYPE
7877 component type only. */
7878 if (INTEGRAL_TYPE_P (scalar_type)
7879 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
7880 || TREE_CODE (scalar_type) != INTEGER_TYPE))
7881 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
7882 TYPE_UNSIGNED (scalar_type));
7883
7884 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
7885 When the component mode passes the above test simply use a type
7886 corresponding to that mode. The theory is that any use that
7887 would cause problems with this will disable vectorization anyway. */
7888 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
7889 && !INTEGRAL_TYPE_P (scalar_type))
7890 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
7891
7892 /* We can't build a vector type of elements with alignment bigger than
7893 their size. */
7894 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
7895 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
7896 TYPE_UNSIGNED (scalar_type));
7897
7898   /* If we fell back to using the mode, fail if there was
7899 no scalar type for it. */
7900 if (scalar_type == NULL_TREE)
7901 return NULL_TREE;
7902
7903 /* If no size was supplied use the mode the target prefers. Otherwise
7904 lookup a vector mode of the specified size. */
7905 if (size == 0)
7906 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
7907 else
7908 simd_mode = mode_for_vector (inner_mode, size / nbytes);
7909 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
7910 if (nunits <= 1)
7911 return NULL_TREE;
7912
7913 vectype = build_vector_type (scalar_type, nunits);
7914
7915 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
7916 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
7917 return NULL_TREE;
7918
7919 return vectype;
7920 }
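
/* Worked example (target dependent, for illustration only): for a 4-byte
   'int' SCALAR_TYPE and SIZE == 16 on a target with 128-bit vectors,
   mode_for_vector yields a 16-byte SIMD mode, so nunits = 16 / 4 = 4 and
   the result is a 4-element integer vector type; with SIZE == 0 the
   target's preferred SIMD mode is used instead.  */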
7921
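/* Vector size (in bytes) currently used for vectorization; while it is
   zero, get_vectype_for_scalar_type below latches it from the size of
   the first vector type it successfully computes.  */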
7922 unsigned int current_vector_size;
7923
7924 /* Function get_vectype_for_scalar_type.
7925
7926 Returns the vector type corresponding to SCALAR_TYPE as supported
7927 by the target. */
7928
7929 tree
7930 get_vectype_for_scalar_type (tree scalar_type)
7931 {
7932 tree vectype;
7933 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
7934 current_vector_size);
7935 if (vectype
7936 && current_vector_size == 0)
7937 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
7938 return vectype;
7939 }
7940
7941 /* Function get_same_sized_vectype
7942
7943 Returns a vector type corresponding to SCALAR_TYPE of size
7944 VECTOR_TYPE if supported by the target. */
7945
7946 tree
7947 get_same_sized_vectype (tree scalar_type, tree vector_type)
7948 {
7949 return get_vectype_for_scalar_type_and_size
7950 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
7951 }
7952
7953 /* Function vect_is_simple_use.
7954
7955 Input:
7956 LOOP_VINFO - the vect info of the loop that is being vectorized.
7957 BB_VINFO - the vect info of the basic block that is being vectorized.
7958 OPERAND - operand of STMT in the loop or bb.
7959 DEF - the defining stmt in case OPERAND is an SSA_NAME.
7960
7961 Returns whether a stmt with OPERAND can be vectorized.
7962 For loops, supportable operands are constants, loop invariants, and operands
7963 that are defined by the current iteration of the loop. Unsupportable
7964 operands are those that are defined by a previous iteration of the loop (as
7965 is the case in reduction/induction computations).
7966 For basic blocks, supportable operands are constants and bb invariants.
7967 For now, operands defined outside the basic block are not supported. */
7968
7969 bool
7970 vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
7971 bb_vec_info bb_vinfo, gimple *def_stmt,
7972 tree *def, enum vect_def_type *dt)
7973 {
7974 *def_stmt = NULL;
7975 *def = NULL_TREE;
7976 *dt = vect_unknown_def_type;
7977
7978 if (dump_enabled_p ())
7979 {
7980 dump_printf_loc (MSG_NOTE, vect_location,
7981 "vect_is_simple_use: operand ");
7982 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
7983 dump_printf (MSG_NOTE, "\n");
7984 }
7985
7986 if (CONSTANT_CLASS_P (operand))
7987 {
7988 *dt = vect_constant_def;
7989 return true;
7990 }
7991
7992 if (is_gimple_min_invariant (operand))
7993 {
7994 *def = operand;
7995 *dt = vect_external_def;
7996 return true;
7997 }
7998
7999 if (TREE_CODE (operand) != SSA_NAME)
8000 {
8001 if (dump_enabled_p ())
8002 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8003 "not ssa-name.\n");
8004 return false;
8005 }
8006
8007 if (SSA_NAME_IS_DEFAULT_DEF (operand))
8008 {
8009 *def = operand;
8010 *dt = vect_external_def;
8011 return true;
8012 }
8013
8014 *def_stmt = SSA_NAME_DEF_STMT (operand);
8015 if (dump_enabled_p ())
8016 {
8017 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
8018 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
8019 }
8020
8021 basic_block bb = gimple_bb (*def_stmt);
8022 if ((loop_vinfo && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), bb))
8023 || (bb_vinfo
8024 && (bb != BB_VINFO_BB (bb_vinfo)
8025 || gimple_code (*def_stmt) == GIMPLE_PHI)))
8026 *dt = vect_external_def;
8027 else
8028 {
8029 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
8030 if (bb_vinfo && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
8031 *dt = vect_external_def;
8032 else
8033 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
8034 }
8035
8036 if (dump_enabled_p ())
8037 {
8038 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
8039 switch (*dt)
8040 {
8041 case vect_uninitialized_def:
8042 dump_printf (MSG_NOTE, "uninitialized\n");
8043 break;
8044 case vect_constant_def:
8045 dump_printf (MSG_NOTE, "constant\n");
8046 break;
8047 case vect_external_def:
8048 dump_printf (MSG_NOTE, "external\n");
8049 break;
8050 case vect_internal_def:
8051 dump_printf (MSG_NOTE, "internal\n");
8052 break;
8053 case vect_induction_def:
8054 dump_printf (MSG_NOTE, "induction\n");
8055 break;
8056 case vect_reduction_def:
8057 dump_printf (MSG_NOTE, "reduction\n");
8058 break;
8059 case vect_double_reduction_def:
8060 dump_printf (MSG_NOTE, "double reduction\n");
8061 break;
8062 case vect_nested_cycle:
8063 dump_printf (MSG_NOTE, "nested cycle\n");
8064 break;
8065 case vect_unknown_def_type:
8066 dump_printf (MSG_NOTE, "unknown\n");
8067 break;
8068 }
8069 }
8070
8071 if (*dt == vect_unknown_def_type
8072 || (stmt
8073 && *dt == vect_double_reduction_def
8074 && gimple_code (stmt) != GIMPLE_PHI))
8075 {
8076 if (dump_enabled_p ())
8077 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8078 "Unsupported pattern.\n");
8079 return false;
8080 }
8081
8082 switch (gimple_code (*def_stmt))
8083 {
8084 case GIMPLE_PHI:
8085 *def = gimple_phi_result (*def_stmt);
8086 break;
8087
8088 case GIMPLE_ASSIGN:
8089 *def = gimple_assign_lhs (*def_stmt);
8090 break;
8091
8092 case GIMPLE_CALL:
8093 *def = gimple_call_lhs (*def_stmt);
8094 if (*def != NULL)
8095 break;
8096 /* FALLTHRU */
8097 default:
8098 if (dump_enabled_p ())
8099 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8100 "unsupported defining stmt:\n");
8101 return false;
8102 }
8103
8104 return true;
8105 }
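
/* Usage sketch (illustrative only, mirroring the calls made earlier in
   this file, e.g. in vectorizable_condition):

       gimple def_stmt;
       tree def;
       enum vect_def_type dt;

       if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo,
                                &def_stmt, &def, &dt))
         return false;        operand cannot be handled, give up
       if (dt == vect_constant_def || dt == vect_external_def)
         ...                  invariant operand, no vector def stmt needed

   The snippet is kept inside this comment so it is not compiled.  */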
8106
8107 /* Function vect_is_simple_use_1.
8108
8109    Same as vect_is_simple_use but also determines the vector operand
8110 type of OPERAND and stores it to *VECTYPE. If the definition of
8111 OPERAND is vect_uninitialized_def, vect_constant_def or
8112 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
8113    is responsible for computing the best suited vector type for the
8114 scalar operand. */
8115
8116 bool
8117 vect_is_simple_use_1 (tree operand, gimple stmt, loop_vec_info loop_vinfo,
8118 bb_vec_info bb_vinfo, gimple *def_stmt,
8119 tree *def, enum vect_def_type *dt, tree *vectype)
8120 {
8121 if (!vect_is_simple_use (operand, stmt, loop_vinfo, bb_vinfo, def_stmt,
8122 def, dt))
8123 return false;
8124
8125 /* Now get a vector type if the def is internal, otherwise supply
8126 NULL_TREE and leave it up to the caller to figure out a proper
8127 type for the use stmt. */
8128 if (*dt == vect_internal_def
8129 || *dt == vect_induction_def
8130 || *dt == vect_reduction_def
8131 || *dt == vect_double_reduction_def
8132 || *dt == vect_nested_cycle)
8133 {
8134 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
8135
8136 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8137 && !STMT_VINFO_RELEVANT (stmt_info)
8138 && !STMT_VINFO_LIVE_P (stmt_info))
8139 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
8140
8141 *vectype = STMT_VINFO_VECTYPE (stmt_info);
8142 gcc_assert (*vectype != NULL_TREE);
8143 }
8144 else if (*dt == vect_uninitialized_def
8145 || *dt == vect_constant_def
8146 || *dt == vect_external_def)
8147 *vectype = NULL_TREE;
8148 else
8149 gcc_unreachable ();
8150
8151 return true;
8152 }
8153
8154
8155 /* Function supportable_widening_operation
8156
8157 Check whether an operation represented by the code CODE is a
8158 widening operation that is supported by the target platform in
8159 vector form (i.e., when operating on arguments of type VECTYPE_IN
8160 producing a result of type VECTYPE_OUT).
8161
8162    Widening operations we currently support are NOP (CONVERT), FLOAT,
8163    WIDEN_MULT and WIDEN_LSHIFT.  This function checks if these are supported
8164 by the target platform either directly (via vector tree-codes), or via
8165 target builtins.
8166
8167 Output:
8168 - CODE1 and CODE2 are codes of vector operations to be used when
8169 vectorizing the operation, if available.
8170 - MULTI_STEP_CVT determines the number of required intermediate steps in
8171 case of multi-step conversion (like char->short->int - in that case
8172 MULTI_STEP_CVT will be 1).
8173 - INTERM_TYPES contains the intermediate type required to perform the
8174 widening operation (short in the above example). */
8175
8176 bool
8177 supportable_widening_operation (enum tree_code code, gimple stmt,
8178 tree vectype_out, tree vectype_in,
8179 enum tree_code *code1, enum tree_code *code2,
8180 int *multi_step_cvt,
8181 vec<tree> *interm_types)
8182 {
8183 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8184 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
8185 struct loop *vect_loop = NULL;
8186 machine_mode vec_mode;
8187 enum insn_code icode1, icode2;
8188 optab optab1, optab2;
8189 tree vectype = vectype_in;
8190 tree wide_vectype = vectype_out;
8191 enum tree_code c1, c2;
8192 int i;
8193 tree prev_type, intermediate_type;
8194 machine_mode intermediate_mode, prev_mode;
8195 optab optab3, optab4;
8196
8197 *multi_step_cvt = 0;
8198 if (loop_info)
8199 vect_loop = LOOP_VINFO_LOOP (loop_info);
8200
8201 switch (code)
8202 {
8203 case WIDEN_MULT_EXPR:
8204 /* The result of a vectorized widening operation usually requires
8205 two vectors (because the widened results do not fit into one vector).
8206 The generated vector results would normally be expected to be
8207 generated in the same order as in the original scalar computation,
8208 i.e. if 8 results are generated in each vector iteration, they are
8209 to be organized as follows:
8210 vect1: [res1,res2,res3,res4],
8211 vect2: [res5,res6,res7,res8].
8212
8213 However, in the special case that the result of the widening
8214 operation is used in a reduction computation only, the order doesn't
8215 matter (because when vectorizing a reduction we change the order of
8216 the computation). Some targets can take advantage of this and
8217 generate more efficient code. For example, targets like Altivec,
8218 that support widen_mult using a sequence of {mult_even,mult_odd}
8219 generate the following vectors:
8220 vect1: [res1,res3,res5,res7],
8221 vect2: [res2,res4,res6,res8].
8222
8223 When vectorizing outer-loops, we execute the inner-loop sequentially
8224 (each vectorized inner-loop iteration contributes to VF outer-loop
8225 	 iterations in parallel).  We therefore don't allow changing the
8226 order of the computation in the inner-loop during outer-loop
8227 vectorization. */
8228 /* TODO: Another case in which order doesn't *really* matter is when we
8229 widen and then contract again, e.g. (short)((int)x * y >> 8).
8230 Normally, pack_trunc performs an even/odd permute, whereas the
8231 repack from an even/odd expansion would be an interleave, which
8232 would be significantly simpler for e.g. AVX2. */
8233 /* In any case, in order to avoid duplicating the code below, recurse
8234 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
8235 are properly set up for the caller. If we fail, we'll continue with
8236 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
8237 if (vect_loop
8238 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
8239 && !nested_in_vect_loop_p (vect_loop, stmt)
8240 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
8241 stmt, vectype_out, vectype_in,
8242 code1, code2, multi_step_cvt,
8243 interm_types))
8244 {
8245 /* Elements in a vector with vect_used_by_reduction property cannot
8246 be reordered if the use chain with this property does not have the
8247 	     same operation.  One such example is s += a * b, where elements
8248 in a and b cannot be reordered. Here we check if the vector defined
8249 by STMT is only directly used in the reduction statement. */
8250 tree lhs = gimple_assign_lhs (stmt);
8251 use_operand_p dummy;
8252 gimple use_stmt;
8253 stmt_vec_info use_stmt_info = NULL;
8254 if (single_imm_use (lhs, &dummy, &use_stmt)
8255 && (use_stmt_info = vinfo_for_stmt (use_stmt))
8256 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
8257 return true;
8258 }
8259 c1 = VEC_WIDEN_MULT_LO_EXPR;
8260 c2 = VEC_WIDEN_MULT_HI_EXPR;
8261 break;
8262
8263 case VEC_WIDEN_MULT_EVEN_EXPR:
8264 /* Support the recursion induced just above. */
8265 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
8266 c2 = VEC_WIDEN_MULT_ODD_EXPR;
8267 break;
8268
8269 case WIDEN_LSHIFT_EXPR:
8270 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
8271 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
8272 break;
8273
8274 CASE_CONVERT:
8275 c1 = VEC_UNPACK_LO_EXPR;
8276 c2 = VEC_UNPACK_HI_EXPR;
8277 break;
8278
8279 case FLOAT_EXPR:
8280 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
8281 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
8282 break;
8283
8284 case FIX_TRUNC_EXPR:
8285 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
8286 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
8287 computing the operation. */
8288 return false;
8289
8290 default:
8291 gcc_unreachable ();
8292 }
8293
8294 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
8295 std::swap (c1, c2);
8296
8297 if (code == FIX_TRUNC_EXPR)
8298 {
8299 /* The signedness is determined from output operand. */
8300 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8301 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
8302 }
8303 else
8304 {
8305 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8306 optab2 = optab_for_tree_code (c2, vectype, optab_default);
8307 }
8308
8309 if (!optab1 || !optab2)
8310 return false;
8311
8312 vec_mode = TYPE_MODE (vectype);
8313 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
8314 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
8315 return false;
8316
8317 *code1 = c1;
8318 *code2 = c2;
8319
8320 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8321 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8322 return true;
8323
8324 /* Check if it's a multi-step conversion that can be done using intermediate
8325 types. */
8326
8327 prev_type = vectype;
8328 prev_mode = vec_mode;
8329
8330 if (!CONVERT_EXPR_CODE_P (code))
8331 return false;
8332
8333 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8334      intermediate steps in the promotion sequence.  We try
8335      MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
8336 not. */
8337 interm_types->create (MAX_INTERM_CVT_STEPS);
8338 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8339 {
8340 intermediate_mode = insn_data[icode1].operand[0].mode;
8341 intermediate_type
8342 = lang_hooks.types.type_for_mode (intermediate_mode,
8343 TYPE_UNSIGNED (prev_type));
8344 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
8345 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
8346
8347 if (!optab3 || !optab4
8348 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
8349 || insn_data[icode1].operand[0].mode != intermediate_mode
8350 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
8351 || insn_data[icode2].operand[0].mode != intermediate_mode
8352 || ((icode1 = optab_handler (optab3, intermediate_mode))
8353 == CODE_FOR_nothing)
8354 || ((icode2 = optab_handler (optab4, intermediate_mode))
8355 == CODE_FOR_nothing))
8356 break;
8357
8358 interm_types->quick_push (intermediate_type);
8359 (*multi_step_cvt)++;
8360
8361 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8362 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8363 return true;
8364
8365 prev_type = intermediate_type;
8366 prev_mode = intermediate_mode;
8367 }
8368
8369 interm_types->release ();
8370 return false;
8371 }
8372
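/* Illustrative outcome (assuming the target supports the intermediate
   step): for a char -> int conversion analyzed as CASE_CONVERT, a call
   may report

       *code1 = VEC_UNPACK_LO_EXPR, *code2 = VEC_UNPACK_HI_EXPR,
       *multi_step_cvt = 1, *interm_types = { short-based vector type }

   i.e. one intermediate promotion through short, matching the
   char->short->int example in the function comment above.  */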
8373
8374 /* Function supportable_narrowing_operation
8375
8376 Check whether an operation represented by the code CODE is a
8377 narrowing operation that is supported by the target platform in
8378 vector form (i.e., when operating on arguments of type VECTYPE_IN
8379 and producing a result of type VECTYPE_OUT).
8380
8381 Narrowing operations we currently support are NOP (CONVERT) and
8382 FIX_TRUNC. This function checks if these operations are supported by
8383 the target platform directly via vector tree-codes.
8384
8385 Output:
8386 - CODE1 is the code of a vector operation to be used when
8387 vectorizing the operation, if available.
8388 - MULTI_STEP_CVT determines the number of required intermediate steps in
8389 case of multi-step conversion (like int->short->char - in that case
8390 MULTI_STEP_CVT will be 1).
8391 - INTERM_TYPES contains the intermediate type required to perform the
8392 narrowing operation (short in the above example). */
8393
8394 bool
8395 supportable_narrowing_operation (enum tree_code code,
8396 tree vectype_out, tree vectype_in,
8397 enum tree_code *code1, int *multi_step_cvt,
8398 vec<tree> *interm_types)
8399 {
8400 machine_mode vec_mode;
8401 enum insn_code icode1;
8402 optab optab1, interm_optab;
8403 tree vectype = vectype_in;
8404 tree narrow_vectype = vectype_out;
8405 enum tree_code c1;
8406 tree intermediate_type;
8407 machine_mode intermediate_mode, prev_mode;
8408 int i;
8409 bool uns;
8410
8411 *multi_step_cvt = 0;
8412 switch (code)
8413 {
8414 CASE_CONVERT:
8415 c1 = VEC_PACK_TRUNC_EXPR;
8416 break;
8417
8418 case FIX_TRUNC_EXPR:
8419 c1 = VEC_PACK_FIX_TRUNC_EXPR;
8420 break;
8421
8422 case FLOAT_EXPR:
8423 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
8424 tree code and optabs used for computing the operation. */
8425 return false;
8426
8427 default:
8428 gcc_unreachable ();
8429 }
8430
8431 if (code == FIX_TRUNC_EXPR)
8432 /* The signedness is determined from output operand. */
8433 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8434 else
8435 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8436
8437 if (!optab1)
8438 return false;
8439
8440 vec_mode = TYPE_MODE (vectype);
8441 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
8442 return false;
8443
8444 *code1 = c1;
8445
8446 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8447 return true;
8448
8449 /* Check if it's a multi-step conversion that can be done using intermediate
8450 types. */
8451 prev_mode = vec_mode;
8452 if (code == FIX_TRUNC_EXPR)
8453 uns = TYPE_UNSIGNED (vectype_out);
8454 else
8455 uns = TYPE_UNSIGNED (vectype);
8456
8457 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
8458 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
8459 costly than signed. */
8460 if (code == FIX_TRUNC_EXPR && uns)
8461 {
8462 enum insn_code icode2;
8463
8464 intermediate_type
8465 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
8466 interm_optab
8467 = optab_for_tree_code (c1, intermediate_type, optab_default);
8468 if (interm_optab != unknown_optab
8469 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
8470 && insn_data[icode1].operand[0].mode
8471 == insn_data[icode2].operand[0].mode)
8472 {
8473 uns = false;
8474 optab1 = interm_optab;
8475 icode1 = icode2;
8476 }
8477 }
8478
8479 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8480      intermediate steps in the narrowing sequence.  We try
8481 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
8482 interm_types->create (MAX_INTERM_CVT_STEPS);
8483 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8484 {
8485 intermediate_mode = insn_data[icode1].operand[0].mode;
8486 intermediate_type
8487 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
8488 interm_optab
8489 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
8490 optab_default);
8491 if (!interm_optab
8492 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
8493 || insn_data[icode1].operand[0].mode != intermediate_mode
8494 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
8495 == CODE_FOR_nothing))
8496 break;
8497
8498 interm_types->quick_push (intermediate_type);
8499 (*multi_step_cvt)++;
8500
8501 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8502 return true;
8503
8504 prev_mode = intermediate_mode;
8505 optab1 = interm_optab;
8506 }
8507
8508 interm_types->release ();
8509 return false;
8510 }
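
/* Illustrative outcome (assuming target support): for an int -> char
   conversion a call may report *code1 = VEC_PACK_TRUNC_EXPR,
   *multi_step_cvt = 1 and *interm_types = { short-based vector type },
   matching the int->short->char example in the function comment above.  */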