Don't allow latch with phi in try_transform_to_exit_first_loop_alt
gcc/omp-low.c
/* Lowering pass for OMP directives.  Converts OMP directives into explicit
   calls to the runtime library (libgomp), data marshalling to implement data
   sharing and copying clauses, offloading to accelerators, and more.

   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "flags.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "except.h"
#include "splay-tree.h"
#include "cfgloop.h"
#include "common/common-target.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "tree-cfgcleanup.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "tree-nested.h"
#include "tree-eh.h"
#include "cilk.h"
#include "context.h"
#include "lto-section-names.h"
#include "gomp-constants.h"
#include "gimple-pretty-print.h"

/* Lowering of OMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for regions which are then moved to a new
   function, to be invoked by the thread library, or offloaded.  */
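/* For illustration only: conceptually, a construct such as

       #pragma omp parallel
         body;

   ends up outlined into a child function and replaced by a call into
   libgomp, roughly

       static void foo._omp_fn.0 (struct .omp_data_s *.omp_data_i)
       { body; }
       ...
       GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);

   where the exact entry point and flags depend on the clauses used.  */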

/* OMP region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding GIMPLE_OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the GIMPLE_OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the GIMPLE_OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for GIMPLE_OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* Schedule modifiers.  */
  unsigned char sched_modifiers;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;

  /* The ordered stmt if type is GIMPLE_OMP_ORDERED and it has
     a depend clause.  */
  gomp_ordered *ord_stmt;
};

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple *stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
     barriers should jump during the omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
};

/* A structure holding the elements of:
   for (V = N1; V cond N2; V += STEP) [...] */

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};
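/* For example (illustrative only), the loop

       for (i = 0; i < n; i += 4)

   is described by v = i, n1 = 0, n2 = n, cond_code = LT_EXPR and
   step = 4.  */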

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gomp_for *for_stmt;
  tree pre, iter_type;
  int collapse;
  int ordered;
  bool have_nowait, have_ordered, simd_schedule;
  unsigned char sched_modifiers;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};

/* Describe the OpenACC looping structure of a function.  The entire
   function is held in a 'NULL' loop.  */

struct oacc_loop
{
  oacc_loop *parent;   /* Containing loop.  */

  oacc_loop *child;    /* First inner loop.  */

  oacc_loop *sibling;  /* Next loop within same parent.  */

  location_t loc;      /* Location of the loop start.  */

  gcall *marker;       /* Initial head marker.  */

  gcall *heads[GOMP_DIM_MAX];  /* Head marker functions.  */
  gcall *tails[GOMP_DIM_MAX];  /* Tail marker functions.  */

  tree routine;        /* Pseudo-loop enclosing a routine.  */

  unsigned mask;       /* Partitioning mask.  */
  unsigned flags;      /* Partitioning flags.  */
  tree chunk_size;     /* Chunk size.  */
  gcall *head_end;     /* Final marker of head sequence.  */
};

/* Flags for an OpenACC loop.  */

enum oacc_loop_flags {
  OLF_SEQ         = 1u << 0,  /* Explicitly sequential.  */
  OLF_AUTO        = 1u << 1,  /* Compiler chooses axes.  */
  OLF_INDEPENDENT = 1u << 2,  /* Iterations are known independent.  */
  OLF_GANG_STATIC = 1u << 3,  /* Gang partitioning is static (has op).  */

  /* Explicitly specified loop axes.  */
  OLF_DIM_BASE = 4,
  OLF_DIM_GANG   = 1u << (OLF_DIM_BASE + GOMP_DIM_GANG),
  OLF_DIM_WORKER = 1u << (OLF_DIM_BASE + GOMP_DIM_WORKER),
  OLF_DIM_VECTOR = 1u << (OLF_DIM_BASE + GOMP_DIM_VECTOR),

  OLF_MAX = OLF_DIM_BASE + GOMP_DIM_MAX
};
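/* Illustrative example: a loop explicitly partitioned over both the
   gang and vector axes would carry (OLF_DIM_GANG | OLF_DIM_VECTOR) in
   its flags, i.e. bits OLF_DIM_BASE + GOMP_DIM_GANG and
   OLF_DIM_BASE + GOMP_DIM_VECTOR.  */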


static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static vec<omp_context *> taskreg_contexts;
static bool omp_any_child_fn_dumped;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
static gphi *find_phi_with_arg_on_edge (tree, edge);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Return true if CTX corresponds to an oacc parallel region.  */

static bool
is_oacc_parallel (omp_context *ctx)
{
  enum gimple_code outer_type = gimple_code (ctx->stmt);
  return ((outer_type == GIMPLE_OMP_TARGET)
          && (gimple_omp_target_kind (ctx->stmt)
              == GF_OMP_TARGET_KIND_OACC_PARALLEL));
}

/* Return true if CTX corresponds to an oacc kernels region.  */

static bool
is_oacc_kernels (omp_context *ctx)
{
  enum gimple_code outer_type = gimple_code (ctx->stmt);
  return ((outer_type == GIMPLE_OMP_TARGET)
          && (gimple_omp_target_kind (ctx->stmt)
              == GF_OMP_TARGET_KIND_OACC_KERNELS));
}

/* If DECL is the artificial dummy VAR_DECL created for non-static
   data member privatization, return the underlying "this" parameter,
   otherwise return NULL.  */

tree
omp_member_access_dummy_var (tree decl)
{
  if (!VAR_P (decl)
      || !DECL_ARTIFICIAL (decl)
      || !DECL_IGNORED_P (decl)
      || !DECL_HAS_VALUE_EXPR_P (decl)
      || !lang_hooks.decls.omp_disregard_value_expr (decl, false))
    return NULL_TREE;

  tree v = DECL_VALUE_EXPR (decl);
  if (TREE_CODE (v) != COMPONENT_REF)
    return NULL_TREE;

  while (1)
    switch (TREE_CODE (v))
      {
      case COMPONENT_REF:
      case MEM_REF:
      case INDIRECT_REF:
      CASE_CONVERT:
      case POINTER_PLUS_EXPR:
        v = TREE_OPERAND (v, 0);
        continue;
      case PARM_DECL:
        if (DECL_CONTEXT (v) == current_function_decl
            && DECL_ARTIFICIAL (v)
            && TREE_CODE (TREE_TYPE (v)) == POINTER_TYPE)
          return v;
        return NULL_TREE;
      default:
        return NULL_TREE;
      }
}
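/* For instance, for a C++ non-static data member D privatized in a
   member function, DECL_VALUE_EXPR of the dummy VAR_DECL is an access
   of the form (*this).D, and the walk above descends through the
   component references to the artificial 'this' PARM_DECL.  */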

/* Helper for unshare_and_remap, called through walk_tree.  */

static tree
unshare_and_remap_1 (tree *tp, int *walk_subtrees, void *data)
{
  tree *pair = (tree *) data;
  if (*tp == pair[0])
    {
      *tp = unshare_expr (pair[1]);
      *walk_subtrees = 0;
    }
  else if (IS_TYPE_OR_DECL_P (*tp))
    *walk_subtrees = 0;
  return NULL_TREE;
}

/* Return unshare_expr (X) with all occurrences of FROM
   replaced with TO.  */

static tree
unshare_and_remap (tree x, tree from, tree to)
{
  tree pair[2] = { from, to };
  x = unshare_expr (x);
  walk_tree (&x, unshare_and_remap_1, pair, NULL);
  return x;
}

/* Holds offload tables with decls.  */
vec<tree, va_gc> *offload_funcs, *offload_vars;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp taskloop.  */

static inline bool
is_taskloop_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
         && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return is_parallel_ctx (ctx) || is_task_ctx (ctx);
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
                      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
                    == GF_OMP_FOR_KIND_DISTRIBUTE;
  bool taskloop = gimple_omp_for_kind (for_stmt)
                  == GF_OMP_FOR_KIND_TASKLOOP;
  tree iterv, countv;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  if (gimple_omp_for_collapse (for_stmt) > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->collapse = 1;
  fd->ordered = 0;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->sched_modifiers = 0;
  fd->chunk_size = NULL_TREE;
  fd->simd_schedule = false;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        if (OMP_CLAUSE_ORDERED_EXPR (t))
          fd->ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (t));
        break;
      case OMP_CLAUSE_SCHEDULE:
        gcc_assert (!distribute && !taskloop);
        fd->sched_kind
          = (enum omp_clause_schedule_kind)
            (OMP_CLAUSE_SCHEDULE_KIND (t) & OMP_CLAUSE_SCHEDULE_MASK);
        fd->sched_modifiers = (OMP_CLAUSE_SCHEDULE_KIND (t)
                               & ~OMP_CLAUSE_SCHEDULE_MASK);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        fd->simd_schedule = OMP_CLAUSE_SCHEDULE_SIMD (t);
        break;
      case OMP_CLAUSE_DIST_SCHEDULE:
        gcc_assert (distribute);
        fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_COLLAPSE:
        fd->collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (t));
        if (fd->collapse > 1)
          {
            collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
            collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
          }
        break;
      default:
        break;
      }
  if (fd->ordered && fd->collapse == 1 && loops != NULL)
    {
      fd->loops = loops;
      iterv = NULL_TREE;
      countv = NULL_TREE;
      collapse_iter = &iterv;
      collapse_count = &countv;
    }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (taskloop)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_RUNTIME;
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }

  int cnt = fd->ordered ? fd->ordered : fd->collapse;
  for (i = 0; i < cnt; i++)
    {
      if (i == 0 && fd->collapse == 1 && (fd->ordered == 0 || loops == NULL))
        loop = &fd->loop;
      else if (loops != NULL)
        loop = loops + i;
      else
        loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
        {
        case LT_EXPR:
        case GT_EXPR:
          break;
        case NE_EXPR:
          gcc_assert (gimple_omp_for_kind (for_stmt)
                      == GF_OMP_FOR_KIND_CILKSIMD
                      || (gimple_omp_for_kind (for_stmt)
                          == GF_OMP_FOR_KIND_CILKFOR));
          break;
        case LE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
          else
            loop->n2 = fold_build2_loc (loc,
                                        PLUS_EXPR, TREE_TYPE (loop->n2),
                                        loop->n2,
                                        build_int_cst (TREE_TYPE (loop->n2),
                                                       1));
          loop->cond_code = LT_EXPR;
          break;
        case GE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
          else
            loop->n2 = fold_build2_loc (loc,
                                        MINUS_EXPR, TREE_TYPE (loop->n2),
                                        loop->n2,
                                        build_int_cst (TREE_TYPE (loop->n2),
                                                       1));
          loop->cond_code = GT_EXPR;
          break;
        default:
          gcc_unreachable ();
        }
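      /* At this point a source-level condition such as i <= n has been
         canonicalized above to i < n + 1 (and i >= n to i > n - 1), so
         only LT_EXPR and GT_EXPR (plus NE_EXPR for Cilk) survive past
         here.  */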

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
        {
        case PLUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          break;
        case POINTER_PLUS_EXPR:
          loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
          break;
        case MINUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          loop->step = fold_build1_loc (loc,
                                        NEGATE_EXPR, TREE_TYPE (loop->step),
                                        loop->step);
          break;
        default:
          gcc_unreachable ();
        }

      if (simd
          || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
              && !fd->have_ordered))
        {
          if (fd->collapse == 1)
            iter_type = TREE_TYPE (loop->v);
          else if (i == 0
                   || TYPE_PRECISION (iter_type)
                      < TYPE_PRECISION (TREE_TYPE (loop->v)))
            iter_type
              = build_nonstandard_integer_type
                  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
        }
      else if (iter_type != long_long_unsigned_type_node)
        {
          if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
            iter_type = long_long_unsigned_type_node;
          else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
                   && TYPE_PRECISION (TREE_TYPE (loop->v))
                      >= TYPE_PRECISION (iter_type))
            {
              tree n;

              if (loop->cond_code == LT_EXPR)
                n = fold_build2_loc (loc,
                                     PLUS_EXPR, TREE_TYPE (loop->v),
                                     loop->n2, loop->step);
              else
                n = loop->n1;
              if (TREE_CODE (n) != INTEGER_CST
                  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
                iter_type = long_long_unsigned_type_node;
            }
          else if (TYPE_PRECISION (TREE_TYPE (loop->v))
                   > TYPE_PRECISION (iter_type))
            {
              tree n1, n2;

              if (loop->cond_code == LT_EXPR)
                {
                  n1 = loop->n1;
                  n2 = fold_build2_loc (loc,
                                        PLUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                }
              else
                {
                  n1 = fold_build2_loc (loc,
                                        MINUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                  n2 = loop->n1;
                }
              if (TREE_CODE (n1) != INTEGER_CST
                  || TREE_CODE (n2) != INTEGER_CST
                  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
                  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
                iter_type = long_long_unsigned_type_node;
            }
        }

      if (i >= fd->collapse)
        continue;

      if (collapse_count && *collapse_count == NULL)
        {
          t = fold_binary (loop->cond_code, boolean_type_node,
                           fold_convert (TREE_TYPE (loop->v), loop->n1),
                           fold_convert (TREE_TYPE (loop->v), loop->n2));
          if (t && integer_zerop (t))
            count = build_zero_cst (long_long_unsigned_type_node);
          else if ((i == 0 || count != NULL_TREE)
                   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                   && TREE_CONSTANT (loop->n1)
                   && TREE_CONSTANT (loop->n2)
                   && TREE_CODE (loop->step) == INTEGER_CST)
            {
              tree itype = TREE_TYPE (loop->v);

              if (POINTER_TYPE_P (itype))
                itype = signed_type_for (itype);
              t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
              t = fold_build2_loc (loc,
                                   PLUS_EXPR, itype,
                                   fold_convert_loc (loc, itype, loop->step),
                                   t);
              t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n2));
              t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n1));
              if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                                     fold_build1_loc (loc, NEGATE_EXPR,
                                                      itype, t),
                                     fold_build1_loc (loc, NEGATE_EXPR, itype,
                                                      fold_convert_loc
                                                        (loc, itype,
                                                         loop->step)));
              else
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                                     fold_convert_loc (loc, itype,
                                                       loop->step));
              t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
              if (count != NULL_TREE)
                count = fold_build2_loc (loc,
                                         MULT_EXPR,
                                         long_long_unsigned_type_node,
                                         count, t);
              else
                count = t;
              if (TREE_CODE (count) != INTEGER_CST)
                count = NULL_TREE;
            }
          else if (count && !integer_zerop (count))
            count = NULL_TREE;
        }
    }
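  /* The count computed above is the usual ceiling division
     (n2 - n1 + step + (cond_code == LT_EXPR ? -1 : 1)) / step; e.g.
     for (i = 0; i < 10; i += 3) yields (10 - 0 + 3 - 1) / 3 == 4
     iterations (i = 0, 3, 6, 9).  */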

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
        iter_type = long_long_unsigned_type_node;
      else
        iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
        *collapse_count = fold_convert_loc (loc, iter_type, count);
      else
        *collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1 || (fd->ordered && loops))
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
  else if (loops)
    loops[0] = fd->loop;
}


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

     #pragma omp parallel for schedule (guided, i * 4)
     for (j ...)

   Is lowered into:

     # BLOCK 2 (PAR_ENTRY_BB)
     .omp_data_o.i = i;
     #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]

     # BLOCK 3 (WS_ENTRY_BB)
     .omp_data_i = &.omp_data_o;
     D.1667 = .omp_data_i->i;
     D.1598 = D.1667 * 4;
     #pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple *ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


static int omp_max_vf (void);

/* Adjust CHUNK_SIZE from SCHEDULE clause, depending on simd modifier
   presence (SIMD_SCHEDULE).  */

static tree
omp_adjust_chunk_size (tree chunk_size, bool simd_schedule)
{
  if (!simd_schedule)
    return chunk_size;

  int vf = omp_max_vf ();
  if (vf == 1)
    return chunk_size;

  tree type = TREE_TYPE (chunk_size);
  chunk_size = fold_build2 (PLUS_EXPR, type, chunk_size,
                            build_int_cst (type, vf - 1));
  return fold_build2 (BIT_AND_EXPR, type, chunk_size,
                      build_int_cst (type, -vf));
}
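/* E.g. with a maximum vectorization factor of 8, a chunk size of 10
   is rounded up as (10 + 7) & -8 == 16, keeping chunks a multiple of
   the SIMD width.  */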


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple *par_stmt, gimple *ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (for_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (for_stmt))
        {
          tree innerc
            = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
                               OMP_CLAUSE__LOOPTEMP_);
          gcc_assert (innerc);
          n1 = OMP_CLAUSE_DECL (innerc);
          innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
                                    OMP_CLAUSE__LOOPTEMP_);
          gcc_assert (innerc);
          n2 = OMP_CLAUSE_DECL (innerc);
        }

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
        {
          t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
          t = omp_adjust_chunk_size (t, fd.simd_schedule);
          ws_args->quick_push (t);
        }

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
         GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
         the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
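/* For a loop, the arguments collected above (n1, n2, step and
   optionally the adjusted chunk size) presumably line up with the
   trailing start/end/incr[/chunk_size] arguments of the combined
   GOMP_parallel_loop_* entry points in libgomp.  */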


/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
          && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
          || (last_and_only_stmt (ws_entry_bb)
              && last_and_only_stmt (par_exit_bb))))
    {
      gimple *par_stmt = last_stmt (par_entry_bb);
      gimple *ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
        {
          /* If this is a combined parallel loop, we need to determine
             whether or not to use the combined library calls.  There
             are two cases where we do not apply the transformation:
             static loops and any kind of ordered loop.  In the first
             case, we already open code the loop so there is no need
             to do anything else.  In the latter case, the combined
             parallel loop call would still need extra synchronization
             to implement ordered semantics, so there would not be any
             gain in using the combined call.  */
          tree clauses = gimple_omp_for_clauses (ws_stmt);
          tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
          if (c == NULL
              || ((OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK)
                  == OMP_CLAUSE_SCHEDULE_STATIC)
              || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
            {
              region->is_combined_parallel = false;
              region->inner->is_combined_parallel = false;
              return;
            }
        }

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Return the type of a decl.  If the decl is reference type,
   return its base type.  */
static inline tree
get_base_type (tree decl)
{
  tree type = TREE_TYPE (decl);
  if (is_reference (decl))
    type = TREE_TYPE (type);
  return type;
}

/* Lookup variables.  The "maybe" form allows for the variable to not
   have been entered; otherwise we assert that the variable must have
   been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (splay_tree_key key, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
                         ? ctx->sfield_map : ctx->field_map, key);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  return lookup_sfield ((splay_tree_key) var, ctx);
}

static inline tree
maybe_lookup_field (splay_tree_key key, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, key);
  return n ? (tree) n->value : NULL_TREE;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  return maybe_lookup_field ((splay_tree_key) var, ctx);
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt));

      /* ??? Trivially accessible from anywhere.  But why would we even
         be passing an address in this case?  Should we simply assert
         this to be false, or should we have a cleanup pass that removes
         these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
        return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
         without analyzing the expression whether or not its location
         is accessible to anyone else.  In the case of nested parallel
         regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
        return true;

      /* Do not use copy-in/copy-out for variables that have their
         address taken.  */
      if (TREE_ADDRESSABLE (decl))
        return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
         for these.  */
      if (TREE_READONLY (decl)
          || ((TREE_CODE (decl) == RESULT_DECL
               || TREE_CODE (decl) == PARM_DECL)
              && DECL_BY_REFERENCE (decl)))
        return false;

      /* Disallow copy-in/out in nested parallel if
         decl is shared in outer parallel, otherwise
         each thread could store the shared variable
         in its own copy-in location, making the
         variable no longer really shared.  */
      if (shared_ctx->is_nested)
        {
          omp_context *up;

          for (up = shared_ctx->outer; up; up = up->outer)
            if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
              break;

          if (up)
            {
              tree c;

              for (c = gimple_omp_taskreg_clauses (up->stmt);
                   c; c = OMP_CLAUSE_CHAIN (c))
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
                    && OMP_CLAUSE_DECL (c) == decl)
                  break;

              if (c)
                goto maybe_mark_addressable_and_ret;
            }
        }

      /* For tasks avoid using copy-in/out.  As tasks can be
         deferred or executed in a different thread, when GOMP_task
         returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
        {
          tree outer;
        maybe_mark_addressable_and_ret:
          outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
          if (is_gimple_reg (outer) && !omp_member_access_dummy_var (outer))
            {
              /* Taking address of OUTER in lower_send_shared_vars
                 might need regimplification of everything that uses the
                 variable.  */
              if (!task_shared_vars)
                task_shared_vars = BITMAP_ALLOC (NULL);
              bitmap_set_bit (task_shared_vars, DECL_UID (outer));
              TREE_ADDRESSABLE (outer) = 1;
            }
          return true;
        }
    }

  return false;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  /* If VAR is listed in task_shared_vars, it means it wasn't
     originally addressable and is addressable just because the task
     needs to take its address.  But we don't need to take the address
     of privatizations from that var.  */
  if (TREE_ADDRESSABLE (var)
      && task_shared_vars
      && bitmap_bit_p (task_shared_vars, DECL_UID (var)))
    TREE_ADDRESSABLE (copy) = 0;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */
static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  TREE_THIS_NOTRAP (x) = 1;
  x = omp_build_component_ref (x, field);
  if (by_ref)
    {
      x = build_simple_mem_ref (x);
      TREE_THIS_NOTRAP (x) = 1;
    }

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx, bool lastprivate = false)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx, lastprivate);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
           && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
         even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
        x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
        x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
        x = var;
    }
  else if (lastprivate && is_taskloop_ctx (ctx))
    {
      gcc_assert (ctx->outer);
      splay_tree_node n
        = splay_tree_lookup (ctx->outer->field_map,
                             (splay_tree_key) &DECL_UID (var));
      if (n == NULL)
        {
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
                                                             ctx->outer)))
            x = var;
          else
            x = lookup_decl (var, ctx->outer);
        }
      else
        {
          tree field = (tree) n->value;
          /* If the receiver record type was remapped in the child function,
             remap the field into the new record type.  */
          x = maybe_lookup_field (field, ctx->outer);
          if (x != NULL)
            field = x;

          x = build_simple_mem_ref (ctx->outer->receiver_decl);
          x = omp_build_component_ref (x, field);
          if (use_pointer_for_field (var, ctx->outer))
            x = build_simple_mem_ref (x);
        }
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else if (omp_member_access_dummy_var (var))
    x = var;
  else
    gcc_unreachable ();

  if (x == var)
    {
      tree t = omp_member_access_dummy_var (var);
      if (t)
        {
          x = DECL_VALUE_EXPR (var);
          tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
          if (o != t)
            x = unshare_and_remap (x, t, o);
          else
            x = unshare_expr (x);
        }
    }

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (splay_tree_key key, omp_context *ctx)
{
  tree field = lookup_sfield (key, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  return build_sender_ref ((splay_tree_key) var, ctx);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  If
   BASE_POINTERS_RESTRICT, declare the field with restrict.  */

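/* A reading of the uses below (not an authoritative spec): bit 0 of
   MASK installs a field in the receiver record (field_map), bit 1 one
   in the sender record (sfield_map), bit 2 wraps the type in a double
   pointer for array types, and bit 3 keys the maps by &DECL_UID (VAR)
   instead of VAR itself.  */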
static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx,
                   bool base_pointers_restrict = false)
{
  tree field, type, sfield = NULL_TREE;
  splay_tree_key key = (splay_tree_key) var;

  if ((mask & 8) != 0)
    {
      key = (splay_tree_key) &DECL_UID (var);
      gcc_checking_assert (key != (splay_tree_key) var);
    }
  gcc_assert ((mask & 1) == 0
              || !splay_tree_lookup (ctx->field_map, key));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
              || !splay_tree_lookup (ctx->sfield_map, key));
  gcc_assert ((mask & 3) == 3
              || !is_gimple_omp_oacc (ctx->stmt));

  type = TREE_TYPE (var);
  /* Prevent redeclaring the var in the split-off function with a restrict
     pointer type.  Note that we only clear type itself, restrict qualifiers
     in the pointed-to type will be ignored by points-to analysis.  */
  if (POINTER_TYPE_P (type)
      && TYPE_RESTRICT (type))
    type = build_qualified_type (type,
                                 TYPE_QUALS (type) & ~TYPE_QUAL_RESTRICT);

  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    {
      type = build_pointer_type (type);
      if (base_pointers_restrict)
        type = build_qualified_type (type, TYPE_QUAL_RESTRICT);
    }
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
                      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
        {
          sfield = build_decl (DECL_SOURCE_LOCATION (var),
                               FIELD_DECL, DECL_NAME (var), type);
          DECL_ABSTRACT_ORIGIN (sfield) = var;
          DECL_ALIGN (sfield) = DECL_ALIGN (field);
          DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
          TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
          insert_field_into_struct (ctx->srecord_type, sfield);
        }
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
        {
          tree t;

          ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
          ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers,
                                            0, 0);
          for (t = TYPE_FIELDS (ctx->record_type); t; t = TREE_CHAIN (t))
            {
              sfield = build_decl (DECL_SOURCE_LOCATION (t),
                                   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
              DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
              insert_field_into_struct (ctx->srecord_type, sfield);
              splay_tree_insert (ctx->sfield_map,
                                 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
                                 (splay_tree_value) sfield);
            }
        }
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
                                           : ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, key, (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, key, (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
           gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
               region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
             region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

/* Create a new parallel region with entry at basic block BB inside
   region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
                struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
         regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
         regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple *stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gomp_task *task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gbind *bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_node *node = cgraph_node::get_create (child_fn);
  node->parallelized_function = 1;
  cgraph_node::add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  delete ctx->cb.decl_map;

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
                         TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f; f = DECL_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          DECL_CHAIN (new_f) = new_fields;
          walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
          walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field.  */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  /* In a target region we never modify any of the pointers in *.omp_data_i,
     so attempt to help the optimizers.  */
  if (is_gimple_omp_offloaded (ctx->stmt))
    type = build_qualified_type (type, TYPE_QUAL_CONST);

  TREE_TYPE (ctx->receiver_decl)
    = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  If BASE_POINTERS_RESTRICT, install var field with
   restrict.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx,
                      bool base_pointers_restrict = false)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            goto do_private;
          else if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          /* Ignore shared directives in teams construct.  */
          if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
            {
              /* Global variables don't need to be copied,
                 the receiver side will use them directly.  */
              tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
              if (is_global_var (odecl))
                break;
              insert_decl_map (&ctx->cb, decl, odecl);
              break;
            }
          gcc_assert (is_taskreg_ctx (ctx));
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          /* Global variables don't need to be copied,
             the receiver side will use them directly.  */
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            break;
          if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
            {
              use_pointer_for_field (decl, ctx);
              break;
            }
          by_ref = use_pointer_for_field (decl, NULL);
          if ((! TREE_READONLY (decl) && !OMP_CLAUSE_SHARED_READONLY (c))
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              by_ref = use_pointer_for_field (decl, ctx);
              install_var_field (decl, by_ref, 3, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back.  */
          OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
              && TREE_CODE (decl) == MEM_REF)
            {
              tree t = TREE_OPERAND (decl, 0);
              if (TREE_CODE (t) == POINTER_PLUS_EXPR)
                t = TREE_OPERAND (t, 0);
              if (TREE_CODE (t) == INDIRECT_REF
                  || TREE_CODE (t) == ADDR_EXPR)
                t = TREE_OPERAND (t, 0);
              install_var_local (t, ctx);
              if (is_taskreg_ctx (ctx)
                  && !is_global_var (maybe_lookup_decl_in_outer_ctx (t, ctx))
                  && !is_variable_sized (t))
                {
                  by_ref = use_pointer_for_field (t, ctx);
                  install_var_field (t, by_ref, 3, ctx);
                }
              break;
            }
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_LINEAR:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
               || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
              && is_gimple_omp_offloaded (ctx->stmt))
            {
              if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
                install_var_field (decl, !is_reference (decl), 3, ctx);
              else if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
                install_var_field (decl, true, 3, ctx);
              else
                install_var_field (decl, false, 3, ctx);
            }
          if (is_variable_sized (decl))
            {
              if (is_task_ctx (ctx))
                install_var_field (decl, false, 1, ctx);
              break;
            }
          else if (is_taskreg_ctx (ctx))
            {
              bool global
                = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
              by_ref = use_pointer_for_field (decl, NULL);

              if (is_task_ctx (ctx)
                  && (global || by_ref || is_reference (decl)))
                {
                  install_var_field (decl, false, 1, ctx);
                  if (!global)
                    install_var_field (decl, by_ref, 2, ctx);
                }
              else if (!global)
                install_var_field (decl, by_ref, 3, ctx);
1956 }
1957 install_var_local (decl, ctx);
1958 break;
1959
1960 case OMP_CLAUSE_USE_DEVICE_PTR:
1961 decl = OMP_CLAUSE_DECL (c);
1962 if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
1963 install_var_field (decl, true, 3, ctx);
1964 else
1965 install_var_field (decl, false, 3, ctx);
1966 if (DECL_SIZE (decl)
1967 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
1968 {
1969 tree decl2 = DECL_VALUE_EXPR (decl);
1970 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
1971 decl2 = TREE_OPERAND (decl2, 0);
1972 gcc_assert (DECL_P (decl2));
1973 install_var_local (decl2, ctx);
1974 }
1975 install_var_local (decl, ctx);
1976 break;
1977
1978 case OMP_CLAUSE_IS_DEVICE_PTR:
1979 decl = OMP_CLAUSE_DECL (c);
1980 goto do_private;
1981
1982 case OMP_CLAUSE__LOOPTEMP_:
1983 gcc_assert (is_taskreg_ctx (ctx));
1984 decl = OMP_CLAUSE_DECL (c);
1985 install_var_field (decl, false, 3, ctx);
1986 install_var_local (decl, ctx);
1987 break;
1988
1989 case OMP_CLAUSE_COPYPRIVATE:
1990 case OMP_CLAUSE_COPYIN:
1991 decl = OMP_CLAUSE_DECL (c);
1992 by_ref = use_pointer_for_field (decl, NULL);
1993 install_var_field (decl, by_ref, 3, ctx);
1994 break;
1995
1996 case OMP_CLAUSE_DEFAULT:
1997 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1998 break;
1999
2000 case OMP_CLAUSE_FINAL:
2001 case OMP_CLAUSE_IF:
2002 case OMP_CLAUSE_NUM_THREADS:
2003 case OMP_CLAUSE_NUM_TEAMS:
2004 case OMP_CLAUSE_THREAD_LIMIT:
2005 case OMP_CLAUSE_DEVICE:
2006 case OMP_CLAUSE_SCHEDULE:
2007 case OMP_CLAUSE_DIST_SCHEDULE:
2008 case OMP_CLAUSE_DEPEND:
2009 case OMP_CLAUSE_PRIORITY:
2010 case OMP_CLAUSE_GRAINSIZE:
2011 case OMP_CLAUSE_NUM_TASKS:
2012 case OMP_CLAUSE__CILK_FOR_COUNT_:
2013 case OMP_CLAUSE_NUM_GANGS:
2014 case OMP_CLAUSE_NUM_WORKERS:
2015 case OMP_CLAUSE_VECTOR_LENGTH:
2016 if (ctx->outer)
2017 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
2018 break;
2019
2020 case OMP_CLAUSE_TO:
2021 case OMP_CLAUSE_FROM:
2022 case OMP_CLAUSE_MAP:
2023 if (ctx->outer)
2024 scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
2025 decl = OMP_CLAUSE_DECL (c);
2026 /* Global variables with "omp declare target" attribute
2027 don't need to be copied, the receiver side will use them
2028 directly. However, global variables with "omp declare target link"
2029 attribute need to be copied. */
2030 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
2031 && DECL_P (decl)
2032 && ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
2033 && (OMP_CLAUSE_MAP_KIND (c)
2034 != GOMP_MAP_FIRSTPRIVATE_REFERENCE))
2035 || TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
2036 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
2037 && varpool_node::get_create (decl)->offloadable
2038 && !lookup_attribute ("omp declare target link",
2039 DECL_ATTRIBUTES (decl)))
2040 break;
2041 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
2042 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER)
2043 {
2044 /* Ignore GOMP_MAP_POINTER kind for arrays in regions that are
2045 not offloaded; there is nothing to map for those. */
2046 if (!is_gimple_omp_offloaded (ctx->stmt)
2047 && !POINTER_TYPE_P (TREE_TYPE (decl))
2048 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
2049 break;
2050 }
2051 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
2052 && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
2053 || (OMP_CLAUSE_MAP_KIND (c)
2054 == GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
2055 {
2056 if (TREE_CODE (decl) == COMPONENT_REF
2057 || (TREE_CODE (decl) == INDIRECT_REF
2058 && TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
2059 && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
2060 == REFERENCE_TYPE)))
2061 break;
2062 if (DECL_SIZE (decl)
2063 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
2064 {
2065 tree decl2 = DECL_VALUE_EXPR (decl);
2066 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
2067 decl2 = TREE_OPERAND (decl2, 0);
2068 gcc_assert (DECL_P (decl2));
2069 install_var_local (decl2, ctx);
2070 }
2071 install_var_local (decl, ctx);
2072 break;
2073 }
2074 if (DECL_P (decl))
2075 {
2076 if (DECL_SIZE (decl)
2077 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
2078 {
2079 tree decl2 = DECL_VALUE_EXPR (decl);
2080 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
2081 decl2 = TREE_OPERAND (decl2, 0);
2082 gcc_assert (DECL_P (decl2));
2083 install_var_field (decl2, true, 3, ctx);
2084 install_var_local (decl2, ctx);
2085 install_var_local (decl, ctx);
2086 }
2087 else
2088 {
2089 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
2090 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
2091 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
2092 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
2093 install_var_field (decl, true, 7, ctx);
2094 else
2095 install_var_field (decl, true, 3, ctx,
2096 base_pointers_restrict);
2097 if (is_gimple_omp_offloaded (ctx->stmt))
2098 install_var_local (decl, ctx);
2099 }
2100 }
2101 else
2102 {
2103 tree base = get_base_address (decl);
2104 tree nc = OMP_CLAUSE_CHAIN (c);
2105 if (DECL_P (base)
2106 && nc != NULL_TREE
2107 && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
2108 && OMP_CLAUSE_DECL (nc) == base
2109 && OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
2110 && integer_zerop (OMP_CLAUSE_SIZE (nc)))
2111 {
2112 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
2113 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
2114 }
2115 else
2116 {
2117 if (ctx->outer)
2118 {
2119 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
2120 decl = OMP_CLAUSE_DECL (c);
2121 }
2122 gcc_assert (!splay_tree_lookup (ctx->field_map,
2123 (splay_tree_key) decl));
2124 tree field
2125 = build_decl (OMP_CLAUSE_LOCATION (c),
2126 FIELD_DECL, NULL_TREE, ptr_type_node);
2127 DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
2128 insert_field_into_struct (ctx->record_type, field);
2129 splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
2130 (splay_tree_value) field);
2131 }
2132 }
2133 break;
2134
2135 case OMP_CLAUSE_NOWAIT:
2136 case OMP_CLAUSE_ORDERED:
2137 case OMP_CLAUSE_COLLAPSE:
2138 case OMP_CLAUSE_UNTIED:
2139 case OMP_CLAUSE_MERGEABLE:
2140 case OMP_CLAUSE_PROC_BIND:
2141 case OMP_CLAUSE_SAFELEN:
2142 case OMP_CLAUSE_SIMDLEN:
2143 case OMP_CLAUSE_THREADS:
2144 case OMP_CLAUSE_SIMD:
2145 case OMP_CLAUSE_NOGROUP:
2146 case OMP_CLAUSE_DEFAULTMAP:
2147 case OMP_CLAUSE_ASYNC:
2148 case OMP_CLAUSE_WAIT:
2149 case OMP_CLAUSE_GANG:
2150 case OMP_CLAUSE_WORKER:
2151 case OMP_CLAUSE_VECTOR:
2152 case OMP_CLAUSE_TILE:
2153 case OMP_CLAUSE_INDEPENDENT:
2154 case OMP_CLAUSE_AUTO:
2155 case OMP_CLAUSE_SEQ:
2156 break;
2157
2158 case OMP_CLAUSE_ALIGNED:
2159 decl = OMP_CLAUSE_DECL (c);
2160 if (is_global_var (decl)
2161 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
2162 install_var_local (decl, ctx);
2163 break;
2164
2165 case OMP_CLAUSE_DEVICE_RESIDENT:
2166 case OMP_CLAUSE__CACHE_:
2167 sorry ("clause not supported yet");
2168 break;
2169
2170 default:
2171 gcc_unreachable ();
2172 }
2173 }
2174
2175 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2176 {
2177 switch (OMP_CLAUSE_CODE (c))
2178 {
2179 case OMP_CLAUSE_LASTPRIVATE:
2180 /* Let the corresponding firstprivate clause create
2181 the variable. */
2182 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2183 scan_array_reductions = true;
2184 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2185 break;
2186 /* FALLTHRU */
2187
2188 case OMP_CLAUSE_FIRSTPRIVATE:
2189 case OMP_CLAUSE_PRIVATE:
2190 case OMP_CLAUSE_LINEAR:
2191 case OMP_CLAUSE_IS_DEVICE_PTR:
2192 decl = OMP_CLAUSE_DECL (c);
2193 if (is_variable_sized (decl))
2194 {
2195 if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
2196 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
2197 && is_gimple_omp_offloaded (ctx->stmt))
2198 {
2199 tree decl2 = DECL_VALUE_EXPR (decl);
2200 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
2201 decl2 = TREE_OPERAND (decl2, 0);
2202 gcc_assert (DECL_P (decl2));
2203 install_var_local (decl2, ctx);
2204 fixup_remapped_decl (decl2, ctx, false);
2205 }
2206 install_var_local (decl, ctx);
2207 }
2208 fixup_remapped_decl (decl, ctx,
2209 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
2210 && OMP_CLAUSE_PRIVATE_DEBUG (c));
2211 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2212 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
2213 scan_array_reductions = true;
2214 break;
2215
2216 case OMP_CLAUSE_REDUCTION:
2217 decl = OMP_CLAUSE_DECL (c);
2218 if (TREE_CODE (decl) != MEM_REF)
2219 {
2220 if (is_variable_sized (decl))
2221 install_var_local (decl, ctx);
2222 fixup_remapped_decl (decl, ctx, false);
2223 }
2224 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2225 scan_array_reductions = true;
2226 break;
2227
2228 case OMP_CLAUSE_SHARED:
2229 /* Ignore shared directives in teams construct. */
2230 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2231 break;
2232 decl = OMP_CLAUSE_DECL (c);
2233 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2234 break;
2235 if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
2236 {
2237 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
2238 ctx->outer)))
2239 break;
2240 bool by_ref = use_pointer_for_field (decl, ctx);
2241 install_var_field (decl, by_ref, 11, ctx);
2242 break;
2243 }
2244 fixup_remapped_decl (decl, ctx, false);
2245 break;
2246
2247 case OMP_CLAUSE_MAP:
2248 if (!is_gimple_omp_offloaded (ctx->stmt))
2249 break;
2250 decl = OMP_CLAUSE_DECL (c);
2251 if (DECL_P (decl)
2252 && ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
2253 && (OMP_CLAUSE_MAP_KIND (c)
2254 != GOMP_MAP_FIRSTPRIVATE_REFERENCE))
2255 || TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
2256 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
2257 && varpool_node::get_create (decl)->offloadable)
2258 break;
2259 if (DECL_P (decl))
2260 {
2261 if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
2262 || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
2263 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
2264 && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
2265 {
2266 tree new_decl = lookup_decl (decl, ctx);
2267 TREE_TYPE (new_decl)
2268 = remap_type (TREE_TYPE (decl), &ctx->cb);
2269 }
2270 else if (DECL_SIZE (decl)
2271 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
2272 {
2273 tree decl2 = DECL_VALUE_EXPR (decl);
2274 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
2275 decl2 = TREE_OPERAND (decl2, 0);
2276 gcc_assert (DECL_P (decl2));
2277 fixup_remapped_decl (decl2, ctx, false);
2278 fixup_remapped_decl (decl, ctx, true);
2279 }
2280 else
2281 fixup_remapped_decl (decl, ctx, false);
2282 }
2283 break;
2284
2285 case OMP_CLAUSE_COPYPRIVATE:
2286 case OMP_CLAUSE_COPYIN:
2287 case OMP_CLAUSE_DEFAULT:
2288 case OMP_CLAUSE_IF:
2289 case OMP_CLAUSE_NUM_THREADS:
2290 case OMP_CLAUSE_NUM_TEAMS:
2291 case OMP_CLAUSE_THREAD_LIMIT:
2292 case OMP_CLAUSE_DEVICE:
2293 case OMP_CLAUSE_SCHEDULE:
2294 case OMP_CLAUSE_DIST_SCHEDULE:
2295 case OMP_CLAUSE_NOWAIT:
2296 case OMP_CLAUSE_ORDERED:
2297 case OMP_CLAUSE_COLLAPSE:
2298 case OMP_CLAUSE_UNTIED:
2299 case OMP_CLAUSE_FINAL:
2300 case OMP_CLAUSE_MERGEABLE:
2301 case OMP_CLAUSE_PROC_BIND:
2302 case OMP_CLAUSE_SAFELEN:
2303 case OMP_CLAUSE_SIMDLEN:
2304 case OMP_CLAUSE_ALIGNED:
2305 case OMP_CLAUSE_DEPEND:
2306 case OMP_CLAUSE__LOOPTEMP_:
2307 case OMP_CLAUSE_TO:
2308 case OMP_CLAUSE_FROM:
2309 case OMP_CLAUSE_PRIORITY:
2310 case OMP_CLAUSE_GRAINSIZE:
2311 case OMP_CLAUSE_NUM_TASKS:
2312 case OMP_CLAUSE_THREADS:
2313 case OMP_CLAUSE_SIMD:
2314 case OMP_CLAUSE_NOGROUP:
2315 case OMP_CLAUSE_DEFAULTMAP:
2316 case OMP_CLAUSE_USE_DEVICE_PTR:
2317 case OMP_CLAUSE__CILK_FOR_COUNT_:
2318 case OMP_CLAUSE_ASYNC:
2319 case OMP_CLAUSE_WAIT:
2320 case OMP_CLAUSE_NUM_GANGS:
2321 case OMP_CLAUSE_NUM_WORKERS:
2322 case OMP_CLAUSE_VECTOR_LENGTH:
2323 case OMP_CLAUSE_GANG:
2324 case OMP_CLAUSE_WORKER:
2325 case OMP_CLAUSE_VECTOR:
2326 case OMP_CLAUSE_TILE:
2327 case OMP_CLAUSE_INDEPENDENT:
2328 case OMP_CLAUSE_AUTO:
2329 case OMP_CLAUSE_SEQ:
2330 break;
2331
2332 case OMP_CLAUSE_DEVICE_RESIDENT:
2333 case OMP_CLAUSE__CACHE_:
2334 sorry ("clause not supported yet");
2335 break;
2336
2337 default:
2338 gcc_unreachable ();
2339 }
2340 }
2341
2342 gcc_checking_assert (!scan_array_reductions
2343 || !is_gimple_omp_oacc (ctx->stmt));
2344 if (scan_array_reductions)
2345 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2346 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
2347 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2348 {
2349 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2350 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2351 }
2352 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2353 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2354 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2355 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2356 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
2357 scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
2358 }
2359
2360 /* Create a new name for the omp child function.  Returns an identifier.
2361 If IS_CILK_FOR is true, the suffix used for the child function is
2362 "_cilk_for_fn".  */
2363
2364 static tree
2365 create_omp_child_function_name (bool task_copy, bool is_cilk_for)
2366 {
2367 if (is_cilk_for)
2368 return clone_function_name (current_function_decl, "_cilk_for_fn");
2369 return clone_function_name (current_function_decl,
2370 task_copy ? "_omp_cpyfn" : "_omp_fn");
2371 }
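
/* For illustration: given a containing function foo, this typically yields
   identifiers such as foo._omp_fn.0 or, for a task copy function,
   foo._omp_cpyfn.1; the trailing number is a counter maintained by
   clone_function_name itself.  */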
2372
2373 /* Return the type of the induction variable for the _Cilk_for child
2374 function, and likewise for its __high and __low variables, based on TYPE.  */
2375
2376 static tree
2377 cilk_for_check_loop_diff_type (tree type)
2378 {
2379 if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
2380 {
2381 if (TYPE_UNSIGNED (type))
2382 return uint32_type_node;
2383 else
2384 return integer_type_node;
2385 }
2386 else
2387 {
2388 if (TYPE_UNSIGNED (type))
2389 return uint64_type_node;
2390 else
2391 return long_long_integer_type_node;
2392 }
2393 }
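
/* For illustration, the mapping implemented above:

     signed,   precision <= 32  ->  integer_type_node
     unsigned, precision <= 32  ->  uint32_type_node
     signed,   precision >  32  ->  long_long_integer_type_node
     unsigned, precision >  32  ->  uint64_type_node  */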
2394
2395 /* Build a decl for the omp child function.  It will not contain a body
2396 yet, just the bare decl.  */
2397
2398 static void
2399 create_omp_child_function (omp_context *ctx, bool task_copy)
2400 {
2401 tree decl, type, name, t;
2402
2403 tree cilk_for_count
2404 = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2405 ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2406 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
2407 tree cilk_var_type = NULL_TREE;
2408
2409 name = create_omp_child_function_name (task_copy,
2410 cilk_for_count != NULL_TREE);
2411 if (task_copy)
2412 type = build_function_type_list (void_type_node, ptr_type_node,
2413 ptr_type_node, NULL_TREE);
2414 else if (cilk_for_count)
2415 {
2416 type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
2417 cilk_var_type = cilk_for_check_loop_diff_type (type);
2418 type = build_function_type_list (void_type_node, ptr_type_node,
2419 cilk_var_type, cilk_var_type, NULL_TREE);
2420 }
2421 else
2422 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
2423
2424 decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
2425
2426 gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt)
2427 || !task_copy);
2428 if (!task_copy)
2429 ctx->cb.dst_fn = decl;
2430 else
2431 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
2432
2433 TREE_STATIC (decl) = 1;
2434 TREE_USED (decl) = 1;
2435 DECL_ARTIFICIAL (decl) = 1;
2436 DECL_IGNORED_P (decl) = 0;
2437 TREE_PUBLIC (decl) = 0;
2438 DECL_UNINLINABLE (decl) = 1;
2439 DECL_EXTERNAL (decl) = 0;
2440 DECL_CONTEXT (decl) = NULL_TREE;
2441 DECL_INITIAL (decl) = make_node (BLOCK);
2442 if (cgraph_node::get (current_function_decl)->offloadable)
2443 cgraph_node::get_create (decl)->offloadable = 1;
2444 else
2445 {
2446 omp_context *octx;
2447 for (octx = ctx; octx; octx = octx->outer)
2448 if (is_gimple_omp_offloaded (octx->stmt))
2449 {
2450 cgraph_node::get_create (decl)->offloadable = 1;
2451 if (ENABLE_OFFLOADING)
2452 g->have_offload = true;
2453
2454 break;
2455 }
2456 }
2457
2458 if (cgraph_node::get_create (decl)->offloadable
2459 && !lookup_attribute ("omp declare target",
2460 DECL_ATTRIBUTES (current_function_decl)))
2461 DECL_ATTRIBUTES (decl)
2462 = tree_cons (get_identifier ("omp target entrypoint"),
2463 NULL_TREE, DECL_ATTRIBUTES (decl));
2464
2465 t = build_decl (DECL_SOURCE_LOCATION (decl),
2466 RESULT_DECL, NULL_TREE, void_type_node);
2467 DECL_ARTIFICIAL (t) = 1;
2468 DECL_IGNORED_P (t) = 1;
2469 DECL_CONTEXT (t) = decl;
2470 DECL_RESULT (decl) = t;
2471
2472 /* _Cilk_for's child function requires two extra parameters called
2473 __low and __high that are set by the Cilk runtime when it calls this
2474 function.  */
2475 if (cilk_for_count)
2476 {
2477 t = build_decl (DECL_SOURCE_LOCATION (decl),
2478 PARM_DECL, get_identifier ("__high"), cilk_var_type);
2479 DECL_ARTIFICIAL (t) = 1;
2480 DECL_NAMELESS (t) = 1;
2481 DECL_ARG_TYPE (t) = ptr_type_node;
2482 DECL_CONTEXT (t) = current_function_decl;
2483 TREE_USED (t) = 1;
2484 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2485 DECL_ARGUMENTS (decl) = t;
2486
2487 t = build_decl (DECL_SOURCE_LOCATION (decl),
2488 PARM_DECL, get_identifier ("__low"), cilk_var_type);
2489 DECL_ARTIFICIAL (t) = 1;
2490 DECL_NAMELESS (t) = 1;
2491 DECL_ARG_TYPE (t) = ptr_type_node;
2492 DECL_CONTEXT (t) = current_function_decl;
2493 TREE_USED (t) = 1;
2494 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2495 DECL_ARGUMENTS (decl) = t;
2496 }
2497
2498 tree data_name = get_identifier (".omp_data_i");
2499 t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
2500 ptr_type_node);
2501 DECL_ARTIFICIAL (t) = 1;
2502 DECL_NAMELESS (t) = 1;
2503 DECL_ARG_TYPE (t) = ptr_type_node;
2504 DECL_CONTEXT (t) = current_function_decl;
2505 TREE_USED (t) = 1;
2506 TREE_READONLY (t) = 1;
2507 if (cilk_for_count)
2508 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2509 DECL_ARGUMENTS (decl) = t;
2510 if (!task_copy)
2511 ctx->receiver_decl = t;
2512 else
2513 {
2514 t = build_decl (DECL_SOURCE_LOCATION (decl),
2515 PARM_DECL, get_identifier (".omp_data_o"),
2516 ptr_type_node);
2517 DECL_ARTIFICIAL (t) = 1;
2518 DECL_NAMELESS (t) = 1;
2519 DECL_ARG_TYPE (t) = ptr_type_node;
2520 DECL_CONTEXT (t) = current_function_decl;
2521 TREE_USED (t) = 1;
2522 TREE_ADDRESSABLE (t) = 1;
2523 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2524 DECL_ARGUMENTS (decl) = t;
2525 }
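
/* At this point the argument list is complete.  A sketch of the resulting
   signatures (function names illustrative):

     void foo._omp_fn.0 (void *.omp_data_i);               parallel, task
     void foo._omp_cpyfn.1 (void *.omp_data_o,
                            void *.omp_data_i);            task copy
     void foo._cilk_for_fn.2 (void *.omp_data_i,
                              T __low, T __high);          _Cilk_for

   where T is the type computed by cilk_for_check_loop_diff_type.  */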
2526
2527 /* Allocate memory for the function structure. The call to
2528 allocate_struct_function clobbers CFUN, so we need to restore
2529 it afterward. */
2530 push_struct_function (decl);
2531 cfun->function_end_locus = gimple_location (ctx->stmt);
2532 pop_cfun ();
2533 }
2534
2535 /* Callback for walk_gimple_seq.  Check whether the combined parallel
2536 contains an OMP_FOR for which gimple_omp_for_combined_into_p holds.  */
2537
2538 static tree
2539 find_combined_for (gimple_stmt_iterator *gsi_p,
2540 bool *handled_ops_p,
2541 struct walk_stmt_info *wi)
2542 {
2543 gimple *stmt = gsi_stmt (*gsi_p);
2544
2545 *handled_ops_p = true;
2546 switch (gimple_code (stmt))
2547 {
2548 WALK_SUBSTMTS;
2549
2550 case GIMPLE_OMP_FOR:
2551 if (gimple_omp_for_combined_into_p (stmt)
2552 && gimple_omp_for_kind (stmt)
2553 == *(const enum gf_mask *) (wi->info))
2554 {
2555 wi->info = stmt;
2556 return integer_zero_node;
2557 }
2558 break;
2559 default:
2560 break;
2561 }
2562 return NULL;
2563 }
2564
2565 /* Add _LOOPTEMP_ clauses on OpenMP parallel or task. */
2566
2567 static void
2568 add_taskreg_looptemp_clauses (enum gf_mask msk, gimple *stmt,
2569 omp_context *outer_ctx)
2570 {
2571 struct walk_stmt_info wi;
2572
2573 memset (&wi, 0, sizeof (wi));
2574 wi.val_only = true;
2575 wi.info = (void *) &msk;
2576 walk_gimple_seq (gimple_omp_body (stmt), find_combined_for, NULL, &wi);
2577 if (wi.info != (void *) &msk)
2578 {
2579 gomp_for *for_stmt = as_a <gomp_for *> ((gimple *) wi.info);
2580 struct omp_for_data fd;
2581 extract_omp_for_data (for_stmt, &fd, NULL);
2582 /* We need two temporaries with fd.loop.v type (istart/iend)
2583 and then (fd.collapse - 1) temporaries with the same
2584 type for count2 ... countN-1 vars if not constant. */
2585 size_t count = 2, i;
2586 tree type = fd.iter_type;
2587 if (fd.collapse > 1
2588 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2589 {
2590 count += fd.collapse - 1;
2591 /* If there are lastprivate clauses on the inner
2592 GIMPLE_OMP_FOR, add one more temporary for the total number
2593 of iterations (product of count1 ... countN-1).  */
2594 if (find_omp_clause (gimple_omp_for_clauses (for_stmt),
2595 OMP_CLAUSE_LASTPRIVATE))
2596 count++;
2597 else if (msk == GF_OMP_FOR_KIND_FOR
2598 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2599 OMP_CLAUSE_LASTPRIVATE))
2600 count++;
2601 }
2602 for (i = 0; i < count; i++)
2603 {
2604 tree temp = create_tmp_var (type);
2605 tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
2606 insert_decl_map (&outer_ctx->cb, temp, temp);
2607 OMP_CLAUSE_DECL (c) = temp;
2608 OMP_CLAUSE_CHAIN (c) = gimple_omp_taskreg_clauses (stmt);
2609 gimple_omp_taskreg_set_clauses (stmt, c);
2610 }
2611 }
2612 }
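
/* Worked example (a sketch): for

     #pragma omp parallel for collapse(2) lastprivate(x)

   with a non-constant bound on the collapsed loop nest, COUNT above
   becomes 2 (istart/iend) + 1 (count2) + 1 (total iteration count needed
   for lastprivate) = 4, so four _LOOPTEMP_ clauses are prepended to the
   parallel.  */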
2613
2614 /* Scan an OpenMP parallel directive. */
2615
2616 static void
2617 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2618 {
2619 omp_context *ctx;
2620 tree name;
2621 gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));
2622
2623 /* Ignore parallel directives with empty bodies, unless there
2624 are copyin clauses. */
2625 if (optimize > 0
2626 && empty_body_p (gimple_omp_body (stmt))
2627 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2628 OMP_CLAUSE_COPYIN) == NULL)
2629 {
2630 gsi_replace (gsi, gimple_build_nop (), false);
2631 return;
2632 }
2633
2634 if (gimple_omp_parallel_combined_p (stmt))
2635 add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_FOR, stmt, outer_ctx);
2636
2637 ctx = new_omp_context (stmt, outer_ctx);
2638 taskreg_contexts.safe_push (ctx);
2639 if (taskreg_nesting_level > 1)
2640 ctx->is_nested = true;
2641 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2642 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2643 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2644 name = create_tmp_var_name (".omp_data_s");
2645 name = build_decl (gimple_location (stmt),
2646 TYPE_DECL, name, ctx->record_type);
2647 DECL_ARTIFICIAL (name) = 1;
2648 DECL_NAMELESS (name) = 1;
2649 TYPE_NAME (ctx->record_type) = name;
2650 TYPE_ARTIFICIAL (ctx->record_type) = 1;
2651 create_omp_child_function (ctx, false);
2652 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2653
2654 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2655 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2656
2657 if (TYPE_FIELDS (ctx->record_type) == NULL)
2658 ctx->record_type = ctx->receiver_decl = NULL;
2659 }
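
/* For illustration (a sketch): scanning

     int x = 1;
     #pragma omp parallel firstprivate (x)
     ...

   creates a child function decl (e.g. foo._omp_fn.0) and a record

     struct .omp_data_s { int x; };

   whose field was installed by scan_sharing_clauses above.  */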
2660
2661 /* Scan an OpenMP task directive. */
2662
2663 static void
2664 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2665 {
2666 omp_context *ctx;
2667 tree name, t;
2668 gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));
2669
2670 /* Ignore task directives with empty bodies. */
2671 if (optimize > 0
2672 && empty_body_p (gimple_omp_body (stmt)))
2673 {
2674 gsi_replace (gsi, gimple_build_nop (), false);
2675 return;
2676 }
2677
2678 if (gimple_omp_task_taskloop_p (stmt))
2679 add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_TASKLOOP, stmt, outer_ctx);
2680
2681 ctx = new_omp_context (stmt, outer_ctx);
2682 taskreg_contexts.safe_push (ctx);
2683 if (taskreg_nesting_level > 1)
2684 ctx->is_nested = true;
2685 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2686 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2687 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2688 name = create_tmp_var_name (".omp_data_s");
2689 name = build_decl (gimple_location (stmt),
2690 TYPE_DECL, name, ctx->record_type);
2691 DECL_ARTIFICIAL (name) = 1;
2692 DECL_NAMELESS (name) = 1;
2693 TYPE_NAME (ctx->record_type) = name;
2694 TYPE_ARTIFICIAL (ctx->record_type) = 1;
2695 create_omp_child_function (ctx, false);
2696 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2697
2698 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2699
2700 if (ctx->srecord_type)
2701 {
2702 name = create_tmp_var_name (".omp_data_a");
2703 name = build_decl (gimple_location (stmt),
2704 TYPE_DECL, name, ctx->srecord_type);
2705 DECL_ARTIFICIAL (name) = 1;
2706 DECL_NAMELESS (name) = 1;
2707 TYPE_NAME (ctx->srecord_type) = name;
2708 TYPE_ARTIFICIAL (ctx->srecord_type) = 1;
2709 create_omp_child_function (ctx, true);
2710 }
2711
2712 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2713
2714 if (TYPE_FIELDS (ctx->record_type) == NULL)
2715 {
2716 ctx->record_type = ctx->receiver_decl = NULL;
2717 t = build_int_cst (long_integer_type_node, 0);
2718 gimple_omp_task_set_arg_size (stmt, t);
2719 t = build_int_cst (long_integer_type_node, 1);
2720 gimple_omp_task_set_arg_align (stmt, t);
2721 }
2722 }
2723
2724
2725 /* If any decls have been made addressable during scan_omp,
2726 adjust their fields if needed, and layout record types
2727 of parallel/task constructs. */
2728
2729 static void
2730 finish_taskreg_scan (omp_context *ctx)
2731 {
2732 if (ctx->record_type == NULL_TREE)
2733 return;
2734
2735 /* If any task_shared_vars were needed, check all
2736 OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
2737 statements to see whether use_pointer_for_field has changed
2738 because of that.  If it has, update the field types now.  */
2739 if (task_shared_vars)
2740 {
2741 tree c;
2742
2743 for (c = gimple_omp_taskreg_clauses (ctx->stmt);
2744 c; c = OMP_CLAUSE_CHAIN (c))
2745 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
2746 && !OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
2747 {
2748 tree decl = OMP_CLAUSE_DECL (c);
2749
2750 /* Global variables don't need to be copied,
2751 the receiver side will use them directly. */
2752 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2753 continue;
2754 if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
2755 || !use_pointer_for_field (decl, ctx))
2756 continue;
2757 tree field = lookup_field (decl, ctx);
2758 if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
2759 && TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
2760 continue;
2761 TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
2762 TREE_THIS_VOLATILE (field) = 0;
2763 DECL_USER_ALIGN (field) = 0;
2764 DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field));
2765 if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
2766 TYPE_ALIGN (ctx->record_type) = DECL_ALIGN (field);
2767 if (ctx->srecord_type)
2768 {
2769 tree sfield = lookup_sfield (decl, ctx);
2770 TREE_TYPE (sfield) = TREE_TYPE (field);
2771 TREE_THIS_VOLATILE (sfield) = 0;
2772 DECL_USER_ALIGN (sfield) = 0;
2773 DECL_ALIGN (sfield) = DECL_ALIGN (field);
2774 if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
2775 TYPE_ALIGN (ctx->srecord_type) = DECL_ALIGN (sfield);
2776 }
2777 }
2778 }
2779
2780 if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2781 {
2782 layout_type (ctx->record_type);
2783 fixup_child_record_type (ctx);
2784 }
2785 else
2786 {
2787 location_t loc = gimple_location (ctx->stmt);
2788 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2789 /* Move VLA fields to the end. */
2790 p = &TYPE_FIELDS (ctx->record_type);
2791 while (*p)
2792 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2793 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2794 {
2795 *q = *p;
2796 *p = TREE_CHAIN (*p);
2797 TREE_CHAIN (*q) = NULL_TREE;
2798 q = &TREE_CHAIN (*q);
2799 }
2800 else
2801 p = &DECL_CHAIN (*p);
2802 *p = vla_fields;
2803 if (gimple_omp_task_taskloop_p (ctx->stmt))
2804 {
2805 /* Move the fields corresponding to the first and second _looptemp_
2806 clauses to the front.  These are filled in by GOMP_taskloop
2807 and thus need to be at specific positions.  */
2808 tree c1 = gimple_omp_task_clauses (ctx->stmt);
2809 c1 = find_omp_clause (c1, OMP_CLAUSE__LOOPTEMP_);
2810 tree c2 = find_omp_clause (OMP_CLAUSE_CHAIN (c1),
2811 OMP_CLAUSE__LOOPTEMP_);
2812 tree f1 = lookup_field (OMP_CLAUSE_DECL (c1), ctx);
2813 tree f2 = lookup_field (OMP_CLAUSE_DECL (c2), ctx);
2814 p = &TYPE_FIELDS (ctx->record_type);
2815 while (*p)
2816 if (*p == f1 || *p == f2)
2817 *p = DECL_CHAIN (*p);
2818 else
2819 p = &DECL_CHAIN (*p);
2820 DECL_CHAIN (f1) = f2;
2821 DECL_CHAIN (f2) = TYPE_FIELDS (ctx->record_type);
2822 TYPE_FIELDS (ctx->record_type) = f1;
2823 if (ctx->srecord_type)
2824 {
2825 f1 = lookup_sfield (OMP_CLAUSE_DECL (c1), ctx);
2826 f2 = lookup_sfield (OMP_CLAUSE_DECL (c2), ctx);
2827 p = &TYPE_FIELDS (ctx->srecord_type);
2828 while (*p)
2829 if (*p == f1 || *p == f2)
2830 *p = DECL_CHAIN (*p);
2831 else
2832 p = &DECL_CHAIN (*p);
2833 DECL_CHAIN (f1) = f2;
2834 DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type);
2835 TYPE_FIELDS (ctx->srecord_type) = f1;
2836 }
2837 }
2838 layout_type (ctx->record_type);
2839 fixup_child_record_type (ctx);
2840 if (ctx->srecord_type)
2841 layout_type (ctx->srecord_type);
2842 tree t = fold_convert_loc (loc, long_integer_type_node,
2843 TYPE_SIZE_UNIT (ctx->record_type));
2844 gimple_omp_task_set_arg_size (ctx->stmt, t);
2845 t = build_int_cst (long_integer_type_node,
2846 TYPE_ALIGN_UNIT (ctx->record_type));
2847 gimple_omp_task_set_arg_align (ctx->stmt, t);
2848 }
2849 }
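
/* E.g. for '#pragma omp task shared (vla)' with a variable-length array,
   the loop above moves the VLA field behind all fixed-size fields,
   presumably so that the fixed-size fields keep constant offsets within
   the task argument block.  */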
2850
2851 /* Find the enclosing offload context. */
2852
2853 static omp_context *
2854 enclosing_target_ctx (omp_context *ctx)
2855 {
2856 for (; ctx; ctx = ctx->outer)
2857 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET)
2858 break;
2859
2860 return ctx;
2861 }
2862
2863 /* Return true if ctx is part of an oacc kernels region. */
2864
2865 static bool
2866 ctx_in_oacc_kernels_region (omp_context *ctx)
2867 {
2868 for (; ctx != NULL; ctx = ctx->outer)
2869 {
2870 gimple *stmt = ctx->stmt;
2871 if (gimple_code (stmt) == GIMPLE_OMP_TARGET
2872 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS)
2873 return true;
2874 }
2875
2876 return false;
2877 }
2878
2879 /* Check the parallelism clauses inside a kernels region.
2880 Until kernels handling moves to use the same loop indirection
2881 scheme as parallel, we need to do this checking early. */
2882
2883 static unsigned
2884 check_oacc_kernel_gwv (gomp_for *stmt, omp_context *ctx)
2885 {
2886 bool checking = true;
2887 unsigned outer_mask = 0;
2888 unsigned this_mask = 0;
2889 bool has_seq = false, has_auto = false;
2890
2891 if (ctx->outer)
2892 outer_mask = check_oacc_kernel_gwv (NULL, ctx->outer);
2893 if (!stmt)
2894 {
2895 checking = false;
2896 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR)
2897 return outer_mask;
2898 stmt = as_a <gomp_for *> (ctx->stmt);
2899 }
2900
2901 for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
2902 {
2903 switch (OMP_CLAUSE_CODE (c))
2904 {
2905 case OMP_CLAUSE_GANG:
2906 this_mask |= GOMP_DIM_MASK (GOMP_DIM_GANG);
2907 break;
2908 case OMP_CLAUSE_WORKER:
2909 this_mask |= GOMP_DIM_MASK (GOMP_DIM_WORKER);
2910 break;
2911 case OMP_CLAUSE_VECTOR:
2912 this_mask |= GOMP_DIM_MASK (GOMP_DIM_VECTOR);
2913 break;
2914 case OMP_CLAUSE_SEQ:
2915 has_seq = true;
2916 break;
2917 case OMP_CLAUSE_AUTO:
2918 has_auto = true;
2919 break;
2920 default:
2921 break;
2922 }
2923 }
2924
2925 if (checking)
2926 {
2927 if (has_seq && (this_mask || has_auto))
2928 error_at (gimple_location (stmt), "%<seq%> overrides other"
2929 " OpenACC loop specifiers");
2930 else if (has_auto && this_mask)
2931 error_at (gimple_location (stmt), "%<auto%> conflicts with other"
2932 " OpenACC loop specifiers");
2933
2934 if (this_mask & outer_mask)
2935 error_at (gimple_location (stmt), "inner loop uses same"
2936 " OpenACC parallelism as containing loop");
2937 }
2938
2939 return outer_mask | this_mask;
2940 }
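
/* For instance (a sketch), this nest inside a kernels region is diagnosed
   above, since both loops request gang parallelism:

     #pragma acc kernels
     #pragma acc loop gang
     for (i = 0; i < n; i++)
       #pragma acc loop gang      <-- "inner loop uses same OpenACC
       for (j = 0; j < m; j++)         parallelism as containing loop"
	 ;
 */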
2941
2942 /* Scan a GIMPLE_OMP_FOR. */
2943
2944 static void
2945 scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
2946 {
2947 omp_context *ctx;
2948 size_t i;
2949 tree clauses = gimple_omp_for_clauses (stmt);
2950
2951 ctx = new_omp_context (stmt, outer_ctx);
2952
2953 if (is_gimple_omp_oacc (stmt))
2954 {
2955 omp_context *tgt = enclosing_target_ctx (outer_ctx);
2956
2957 if (!tgt || is_oacc_parallel (tgt))
2958 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2959 {
2960 char const *check = NULL;
2961
2962 switch (OMP_CLAUSE_CODE (c))
2963 {
2964 case OMP_CLAUSE_GANG:
2965 check = "gang";
2966 break;
2967
2968 case OMP_CLAUSE_WORKER:
2969 check = "worker";
2970 break;
2971
2972 case OMP_CLAUSE_VECTOR:
2973 check = "vector";
2974 break;
2975
2976 default:
2977 break;
2978 }
2979
2980 if (check && OMP_CLAUSE_OPERAND (c, 0))
2981 error_at (gimple_location (stmt),
2982 "argument not permitted on %qs clause in"
2983 " OpenACC %<parallel%>", check);
2984 }
2985
2986 if (tgt && is_oacc_kernels (tgt))
2987 {
2988 /* Strip out reductions, as they are not handled yet. */
2989 tree *prev_ptr = &clauses;
2990
2991 while (tree probe = *prev_ptr)
2992 {
2993 tree *next_ptr = &OMP_CLAUSE_CHAIN (probe);
2994
2995 if (OMP_CLAUSE_CODE (probe) == OMP_CLAUSE_REDUCTION)
2996 *prev_ptr = *next_ptr;
2997 else
2998 prev_ptr = next_ptr;
2999 }
3000
3001 gimple_omp_for_set_clauses (stmt, clauses);
3002 check_oacc_kernel_gwv (stmt, ctx);
3003 }
3004 }
3005
3006 scan_sharing_clauses (clauses, ctx);
3007
3008 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
3009 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
3010 {
3011 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
3012 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
3013 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
3014 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
3015 }
3016 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3017 }
3018
3019 /* Scan an OpenMP sections directive. */
3020
3021 static void
3022 scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
3023 {
3024 omp_context *ctx;
3025
3026 ctx = new_omp_context (stmt, outer_ctx);
3027 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
3028 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3029 }
3030
3031 /* Scan an OpenMP single directive. */
3032
3033 static void
3034 scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
3035 {
3036 omp_context *ctx;
3037 tree name;
3038
3039 ctx = new_omp_context (stmt, outer_ctx);
3040 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
3041 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
3042 name = create_tmp_var_name (".omp_copy_s");
3043 name = build_decl (gimple_location (stmt),
3044 TYPE_DECL, name, ctx->record_type);
3045 TYPE_NAME (ctx->record_type) = name;
3046
3047 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
3048 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3049
3050 if (TYPE_FIELDS (ctx->record_type) == NULL)
3051 ctx->record_type = NULL;
3052 else
3053 layout_type (ctx->record_type);
3054 }
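
/* For illustration (a sketch): for

     int x;
     #pragma omp single copyprivate (x)
     ...

   the OMP_CLAUSE_COPYPRIVATE handling in scan_sharing_clauses installs a
   field for x, so the record becomes roughly

     struct .omp_copy_s { int x; };

   which is then laid out above.  */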
3055
3056 /* Return true if the CLAUSES of an omp target guarantee that the base pointers
3057 used in the corresponding offloaded function are restrict. */
3058
3059 static bool
3060 omp_target_base_pointers_restrict_p (tree clauses)
3061 {
3062 /* The analysis relies on the GOMP_MAP_FORCE_* mapping kinds, which are only
3063 used by OpenACC. */
3064 if (flag_openacc == 0)
3065 return false;
3066
3067 /* I. Basic example:
3068
3069 void foo (void)
3070 {
3071 unsigned int a[2], b[2];
3072
3073 #pragma acc kernels \
3074 copyout (a) \
3075 copyout (b)
3076 {
3077 a[0] = 0;
3078 b[0] = 1;
3079 }
3080 }
3081
3082 After gimplification, we have:
3083
3084 #pragma omp target oacc_kernels \
3085 map(force_from:a [len: 8]) \
3086 map(force_from:b [len: 8])
3087 {
3088 a[0] = 0;
3089 b[0] = 1;
3090 }
3091
3092 Because both mappings have the force prefix, we know that they will be
3093 allocated when calling the corresponding offloaded function, which means we
3094 can mark the base pointers for a and b in the offloaded function as
3095 restrict. */
3096
3097 tree c;
3098 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
3099 {
3100 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
3101 return false;
3102
3103 switch (OMP_CLAUSE_MAP_KIND (c))
3104 {
3105 case GOMP_MAP_FORCE_ALLOC:
3106 case GOMP_MAP_FORCE_TO:
3107 case GOMP_MAP_FORCE_FROM:
3108 case GOMP_MAP_FORCE_TOFROM:
3109 break;
3110 default:
3111 return false;
3112 }
3113 }
3114
3115 return true;
3116 }
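
/* Conversely (an assumption for illustration): a clause such as
   'present (a)', which does not gimplify to one of the four force mapping
   kinds accepted above, makes this predicate return false, and the base
   pointers are then not marked restrict.  */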
3117
3118 /* Scan a GIMPLE_OMP_TARGET. */
3119
3120 static void
3121 scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
3122 {
3123 omp_context *ctx;
3124 tree name;
3125 bool offloaded = is_gimple_omp_offloaded (stmt);
3126 tree clauses = gimple_omp_target_clauses (stmt);
3127
3128 ctx = new_omp_context (stmt, outer_ctx);
3129 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
3130 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
3131 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
3132 name = create_tmp_var_name (".omp_data_t");
3133 name = build_decl (gimple_location (stmt),
3134 TYPE_DECL, name, ctx->record_type);
3135 DECL_ARTIFICIAL (name) = 1;
3136 DECL_NAMELESS (name) = 1;
3137 TYPE_NAME (ctx->record_type) = name;
3138 TYPE_ARTIFICIAL (ctx->record_type) = 1;
3139
3140 bool base_pointers_restrict = false;
3141 if (offloaded)
3142 {
3143 create_omp_child_function (ctx, false);
3144 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
3145
3146 base_pointers_restrict = omp_target_base_pointers_restrict_p (clauses);
3147 if (base_pointers_restrict
3148 && dump_file && (dump_flags & TDF_DETAILS))
3149 fprintf (dump_file,
3150 "Base pointers in offloaded function are restrict\n");
3151 }
3152
3153 scan_sharing_clauses (clauses, ctx, base_pointers_restrict);
3154 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3155
3156 if (TYPE_FIELDS (ctx->record_type) == NULL)
3157 ctx->record_type = ctx->receiver_decl = NULL;
3158 else
3159 {
3160 TYPE_FIELDS (ctx->record_type)
3161 = nreverse (TYPE_FIELDS (ctx->record_type));
3162 if (flag_checking)
3163 {
3164 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
3165 for (tree field = TYPE_FIELDS (ctx->record_type);
3166 field;
3167 field = DECL_CHAIN (field))
3168 gcc_assert (DECL_ALIGN (field) == align);
3169 }
3170 layout_type (ctx->record_type);
3171 if (offloaded)
3172 fixup_child_record_type (ctx);
3173 }
3174 }
3175
3176 /* Scan an OpenMP teams directive. */
3177
3178 static void
3179 scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
3180 {
3181 omp_context *ctx = new_omp_context (stmt, outer_ctx);
3182 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
3183 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3184 }
3185
3186 /* Check nesting restrictions. */
3187 static bool
3188 check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
3189 {
3190 tree c;
3191
3192 /* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP builtin)
3193 inside an OpenACC CTX. */
3194 if (!(is_gimple_omp (stmt)
3195 && is_gimple_omp_oacc (stmt)))
3196 {
3197 for (omp_context *octx = ctx; octx != NULL; octx = octx->outer)
3198 if (is_gimple_omp (octx->stmt)
3199 && is_gimple_omp_oacc (octx->stmt)
3200 /* Except for atomic codes that we share with OpenMP. */
3201 && ! (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
3202 || gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE))
3203 {
3204 error_at (gimple_location (stmt),
3205 "non-OpenACC construct inside of OpenACC region");
3206 return false;
3207 }
3208 }
3209
3210 if (ctx != NULL)
3211 {
3212 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3213 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3214 {
3215 c = NULL_TREE;
3216 if (gimple_code (stmt) == GIMPLE_OMP_ORDERED)
3217 {
3218 c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
3219 if (find_omp_clause (c, OMP_CLAUSE_SIMD))
3220 {
3221 if (find_omp_clause (c, OMP_CLAUSE_THREADS)
3222 && (ctx->outer == NULL
3223 || !gimple_omp_for_combined_into_p (ctx->stmt)
3224 || gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR
3225 || (gimple_omp_for_kind (ctx->outer->stmt)
3226 != GF_OMP_FOR_KIND_FOR)
3227 || !gimple_omp_for_combined_p (ctx->outer->stmt)))
3228 {
3229 error_at (gimple_location (stmt),
3230 "%<ordered simd threads%> must be closely "
3231 "nested inside of %<for simd%> region");
3232 return false;
3233 }
3234 return true;
3235 }
3236 }
3237 error_at (gimple_location (stmt),
3238 "OpenMP constructs other than %<#pragma omp ordered simd%>"
3239 " may not be nested inside %<simd%> region");
3240 return false;
3241 }
3242 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3243 {
3244 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
3245 || (gimple_omp_for_kind (stmt)
3246 != GF_OMP_FOR_KIND_DISTRIBUTE))
3247 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
3248 {
3249 error_at (gimple_location (stmt),
3250 "only %<distribute%> or %<parallel%> regions are "
3251 "allowed to be strictly nested inside %<teams%> "
3252 "region");
3253 return false;
3254 }
3255 }
3256 }
3257 switch (gimple_code (stmt))
3258 {
3259 case GIMPLE_OMP_FOR:
3260 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
3261 return true;
3262 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
3263 {
3264 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
3265 {
3266 error_at (gimple_location (stmt),
3267 "%<distribute%> region must be strictly nested "
3268 "inside %<teams%> construct");
3269 return false;
3270 }
3271 return true;
3272 }
3273 /* We split a taskloop into a task with a nested taskloop in it.  */
3274 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP)
3275 return true;
3276 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
3277 {
3278 bool ok = false;
3279
3280 if (ctx)
3281 switch (gimple_code (ctx->stmt))
3282 {
3283 case GIMPLE_OMP_FOR:
3284 ok = (gimple_omp_for_kind (ctx->stmt)
3285 == GF_OMP_FOR_KIND_OACC_LOOP);
3286 break;
3287
3288 case GIMPLE_OMP_TARGET:
3289 switch (gimple_omp_target_kind (ctx->stmt))
3290 {
3291 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
3292 case GF_OMP_TARGET_KIND_OACC_KERNELS:
3293 ok = true;
3294 break;
3295
3296 default:
3297 break;
3298 }
     break;
3299
3300 default:
3301 break;
3302 }
3303 else if (get_oacc_fn_attrib (current_function_decl))
3304 ok = true;
3305 if (!ok)
3306 {
3307 error_at (gimple_location (stmt),
3308 "OpenACC loop directive must be associated with"
3309 " an OpenACC compute region");
3310 return false;
3311 }
3312 }
3313 /* FALLTHRU */
3314 case GIMPLE_CALL:
3315 if (is_gimple_call (stmt)
3316 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3317 == BUILT_IN_GOMP_CANCEL
3318 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3319 == BUILT_IN_GOMP_CANCELLATION_POINT))
3320 {
3321 const char *bad = NULL;
3322 const char *kind = NULL;
3323 const char *construct
3324 = (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3325 == BUILT_IN_GOMP_CANCEL)
3326 ? "#pragma omp cancel"
3327 : "#pragma omp cancellation point";
3328 if (ctx == NULL)
3329 {
3330 error_at (gimple_location (stmt), "orphaned %qs construct",
3331 construct);
3332 return false;
3333 }
3334 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
3335 ? tree_to_shwi (gimple_call_arg (stmt, 0))
3336 : 0)
3337 {
3338 case 1:
3339 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
3340 bad = "#pragma omp parallel";
3341 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3342 == BUILT_IN_GOMP_CANCEL
3343 && !integer_zerop (gimple_call_arg (stmt, 1)))
3344 ctx->cancellable = true;
3345 kind = "parallel";
3346 break;
3347 case 2:
3348 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3349 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
3350 bad = "#pragma omp for";
3351 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3352 == BUILT_IN_GOMP_CANCEL
3353 && !integer_zerop (gimple_call_arg (stmt, 1)))
3354 {
3355 ctx->cancellable = true;
3356 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3357 OMP_CLAUSE_NOWAIT))
3358 warning_at (gimple_location (stmt), 0,
3359 "%<#pragma omp cancel for%> inside "
3360 "%<nowait%> for construct");
3361 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3362 OMP_CLAUSE_ORDERED))
3363 warning_at (gimple_location (stmt), 0,
3364 "%<#pragma omp cancel for%> inside "
3365 "%<ordered%> for construct");
3366 }
3367 kind = "for";
3368 break;
3369 case 4:
3370 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
3371 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
3372 bad = "#pragma omp sections";
3373 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3374 == BUILT_IN_GOMP_CANCEL
3375 && !integer_zerop (gimple_call_arg (stmt, 1)))
3376 {
3377 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
3378 {
3379 ctx->cancellable = true;
3380 if (find_omp_clause (gimple_omp_sections_clauses
3381 (ctx->stmt),
3382 OMP_CLAUSE_NOWAIT))
3383 warning_at (gimple_location (stmt), 0,
3384 "%<#pragma omp cancel sections%> inside "
3385 "%<nowait%> sections construct");
3386 }
3387 else
3388 {
3389 gcc_assert (ctx->outer
3390 && gimple_code (ctx->outer->stmt)
3391 == GIMPLE_OMP_SECTIONS);
3392 ctx->outer->cancellable = true;
3393 if (find_omp_clause (gimple_omp_sections_clauses
3394 (ctx->outer->stmt),
3395 OMP_CLAUSE_NOWAIT))
3396 warning_at (gimple_location (stmt), 0,
3397 "%<#pragma omp cancel sections%> inside "
3398 "%<nowait%> sections construct");
3399 }
3400 }
3401 kind = "sections";
3402 break;
3403 case 8:
3404 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
3405 bad = "#pragma omp task";
3406 else
3407 {
3408 for (omp_context *octx = ctx->outer;
3409 octx; octx = octx->outer)
3410 {
3411 switch (gimple_code (octx->stmt))
3412 {
3413 case GIMPLE_OMP_TASKGROUP:
3414 break;
3415 case GIMPLE_OMP_TARGET:
3416 if (gimple_omp_target_kind (octx->stmt)
3417 != GF_OMP_TARGET_KIND_REGION)
3418 continue;
3419 /* FALLTHRU */
3420 case GIMPLE_OMP_PARALLEL:
3421 case GIMPLE_OMP_TEAMS:
3422 error_at (gimple_location (stmt),
3423 "%<%s taskgroup%> construct not closely "
3424 "nested inside of %<taskgroup%> region",
3425 construct);
3426 return false;
3427 default:
3428 continue;
3429 }
3430 break;
3431 }
3432 ctx->cancellable = true;
3433 }
3434 kind = "taskgroup";
3435 break;
3436 default:
3437 error_at (gimple_location (stmt), "invalid arguments");
3438 return false;
3439 }
3440 if (bad)
3441 {
3442 error_at (gimple_location (stmt),
3443 "%<%s %s%> construct not closely nested inside of %qs",
3444 construct, kind, bad);
3445 return false;
3446 }
3447 }
3448 /* FALLTHRU */
3449 case GIMPLE_OMP_SECTIONS:
3450 case GIMPLE_OMP_SINGLE:
3451 for (; ctx != NULL; ctx = ctx->outer)
3452 switch (gimple_code (ctx->stmt))
3453 {
3454 case GIMPLE_OMP_FOR:
3455 if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
3456 && gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
3457 break;
3458 /* FALLTHRU */
3459 case GIMPLE_OMP_SECTIONS:
3460 case GIMPLE_OMP_SINGLE:
3461 case GIMPLE_OMP_ORDERED:
3462 case GIMPLE_OMP_MASTER:
3463 case GIMPLE_OMP_TASK:
3464 case GIMPLE_OMP_CRITICAL:
3465 if (is_gimple_call (stmt))
3466 {
3467 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
3468 != BUILT_IN_GOMP_BARRIER)
3469 return true;
3470 error_at (gimple_location (stmt),
3471 "barrier region may not be closely nested inside "
3472 "of work-sharing, %<critical%>, %<ordered%>, "
3473 "%<master%>, explicit %<task%> or %<taskloop%> "
3474 "region");
3475 return false;
3476 }
3477 error_at (gimple_location (stmt),
3478 "work-sharing region may not be closely nested inside "
3479 "of work-sharing, %<critical%>, %<ordered%>, "
3480 "%<master%>, explicit %<task%> or %<taskloop%> region");
3481 return false;
3482 case GIMPLE_OMP_PARALLEL:
3483 case GIMPLE_OMP_TEAMS:
3484 return true;
3485 case GIMPLE_OMP_TARGET:
3486 if (gimple_omp_target_kind (ctx->stmt)
3487 == GF_OMP_TARGET_KIND_REGION)
3488 return true;
3489 break;
3490 default:
3491 break;
3492 }
3493 break;
3494 case GIMPLE_OMP_MASTER:
3495 for (; ctx != NULL; ctx = ctx->outer)
3496 switch (gimple_code (ctx->stmt))
3497 {
3498 case GIMPLE_OMP_FOR:
3499 if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
3500 && gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
3501 break;
3502 /* FALLTHRU */
3503 case GIMPLE_OMP_SECTIONS:
3504 case GIMPLE_OMP_SINGLE:
3505 case GIMPLE_OMP_TASK:
3506 error_at (gimple_location (stmt),
3507 "%<master%> region may not be closely nested inside "
3508 "of work-sharing, explicit %<task%> or %<taskloop%> "
3509 "region");
3510 return false;
3511 case GIMPLE_OMP_PARALLEL:
3512 case GIMPLE_OMP_TEAMS:
3513 return true;
3514 case GIMPLE_OMP_TARGET:
3515 if (gimple_omp_target_kind (ctx->stmt)
3516 == GF_OMP_TARGET_KIND_REGION)
3517 return true;
3518 break;
3519 default:
3520 break;
3521 }
3522 break;
3523 case GIMPLE_OMP_TASK:
3524 for (c = gimple_omp_task_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
3525 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
3526 && (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
3527 || OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
3528 {
3529 enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
3530 error_at (OMP_CLAUSE_LOCATION (c),
3531 "%<depend(%s)%> is only allowed in %<omp ordered%>",
3532 kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
3533 return false;
3534 }
3535 break;
3536 case GIMPLE_OMP_ORDERED:
3537 for (c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
3538 c; c = OMP_CLAUSE_CHAIN (c))
3539 {
3540 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
3541 {
3542 gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREADS
3543 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SIMD);
3544 continue;
3545 }
3546 enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
3547 if (kind == OMP_CLAUSE_DEPEND_SOURCE
3548 || kind == OMP_CLAUSE_DEPEND_SINK)
3549 {
3550 tree oclause;
3551 /* Look for containing ordered(N) loop. */
3552 if (ctx == NULL
3553 || gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3554 || (oclause
3555 = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3556 OMP_CLAUSE_ORDERED)) == NULL_TREE)
3557 {
3558 error_at (OMP_CLAUSE_LOCATION (c),
3559 "%<ordered%> construct with %<depend%> clause "
3560 "must be closely nested inside an %<ordered%> "
3561 "loop");
3562 return false;
3563 }
3564 else if (OMP_CLAUSE_ORDERED_EXPR (oclause) == NULL_TREE)
3565 {
3566 error_at (OMP_CLAUSE_LOCATION (c),
3567 "%<ordered%> construct with %<depend%> clause "
3568 "must be closely nested inside a loop with "
3569 "%<ordered%> clause with a parameter");
3570 return false;
3571 }
3572 }
3573 else
3574 {
3575 error_at (OMP_CLAUSE_LOCATION (c),
3576 "invalid depend kind in omp %<ordered%> %<depend%>");
3577 return false;
3578 }
3579 }
3580 c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
3581 if (find_omp_clause (c, OMP_CLAUSE_SIMD))
3582 {
3583 /* ordered simd must be closely nested inside of simd region,
3584 and simd region must not encounter constructs other than
3585 ordered simd, therefore ordered simd may be either orphaned,
3586 or ctx->stmt must be simd.  The latter case has already been
3587 handled earlier.  */
3588 if (ctx != NULL)
3589 {
3590 error_at (gimple_location (stmt),
3591 "%<ordered%> %<simd%> must be closely nested inside "
3592 "%<simd%> region");
3593 return false;
3594 }
3595 }
3596 for (; ctx != NULL; ctx = ctx->outer)
3597 switch (gimple_code (ctx->stmt))
3598 {
3599 case GIMPLE_OMP_CRITICAL:
3600 case GIMPLE_OMP_TASK:
3601 case GIMPLE_OMP_ORDERED:
3602 ordered_in_taskloop:
3603 error_at (gimple_location (stmt),
3604 "%<ordered%> region may not be closely nested inside "
3605 "of %<critical%>, %<ordered%>, explicit %<task%> or "
3606 "%<taskloop%> region");
3607 return false;
3608 case GIMPLE_OMP_FOR:
3609 if (gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP)
3610 goto ordered_in_taskloop;
3611 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3612 OMP_CLAUSE_ORDERED) == NULL)
3613 {
3614 error_at (gimple_location (stmt),
3615 "%<ordered%> region must be closely nested inside "
3616 "a loop region with an %<ordered%> clause");
3617 return false;
3618 }
3619 return true;
3620 case GIMPLE_OMP_TARGET:
3621 if (gimple_omp_target_kind (ctx->stmt)
3622 != GF_OMP_TARGET_KIND_REGION)
3623 break;
3624 /* FALLTHRU */
3625 case GIMPLE_OMP_PARALLEL:
3626 case GIMPLE_OMP_TEAMS:
3627 error_at (gimple_location (stmt),
3628 "%<ordered%> region must be closely nested inside "
3629 "a loop region with an %<ordered%> clause");
3630 return false;
3631 default:
3632 break;
3633 }
3634 break;
3635 case GIMPLE_OMP_CRITICAL:
3636 {
3637 tree this_stmt_name
3638 = gimple_omp_critical_name (as_a <gomp_critical *> (stmt));
3639 for (; ctx != NULL; ctx = ctx->outer)
3640 if (gomp_critical *other_crit
3641 = dyn_cast <gomp_critical *> (ctx->stmt))
3642 if (this_stmt_name == gimple_omp_critical_name (other_crit))
3643 {
3644 error_at (gimple_location (stmt),
3645 "%<critical%> region may not be nested inside "
3646 "a %<critical%> region with the same name");
3647 return false;
3648 }
3649 }
3650 break;
3651 case GIMPLE_OMP_TEAMS:
3652 if (ctx == NULL
3653 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
3654 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
3655 {
3656 error_at (gimple_location (stmt),
3657 "%<teams%> construct not closely nested inside of "
3658 "%<target%> construct");
3659 return false;
3660 }
3661 break;
3662 case GIMPLE_OMP_TARGET:
3663 for (c = gimple_omp_target_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
3664 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
3665 && (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
3666 || OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
3667 {
3668 enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
3669 error_at (OMP_CLAUSE_LOCATION (c),
3670 "%<depend(%s)%> is only allowed in %<omp ordered%>",
3671 kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
3672 return false;
3673 }
3674 for (; ctx != NULL; ctx = ctx->outer)
3675 {
3676 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
3677 {
3678 if (is_gimple_omp (stmt)
3679 && is_gimple_omp_oacc (stmt)
3680 && is_gimple_omp (ctx->stmt))
3681 {
3682 error_at (gimple_location (stmt),
3683 "OpenACC construct inside of non-OpenACC region");
3684 return false;
3685 }
3686 continue;
3687 }
3688
3689 const char *stmt_name, *ctx_stmt_name;
3690 switch (gimple_omp_target_kind (stmt))
3691 {
3692 case GF_OMP_TARGET_KIND_REGION: stmt_name = "target"; break;
3693 case GF_OMP_TARGET_KIND_DATA: stmt_name = "target data"; break;
3694 case GF_OMP_TARGET_KIND_UPDATE: stmt_name = "target update"; break;
3695 case GF_OMP_TARGET_KIND_ENTER_DATA:
3696 stmt_name = "target enter data"; break;
3697 case GF_OMP_TARGET_KIND_EXIT_DATA:
3698 stmt_name = "target exit data"; break;
3699 case GF_OMP_TARGET_KIND_OACC_PARALLEL: stmt_name = "parallel"; break;
3700 case GF_OMP_TARGET_KIND_OACC_KERNELS: stmt_name = "kernels"; break;
3701 case GF_OMP_TARGET_KIND_OACC_DATA: stmt_name = "data"; break;
3702 case GF_OMP_TARGET_KIND_OACC_UPDATE: stmt_name = "update"; break;
3703 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
3704 stmt_name = "enter/exit data"; break;
3705 case GF_OMP_TARGET_KIND_OACC_HOST_DATA: stmt_name = "host_data";
3706 break;
3707 default: gcc_unreachable ();
3708 }
3709 switch (gimple_omp_target_kind (ctx->stmt))
3710 {
3711 case GF_OMP_TARGET_KIND_REGION: ctx_stmt_name = "target"; break;
3712 case GF_OMP_TARGET_KIND_DATA: ctx_stmt_name = "target data"; break;
3713 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
3714 ctx_stmt_name = "parallel"; break;
3715 case GF_OMP_TARGET_KIND_OACC_KERNELS:
3716 ctx_stmt_name = "kernels"; break;
3717 case GF_OMP_TARGET_KIND_OACC_DATA: ctx_stmt_name = "data"; break;
3718 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
3719 ctx_stmt_name = "host_data"; break;
3720 default: gcc_unreachable ();
3721 }
3722
3723 /* OpenACC/OpenMP mismatch? */
3724 if (is_gimple_omp_oacc (stmt)
3725 != is_gimple_omp_oacc (ctx->stmt))
3726 {
3727 error_at (gimple_location (stmt),
3728 "%s %qs construct inside of %s %qs region",
3729 (is_gimple_omp_oacc (stmt)
3730 ? "OpenACC" : "OpenMP"), stmt_name,
3731 (is_gimple_omp_oacc (ctx->stmt)
3732 ? "OpenACC" : "OpenMP"), ctx_stmt_name);
3733 return false;
3734 }
3735 if (is_gimple_omp_offloaded (ctx->stmt))
3736 {
3737 /* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX. */
3738 if (is_gimple_omp_oacc (ctx->stmt))
3739 {
3740 error_at (gimple_location (stmt),
3741 "%qs construct inside of %qs region",
3742 stmt_name, ctx_stmt_name);
3743 return false;
3744 }
3745 else
3746 {
3747 warning_at (gimple_location (stmt), 0,
3748 "%qs construct inside of %qs region",
3749 stmt_name, ctx_stmt_name);
3750 }
3751 }
3752 }
3753 break;
3754 default:
3755 break;
3756 }
3757 return true;
3758 }
3759
3760
3761 /* Helper function for scan_omp.
3762
3763 Callback for walk_tree, or for operands via walk_gimple_stmt, used
3764 to scan for OMP directives in TP. */
3765
3766 static tree
3767 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
3768 {
3769 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
3770 omp_context *ctx = (omp_context *) wi->info;
3771 tree t = *tp;
3772
3773 switch (TREE_CODE (t))
3774 {
3775 case VAR_DECL:
3776 case PARM_DECL:
3777 case LABEL_DECL:
3778 case RESULT_DECL:
3779 if (ctx)
3780 *tp = remap_decl (t, &ctx->cb);
3781 break;
3782
3783 default:
3784 if (ctx && TYPE_P (t))
3785 *tp = remap_type (t, &ctx->cb);
3786 else if (!DECL_P (t))
3787 {
3788 *walk_subtrees = 1;
3789 if (ctx)
3790 {
3791 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
3792 if (tem != TREE_TYPE (t))
3793 {
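/* Integer constants are shared tree nodes, so build a fresh constant
in the remapped type instead of overwriting TREE_TYPE in place.  */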
3794 if (TREE_CODE (t) == INTEGER_CST)
3795 *tp = wide_int_to_tree (tem, t);
3796 else
3797 TREE_TYPE (t) = tem;
3798 }
3799 }
3800 }
3801 break;
3802 }
3803
3804 return NULL_TREE;
3805 }
3806
3807 /* Return true if FNDECL is a setjmp or a longjmp. */
3808
3809 static bool
3810 setjmp_or_longjmp_p (const_tree fndecl)
3811 {
3812 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
3813 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
3814 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
3815 return true;
3816
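/* The declaration might not be recognized as a built-in (e.g. with
-fno-builtin), so also match setjmp/longjmp by name.  */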
3817 tree declname = DECL_NAME (fndecl);
3818 if (!declname)
3819 return false;
3820 const char *name = IDENTIFIER_POINTER (declname);
3821 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
3822 }
3823
3824
3825 /* Helper function for scan_omp.
3826
3827 Callback for walk_gimple_stmt used to scan for OMP directives in
3828 the current statement in GSI. */
3829
3830 static tree
3831 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
3832 struct walk_stmt_info *wi)
3833 {
3834 gimple *stmt = gsi_stmt (*gsi);
3835 omp_context *ctx = (omp_context *) wi->info;
3836
3837 if (gimple_has_location (stmt))
3838 input_location = gimple_location (stmt);
3839
3840 /* Check the nesting restrictions. */
3841 bool remove = false;
3842 if (is_gimple_omp (stmt))
3843 remove = !check_omp_nesting_restrictions (stmt, ctx);
3844 else if (is_gimple_call (stmt))
3845 {
3846 tree fndecl = gimple_call_fndecl (stmt);
3847 if (fndecl)
3848 {
3849 if (setjmp_or_longjmp_p (fndecl)
3850 && ctx
3851 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3852 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3853 {
3854 remove = true;
3855 error_at (gimple_location (stmt),
3856 "setjmp/longjmp inside simd construct");
3857 }
3858 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3859 switch (DECL_FUNCTION_CODE (fndecl))
3860 {
3861 case BUILT_IN_GOMP_BARRIER:
3862 case BUILT_IN_GOMP_CANCEL:
3863 case BUILT_IN_GOMP_CANCELLATION_POINT:
3864 case BUILT_IN_GOMP_TASKYIELD:
3865 case BUILT_IN_GOMP_TASKWAIT:
3866 case BUILT_IN_GOMP_TASKGROUP_START:
3867 case BUILT_IN_GOMP_TASKGROUP_END:
3868 remove = !check_omp_nesting_restrictions (stmt, ctx);
3869 break;
3870 default:
3871 break;
3872 }
3873 }
3874 }
3875 if (remove)
3876 {
3877 stmt = gimple_build_nop ();
3878 gsi_replace (gsi, stmt, false);
3879 }
3880
3881 *handled_ops_p = true;
3882
3883 switch (gimple_code (stmt))
3884 {
3885 case GIMPLE_OMP_PARALLEL:
3886 taskreg_nesting_level++;
3887 scan_omp_parallel (gsi, ctx);
3888 taskreg_nesting_level--;
3889 break;
3890
3891 case GIMPLE_OMP_TASK:
3892 taskreg_nesting_level++;
3893 scan_omp_task (gsi, ctx);
3894 taskreg_nesting_level--;
3895 break;
3896
3897 case GIMPLE_OMP_FOR:
3898 scan_omp_for (as_a <gomp_for *> (stmt), ctx);
3899 break;
3900
3901 case GIMPLE_OMP_SECTIONS:
3902 scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
3903 break;
3904
3905 case GIMPLE_OMP_SINGLE:
3906 scan_omp_single (as_a <gomp_single *> (stmt), ctx);
3907 break;
3908
3909 case GIMPLE_OMP_SECTION:
3910 case GIMPLE_OMP_MASTER:
3911 case GIMPLE_OMP_TASKGROUP:
3912 case GIMPLE_OMP_ORDERED:
3913 case GIMPLE_OMP_CRITICAL:
3914 ctx = new_omp_context (stmt, ctx);
3915 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3916 break;
3917
3918 case GIMPLE_OMP_TARGET:
3919 scan_omp_target (as_a <gomp_target *> (stmt), ctx);
3920 break;
3921
3922 case GIMPLE_OMP_TEAMS:
3923 scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
3924 break;
3925
3926 case GIMPLE_BIND:
3927 {
3928 tree var;
3929
3930 *handled_ops_p = false;
3931 if (ctx)
3932 for (var = gimple_bind_vars (as_a <gbind *> (stmt));
3933 var ;
3934 var = DECL_CHAIN (var))
3935 insert_decl_map (&ctx->cb, var, var);
3936 }
3937 break;
3938 default:
3939 *handled_ops_p = false;
3940 break;
3941 }
3942
3943 return NULL_TREE;
3944 }
3945
3946
3947 /* Scan all the statements starting at the current statement. CTX
3948 contains context information about the OMP directives and
3949 clauses found during the scan. */
3950
3951 static void
3952 scan_omp (gimple_seq *body_p, omp_context *ctx)
3953 {
3954 location_t saved_location;
3955 struct walk_stmt_info wi;
3956
3957 memset (&wi, 0, sizeof (wi));
3958 wi.info = ctx;
3959 wi.want_locations = true;
3960
3961 saved_location = input_location;
3962 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
3963 input_location = saved_location;
3964 }
3965 \f
3966 /* Re-gimplification and code generation routines. */
3967
3968 /* Build a call to GOMP_barrier. */
3969
3970 static gimple *
3971 build_omp_barrier (tree lhs)
3972 {
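/* A non-NULL LHS requests the cancellable barrier variant, whose
boolean result tells whether the region was cancelled.  */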
3973 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
3974 : BUILT_IN_GOMP_BARRIER);
3975 gcall *g = gimple_build_call (fndecl, 0);
3976 if (lhs)
3977 gimple_call_set_lhs (g, lhs);
3978 return g;
3979 }
3980
3981 /* If a context was created for STMT when it was scanned, return it. */
3982
3983 static omp_context *
3984 maybe_lookup_ctx (gimple *stmt)
3985 {
3986 splay_tree_node n;
3987 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
3988 return n ? (omp_context *) n->value : NULL;
3989 }
3990
3991
3992 /* Find the mapping for DECL in CTX or the immediately enclosing
3993 context that has a mapping for DECL.
3994
3995 If CTX is a nested parallel directive, we may have to use the decl
3996 mappings created in CTX's parent context. Suppose that we have the
3997 following parallel nesting (variable UIDs shown for clarity):
3998
3999 iD.1562 = 0;
4000 #omp parallel shared(iD.1562) -> outer parallel
4001 iD.1562 = iD.1562 + 1;
4002
4003 #omp parallel shared (iD.1562) -> inner parallel
4004 iD.1562 = iD.1562 - 1;
4005
4006 Each parallel structure will create a distinct .omp_data_s structure
4007 for copying iD.1562 in/out of the directive:
4008
4009 outer parallel .omp_data_s.1.i -> iD.1562
4010 inner parallel .omp_data_s.2.i -> iD.1562
4011
4012 A shared variable mapping will produce a copy-out operation before
4013 the parallel directive and a copy-in operation after it. So, in
4014 this case we would have:
4015
4016 iD.1562 = 0;
4017 .omp_data_o.1.i = iD.1562;
4018 #omp parallel shared(iD.1562) -> outer parallel
4019 .omp_data_i.1 = &.omp_data_o.1
4020 .omp_data_i.1->i = .omp_data_i.1->i + 1;
4021
4022 .omp_data_o.2.i = iD.1562; -> **
4023 #omp parallel shared(iD.1562) -> inner parallel
4024 .omp_data_i.2 = &.omp_data_o.2
4025 .omp_data_i.2->i = .omp_data_i.2->i - 1;
4026
4027
4028 ** This is a problem. The symbol iD.1562 cannot be referenced
4029 inside the body of the outer parallel region. But since we are
4030 emitting this copy operation while expanding the inner parallel
4031 directive, we need to access the CTX structure of the outer
4032 parallel directive to get the correct mapping:
4033
4034 .omp_data_o.2.i = .omp_data_i.1->i
4035
4036 Since there may be other workshare or parallel directives enclosing
4037 the parallel directive, it may be necessary to walk up the context
4038 parent chain. This is not a problem in general because nested
4039 parallelism happens only rarely. */
4040
4041 static tree
4042 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
4043 {
4044 tree t;
4045 omp_context *up;
4046
4047 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
4048 t = maybe_lookup_decl (decl, up);
4049
4050 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
4051
4052 return t ? t : decl;
4053 }
4054
4055
4056 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
4057 in outer contexts. */
4058
4059 static tree
4060 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
4061 {
4062 tree t = NULL;
4063 omp_context *up;
4064
4065 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
4066 t = maybe_lookup_decl (decl, up);
4067
4068 return t ? t : decl;
4069 }
4070
4071
4072 /* Construct the initialization value for reduction operation OP. */
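/* The value is the identity element of OP: e.g. 0 for PLUS_EXPR and
BIT_IOR_EXPR, 1 for MULT_EXPR and TRUTH_AND_EXPR, all-ones for
BIT_AND_EXPR, and TYPE's minimum for MAX_EXPR.  */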
4073
4074 tree
4075 omp_reduction_init_op (location_t loc, enum tree_code op, tree type)
4076 {
4077 switch (op)
4078 {
4079 case PLUS_EXPR:
4080 case MINUS_EXPR:
4081 case BIT_IOR_EXPR:
4082 case BIT_XOR_EXPR:
4083 case TRUTH_OR_EXPR:
4084 case TRUTH_ORIF_EXPR:
4085 case TRUTH_XOR_EXPR:
4086 case NE_EXPR:
4087 return build_zero_cst (type);
4088
4089 case MULT_EXPR:
4090 case TRUTH_AND_EXPR:
4091 case TRUTH_ANDIF_EXPR:
4092 case EQ_EXPR:
4093 return fold_convert_loc (loc, type, integer_one_node);
4094
4095 case BIT_AND_EXPR:
4096 return fold_convert_loc (loc, type, integer_minus_one_node);
4097
4098 case MAX_EXPR:
4099 if (SCALAR_FLOAT_TYPE_P (type))
4100 {
4101 REAL_VALUE_TYPE max, min;
4102 if (HONOR_INFINITIES (type))
4103 {
4104 real_inf (&max);
4105 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
4106 }
4107 else
4108 real_maxval (&min, 1, TYPE_MODE (type));
4109 return build_real (type, min);
4110 }
4111 else if (POINTER_TYPE_P (type))
4112 {
4113 wide_int min
4114 = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
4115 return wide_int_to_tree (type, min);
4116 }
4117 else
4118 {
4119 gcc_assert (INTEGRAL_TYPE_P (type));
4120 return TYPE_MIN_VALUE (type);
4121 }
4122
4123 case MIN_EXPR:
4124 if (SCALAR_FLOAT_TYPE_P (type))
4125 {
4126 REAL_VALUE_TYPE max;
4127 if (HONOR_INFINITIES (type))
4128 real_inf (&max);
4129 else
4130 real_maxval (&max, 0, TYPE_MODE (type));
4131 return build_real (type, max);
4132 }
4133 else if (POINTER_TYPE_P (type))
4134 {
4135 wide_int max
4136 = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
4137 return wide_int_to_tree (type, max);
4138 }
4139 else
4140 {
4141 gcc_assert (INTEGRAL_TYPE_P (type));
4142 return TYPE_MAX_VALUE (type);
4143 }
4144
4145 default:
4146 gcc_unreachable ();
4147 }
4148 }
4149
4150 /* Construct the initialization value for reduction CLAUSE. */
4151
4152 tree
4153 omp_reduction_init (tree clause, tree type)
4154 {
4155 return omp_reduction_init_op (OMP_CLAUSE_LOCATION (clause),
4156 OMP_CLAUSE_REDUCTION_CODE (clause), type);
4157 }
4158
4159 /* Return alignment to be assumed for var in CLAUSE, which should be
4160 OMP_CLAUSE_ALIGNED. */
4161
4162 static tree
4163 omp_clause_aligned_alignment (tree clause)
4164 {
4165 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
4166 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
4167
4168 /* Otherwise return the implementation-defined alignment. */
4169 unsigned int al = 1;
4170 machine_mode mode, vmode;
4171 int vs = targetm.vectorize.autovectorize_vector_sizes ();
4172 if (vs)
4173 vs = 1 << floor_log2 (vs);
4174 static enum mode_class classes[]
4175 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
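/* Try each (scalar, vector) mode-class pair, keeping the largest
alignment that any preferred SIMD vector type of at most the
autovectorization size requires.  */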
4176 for (int i = 0; i < 4; i += 2)
4177 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
4178 mode != VOIDmode;
4179 mode = GET_MODE_WIDER_MODE (mode))
4180 {
4181 vmode = targetm.vectorize.preferred_simd_mode (mode);
4182 if (GET_MODE_CLASS (vmode) != classes[i + 1])
4183 continue;
4184 while (vs
4185 && GET_MODE_SIZE (vmode) < vs
4186 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
4187 vmode = GET_MODE_2XWIDER_MODE (vmode);
4188
4189 tree type = lang_hooks.types.type_for_mode (mode, 1);
4190 if (type == NULL_TREE || TYPE_MODE (type) != mode)
4191 continue;
4192 type = build_vector_type (type, GET_MODE_SIZE (vmode)
4193 / GET_MODE_SIZE (mode));
4194 if (TYPE_MODE (type) != vmode)
4195 continue;
4196 if (TYPE_ALIGN_UNIT (type) > al)
4197 al = TYPE_ALIGN_UNIT (type);
4198 }
4199 return build_int_cst (integer_type_node, al);
4200 }
4201
4202 /* Return maximum possible vectorization factor for the target. */
4203
4204 static int
4205 omp_max_vf (void)
4206 {
4207 if (!optimize
4208 || optimize_debug
4209 || !flag_tree_loop_optimize
4210 || (!flag_tree_loop_vectorize
4211 && (global_options_set.x_flag_tree_loop_vectorize
4212 || global_options_set.x_flag_tree_vectorize)))
4213 return 1;
4214
4215 int vs = targetm.vectorize.autovectorize_vector_sizes ();
4216 if (vs)
4217 {
4218 vs = 1 << floor_log2 (vs);
4219 return vs;
4220 }
4221 machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
4222 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
4223 return GET_MODE_NUNITS (vqimode);
4224 return 1;
4225 }
4226
4227 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
4228 privatization. */
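/* NEW_VAR is privatized into a MAX_VF-sized "omp simd array"; on
success IVAR and LVAR are set to ARRAY_REFs into it indexed by IDX
(the per-iteration index) and LANE (the owning SIMD lane). Returns
false when the array scheme is not used, i.e. when max_vf is 1.  */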
4229
4230 static bool
4231 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
4232 tree &idx, tree &lane, tree &ivar, tree &lvar)
4233 {
4234 if (max_vf == 0)
4235 {
4236 max_vf = omp_max_vf ();
4237 if (max_vf > 1)
4238 {
4239 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
4240 OMP_CLAUSE_SAFELEN);
4241 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
4242 max_vf = 1;
4243 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
4244 max_vf) == -1)
4245 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
4246 }
4247 if (max_vf > 1)
4248 {
4249 idx = create_tmp_var (unsigned_type_node);
4250 lane = create_tmp_var (unsigned_type_node);
4251 }
4252 }
4253 if (max_vf == 1)
4254 return false;
4255
4256 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
4257 tree avar = create_tmp_var_raw (atype);
4258 if (TREE_ADDRESSABLE (new_var))
4259 TREE_ADDRESSABLE (avar) = 1;
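/* Mark the array with the "omp simd array" attribute so later passes
know its elements are per-SIMD-lane private copies.  */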
4260 DECL_ATTRIBUTES (avar)
4261 = tree_cons (get_identifier ("omp simd array"), NULL,
4262 DECL_ATTRIBUTES (avar));
4263 gimple_add_tmp_var (avar);
4264 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
4265 NULL_TREE, NULL_TREE);
4266 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
4267 NULL_TREE, NULL_TREE);
4268 if (DECL_P (new_var))
4269 {
4270 SET_DECL_VALUE_EXPR (new_var, lvar);
4271 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
4272 }
4273 return true;
4274 }
4275
4276 /* Helper function of lower_rec_input_clauses. For a reference in
4277 a simd reduction, allocate an underlying variable that it will reference. */
4278
4279 static void
4280 handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
4281 {
4282 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
4283 if (TREE_CONSTANT (z))
4284 {
4285 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)),
4286 get_name (new_vard));
4287 gimple_add_tmp_var (z);
4288 TREE_ADDRESSABLE (z) = 1;
4289 z = build_fold_addr_expr_loc (loc, z);
4290 gimplify_assign (new_vard, z, ilist);
4291 }
4292 }
4293
4294 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
4295 from the receiver (aka child) side and initializers for REFERENCE_TYPE
4296 private variables. Initialization statements go in ILIST, while calls
4297 to destructors go in DLIST. */
4298
4299 static void
4300 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
4301 omp_context *ctx, struct omp_for_data *fd)
4302 {
4303 tree c, dtor, copyin_seq, x, ptr;
4304 bool copyin_by_ref = false;
4305 bool lastprivate_firstprivate = false;
4306 bool reduction_omp_orig_ref = false;
4307 int pass;
4308 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
4309 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
4310 int max_vf = 0;
4311 tree lane = NULL_TREE, idx = NULL_TREE;
4312 tree ivar = NULL_TREE, lvar = NULL_TREE;
4313 gimple_seq llist[2] = { NULL, NULL };
4314
4315 copyin_seq = NULL;
4316
4317 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
4318 with data sharing clauses referencing variable sized vars. That
4319 is unnecessarily hard to support and very unlikely to result in
4320 vectorized code anyway. */
4321 if (is_simd)
4322 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4323 switch (OMP_CLAUSE_CODE (c))
4324 {
4325 case OMP_CLAUSE_LINEAR:
4326 if (OMP_CLAUSE_LINEAR_ARRAY (c))
4327 max_vf = 1;
4328 /* FALLTHRU */
4329 case OMP_CLAUSE_PRIVATE:
4330 case OMP_CLAUSE_FIRSTPRIVATE:
4331 case OMP_CLAUSE_LASTPRIVATE:
4332 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
4333 max_vf = 1;
4334 break;
4335 case OMP_CLAUSE_REDUCTION:
4336 if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF
4337 || is_variable_sized (OMP_CLAUSE_DECL (c)))
4338 max_vf = 1;
4339 break;
4340 default:
4341 continue;
4342 }
4343
4344 /* Do all the fixed-sized types in the first pass, and the variable-sized
4345 types in the second pass. This makes sure that the scalar arguments to
4346 the variable-sized types are processed before we use them in the
4347 variable-sized operations. */
4348 for (pass = 0; pass < 2; ++pass)
4349 {
4350 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4351 {
4352 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
4353 tree var, new_var;
4354 bool by_ref;
4355 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4356
4357 switch (c_kind)
4358 {
4359 case OMP_CLAUSE_PRIVATE:
4360 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
4361 continue;
4362 break;
4363 case OMP_CLAUSE_SHARED:
4364 /* Ignore shared directives in a teams construct. */
4365 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
4366 continue;
4367 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
4368 {
4369 gcc_assert (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c)
4370 || is_global_var (OMP_CLAUSE_DECL (c)));
4371 continue;
4372 }
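/* FALLTHRU */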
4373 case OMP_CLAUSE_FIRSTPRIVATE:
4374 case OMP_CLAUSE_COPYIN:
4375 break;
4376 case OMP_CLAUSE_LINEAR:
4377 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c)
4378 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
4379 lastprivate_firstprivate = true;
4380 break;
4381 case OMP_CLAUSE_REDUCTION:
4382 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
4383 reduction_omp_orig_ref = true;
4384 break;
4385 case OMP_CLAUSE__LOOPTEMP_:
4386 /* Handle _looptemp_ clauses only on parallel/task. */
4387 if (fd)
4388 continue;
4389 break;
4390 case OMP_CLAUSE_LASTPRIVATE:
4391 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4392 {
4393 lastprivate_firstprivate = true;
4394 if (pass != 0 || is_taskloop_ctx (ctx))
4395 continue;
4396 }
4397 /* Even without a corresponding firstprivate, if the
4398 decl is a Fortran allocatable, it needs an outer var
4399 reference. */
4400 else if (pass == 0
4401 && lang_hooks.decls.omp_private_outer_ref
4402 (OMP_CLAUSE_DECL (c)))
4403 lastprivate_firstprivate = true;
4404 break;
4405 case OMP_CLAUSE_ALIGNED:
4406 if (pass == 0)
4407 continue;
4408 var = OMP_CLAUSE_DECL (c);
4409 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
4410 && !is_global_var (var))
4411 {
4412 new_var = maybe_lookup_decl (var, ctx);
4413 if (new_var == NULL_TREE)
4414 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
4415 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
4416 x = build_call_expr_loc (clause_loc, x, 2, new_var,
4417 omp_clause_aligned_alignment (c));
4418 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
4419 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
4420 gimplify_and_add (x, ilist);
4421 }
4422 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
4423 && is_global_var (var))
4424 {
4425 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
4426 new_var = lookup_decl (var, ctx);
4427 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
4428 t = build_fold_addr_expr_loc (clause_loc, t);
4429 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
4430 t = build_call_expr_loc (clause_loc, t2, 2, t,
4431 omp_clause_aligned_alignment (c));
4432 t = fold_convert_loc (clause_loc, ptype, t);
4433 x = create_tmp_var (ptype);
4434 t = build2 (MODIFY_EXPR, ptype, x, t);
4435 gimplify_and_add (t, ilist);
4436 t = build_simple_mem_ref_loc (clause_loc, x);
4437 SET_DECL_VALUE_EXPR (new_var, t);
4438 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
4439 }
4440 continue;
4441 default:
4442 continue;
4443 }
4444
4445 new_var = var = OMP_CLAUSE_DECL (c);
4446 if (c_kind == OMP_CLAUSE_REDUCTION && TREE_CODE (var) == MEM_REF)
4447 {
4448 var = TREE_OPERAND (var, 0);
4449 if (TREE_CODE (var) == POINTER_PLUS_EXPR)
4450 var = TREE_OPERAND (var, 0);
4451 if (TREE_CODE (var) == INDIRECT_REF
4452 || TREE_CODE (var) == ADDR_EXPR)
4453 var = TREE_OPERAND (var, 0);
4454 if (is_variable_sized (var))
4455 {
4456 gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
4457 var = DECL_VALUE_EXPR (var);
4458 gcc_assert (TREE_CODE (var) == INDIRECT_REF);
4459 var = TREE_OPERAND (var, 0);
4460 gcc_assert (DECL_P (var));
4461 }
4462 new_var = var;
4463 }
4464 if (c_kind != OMP_CLAUSE_COPYIN)
4465 new_var = lookup_decl (var, ctx);
4466
4467 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
4468 {
4469 if (pass != 0)
4470 continue;
4471 }
4472 /* C/C++ array section reductions. */
4473 else if (c_kind == OMP_CLAUSE_REDUCTION
4474 && var != OMP_CLAUSE_DECL (c))
4475 {
4476 if (pass == 0)
4477 continue;
4478
4479 tree bias = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
4480 tree orig_var = TREE_OPERAND (OMP_CLAUSE_DECL (c), 0);
4481 if (TREE_CODE (orig_var) == POINTER_PLUS_EXPR)
4482 {
4483 tree b = TREE_OPERAND (orig_var, 1);
4484 b = maybe_lookup_decl (b, ctx);
4485 if (b == NULL)
4486 {
4487 b = TREE_OPERAND (orig_var, 1);
4488 b = maybe_lookup_decl_in_outer_ctx (b, ctx);
4489 }
4490 if (integer_zerop (bias))
4491 bias = b;
4492 else
4493 {
4494 bias = fold_convert_loc (clause_loc,
4495 TREE_TYPE (b), bias);
4496 bias = fold_build2_loc (clause_loc, PLUS_EXPR,
4497 TREE_TYPE (b), b, bias);
4498 }
4499 orig_var = TREE_OPERAND (orig_var, 0);
4500 }
4501 if (TREE_CODE (orig_var) == INDIRECT_REF
4502 || TREE_CODE (orig_var) == ADDR_EXPR)
4503 orig_var = TREE_OPERAND (orig_var, 0);
4504 tree d = OMP_CLAUSE_DECL (c);
4505 tree type = TREE_TYPE (d);
4506 gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
4507 tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
4508 const char *name = get_name (orig_var);
4509 if (TREE_CONSTANT (v))
4510 {
4511 x = create_tmp_var_raw (type, name);
4512 gimple_add_tmp_var (x);
4513 TREE_ADDRESSABLE (x) = 1;
4514 x = build_fold_addr_expr_loc (clause_loc, x);
4515 }
4516 else
4517 {
4518 tree atmp
4519 = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
4520 tree t = maybe_lookup_decl (v, ctx);
4521 if (t)
4522 v = t;
4523 else
4524 v = maybe_lookup_decl_in_outer_ctx (v, ctx);
4525 gimplify_expr (&v, ilist, NULL, is_gimple_val, fb_rvalue);
4526 t = fold_build2_loc (clause_loc, PLUS_EXPR,
4527 TREE_TYPE (v), v,
4528 build_int_cst (TREE_TYPE (v), 1));
4529 t = fold_build2_loc (clause_loc, MULT_EXPR,
4530 TREE_TYPE (v), t,
4531 TYPE_SIZE_UNIT (TREE_TYPE (type)));
4532 tree al = size_int (TYPE_ALIGN (TREE_TYPE (type)));
4533 x = build_call_expr_loc (clause_loc, atmp, 2, t, al);
4534 }
4535
4536 tree ptype = build_pointer_type (TREE_TYPE (type));
4537 x = fold_convert_loc (clause_loc, ptype, x);
4538 tree y = create_tmp_var (ptype, name);
4539 gimplify_assign (y, x, ilist);
4540 x = y;
4541 tree yb = y;
4542
4543 if (!integer_zerop (bias))
4544 {
4545 bias = fold_convert_loc (clause_loc, pointer_sized_int_node,
4546 bias);
4547 yb = fold_convert_loc (clause_loc, pointer_sized_int_node,
4548 x);
4549 yb = fold_build2_loc (clause_loc, MINUS_EXPR,
4550 pointer_sized_int_node, yb, bias);
4551 x = fold_convert_loc (clause_loc, TREE_TYPE (x), yb);
4552 yb = create_tmp_var (ptype, name);
4553 gimplify_assign (yb, x, ilist);
4554 x = yb;
4555 }
4556
4557 d = TREE_OPERAND (d, 0);
4558 if (TREE_CODE (d) == POINTER_PLUS_EXPR)
4559 d = TREE_OPERAND (d, 0);
4560 if (TREE_CODE (d) == ADDR_EXPR)
4561 {
4562 if (orig_var != var)
4563 {
4564 gcc_assert (is_variable_sized (orig_var));
4565 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var),
4566 x);
4567 gimplify_assign (new_var, x, ilist);
4568 tree new_orig_var = lookup_decl (orig_var, ctx);
4569 tree t = build_fold_indirect_ref (new_var);
4570 DECL_IGNORED_P (new_var) = 0;
4571 TREE_THIS_NOTRAP (t) = 1;
4572 SET_DECL_VALUE_EXPR (new_orig_var, t);
4573 DECL_HAS_VALUE_EXPR_P (new_orig_var) = 1;
4574 }
4575 else
4576 {
4577 x = build2 (MEM_REF, TREE_TYPE (new_var), x,
4578 build_int_cst (ptype, 0));
4579 SET_DECL_VALUE_EXPR (new_var, x);
4580 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
4581 }
4582 }
4583 else
4584 {
4585 gcc_assert (orig_var == var);
4586 if (TREE_CODE (d) == INDIRECT_REF)
4587 {
4588 x = create_tmp_var (ptype, name);
4589 TREE_ADDRESSABLE (x) = 1;
4590 gimplify_assign (x, yb, ilist);
4591 x = build_fold_addr_expr_loc (clause_loc, x);
4592 }
4593 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
4594 gimplify_assign (new_var, x, ilist);
4595 }
4596 tree y1 = create_tmp_var (ptype, NULL);
4597 gimplify_assign (y1, y, ilist);
4598 tree i2 = NULL_TREE, y2 = NULL_TREE;
4599 tree body2 = NULL_TREE, end2 = NULL_TREE;
4600 tree y3 = NULL_TREE, y4 = NULL_TREE;
4601 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) || is_simd)
4602 {
4603 y2 = create_tmp_var (ptype, NULL);
4604 gimplify_assign (y2, y, ilist);
4605 tree ref = build_outer_var_ref (var, ctx);
4606 /* For references, build_outer_var_ref already performs this. */
4607 if (TREE_CODE (d) == INDIRECT_REF)
4608 gcc_assert (is_reference (var));
4609 else if (TREE_CODE (d) == ADDR_EXPR)
4610 ref = build_fold_addr_expr (ref);
4611 else if (is_reference (var))
4612 ref = build_fold_addr_expr (ref);
4613 ref = fold_convert_loc (clause_loc, ptype, ref);
4614 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
4615 && OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
4616 {
4617 y3 = create_tmp_var (ptype, NULL);
4618 gimplify_assign (y3, unshare_expr (ref), ilist);
4619 }
4620 if (is_simd)
4621 {
4622 y4 = create_tmp_var (ptype, NULL);
4623 gimplify_assign (y4, ref, dlist);
4624 }
4625 }
4626 tree i = create_tmp_var (TREE_TYPE (v), NULL);
4627 gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), ilist);
4628 tree body = create_artificial_label (UNKNOWN_LOCATION);
4629 tree end = create_artificial_label (UNKNOWN_LOCATION);
4630 gimple_seq_add_stmt (ilist, gimple_build_label (body));
4631 if (y2)
4632 {
4633 i2 = create_tmp_var (TREE_TYPE (v), NULL);
4634 gimplify_assign (i2, build_int_cst (TREE_TYPE (v), 0), dlist);
4635 body2 = create_artificial_label (UNKNOWN_LOCATION);
4636 end2 = create_artificial_label (UNKNOWN_LOCATION);
4637 gimple_seq_add_stmt (dlist, gimple_build_label (body2));
4638 }
4639 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4640 {
4641 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
4642 tree decl_placeholder
4643 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
4644 SET_DECL_VALUE_EXPR (decl_placeholder,
4645 build_simple_mem_ref (y1));
4646 DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
4647 SET_DECL_VALUE_EXPR (placeholder,
4648 y3 ? build_simple_mem_ref (y3)
4649 : error_mark_node);
4650 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
4651 x = lang_hooks.decls.omp_clause_default_ctor
4652 (c, build_simple_mem_ref (y1),
4653 y3 ? build_simple_mem_ref (y3) : NULL_TREE);
4654 if (x)
4655 gimplify_and_add (x, ilist);
4656 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
4657 {
4658 gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
4659 lower_omp (&tseq, ctx);
4660 gimple_seq_add_seq (ilist, tseq);
4661 }
4662 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
4663 if (is_simd)
4664 {
4665 SET_DECL_VALUE_EXPR (decl_placeholder,
4666 build_simple_mem_ref (y2));
4667 SET_DECL_VALUE_EXPR (placeholder,
4668 build_simple_mem_ref (y4));
4669 gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
4670 lower_omp (&tseq, ctx);
4671 gimple_seq_add_seq (dlist, tseq);
4672 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4673 }
4674 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
4675 DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 0;
4676 x = lang_hooks.decls.omp_clause_dtor
4677 (c, build_simple_mem_ref (y2));
4678 if (x)
4679 {
4680 gimple_seq tseq = NULL;
4681 dtor = x;
4682 gimplify_stmt (&dtor, &tseq);
4683 gimple_seq_add_seq (dlist, tseq);
4684 }
4685 }
4686 else
4687 {
4688 x = omp_reduction_init (c, TREE_TYPE (type));
4689 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
4690
4691 /* reduction(-:var) sums up the partial results, so it
4692 acts identically to reduction(+:var). */
4693 if (code == MINUS_EXPR)
4694 code = PLUS_EXPR;
4695
4696 gimplify_assign (build_simple_mem_ref (y1), x, ilist);
4697 if (is_simd)
4698 {
4699 x = build2 (code, TREE_TYPE (type),
4700 build_simple_mem_ref (y4),
4701 build_simple_mem_ref (y2));
4702 gimplify_assign (build_simple_mem_ref (y4), x, dlist);
4703 }
4704 }
4705 gimple *g
4706 = gimple_build_assign (y1, POINTER_PLUS_EXPR, y1,
4707 TYPE_SIZE_UNIT (TREE_TYPE (type)));
4708 gimple_seq_add_stmt (ilist, g);
4709 if (y3)
4710 {
4711 g = gimple_build_assign (y3, POINTER_PLUS_EXPR, y3,
4712 TYPE_SIZE_UNIT (TREE_TYPE (type)));
4713 gimple_seq_add_stmt (ilist, g);
4714 }
4715 g = gimple_build_assign (i, PLUS_EXPR, i,
4716 build_int_cst (TREE_TYPE (i), 1));
4717 gimple_seq_add_stmt (ilist, g);
4718 g = gimple_build_cond (LE_EXPR, i, v, body, end);
4719 gimple_seq_add_stmt (ilist, g);
4720 gimple_seq_add_stmt (ilist, gimple_build_label (end));
4721 if (y2)
4722 {
4723 g = gimple_build_assign (y2, POINTER_PLUS_EXPR, y2,
4724 TYPE_SIZE_UNIT (TREE_TYPE (type)));
4725 gimple_seq_add_stmt (dlist, g);
4726 if (y4)
4727 {
4728 g = gimple_build_assign
4729 (y4, POINTER_PLUS_EXPR, y4,
4730 TYPE_SIZE_UNIT (TREE_TYPE (type)));
4731 gimple_seq_add_stmt (dlist, g);
4732 }
4733 g = gimple_build_assign (i2, PLUS_EXPR, i2,
4734 build_int_cst (TREE_TYPE (i2), 1));
4735 gimple_seq_add_stmt (dlist, g);
4736 g = gimple_build_cond (LE_EXPR, i2, v, body2, end2);
4737 gimple_seq_add_stmt (dlist, g);
4738 gimple_seq_add_stmt (dlist, gimple_build_label (end2));
4739 }
4740 continue;
4741 }
4742 else if (is_variable_sized (var))
4743 {
4744 /* For variable sized types, we need to allocate the
4745 actual storage here. Call alloca and store the
4746 result in the pointer decl that we created elsewhere. */
4747 if (pass == 0)
4748 continue;
4749
4750 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
4751 {
4752 gcall *stmt;
4753 tree tmp, atmp;
4754
4755 ptr = DECL_VALUE_EXPR (new_var);
4756 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
4757 ptr = TREE_OPERAND (ptr, 0);
4758 gcc_assert (DECL_P (ptr));
4759 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
4760
4761 /* void *tmp = __builtin_alloca_with_align (size, align) */
4762 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
4763 stmt = gimple_build_call (atmp, 2, x,
4764 size_int (DECL_ALIGN (var)));
4765 tmp = create_tmp_var_raw (ptr_type_node);
4766 gimple_add_tmp_var (tmp);
4767 gimple_call_set_lhs (stmt, tmp);
4768
4769 gimple_seq_add_stmt (ilist, stmt);
4770
4771 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
4772 gimplify_assign (ptr, x, ilist);
4773 }
4774 }
4775 else if (is_reference (var) && !is_oacc_parallel (ctx))
4776 {
4777 /* For references that are being privatized for Fortran,
4778 allocate new backing storage for the new pointer
4779 variable. This allows us to avoid changing all the
4780 code that expects a pointer to something that expects
4781 a direct variable. */
4782 if (pass == 0)
4783 continue;
4784
4785 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
4786 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
4787 {
4788 x = build_receiver_ref (var, false, ctx);
4789 x = build_fold_addr_expr_loc (clause_loc, x);
4790 }
4791 else if (TREE_CONSTANT (x))
4792 {
4793 /* For a reduction in a SIMD loop, defer adding the
4794 initialization of the reference, because if we decide
4795 to use a SIMD array for it, the initialization could
4796 cause an expansion ICE. */
4797 if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
4798 x = NULL_TREE;
4799 else
4800 {
4801 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
4802 get_name (var));
4803 gimple_add_tmp_var (x);
4804 TREE_ADDRESSABLE (x) = 1;
4805 x = build_fold_addr_expr_loc (clause_loc, x);
4806 }
4807 }
4808 else
4809 {
4810 tree atmp
4811 = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
4812 tree rtype = TREE_TYPE (TREE_TYPE (new_var));
4813 tree al = size_int (TYPE_ALIGN (rtype));
4814 x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
4815 }
4816
4817 if (x)
4818 {
4819 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
4820 gimplify_assign (new_var, x, ilist);
4821 }
4822
4823 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4824 }
4825 else if (c_kind == OMP_CLAUSE_REDUCTION
4826 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4827 {
4828 if (pass == 0)
4829 continue;
4830 }
4831 else if (pass != 0)
4832 continue;
4833
4834 switch (OMP_CLAUSE_CODE (c))
4835 {
4836 case OMP_CLAUSE_SHARED:
4837 /* Ignore shared directives in a teams construct. */
4838 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
4839 continue;
4840 /* Shared global vars are just accessed directly. */
4841 if (is_global_var (new_var))
4842 break;
4843 /* For taskloop firstprivate/lastprivate, represented
4844 as firstprivate and shared clause on the task, new_var
4845 is the firstprivate var. */
4846 if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
4847 break;
4848 /* Set up the DECL_VALUE_EXPR for shared variables now. This
4849 needs to be delayed until after fixup_child_record_type so
4850 that we get the correct type during the dereference. */
4851 by_ref = use_pointer_for_field (var, ctx);
4852 x = build_receiver_ref (var, by_ref, ctx);
4853 SET_DECL_VALUE_EXPR (new_var, x);
4854 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
4855
4856 /* ??? If VAR is not passed by reference, and the variable
4857 hasn't been initialized yet, then we'll get a warning for
4858 the store into the omp_data_s structure. Ideally, we'd be
4859 able to notice this and not store anything at all, but
4860 we're generating code too early. Suppress the warning. */
4861 if (!by_ref)
4862 TREE_NO_WARNING (var) = 1;
4863 break;
4864
4865 case OMP_CLAUSE_LASTPRIVATE:
4866 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4867 break;
4868 /* FALLTHRU */
4869
4870 case OMP_CLAUSE_PRIVATE:
4871 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
4872 x = build_outer_var_ref (var, ctx);
4873 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4874 {
4875 if (is_task_ctx (ctx))
4876 x = build_receiver_ref (var, false, ctx);
4877 else
4878 x = build_outer_var_ref (var, ctx);
4879 }
4880 else
4881 x = NULL;
4882 do_private:
4883 tree nx;
4884 nx = lang_hooks.decls.omp_clause_default_ctor
4885 (c, unshare_expr (new_var), x);
4886 if (is_simd)
4887 {
4888 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
4889 if ((TREE_ADDRESSABLE (new_var) || nx || y
4890 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
4891 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
4892 idx, lane, ivar, lvar))
4893 {
4894 if (nx)
4895 x = lang_hooks.decls.omp_clause_default_ctor
4896 (c, unshare_expr (ivar), x);
4897 if (nx && x)
4898 gimplify_and_add (x, &llist[0]);
4899 if (y)
4900 {
4901 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
4902 if (y)
4903 {
4904 gimple_seq tseq = NULL;
4905
4906 dtor = y;
4907 gimplify_stmt (&dtor, &tseq);
4908 gimple_seq_add_seq (&llist[1], tseq);
4909 }
4910 }
4911 break;
4912 }
4913 }
4914 if (nx)
4915 gimplify_and_add (nx, ilist);
4916 /* FALLTHRU */
4917
4918 do_dtor:
4919 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
4920 if (x)
4921 {
4922 gimple_seq tseq = NULL;
4923
4924 dtor = x;
4925 gimplify_stmt (&dtor, &tseq);
4926 gimple_seq_add_seq (dlist, tseq);
4927 }
4928 break;
4929
4930 case OMP_CLAUSE_LINEAR:
4931 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
4932 goto do_firstprivate;
4933 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
4934 x = NULL;
4935 else
4936 x = build_outer_var_ref (var, ctx);
4937 goto do_private;
4938
4939 case OMP_CLAUSE_FIRSTPRIVATE:
4940 if (is_task_ctx (ctx))
4941 {
4942 if (is_reference (var) || is_variable_sized (var))
4943 goto do_dtor;
4944 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
4945 ctx))
4946 || use_pointer_for_field (var, NULL))
4947 {
4948 x = build_receiver_ref (var, false, ctx);
4949 SET_DECL_VALUE_EXPR (new_var, x);
4950 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
4951 goto do_dtor;
4952 }
4953 }
4954 do_firstprivate:
4955 x = build_outer_var_ref (var, ctx);
4956 if (is_simd)
4957 {
4958 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
4959 && gimple_omp_for_combined_into_p (ctx->stmt))
4960 {
4961 tree t = OMP_CLAUSE_LINEAR_STEP (c);
4962 tree stept = TREE_TYPE (t);
4963 tree ct = find_omp_clause (clauses,
4964 OMP_CLAUSE__LOOPTEMP_);
4965 gcc_assert (ct);
4966 tree l = OMP_CLAUSE_DECL (ct);
4967 tree n1 = fd->loop.n1;
4968 tree step = fd->loop.step;
4969 tree itype = TREE_TYPE (l);
4970 if (POINTER_TYPE_P (itype))
4971 itype = signed_type_for (itype);
4972 l = fold_build2 (MINUS_EXPR, itype, l, n1);
4973 if (TYPE_UNSIGNED (itype)
4974 && fd->loop.cond_code == GT_EXPR)
4975 l = fold_build2 (TRUNC_DIV_EXPR, itype,
4976 fold_build1 (NEGATE_EXPR, itype, l),
4977 fold_build1 (NEGATE_EXPR,
4978 itype, step));
4979 else
4980 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
4981 t = fold_build2 (MULT_EXPR, stept,
4982 fold_convert (stept, l), t);
4983
4984 if (OMP_CLAUSE_LINEAR_ARRAY (c))
4985 {
4986 x = lang_hooks.decls.omp_clause_linear_ctor
4987 (c, new_var, x, t);
4988 gimplify_and_add (x, ilist);
4989 goto do_dtor;
4990 }
4991
4992 if (POINTER_TYPE_P (TREE_TYPE (x)))
4993 x = fold_build2 (POINTER_PLUS_EXPR,
4994 TREE_TYPE (x), x, t);
4995 else
4996 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
4997 }
4998
4999 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
5000 || TREE_ADDRESSABLE (new_var))
5001 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
5002 idx, lane, ivar, lvar))
5003 {
5004 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
5005 {
5006 tree iv = create_tmp_var (TREE_TYPE (new_var));
5007 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
5008 gimplify_and_add (x, ilist);
5009 gimple_stmt_iterator gsi
5010 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
5011 gassign *g
5012 = gimple_build_assign (unshare_expr (lvar), iv);
5013 gsi_insert_before_without_update (&gsi, g,
5014 GSI_SAME_STMT);
5015 tree t = OMP_CLAUSE_LINEAR_STEP (c);
5016 enum tree_code code = PLUS_EXPR;
5017 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
5018 code = POINTER_PLUS_EXPR;
5019 g = gimple_build_assign (iv, code, iv, t);
5020 gsi_insert_before_without_update (&gsi, g,
5021 GSI_SAME_STMT);
5022 break;
5023 }
5024 x = lang_hooks.decls.omp_clause_copy_ctor
5025 (c, unshare_expr (ivar), x);
5026 gimplify_and_add (x, &llist[0]);
5027 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
5028 if (x)
5029 {
5030 gimple_seq tseq = NULL;
5031
5032 dtor = x;
5033 gimplify_stmt (&dtor, &tseq);
5034 gimple_seq_add_seq (&llist[1], tseq);
5035 }
5036 break;
5037 }
5038 }
5039 x = lang_hooks.decls.omp_clause_copy_ctor
5040 (c, unshare_expr (new_var), x);
5041 gimplify_and_add (x, ilist);
5042 goto do_dtor;
5043
5044 case OMP_CLAUSE__LOOPTEMP_:
5045 gcc_assert (is_taskreg_ctx (ctx));
5046 x = build_outer_var_ref (var, ctx);
5047 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
5048 gimplify_and_add (x, ilist);
5049 break;
5050
5051 case OMP_CLAUSE_COPYIN:
5052 by_ref = use_pointer_for_field (var, NULL);
5053 x = build_receiver_ref (var, by_ref, ctx);
5054 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
5055 append_to_statement_list (x, &copyin_seq);
5056 copyin_by_ref |= by_ref;
5057 break;
5058
5059 case OMP_CLAUSE_REDUCTION:
5060 /* OpenACC reductions are initialized using the
5061 GOACC_REDUCTION internal function. */
5062 if (is_gimple_omp_oacc (ctx->stmt))
5063 break;
5064 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
5065 {
5066 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
5067 gimple *tseq;
5068 x = build_outer_var_ref (var, ctx);
5069
5070 if (is_reference (var)
5071 && !useless_type_conversion_p (TREE_TYPE (placeholder),
5072 TREE_TYPE (x)))
5073 x = build_fold_addr_expr_loc (clause_loc, x);
5074 SET_DECL_VALUE_EXPR (placeholder, x);
5075 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
5076 tree new_vard = new_var;
5077 if (is_reference (var))
5078 {
5079 gcc_assert (TREE_CODE (new_var) == MEM_REF);
5080 new_vard = TREE_OPERAND (new_var, 0);
5081 gcc_assert (DECL_P (new_vard));
5082 }
5083 if (is_simd
5084 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
5085 idx, lane, ivar, lvar))
5086 {
5087 if (new_vard == new_var)
5088 {
5089 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
5090 SET_DECL_VALUE_EXPR (new_var, ivar);
5091 }
5092 else
5093 {
5094 SET_DECL_VALUE_EXPR (new_vard,
5095 build_fold_addr_expr (ivar));
5096 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
5097 }
5098 x = lang_hooks.decls.omp_clause_default_ctor
5099 (c, unshare_expr (ivar),
5100 build_outer_var_ref (var, ctx));
5101 if (x)
5102 gimplify_and_add (x, &llist[0]);
5103 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
5104 {
5105 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
5106 lower_omp (&tseq, ctx);
5107 gimple_seq_add_seq (&llist[0], tseq);
5108 }
5109 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
5110 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
5111 lower_omp (&tseq, ctx);
5112 gimple_seq_add_seq (&llist[1], tseq);
5113 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
5114 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
5115 if (new_vard == new_var)
5116 SET_DECL_VALUE_EXPR (new_var, lvar);
5117 else
5118 SET_DECL_VALUE_EXPR (new_vard,
5119 build_fold_addr_expr (lvar));
5120 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
5121 if (x)
5122 {
5123 tseq = NULL;
5124 dtor = x;
5125 gimplify_stmt (&dtor, &tseq);
5126 gimple_seq_add_seq (&llist[1], tseq);
5127 }
5128 break;
5129 }
5130 /* If this is a reference to a constant-size reduction var
5131 with a placeholder, we haven't emitted the initializer
5132 for it because it is undesirable if SIMD arrays are used.
5133 But if they aren't used, we need to emit the deferred
5134 initialization now. */
5135 else if (is_reference (var) && is_simd)
5136 handle_simd_reference (clause_loc, new_vard, ilist);
5137 x = lang_hooks.decls.omp_clause_default_ctor
5138 (c, unshare_expr (new_var),
5139 build_outer_var_ref (var, ctx));
5140 if (x)
5141 gimplify_and_add (x, ilist);
5142 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
5143 {
5144 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
5145 lower_omp (&tseq, ctx);
5146 gimple_seq_add_seq (ilist, tseq);
5147 }
5148 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
5149 if (is_simd)
5150 {
5151 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
5152 lower_omp (&tseq, ctx);
5153 gimple_seq_add_seq (dlist, tseq);
5154 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
5155 }
5156 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
5157 goto do_dtor;
5158 }
5159 else
5160 {
5161 x = omp_reduction_init (c, TREE_TYPE (new_var));
5162 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
5163 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
5164
5165 /* reduction(-:var) sums up the partial results, so it
5166 acts identically to reduction(+:var). */
5167 if (code == MINUS_EXPR)
5168 code = PLUS_EXPR;
5169
5170 tree new_vard = new_var;
5171 if (is_simd && is_reference (var))
5172 {
5173 gcc_assert (TREE_CODE (new_var) == MEM_REF);
5174 new_vard = TREE_OPERAND (new_var, 0);
5175 gcc_assert (DECL_P (new_vard));
5176 }
5177 if (is_simd
5178 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
5179 idx, lane, ivar, lvar))
5180 {
5181 tree ref = build_outer_var_ref (var, ctx);
5182
5183 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
5184
5185 x = build2 (code, TREE_TYPE (ref), ref, ivar);
5186 ref = build_outer_var_ref (var, ctx);
5187 gimplify_assign (ref, x, &llist[1]);
5188
5189 if (new_vard != new_var)
5190 {
5191 SET_DECL_VALUE_EXPR (new_vard,
5192 build_fold_addr_expr (lvar));
5193 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
5194 }
5195 }
5196 else
5197 {
5198 if (is_reference (var) && is_simd)
5199 handle_simd_reference (clause_loc, new_vard, ilist);
5200 gimplify_assign (new_var, x, ilist);
5201 if (is_simd)
5202 {
5203 tree ref = build_outer_var_ref (var, ctx);
5204
5205 x = build2 (code, TREE_TYPE (ref), ref, new_var);
5206 ref = build_outer_var_ref (var, ctx);
5207 gimplify_assign (ref, x, dlist);
5208 }
5209 }
5210 }
5211 break;
5212
5213 default:
5214 gcc_unreachable ();
5215 }
5216 }
5217 }
5218
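/* If any clause above used the SIMD-array privatization scheme,
materialize LANE via IFN_GOMP_SIMD_LANE, record the simduid on the
loop, and wrap the collected per-element init (llist[0]) and fini
(llist[1]) sequences in loops running IFN_GOMP_SIMD_VF times.  */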
5219 if (lane)
5220 {
5221 tree uid = create_tmp_var (ptr_type_node, "simduid");
5222 /* Don't warn about simduid being uninitialized; it always is, since
5223 we use it only for its DECL_UID, never for its value. */
5224 TREE_NO_WARNING (uid) = 1;
5225 gimple *g
5226 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
5227 gimple_call_set_lhs (g, lane);
5228 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
5229 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
5230 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
5231 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
5232 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
5233 gimple_omp_for_set_clauses (ctx->stmt, c);
5234 g = gimple_build_assign (lane, INTEGER_CST,
5235 build_int_cst (unsigned_type_node, 0));
5236 gimple_seq_add_stmt (ilist, g);
5237 for (int i = 0; i < 2; i++)
5238 if (llist[i])
5239 {
5240 tree vf = create_tmp_var (unsigned_type_node);
5241 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
5242 gimple_call_set_lhs (g, vf);
5243 gimple_seq *seq = i == 0 ? ilist : dlist;
5244 gimple_seq_add_stmt (seq, g);
5245 tree t = build_int_cst (unsigned_type_node, 0);
5246 g = gimple_build_assign (idx, INTEGER_CST, t);
5247 gimple_seq_add_stmt (seq, g);
5248 tree body = create_artificial_label (UNKNOWN_LOCATION);
5249 tree header = create_artificial_label (UNKNOWN_LOCATION);
5250 tree end = create_artificial_label (UNKNOWN_LOCATION);
5251 gimple_seq_add_stmt (seq, gimple_build_goto (header));
5252 gimple_seq_add_stmt (seq, gimple_build_label (body));
5253 gimple_seq_add_seq (seq, llist[i]);
5254 t = build_int_cst (unsigned_type_node, 1);
5255 g = gimple_build_assign (idx, PLUS_EXPR, idx, t);
5256 gimple_seq_add_stmt (seq, g);
5257 gimple_seq_add_stmt (seq, gimple_build_label (header));
5258 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
5259 gimple_seq_add_stmt (seq, g);
5260 gimple_seq_add_stmt (seq, gimple_build_label (end));
5261 }
5262 }
5263
5264 /* The copyin sequence is not to be executed by the main thread, since
5265 that would result in self-copies. Perhaps harmless for scalars,
5266 but certainly not for C++ operator=. */
5267 if (copyin_seq)
5268 {
5269 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
5270 0);
5271 x = build2 (NE_EXPR, boolean_type_node, x,
5272 build_int_cst (TREE_TYPE (x), 0));
5273 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
5274 gimplify_and_add (x, ilist);
5275 }
5276
5277 /* If any copyin variable is passed by reference, we must ensure the
5278 master thread doesn't modify it before it is copied over in all
5279 threads. Similarly for variables in both firstprivate and
5280 lastprivate clauses we need to ensure the lastprivate copying
5281 happens after firstprivate copying in all threads. And similarly
5282 for UDRs if the initializer expression refers to omp_orig. */
5283 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
5284 {
5285 /* Don't add any barrier for #pragma omp simd or
5286 #pragma omp distribute. */
5287 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
5288 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
5289 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
5290 }
5291
5292 /* If max_vf is non-zero, then we can use only a vectorization factor
5293 up to the max_vf we chose. So stick it into the safelen clause. */
5294 if (max_vf)
5295 {
5296 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
5297 OMP_CLAUSE_SAFELEN);
5298 if (c == NULL_TREE
5299 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
5300 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
5301 max_vf) == 1))
5302 {
5303 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
5304 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
5305 max_vf);
5306 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
5307 gimple_omp_for_set_clauses (ctx->stmt, c);
5308 }
5309 }
5310 }
5311
5312
5313 /* Generate code to implement the LASTPRIVATE clauses. This is used for
5314 both parallel and workshare constructs. PREDICATE may be NULL if it's
5315 always true. */
5316
5317 static void
5318 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
5319 omp_context *ctx)
5320 {
5321 tree x, c, label = NULL, orig_clauses = clauses;
5322 bool par_clauses = false;
5323 tree simduid = NULL, lastlane = NULL;
5324
5325 /* Early exit if there are no lastprivate or linear clauses. */
5326 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
5327 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
5328 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
5329 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
5330 break;
5331 if (clauses == NULL)
5332 {
5333 /* If this was a workshare clause, see if it had been combined
5334 with its parallel. In that case, look for the clauses on the
5335 parallel statement itself. */
5336 if (is_parallel_ctx (ctx))
5337 return;
5338
5339 ctx = ctx->outer;
5340 if (ctx == NULL || !is_parallel_ctx (ctx))
5341 return;
5342
5343 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
5344 OMP_CLAUSE_LASTPRIVATE);
5345 if (clauses == NULL)
5346 return;
5347 par_clauses = true;
5348 }
5349
5350 if (predicate)
5351 {
5352 gcond *stmt;
5353 tree label_true, arm1, arm2;
5354
5355 label = create_artificial_label (UNKNOWN_LOCATION);
5356 label_true = create_artificial_label (UNKNOWN_LOCATION);
5357 arm1 = TREE_OPERAND (predicate, 0);
5358 arm2 = TREE_OPERAND (predicate, 1);
5359 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
5360 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
5361 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
5362 label_true, label);
5363 gimple_seq_add_stmt (stmt_list, stmt);
5364 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
5365 }
5366
5367 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
5368 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
5369 {
5370 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
5371 if (simduid)
5372 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
5373 }
5374
5375 for (c = clauses; c ;)
5376 {
5377 tree var, new_var;
5378 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
5379
5380 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
5381 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
5382 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
5383 {
5384 var = OMP_CLAUSE_DECL (c);
5385 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
5386 && OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
5387 && is_taskloop_ctx (ctx))
5388 {
5389 gcc_checking_assert (ctx->outer && is_task_ctx (ctx->outer));
5390 new_var = lookup_decl (var, ctx->outer);
5391 }
5392 else
5393 new_var = lookup_decl (var, ctx);
5394
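/* If NEW_VAR was privatized into an "omp simd array", copy out the
element written by the last lane, obtained via
IFN_GOMP_SIMD_LAST_LANE.  */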
5395 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
5396 {
5397 tree val = DECL_VALUE_EXPR (new_var);
5398 if (TREE_CODE (val) == ARRAY_REF
5399 && VAR_P (TREE_OPERAND (val, 0))
5400 && lookup_attribute ("omp simd array",
5401 DECL_ATTRIBUTES (TREE_OPERAND (val,
5402 0))))
5403 {
5404 if (lastlane == NULL)
5405 {
5406 lastlane = create_tmp_var (unsigned_type_node);
5407 gcall *g
5408 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
5409 2, simduid,
5410 TREE_OPERAND (val, 1));
5411 gimple_call_set_lhs (g, lastlane);
5412 gimple_seq_add_stmt (stmt_list, g);
5413 }
5414 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
5415 TREE_OPERAND (val, 0), lastlane,
5416 NULL_TREE, NULL_TREE);
5417 }
5418 }
5419
5420 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
5421 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
5422 {
5423 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
5424 gimple_seq_add_seq (stmt_list,
5425 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
5426 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
5427 }
5428 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
5429 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
5430 {
5431 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
5432 gimple_seq_add_seq (stmt_list,
5433 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
5434 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
5435 }
5436
5437 x = NULL_TREE;
5438 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
5439 && OMP_CLAUSE_LASTPRIVATE_TASKLOOP_IV (c))
5440 {
5441 gcc_checking_assert (is_taskloop_ctx (ctx));
5442 tree ovar = maybe_lookup_decl_in_outer_ctx (var,
5443 ctx->outer->outer);
5444 if (is_global_var (ovar))
5445 x = ovar;
5446 }
5447 if (!x)
5448 x = build_outer_var_ref (var, ctx, true);
5449 if (is_reference (var))
5450 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
5451 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
5452 gimplify_and_add (x, stmt_list);
5453 }
5454 c = OMP_CLAUSE_CHAIN (c);
5455 if (c == NULL && !par_clauses)
5456 {
5457 /* If this was a workshare clause, see if it had been combined
5458 with its parallel. In that case, continue looking for the
5459 clauses also on the parallel statement itself. */
5460 if (is_parallel_ctx (ctx))
5461 break;
5462
5463 ctx = ctx->outer;
5464 if (ctx == NULL || !is_parallel_ctx (ctx))
5465 break;
5466
5467 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
5468 OMP_CLAUSE_LASTPRIVATE);
5469 par_clauses = true;
5470 }
5471 }
5472
5473 if (label)
5474 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
5475 }
5476
5477 /* Lower the OpenACC reductions of CLAUSES for compute axis LEVEL
5478 (which might be a placeholder). INNER is true if this is an inner
5479 axis of a multi-axis loop. FORK and JOIN are (optional) fork and
5480 join markers. Generate the before-loop forking sequence in
5481 FORK_SEQ and the after-loop joining sequence in JOIN_SEQ. The
5482 general form of these sequences is
5483
5484 GOACC_REDUCTION_SETUP
5485 GOACC_FORK
5486 GOACC_REDUCTION_INIT
5487 ...
5488 GOACC_REDUCTION_FINI
5489 GOACC_JOIN
5490 GOACC_REDUCTION_TEARDOWN. */
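
/* As an illustrative sketch only (not emitted verbatim; RES_REF and
   SUM are invented names): for

     #pragma acc parallel loop reduction (+:sum)

   the sequences built here look roughly like

     sum = GOACC_REDUCTION_SETUP (res_ref, sum, level, +, offset);
     GOACC_FORK (level);
     sum = GOACC_REDUCTION_INIT (res_ref, sum, level, +, offset);
     ... loop body accumulating into the private SUM ...
     sum = GOACC_REDUCTION_FINI (res_ref, sum, level, +, offset);
     GOACC_JOIN (level);
     sum = GOACC_REDUCTION_TEARDOWN (res_ref, sum, level, +, offset);

   and are specialized for the target during device lowering. */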
5491
5492 static void
5493 lower_oacc_reductions (location_t loc, tree clauses, tree level, bool inner,
5494 gcall *fork, gcall *join, gimple_seq *fork_seq,
5495 gimple_seq *join_seq, omp_context *ctx)
5496 {
5497 gimple_seq before_fork = NULL;
5498 gimple_seq after_fork = NULL;
5499 gimple_seq before_join = NULL;
5500 gimple_seq after_join = NULL;
5501 tree init_code = NULL_TREE, fini_code = NULL_TREE,
5502 setup_code = NULL_TREE, teardown_code = NULL_TREE;
5503 unsigned offset = 0;
5504
5505 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
5506 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
5507 {
5508 tree orig = OMP_CLAUSE_DECL (c);
5509 tree var = maybe_lookup_decl (orig, ctx);
5510 tree ref_to_res = NULL_TREE;
5511 tree incoming, outgoing;
5512
5513 enum tree_code rcode = OMP_CLAUSE_REDUCTION_CODE (c);
5514 if (rcode == MINUS_EXPR)
5515 rcode = PLUS_EXPR;
5516 else if (rcode == TRUTH_ANDIF_EXPR)
5517 rcode = BIT_AND_EXPR;
5518 else if (rcode == TRUTH_ORIF_EXPR)
5519 rcode = BIT_IOR_EXPR;
5520 tree op = build_int_cst (unsigned_type_node, rcode);
5521
5522 if (!var)
5523 var = orig;
5524 gcc_assert (!is_reference (var));
5525
5526 incoming = outgoing = var;
5527
5528 if (!inner)
5529 {
5530 /* See if an outer construct also reduces this variable. */
5531 omp_context *outer = ctx;
5532
5533 while (omp_context *probe = outer->outer)
5534 {
5535 enum gimple_code type = gimple_code (probe->stmt);
5536 tree cls;
5537
5538 switch (type)
5539 {
5540 case GIMPLE_OMP_FOR:
5541 cls = gimple_omp_for_clauses (probe->stmt);
5542 break;
5543
5544 case GIMPLE_OMP_TARGET:
5545 if (gimple_omp_target_kind (probe->stmt)
5546 != GF_OMP_TARGET_KIND_OACC_PARALLEL)
5547 goto do_lookup;
5548
5549 cls = gimple_omp_target_clauses (probe->stmt);
5550 break;
5551
5552 default:
5553 goto do_lookup;
5554 }
5555
5556 outer = probe;
5557 for (; cls; cls = OMP_CLAUSE_CHAIN (cls))
5558 if (OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_REDUCTION
5559 && orig == OMP_CLAUSE_DECL (cls))
5560 goto has_outer_reduction;
5561 }
5562
5563 do_lookup:
5564 /* This is the outermost construct with this reduction;
5565 see if there's a mapping for it. */
5566 if (gimple_code (outer->stmt) == GIMPLE_OMP_TARGET
5567 && maybe_lookup_field (orig, outer))
5568 {
5569 ref_to_res = build_receiver_ref (orig, false, outer);
5570 if (is_reference (orig))
5571 ref_to_res = build_simple_mem_ref (ref_to_res);
5572
5573 outgoing = var;
5574 incoming = omp_reduction_init_op (loc, rcode, TREE_TYPE (var));
5575 }
5576 else
5577 incoming = outgoing = orig;
5578
5579 has_outer_reduction:;
5580 }
5581
5582 if (!ref_to_res)
5583 ref_to_res = integer_zero_node;
5584
5585 /* Determine the position in the reduction buffer, which may be
5586 used by the target. */
5587 enum machine_mode mode = TYPE_MODE (TREE_TYPE (var));
5588 unsigned align = GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT;
5589 offset = (offset + align - 1) & ~(align - 1);
5590 tree off = build_int_cst (sizetype, offset);
5591 offset += GET_MODE_SIZE (mode);
5592
5593 if (!init_code)
5594 {
5595 init_code = build_int_cst (integer_type_node,
5596 IFN_GOACC_REDUCTION_INIT);
5597 fini_code = build_int_cst (integer_type_node,
5598 IFN_GOACC_REDUCTION_FINI);
5599 setup_code = build_int_cst (integer_type_node,
5600 IFN_GOACC_REDUCTION_SETUP);
5601 teardown_code = build_int_cst (integer_type_node,
5602 IFN_GOACC_REDUCTION_TEARDOWN);
5603 }
5604
5605 tree setup_call
5606 = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
5607 TREE_TYPE (var), 6, setup_code,
5608 unshare_expr (ref_to_res),
5609 incoming, level, op, off);
5610 tree init_call
5611 = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
5612 TREE_TYPE (var), 6, init_code,
5613 unshare_expr (ref_to_res),
5614 var, level, op, off);
5615 tree fini_call
5616 = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
5617 TREE_TYPE (var), 6, fini_code,
5618 unshare_expr (ref_to_res),
5619 var, level, op, off);
5620 tree teardown_call
5621 = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
5622 TREE_TYPE (var), 6, teardown_code,
5623 ref_to_res, var, level, op, off);
5624
5625 gimplify_assign (var, setup_call, &before_fork);
5626 gimplify_assign (var, init_call, &after_fork);
5627 gimplify_assign (var, fini_call, &before_join);
5628 gimplify_assign (outgoing, teardown_call, &after_join);
5629 }
5630
5631 /* Now stitch things together. */
5632 gimple_seq_add_seq (fork_seq, before_fork);
5633 if (fork)
5634 gimple_seq_add_stmt (fork_seq, fork);
5635 gimple_seq_add_seq (fork_seq, after_fork);
5636
5637 gimple_seq_add_seq (join_seq, before_join);
5638 if (join)
5639 gimple_seq_add_stmt (join_seq, join);
5640 gimple_seq_add_seq (join_seq, after_join);
5641 }
5642
5643 /* Generate code to implement the REDUCTION clauses. */
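
/* As a sketch (S is an invented name): with a single reduction (+:s)
   clause the merge below is emitted as an atomic update, roughly

     #pragma omp atomic
     s_outer = s_outer + s_private;

   whereas with multiple clauses, an array section or a user-defined
   reduction, the per-clause merges are instead emitted between
   GOMP_atomic_start () and GOMP_atomic_end () calls. */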
5644
5645 static void
5646 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
5647 {
5648 gimple_seq sub_seq = NULL;
5649 gimple *stmt;
5650 tree x, c;
5651 int count = 0;
5652
5653 /* OpenACC loop reductions are handled elsewhere. */
5654 if (is_gimple_omp_oacc (ctx->stmt))
5655 return;
5656
5657 /* SIMD reductions are handled in lower_rec_input_clauses. */
5658 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
5659 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
5660 return;
5661
5662 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
5663 update in that case; otherwise use a lock. */
5664 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
5665 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
5666 {
5667 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
5668 || TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
5669 {
5670 /* Never use OMP_ATOMIC for array reductions or UDRs. */
5671 count = -1;
5672 break;
5673 }
5674 count++;
5675 }
5676
5677 if (count == 0)
5678 return;
5679
5680 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
5681 {
5682 tree var, ref, new_var, orig_var;
5683 enum tree_code code;
5684 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
5685
5686 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
5687 continue;
5688
5689 orig_var = var = OMP_CLAUSE_DECL (c);
5690 if (TREE_CODE (var) == MEM_REF)
5691 {
5692 var = TREE_OPERAND (var, 0);
5693 if (TREE_CODE (var) == POINTER_PLUS_EXPR)
5694 var = TREE_OPERAND (var, 0);
5695 if (TREE_CODE (var) == INDIRECT_REF
5696 || TREE_CODE (var) == ADDR_EXPR)
5697 var = TREE_OPERAND (var, 0);
5698 orig_var = var;
5699 if (is_variable_sized (var))
5700 {
5701 gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
5702 var = DECL_VALUE_EXPR (var);
5703 gcc_assert (TREE_CODE (var) == INDIRECT_REF);
5704 var = TREE_OPERAND (var, 0);
5705 gcc_assert (DECL_P (var));
5706 }
5707 }
5708 new_var = lookup_decl (var, ctx);
5709 if (var == OMP_CLAUSE_DECL (c) && is_reference (var))
5710 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
5711 ref = build_outer_var_ref (var, ctx);
5712 code = OMP_CLAUSE_REDUCTION_CODE (c);
5713
5714 /* reduction(-:var) sums up the partial results, so it acts
5715 identically to reduction(+:var). */
5716 if (code == MINUS_EXPR)
5717 code = PLUS_EXPR;
5718
5719 if (count == 1)
5720 {
5721 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
5722
5723 addr = save_expr (addr);
5724 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
5725 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
5726 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
5727 gimplify_and_add (x, stmt_seqp);
5728 return;
5729 }
5730 else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
5731 {
5732 tree d = OMP_CLAUSE_DECL (c);
5733 tree type = TREE_TYPE (d);
5734 tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
5735 tree i = create_tmp_var (TREE_TYPE (v), NULL);
5736 tree ptype = build_pointer_type (TREE_TYPE (type));
5737 tree bias = TREE_OPERAND (d, 1);
5738 d = TREE_OPERAND (d, 0);
5739 if (TREE_CODE (d) == POINTER_PLUS_EXPR)
5740 {
5741 tree b = TREE_OPERAND (d, 1);
5742 b = maybe_lookup_decl (b, ctx);
5743 if (b == NULL)
5744 {
5745 b = TREE_OPERAND (d, 1);
5746 b = maybe_lookup_decl_in_outer_ctx (b, ctx);
5747 }
5748 if (integer_zerop (bias))
5749 bias = b;
5750 else
5751 {
5752 bias = fold_convert_loc (clause_loc, TREE_TYPE (b), bias);
5753 bias = fold_build2_loc (clause_loc, PLUS_EXPR,
5754 TREE_TYPE (b), b, bias);
5755 }
5756 d = TREE_OPERAND (d, 0);
5757 }
5758 /* For ref, build_outer_var_ref already performs this, so
5759 only new_var needs a dereference. */
5760 if (TREE_CODE (d) == INDIRECT_REF)
5761 {
5762 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
5763 gcc_assert (is_reference (var) && var == orig_var);
5764 }
5765 else if (TREE_CODE (d) == ADDR_EXPR)
5766 {
5767 if (orig_var == var)
5768 {
5769 new_var = build_fold_addr_expr (new_var);
5770 ref = build_fold_addr_expr (ref);
5771 }
5772 }
5773 else
5774 {
5775 gcc_assert (orig_var == var);
5776 if (is_reference (var))
5777 ref = build_fold_addr_expr (ref);
5778 }
5779 if (DECL_P (v))
5780 {
5781 tree t = maybe_lookup_decl (v, ctx);
5782 if (t)
5783 v = t;
5784 else
5785 v = maybe_lookup_decl_in_outer_ctx (v, ctx);
5786 gimplify_expr (&v, stmt_seqp, NULL, is_gimple_val, fb_rvalue);
5787 }
5788 if (!integer_zerop (bias))
5789 {
5790 bias = fold_convert_loc (clause_loc, sizetype, bias);
5791 new_var = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
5792 TREE_TYPE (new_var), new_var,
5793 unshare_expr (bias));
5794 ref = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
5795 TREE_TYPE (ref), ref, bias);
5796 }
5797 new_var = fold_convert_loc (clause_loc, ptype, new_var);
5798 ref = fold_convert_loc (clause_loc, ptype, ref);
5799 tree m = create_tmp_var (ptype, NULL);
5800 gimplify_assign (m, new_var, stmt_seqp);
5801 new_var = m;
5802 m = create_tmp_var (ptype, NULL);
5803 gimplify_assign (m, ref, stmt_seqp);
5804 ref = m;
5805 gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), stmt_seqp);
5806 tree body = create_artificial_label (UNKNOWN_LOCATION);
5807 tree end = create_artificial_label (UNKNOWN_LOCATION);
5808 gimple_seq_add_stmt (&sub_seq, gimple_build_label (body));
5809 tree priv = build_simple_mem_ref_loc (clause_loc, new_var);
5810 tree out = build_simple_mem_ref_loc (clause_loc, ref);
5811 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
5812 {
5813 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
5814 tree decl_placeholder
5815 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
5816 SET_DECL_VALUE_EXPR (placeholder, out);
5817 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
5818 SET_DECL_VALUE_EXPR (decl_placeholder, priv);
5819 DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
5820 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
5821 gimple_seq_add_seq (&sub_seq,
5822 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
5823 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
5824 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
5825 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = NULL;
5826 }
5827 else
5828 {
5829 x = build2 (code, TREE_TYPE (out), out, priv);
5830 out = unshare_expr (out);
5831 gimplify_assign (out, x, &sub_seq);
5832 }
5833 gimple *g = gimple_build_assign (new_var, POINTER_PLUS_EXPR, new_var,
5834 TYPE_SIZE_UNIT (TREE_TYPE (type)));
5835 gimple_seq_add_stmt (&sub_seq, g);
5836 g = gimple_build_assign (ref, POINTER_PLUS_EXPR, ref,
5837 TYPE_SIZE_UNIT (TREE_TYPE (type)));
5838 gimple_seq_add_stmt (&sub_seq, g);
5839 g = gimple_build_assign (i, PLUS_EXPR, i,
5840 build_int_cst (TREE_TYPE (i), 1));
5841 gimple_seq_add_stmt (&sub_seq, g);
5842 g = gimple_build_cond (LE_EXPR, i, v, body, end);
5843 gimple_seq_add_stmt (&sub_seq, g);
5844 gimple_seq_add_stmt (&sub_seq, gimple_build_label (end));
5845 }
5846 else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
5847 {
5848 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
5849
5850 if (is_reference (var)
5851 && !useless_type_conversion_p (TREE_TYPE (placeholder),
5852 TREE_TYPE (ref)))
5853 ref = build_fold_addr_expr_loc (clause_loc, ref);
5854 SET_DECL_VALUE_EXPR (placeholder, ref);
5855 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
5856 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
5857 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
5858 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
5859 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
5860 }
5861 else
5862 {
5863 x = build2 (code, TREE_TYPE (ref), ref, new_var);
5864 ref = build_outer_var_ref (var, ctx);
5865 gimplify_assign (ref, x, &sub_seq);
5866 }
5867 }
5868
5869 if (is_gimple_omp_oacc (ctx->stmt))
5870 return;
5871
5872 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
5873 0);
5874 gimple_seq_add_stmt (stmt_seqp, stmt);
5875
5876 gimple_seq_add_seq (stmt_seqp, sub_seq);
5877
5878 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
5879 0);
5880 gimple_seq_add_stmt (stmt_seqp, stmt);
5881 }
5882
5883
5884 /* Generate code to implement the COPYPRIVATE clauses. */
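
/* Sketch only (record and field names invented): for

     #pragma omp single copyprivate (x)

   the thread that executed the single region stores X (or its
   address, if passed by reference) into the copyout record, roughly
   .omp_copy_o.x = &x, and every other thread then copies it back
   with x = *.omp_copy_o.x. */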
5885
5886 static void
5887 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
5888 omp_context *ctx)
5889 {
5890 tree c;
5891
5892 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
5893 {
5894 tree var, new_var, ref, x;
5895 bool by_ref;
5896 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
5897
5898 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
5899 continue;
5900
5901 var = OMP_CLAUSE_DECL (c);
5902 by_ref = use_pointer_for_field (var, NULL);
5903
5904 ref = build_sender_ref (var, ctx);
5905 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
5906 if (by_ref)
5907 {
5908 x = build_fold_addr_expr_loc (clause_loc, new_var);
5909 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
5910 }
5911 gimplify_assign (ref, x, slist);
5912
5913 ref = build_receiver_ref (var, false, ctx);
5914 if (by_ref)
5915 {
5916 ref = fold_convert_loc (clause_loc,
5917 build_pointer_type (TREE_TYPE (new_var)),
5918 ref);
5919 ref = build_fold_indirect_ref_loc (clause_loc, ref);
5920 }
5921 if (is_reference (var))
5922 {
5923 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
5924 ref = build_simple_mem_ref_loc (clause_loc, ref);
5925 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
5926 }
5927 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
5928 gimplify_and_add (x, rlist);
5929 }
5930 }
5931
5932
5933 /* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE
5934 and REDUCTION clauses from the sender (aka parent) side. */
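
/* Sketch only (record name invented): for

     #pragma omp parallel firstprivate (a) lastprivate (b)

   the "in" direction emits .omp_data_o.a = a into ILIST before the
   runtime call, and the "out" direction emits b = .omp_data_o.b into
   OLIST after it. */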
5935
5936 static void
5937 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
5938 omp_context *ctx)
5939 {
5940 tree c, t;
5941 int ignored_looptemp = 0;
5942 bool is_taskloop = false;
5943
5944 /* For taskloop, ignore first two _looptemp_ clauses, those are initialized
5945 by GOMP_taskloop. */
5946 if (is_task_ctx (ctx) && gimple_omp_task_taskloop_p (ctx->stmt))
5947 {
5948 ignored_looptemp = 2;
5949 is_taskloop = true;
5950 }
5951
5952 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
5953 {
5954 tree val, ref, x, var;
5955 bool by_ref, do_in = false, do_out = false;
5956 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
5957
5958 switch (OMP_CLAUSE_CODE (c))
5959 {
5960 case OMP_CLAUSE_PRIVATE:
5961 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
5962 break;
5963 continue;
5964 case OMP_CLAUSE_FIRSTPRIVATE:
5965 case OMP_CLAUSE_COPYIN:
5966 case OMP_CLAUSE_LASTPRIVATE:
5967 case OMP_CLAUSE_REDUCTION:
5968 break;
5969 case OMP_CLAUSE_SHARED:
5970 if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
5971 break;
5972 continue;
5973 case OMP_CLAUSE__LOOPTEMP_:
5974 if (ignored_looptemp)
5975 {
5976 ignored_looptemp--;
5977 continue;
5978 }
5979 break;
5980 default:
5981 continue;
5982 }
5983
5984 val = OMP_CLAUSE_DECL (c);
5985 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
5986 && TREE_CODE (val) == MEM_REF)
5987 {
5988 val = TREE_OPERAND (val, 0);
5989 if (TREE_CODE (val) == POINTER_PLUS_EXPR)
5990 val = TREE_OPERAND (val, 0);
5991 if (TREE_CODE (val) == INDIRECT_REF
5992 || TREE_CODE (val) == ADDR_EXPR)
5993 val = TREE_OPERAND (val, 0);
5994 if (is_variable_sized (val))
5995 continue;
5996 }
5997
5998 /* For OMP_CLAUSE_SHARED_FIRSTPRIVATE, look beyond the
5999 outer taskloop region. */
6000 omp_context *ctx_for_o = ctx;
6001 if (is_taskloop
6002 && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
6003 && OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
6004 ctx_for_o = ctx->outer;
6005
6006 var = lookup_decl_in_outer_ctx (val, ctx_for_o);
6007
6008 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
6009 && is_global_var (var))
6010 continue;
6011
6012 t = omp_member_access_dummy_var (var);
6013 if (t)
6014 {
6015 var = DECL_VALUE_EXPR (var);
6016 tree o = maybe_lookup_decl_in_outer_ctx (t, ctx_for_o);
6017 if (o != t)
6018 var = unshare_and_remap (var, t, o);
6019 else
6020 var = unshare_expr (var);
6021 }
6022
6023 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
6024 {
6025 /* Handle taskloop firstprivate/lastprivate, where the
6026 lastprivate on GIMPLE_OMP_TASK is represented as
6027 OMP_CLAUSE_SHARED_FIRSTPRIVATE. */
6028 tree f = lookup_sfield ((splay_tree_key) &DECL_UID (val), ctx);
6029 x = omp_build_component_ref (ctx->sender_decl, f);
6030 if (use_pointer_for_field (val, ctx))
6031 var = build_fold_addr_expr (var);
6032 gimplify_assign (x, var, ilist);
6033 DECL_ABSTRACT_ORIGIN (f) = NULL;
6034 continue;
6035 }
6036
6037 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
6038 || val == OMP_CLAUSE_DECL (c))
6039 && is_variable_sized (val))
6040 continue;
6041 by_ref = use_pointer_for_field (val, NULL);
6042
6043 switch (OMP_CLAUSE_CODE (c))
6044 {
6045 case OMP_CLAUSE_PRIVATE:
6046 case OMP_CLAUSE_FIRSTPRIVATE:
6047 case OMP_CLAUSE_COPYIN:
6048 case OMP_CLAUSE__LOOPTEMP_:
6049 do_in = true;
6050 break;
6051
6052 case OMP_CLAUSE_LASTPRIVATE:
6053 if (by_ref || is_reference (val))
6054 {
6055 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
6056 continue;
6057 do_in = true;
6058 }
6059 else
6060 {
6061 do_out = true;
6062 if (lang_hooks.decls.omp_private_outer_ref (val))
6063 do_in = true;
6064 }
6065 break;
6066
6067 case OMP_CLAUSE_REDUCTION:
6068 do_in = true;
6069 if (val == OMP_CLAUSE_DECL (c))
6070 do_out = !(by_ref || is_reference (val));
6071 else
6072 by_ref = TREE_CODE (TREE_TYPE (val)) == ARRAY_TYPE;
6073 break;
6074
6075 default:
6076 gcc_unreachable ();
6077 }
6078
6079 if (do_in)
6080 {
6081 ref = build_sender_ref (val, ctx);
6082 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
6083 gimplify_assign (ref, x, ilist);
6084 if (is_task_ctx (ctx))
6085 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
6086 }
6087
6088 if (do_out)
6089 {
6090 ref = build_sender_ref (val, ctx);
6091 gimplify_assign (var, ref, olist);
6092 }
6093 }
6094 }
6095
6096 /* Generate code to implement SHARED from the sender (aka parent)
6097 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
6098 list things that got automatically shared. */
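
/* Sketch only (names invented): for an addressable shared variable V
   this emits .omp_data_o.v = &v into ILIST; for a non-addressable
   one it emits .omp_data_o.v = v into ILIST and, if V may be
   written, v = .omp_data_o.v into OLIST. */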
6099
6100 static void
6101 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
6102 {
6103 tree var, ovar, nvar, t, f, x, record_type;
6104
6105 if (ctx->record_type == NULL)
6106 return;
6107
6108 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
6109 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6110 {
6111 ovar = DECL_ABSTRACT_ORIGIN (f);
6112 if (!ovar || TREE_CODE (ovar) == FIELD_DECL)
6113 continue;
6114
6115 nvar = maybe_lookup_decl (ovar, ctx);
6116 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
6117 continue;
6118
6119 /* If CTX is a nested parallel directive, find the immediately
6120 enclosing parallel or workshare construct that contains a
6121 mapping for OVAR. */
6122 var = lookup_decl_in_outer_ctx (ovar, ctx);
6123
6124 t = omp_member_access_dummy_var (var);
6125 if (t)
6126 {
6127 var = DECL_VALUE_EXPR (var);
6128 tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
6129 if (o != t)
6130 var = unshare_and_remap (var, t, o);
6131 else
6132 var = unshare_expr (var);
6133 }
6134
6135 if (use_pointer_for_field (ovar, ctx))
6136 {
6137 x = build_sender_ref (ovar, ctx);
6138 var = build_fold_addr_expr (var);
6139 gimplify_assign (x, var, ilist);
6140 }
6141 else
6142 {
6143 x = build_sender_ref (ovar, ctx);
6144 gimplify_assign (x, var, ilist);
6145
6146 if (!TREE_READONLY (var)
6147 /* We don't need to receive a new reference to a result
6148 or parm decl. In fact we must not store to it, as we would
6149 invalidate any pending RSO and generate wrong gimple
6150 during inlining. */
6151 && !((TREE_CODE (var) == RESULT_DECL
6152 || TREE_CODE (var) == PARM_DECL)
6153 && DECL_BY_REFERENCE (var)))
6154 {
6155 x = build_sender_ref (ovar, ctx);
6156 gimplify_assign (var, x, olist);
6157 }
6158 }
6159 }
6160 }
6161
6162 /* Emit an OpenACC head marker call, encapsulating the partitioning and
6163 other information that must be processed by the target compiler.
6164 Return the maximum number of dimensions the associated loop might
6165 be partitioned over. */
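
/* Sketch only: for

     #pragma acc loop gang worker

   this emits roughly

     ddvar = IFN_UNIQUE (OACC_HEAD_MARK, ddvar, 2, tag);

   where 2 is the number of partitioned levels and TAG has the
   OLF_DIM_GANG and OLF_DIM_WORKER bits set. */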
6166
6167 static unsigned
6168 lower_oacc_head_mark (location_t loc, tree ddvar, tree clauses,
6169 gimple_seq *seq, omp_context *ctx)
6170 {
6171 unsigned levels = 0;
6172 unsigned tag = 0;
6173 tree gang_static = NULL_TREE;
6174 auto_vec<tree, 5> args;
6175
6176 args.quick_push (build_int_cst
6177 (integer_type_node, IFN_UNIQUE_OACC_HEAD_MARK));
6178 args.quick_push (ddvar);
6179 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
6180 {
6181 switch (OMP_CLAUSE_CODE (c))
6182 {
6183 case OMP_CLAUSE_GANG:
6184 tag |= OLF_DIM_GANG;
6185 gang_static = OMP_CLAUSE_GANG_STATIC_EXPR (c);
6186 /* static:* is represented by -1, and we can ignore it, as
6187 scheduling is always static. */
6188 if (gang_static && integer_minus_onep (gang_static))
6189 gang_static = NULL_TREE;
6190 levels++;
6191 break;
6192
6193 case OMP_CLAUSE_WORKER:
6194 tag |= OLF_DIM_WORKER;
6195 levels++;
6196 break;
6197
6198 case OMP_CLAUSE_VECTOR:
6199 tag |= OLF_DIM_VECTOR;
6200 levels++;
6201 break;
6202
6203 case OMP_CLAUSE_SEQ:
6204 tag |= OLF_SEQ;
6205 break;
6206
6207 case OMP_CLAUSE_AUTO:
6208 tag |= OLF_AUTO;
6209 break;
6210
6211 case OMP_CLAUSE_INDEPENDENT:
6212 tag |= OLF_INDEPENDENT;
6213 break;
6214
6215 default:
6216 continue;
6217 }
6218 }
6219
6220 if (gang_static)
6221 {
6222 if (DECL_P (gang_static))
6223 gang_static = build_outer_var_ref (gang_static, ctx);
6224 tag |= OLF_GANG_STATIC;
6225 }
6226
6227 /* In a parallel region, loops are implicitly INDEPENDENT. */
6228 omp_context *tgt = enclosing_target_ctx (ctx);
6229 if (!tgt || is_oacc_parallel (tgt))
6230 tag |= OLF_INDEPENDENT;
6231
6232 /* A loop lacking SEQ, GANG, WORKER and VECTOR is implicitly AUTO. */
6233 if (!(tag & (((GOMP_DIM_MASK (GOMP_DIM_MAX) - 1) << OLF_DIM_BASE)
6234 | OLF_SEQ)))
6235 tag |= OLF_AUTO;
6236
6237 /* Ensure at least one level. */
6238 if (!levels)
6239 levels++;
6240
6241 args.quick_push (build_int_cst (integer_type_node, levels));
6242 args.quick_push (build_int_cst (integer_type_node, tag));
6243 if (gang_static)
6244 args.quick_push (gang_static);
6245
6246 gcall *call = gimple_build_call_internal_vec (IFN_UNIQUE, args);
6247 gimple_set_location (call, loc);
6248 gimple_set_lhs (call, ddvar);
6249 gimple_seq_add_stmt (seq, call);
6250
6251 return levels;
6252 }
6253
6254 /* Emit an OpenACC loop head or tail marker to SEQ. TOFOLLOW, when
6255 non-NULL, is the partitioning level of the enclosed region. */
6256
6257 static void
6258 lower_oacc_loop_marker (location_t loc, tree ddvar, bool head,
6259 tree tofollow, gimple_seq *seq)
6260 {
6261 int marker_kind = (head ? IFN_UNIQUE_OACC_HEAD_MARK
6262 : IFN_UNIQUE_OACC_TAIL_MARK);
6263 tree marker = build_int_cst (integer_type_node, marker_kind);
6264 int nargs = 2 + (tofollow != NULL_TREE);
6265 gcall *call = gimple_build_call_internal (IFN_UNIQUE, nargs,
6266 marker, ddvar, tofollow);
6267 gimple_set_location (call, loc);
6268 gimple_set_lhs (call, ddvar);
6269 gimple_seq_add_stmt (seq, call);
6270 }
6271
6272 /* Generate the before and after OpenACC loop sequences. CLAUSES are
6273 the loop clauses, from which we extract reductions. Initialize
6274 HEAD and TAIL. */
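
/* Sketch only: for a loop partitioned over two levels the sequences
   nest, roughly

     HEAD: head-mark, fork (outer), fork (inner), head-mark
     TAIL: tail-mark, join (inner), join (outer), tail-mark

   with each level's reduction setup/init code placed around its fork
   and the fini/teardown code around its join. */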
6275
6276 static void
6277 lower_oacc_head_tail (location_t loc, tree clauses,
6278 gimple_seq *head, gimple_seq *tail, omp_context *ctx)
6279 {
6280 bool inner = false;
6281 tree ddvar = create_tmp_var (integer_type_node, ".data_dep");
6282 gimple_seq_add_stmt (head, gimple_build_assign (ddvar, integer_zero_node));
6283
6284 unsigned count = lower_oacc_head_mark (loc, ddvar, clauses, head, ctx);
6285 if (!count)
6286 lower_oacc_loop_marker (loc, ddvar, false, integer_zero_node, tail);
6287
6288 tree fork_kind = build_int_cst (unsigned_type_node, IFN_UNIQUE_OACC_FORK);
6289 tree join_kind = build_int_cst (unsigned_type_node, IFN_UNIQUE_OACC_JOIN);
6290
6291 for (unsigned done = 1; count; count--, done++)
6292 {
6293 gimple_seq fork_seq = NULL;
6294 gimple_seq join_seq = NULL;
6295
6296 tree place = build_int_cst (integer_type_node, -1);
6297 gcall *fork = gimple_build_call_internal (IFN_UNIQUE, 3,
6298 fork_kind, ddvar, place);
6299 gimple_set_location (fork, loc);
6300 gimple_set_lhs (fork, ddvar);
6301
6302 gcall *join = gimple_build_call_internal (IFN_UNIQUE, 3,
6303 join_kind, ddvar, place);
6304 gimple_set_location (join, loc);
6305 gimple_set_lhs (join, ddvar);
6306
6307 /* Mark the beginning of this level sequence. */
6308 if (inner)
6309 lower_oacc_loop_marker (loc, ddvar, true,
6310 build_int_cst (integer_type_node, count),
6311 &fork_seq);
6312 lower_oacc_loop_marker (loc, ddvar, false,
6313 build_int_cst (integer_type_node, done),
6314 &join_seq);
6315
6316 lower_oacc_reductions (loc, clauses, place, inner,
6317 fork, join, &fork_seq, &join_seq, ctx);
6318
6319 /* Append this level to head. */
6320 gimple_seq_add_seq (head, fork_seq);
6321 /* Prepend it to tail. */
6322 gimple_seq_add_seq (&join_seq, *tail);
6323 *tail = join_seq;
6324
6325 inner = true;
6326 }
6327
6328 /* Mark the end of the sequence. */
6329 lower_oacc_loop_marker (loc, ddvar, true, NULL_TREE, head);
6330 lower_oacc_loop_marker (loc, ddvar, false, NULL_TREE, tail);
6331 }
6332
6333 /* A convenience function to build an empty GIMPLE_COND with just the
6334 condition. */
6335
6336 static gcond *
6337 gimple_build_cond_empty (tree cond)
6338 {
6339 enum tree_code pred_code;
6340 tree lhs, rhs;
6341
6342 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
6343 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
6344 }
6345
6346 static void expand_omp_build_assign (gimple_stmt_iterator *, tree, tree,
6347 bool = false);
6348
6349 /* Build the function calls to GOMP_parallel etc to actually
6350 generate the parallel operation. REGION is the parallel region
6351 being expanded. BB is the block into which to insert the code.
6352 WS_ARGS will be set if this is a call to a combined parallel+workshare
6353 construct; it contains the list of additional arguments needed by
6354 the workshare construct. */
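
/* Sketch only, assuming the libgomp entry points of this GCC
   release: a combined

     #pragma omp parallel for schedule (dynamic, 4) num_threads (2)

   ends up as roughly

     GOMP_parallel_loop_dynamic (child_fn, &.omp_data_o, 2,
     start, end, incr, 4, flags);

   while a plain parallel calls
   GOMP_parallel (child_fn, &.omp_data_o, num_threads, flags). */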
6355
6356 static void
6357 expand_parallel_call (struct omp_region *region, basic_block bb,
6358 gomp_parallel *entry_stmt,
6359 vec<tree, va_gc> *ws_args)
6360 {
6361 tree t, t1, t2, val, cond, c, clauses, flags;
6362 gimple_stmt_iterator gsi;
6363 gimple *stmt;
6364 enum built_in_function start_ix;
6365 int start_ix2;
6366 location_t clause_loc;
6367 vec<tree, va_gc> *args;
6368
6369 clauses = gimple_omp_parallel_clauses (entry_stmt);
6370
6371 /* Determine what flavor of GOMP_parallel we will be
6372 emitting. */
6373 start_ix = BUILT_IN_GOMP_PARALLEL;
6374 if (is_combined_parallel (region))
6375 {
6376 switch (region->inner->type)
6377 {
6378 case GIMPLE_OMP_FOR:
6379 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
6380 switch (region->inner->sched_kind)
6381 {
6382 case OMP_CLAUSE_SCHEDULE_RUNTIME:
6383 start_ix2 = 3;
6384 break;
6385 case OMP_CLAUSE_SCHEDULE_DYNAMIC:
6386 case OMP_CLAUSE_SCHEDULE_GUIDED:
6387 if (region->inner->sched_modifiers
6388 & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)
6389 {
6390 start_ix2 = 3 + region->inner->sched_kind;
6391 break;
6392 }
6393 /* FALLTHRU */
6394 default:
6395 start_ix2 = region->inner->sched_kind;
6396 break;
6397 }
6398 start_ix2 += (int) BUILT_IN_GOMP_PARALLEL_LOOP_STATIC;
6399 start_ix = (enum built_in_function) start_ix2;
6400 break;
6401 case GIMPLE_OMP_SECTIONS:
6402 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
6403 break;
6404 default:
6405 gcc_unreachable ();
6406 }
6407 }
6408
6409 /* By default, the value of NUM_THREADS is zero (selected at run time)
6410 and there is no conditional. */
6411 cond = NULL_TREE;
6412 val = build_int_cst (unsigned_type_node, 0);
6413 flags = build_int_cst (unsigned_type_node, 0);
6414
6415 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
6416 if (c)
6417 cond = OMP_CLAUSE_IF_EXPR (c);
6418
6419 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
6420 if (c)
6421 {
6422 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
6423 clause_loc = OMP_CLAUSE_LOCATION (c);
6424 }
6425 else
6426 clause_loc = gimple_location (entry_stmt);
6427
6428 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
6429 if (c)
6430 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
6431
6432 /* Ensure 'val' is of the correct type. */
6433 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
6434
6435 /* If we found the clause 'if (cond)', build either
6436 (cond != 0) or (cond ? val : 1u). */
6437 if (cond)
6438 {
6439 cond = gimple_boolify (cond);
6440
6441 if (integer_zerop (val))
6442 val = fold_build2_loc (clause_loc,
6443 EQ_EXPR, unsigned_type_node, cond,
6444 build_int_cst (TREE_TYPE (cond), 0));
6445 else
6446 {
6447 basic_block cond_bb, then_bb, else_bb;
6448 edge e, e_then, e_else;
6449 tree tmp_then, tmp_else, tmp_join, tmp_var;
6450
6451 tmp_var = create_tmp_var (TREE_TYPE (val));
6452 if (gimple_in_ssa_p (cfun))
6453 {
6454 tmp_then = make_ssa_name (tmp_var);
6455 tmp_else = make_ssa_name (tmp_var);
6456 tmp_join = make_ssa_name (tmp_var);
6457 }
6458 else
6459 {
6460 tmp_then = tmp_var;
6461 tmp_else = tmp_var;
6462 tmp_join = tmp_var;
6463 }
6464
6465 e = split_block_after_labels (bb);
6466 cond_bb = e->src;
6467 bb = e->dest;
6468 remove_edge (e);
6469
6470 then_bb = create_empty_bb (cond_bb);
6471 else_bb = create_empty_bb (then_bb);
6472 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
6473 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
6474
6475 stmt = gimple_build_cond_empty (cond);
6476 gsi = gsi_start_bb (cond_bb);
6477 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6478
6479 gsi = gsi_start_bb (then_bb);
6480 expand_omp_build_assign (&gsi, tmp_then, val, true);
6481
6482 gsi = gsi_start_bb (else_bb);
6483 expand_omp_build_assign (&gsi, tmp_else,
6484 build_int_cst (unsigned_type_node, 1),
6485 true);
6486
6487 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
6488 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
6489 add_bb_to_loop (then_bb, cond_bb->loop_father);
6490 add_bb_to_loop (else_bb, cond_bb->loop_father);
6491 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
6492 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
6493
6494 if (gimple_in_ssa_p (cfun))
6495 {
6496 gphi *phi = create_phi_node (tmp_join, bb);
6497 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
6498 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
6499 }
6500
6501 val = tmp_join;
6502 }
6503
6504 gsi = gsi_start_bb (bb);
6505 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
6506 false, GSI_CONTINUE_LINKING);
6507 }
6508
6509 gsi = gsi_last_bb (bb);
6510 t = gimple_omp_parallel_data_arg (entry_stmt);
6511 if (t == NULL)
6512 t1 = null_pointer_node;
6513 else
6514 t1 = build_fold_addr_expr (t);
6515 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
6516
6517 vec_alloc (args, 4 + vec_safe_length (ws_args));
6518 args->quick_push (t2);
6519 args->quick_push (t1);
6520 args->quick_push (val);
6521 if (ws_args)
6522 args->splice (*ws_args);
6523 args->quick_push (flags);
6524
6525 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
6526 builtin_decl_explicit (start_ix), args);
6527
6528 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6529 false, GSI_CONTINUE_LINKING);
6530 }
6531
6532 /* Insert a call to the function named by FUNC_NAME (taken from
6533 WS_ARGS), with the information from ENTRY_STMT, into basic block BB. */
6534
6535 static void
6536 expand_cilk_for_call (basic_block bb, gomp_parallel *entry_stmt,
6537 vec <tree, va_gc> *ws_args)
6538 {
6539 tree t, t1, t2;
6540 gimple_stmt_iterator gsi;
6541 vec <tree, va_gc> *args;
6542
6543 gcc_assert (vec_safe_length (ws_args) == 2);
6544 tree func_name = (*ws_args)[0];
6545 tree grain = (*ws_args)[1];
6546
6547 tree clauses = gimple_omp_parallel_clauses (entry_stmt);
6548 tree count = find_omp_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
6549 gcc_assert (count != NULL_TREE);
6550 count = OMP_CLAUSE_OPERAND (count, 0);
6551
6552 gsi = gsi_last_bb (bb);
6553 t = gimple_omp_parallel_data_arg (entry_stmt);
6554 if (t == NULL)
6555 t1 = null_pointer_node;
6556 else
6557 t1 = build_fold_addr_expr (t);
6558 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
6559
6560 vec_alloc (args, 4);
6561 args->quick_push (t2);
6562 args->quick_push (t1);
6563 args->quick_push (count);
6564 args->quick_push (grain);
6565 t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);
6566
6567 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
6568 GSI_CONTINUE_LINKING);
6569 }
6570
6571 /* Build the function call to GOMP_task to actually
6572 generate the task operation. BB is the block into which to insert the code. */
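
/* Sketch only, assuming the libgomp interface of this GCC release: a
   plain

     #pragma omp task

   ends up as roughly

     GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
     cond, flags, depend, priority);

   whereas a taskloop calls GOMP_taskloop{,_ull}, passing NUM_TASKS
   and the loop start, end and step instead of COND and DEPEND. */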
6573
6574 static void
6575 expand_task_call (struct omp_region *region, basic_block bb,
6576 gomp_task *entry_stmt)
6577 {
6578 tree t1, t2, t3;
6579 gimple_stmt_iterator gsi;
6580 location_t loc = gimple_location (entry_stmt);
6581
6582 tree clauses = gimple_omp_task_clauses (entry_stmt);
6583
6584 tree ifc = find_omp_clause (clauses, OMP_CLAUSE_IF);
6585 tree untied = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
6586 tree mergeable = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
6587 tree depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
6588 tree finalc = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
6589 tree priority = find_omp_clause (clauses, OMP_CLAUSE_PRIORITY);
6590
6591 unsigned int iflags
6592 = (untied ? GOMP_TASK_FLAG_UNTIED : 0)
6593 | (mergeable ? GOMP_TASK_FLAG_MERGEABLE : 0)
6594 | (depend ? GOMP_TASK_FLAG_DEPEND : 0);
6595
6596 bool taskloop_p = gimple_omp_task_taskloop_p (entry_stmt);
6597 tree startvar = NULL_TREE, endvar = NULL_TREE, step = NULL_TREE;
6598 tree num_tasks = NULL_TREE;
6599 bool ull = false;
6600 if (taskloop_p)
6601 {
6602 gimple *g = last_stmt (region->outer->entry);
6603 gcc_assert (gimple_code (g) == GIMPLE_OMP_FOR
6604 && gimple_omp_for_kind (g) == GF_OMP_FOR_KIND_TASKLOOP);
6605 struct omp_for_data fd;
6606 extract_omp_for_data (as_a <gomp_for *> (g), &fd, NULL);
6607 startvar = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6608 endvar = find_omp_clause (OMP_CLAUSE_CHAIN (startvar),
6609 OMP_CLAUSE__LOOPTEMP_);
6610 startvar = OMP_CLAUSE_DECL (startvar);
6611 endvar = OMP_CLAUSE_DECL (endvar);
6612 step = fold_convert_loc (loc, fd.iter_type, fd.loop.step);
6613 if (fd.loop.cond_code == LT_EXPR)
6614 iflags |= GOMP_TASK_FLAG_UP;
6615 tree tclauses = gimple_omp_for_clauses (g);
6616 num_tasks = find_omp_clause (tclauses, OMP_CLAUSE_NUM_TASKS);
6617 if (num_tasks)
6618 num_tasks = OMP_CLAUSE_NUM_TASKS_EXPR (num_tasks);
6619 else
6620 {
6621 num_tasks = find_omp_clause (tclauses, OMP_CLAUSE_GRAINSIZE);
6622 if (num_tasks)
6623 {
6624 iflags |= GOMP_TASK_FLAG_GRAINSIZE;
6625 num_tasks = OMP_CLAUSE_GRAINSIZE_EXPR (num_tasks);
6626 }
6627 else
6628 num_tasks = integer_zero_node;
6629 }
6630 num_tasks = fold_convert_loc (loc, long_integer_type_node, num_tasks);
6631 if (ifc == NULL_TREE)
6632 iflags |= GOMP_TASK_FLAG_IF;
6633 if (find_omp_clause (tclauses, OMP_CLAUSE_NOGROUP))
6634 iflags |= GOMP_TASK_FLAG_NOGROUP;
6635 ull = fd.iter_type == long_long_unsigned_type_node;
6636 }
6637 else if (priority)
6638 iflags |= GOMP_TASK_FLAG_PRIORITY;
6639
6640 tree flags = build_int_cst (unsigned_type_node, iflags);
6641
6642 tree cond = boolean_true_node;
6643 if (ifc)
6644 {
6645 if (taskloop_p)
6646 {
6647 tree t = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
6648 t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
6649 build_int_cst (unsigned_type_node,
6650 GOMP_TASK_FLAG_IF),
6651 build_int_cst (unsigned_type_node, 0));
6652 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node,
6653 flags, t);
6654 }
6655 else
6656 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
6657 }
6658
6659 if (finalc)
6660 {
6661 tree t = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (finalc));
6662 t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
6663 build_int_cst (unsigned_type_node,
6664 GOMP_TASK_FLAG_FINAL),
6665 build_int_cst (unsigned_type_node, 0));
6666 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t);
6667 }
6668 if (depend)
6669 depend = OMP_CLAUSE_DECL (depend);
6670 else
6671 depend = build_int_cst (ptr_type_node, 0);
6672 if (priority)
6673 priority = fold_convert (integer_type_node,
6674 OMP_CLAUSE_PRIORITY_EXPR (priority));
6675 else
6676 priority = integer_zero_node;
6677
6678 gsi = gsi_last_bb (bb);
6679 tree t = gimple_omp_task_data_arg (entry_stmt);
6680 if (t == NULL)
6681 t2 = null_pointer_node;
6682 else
6683 t2 = build_fold_addr_expr_loc (loc, t);
6684 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
6685 t = gimple_omp_task_copy_fn (entry_stmt);
6686 if (t == NULL)
6687 t3 = null_pointer_node;
6688 else
6689 t3 = build_fold_addr_expr_loc (loc, t);
6690
6691 if (taskloop_p)
6692 t = build_call_expr (ull
6693 ? builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP_ULL)
6694 : builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP),
6695 11, t1, t2, t3,
6696 gimple_omp_task_arg_size (entry_stmt),
6697 gimple_omp_task_arg_align (entry_stmt), flags,
6698 num_tasks, priority, startvar, endvar, step);
6699 else
6700 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
6701 9, t1, t2, t3,
6702 gimple_omp_task_arg_size (entry_stmt),
6703 gimple_omp_task_arg_align (entry_stmt), cond, flags,
6704 depend, priority);
6705
6706 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6707 false, GSI_CONTINUE_LINKING);
6708 }
6709
6710
6711 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
6712 catch handler and return it. This prevents programs from violating the
6713 structured block semantics with throws. */
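
/* Conceptually (sketch only), the result is

     try { BODY } catch_all { must_not_throw_handler (); }

   where the handler is the language's cleanup action if it has one,
   and __builtin_trap otherwise. */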
6714
6715 static gimple_seq
6716 maybe_catch_exception (gimple_seq body)
6717 {
6718 gimple *g;
6719 tree decl;
6720
6721 if (!flag_exceptions)
6722 return body;
6723
6724 if (lang_hooks.eh_protect_cleanup_actions != NULL)
6725 decl = lang_hooks.eh_protect_cleanup_actions ();
6726 else
6727 decl = builtin_decl_explicit (BUILT_IN_TRAP);
6728
6729 g = gimple_build_eh_must_not_throw (decl);
6730 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
6731 GIMPLE_TRY_CATCH);
6732
6733 return gimple_seq_alloc_with_stmt (g);
6734 }
6735
6736 /* Chain all the DECLs in V by their DECL_CHAIN fields. */
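
/* E.g. (sketch), for V = [d1, d2, d3] this returns the chain
   d1 -> d2 -> d3, built by walking V in reverse. */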
6737
6738 static tree
6739 vec2chain (vec<tree, va_gc> *v)
6740 {
6741 tree chain = NULL_TREE, t;
6742 unsigned ix;
6743
6744 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
6745 {
6746 DECL_CHAIN (t) = chain;
6747 chain = t;
6748 }
6749
6750 return chain;
6751 }
6752
6753
6754 /* Remove barriers in REGION->EXIT's block. Note that this is only
6755 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
6756 is an implicit barrier, the barrier that any workshare inside the
6757 GIMPLE_OMP_PARALLEL left at the end of the region can now be
6758 removed. */
6759
6760 static void
6761 remove_exit_barrier (struct omp_region *region)
6762 {
6763 gimple_stmt_iterator gsi;
6764 basic_block exit_bb;
6765 edge_iterator ei;
6766 edge e;
6767 gimple *stmt;
6768 int any_addressable_vars = -1;
6769
6770 exit_bb = region->exit;
6771
6772 /* If the parallel region doesn't return, we don't have REGION->EXIT
6773 block at all. */
6774 if (! exit_bb)
6775 return;
6776
6777 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
6778 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
6779 statements that can appear in between are extremely limited -- no
6780 memory operations at all. Here, we allow nothing at all, so the
6781 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
6782 gsi = gsi_last_bb (exit_bb);
6783 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
6784 gsi_prev (&gsi);
6785 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
6786 return;
6787
6788 FOR_EACH_EDGE (e, ei, exit_bb->preds)
6789 {
6790 gsi = gsi_last_bb (e->src);
6791 if (gsi_end_p (gsi))
6792 continue;
6793 stmt = gsi_stmt (gsi);
6794 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
6795 && !gimple_omp_return_nowait_p (stmt))
6796 {
6797 /* OpenMP 3.0 tasks unfortunately prevent this optimization
6798 in many cases. If there could be tasks queued, the barrier
6799 might be needed to let the tasks run before some local
6800 variable of the parallel that the task uses as shared
6801 runs out of scope. The task can be spawned either
6802 from within the current function (this would be easy to check)
6803 or from some function it calls and gets passed an address
6804 of such a variable. */
6805 if (any_addressable_vars < 0)
6806 {
6807 gomp_parallel *parallel_stmt
6808 = as_a <gomp_parallel *> (last_stmt (region->entry));
6809 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
6810 tree local_decls, block, decl;
6811 unsigned ix;
6812
6813 any_addressable_vars = 0;
6814 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
6815 if (TREE_ADDRESSABLE (decl))
6816 {
6817 any_addressable_vars = 1;
6818 break;
6819 }
6820 for (block = gimple_block (stmt);
6821 !any_addressable_vars
6822 && block
6823 && TREE_CODE (block) == BLOCK;
6824 block = BLOCK_SUPERCONTEXT (block))
6825 {
6826 for (local_decls = BLOCK_VARS (block);
6827 local_decls;
6828 local_decls = DECL_CHAIN (local_decls))
6829 if (TREE_ADDRESSABLE (local_decls))
6830 {
6831 any_addressable_vars = 1;
6832 break;
6833 }
6834 if (block == gimple_block (parallel_stmt))
6835 break;
6836 }
6837 }
6838 if (!any_addressable_vars)
6839 gimple_omp_return_set_nowait (stmt);
6840 }
6841 }
6842 }
6843
6844 static void
6845 remove_exit_barriers (struct omp_region *region)
6846 {
6847 if (region->type == GIMPLE_OMP_PARALLEL)
6848 remove_exit_barrier (region);
6849
6850 if (region->inner)
6851 {
6852 region = region->inner;
6853 remove_exit_barriers (region);
6854 while (region->next)
6855 {
6856 region = region->next;
6857 remove_exit_barriers (region);
6858 }
6859 }
6860 }
6861
6862 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
6863 calls. These can't be declared as const functions, but
6864 within one parallel body they are constant, so they can be
6865 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
6866 which are declared const. Similarly for a task body, except
6867 that in an untied task omp_get_thread_num () can change at any task
6868 scheduling point. */
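
/* Sketch: within a parallel body a call such as

     i = omp_get_thread_num ();

   is redirected to the const __builtin_omp_get_thread_num (), so
   later passes can CSE repeated uses. */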
6869
6870 static void
6871 optimize_omp_library_calls (gimple *entry_stmt)
6872 {
6873 basic_block bb;
6874 gimple_stmt_iterator gsi;
6875 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6876 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
6877 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
6878 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
6879 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
6880 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
6881 OMP_CLAUSE_UNTIED) != NULL);
6882
6883 FOR_EACH_BB_FN (bb, cfun)
6884 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6885 {
6886 gimple *call = gsi_stmt (gsi);
6887 tree decl;
6888
6889 if (is_gimple_call (call)
6890 && (decl = gimple_call_fndecl (call))
6891 && DECL_EXTERNAL (decl)
6892 && TREE_PUBLIC (decl)
6893 && DECL_INITIAL (decl) == NULL)
6894 {
6895 tree built_in;
6896
6897 if (DECL_NAME (decl) == thr_num_id)
6898 {
6899 /* In #pragma omp task untied omp_get_thread_num () can change
6900 during the execution of the task region. */
6901 if (untied_task)
6902 continue;
6903 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6904 }
6905 else if (DECL_NAME (decl) == num_thr_id)
6906 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
6907 else
6908 continue;
6909
6910 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
6911 || gimple_call_num_args (call) != 0)
6912 continue;
6913
6914 if (flag_exceptions && !TREE_NOTHROW (decl))
6915 continue;
6916
6917 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
6918 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
6919 TREE_TYPE (TREE_TYPE (built_in))))
6920 continue;
6921
6922 gimple_call_set_fndecl (call, built_in);
6923 }
6924 }
6925 }
6926
6927 /* Callback for walk_tree, used by expand_omp_build_assign. Return
6928 non-NULL if *TP needs to be regimplified. */
6929
6930 static tree
6931 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
6932 {
6933 tree t = *tp;
6934
6935 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6936 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
6937 return t;
6938
6939 if (TREE_CODE (t) == ADDR_EXPR)
6940 recompute_tree_invariant_for_addr_expr (t);
6941
6942 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6943 return NULL_TREE;
6944 }
6945
6946 /* Insert a TO = FROM assignment before or after *GSI_P, depending on AFTER. */
6947
6948 static void
6949 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from,
6950 bool after)
6951 {
6952 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
6953 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
6954 !after, after ? GSI_CONTINUE_LINKING
6955 : GSI_SAME_STMT);
6956 gimple *stmt = gimple_build_assign (to, from);
6957 if (after)
6958 gsi_insert_after (gsi_p, stmt, GSI_CONTINUE_LINKING);
6959 else
6960 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
6961 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
6962 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
6963 {
6964 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
6965 gimple_regimplify_operands (stmt, &gsi);
6966 }
6967 }
6968
6969 /* Expand the OpenMP parallel or task directive starting at REGION. */
6970
6971 static void
6972 expand_omp_taskreg (struct omp_region *region)
6973 {
6974 basic_block entry_bb, exit_bb, new_bb;
6975 struct function *child_cfun;
6976 tree child_fn, block, t;
6977 gimple_stmt_iterator gsi;
6978 gimple *entry_stmt, *stmt;
6979 edge e;
6980 vec<tree, va_gc> *ws_args;
6981
6982 entry_stmt = last_stmt (region->entry);
6983 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
6984 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6985
6986 entry_bb = region->entry;
6987 if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK)
6988 exit_bb = region->cont;
6989 else
6990 exit_bb = region->exit;
6991
6992 bool is_cilk_for
6993 = (flag_cilkplus
6994 && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
6995 && find_omp_clause (gimple_omp_parallel_clauses (entry_stmt),
6996 OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);
6997
6998 if (is_cilk_for)
6999 /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
7000 and the inner statement contains the name of the built-in function
7001 and the grain. */
7002 ws_args = region->inner->ws_args;
7003 else if (is_combined_parallel (region))
7004 ws_args = region->ws_args;
7005 else
7006 ws_args = NULL;
7007
7008 if (child_cfun->cfg)
7009 {
7010 /* Due to inlining, it may happen that we have already outlined
7011 the region, in which case all we need to do is make the
7012 sub-graph unreachable and emit the parallel call. */
7013 edge entry_succ_e, exit_succ_e;
7014
7015 entry_succ_e = single_succ_edge (entry_bb);
7016
7017 gsi = gsi_last_bb (entry_bb);
7018 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
7019 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
7020 gsi_remove (&gsi, true);
7021
7022 new_bb = entry_bb;
7023 if (exit_bb)
7024 {
7025 exit_succ_e = single_succ_edge (exit_bb);
7026 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
7027 }
7028 remove_edge_and_dominated_blocks (entry_succ_e);
7029 }
7030 else
7031 {
7032 unsigned srcidx, dstidx, num;
7033
7034 /* If the parallel region needs data sent from the parent
7035 function, then the very first statement (except possible
7036 tree profile counter updates) of the parallel body
7037 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
7038 &.OMP_DATA_O is passed as an argument to the child function,
7039 we need to replace it with the argument as seen by the child
7040 function.
7041
7042 In most cases, this will end up being the identity assignment
7043 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
7044 a function call that has been inlined, the original PARM_DECL
7045 .OMP_DATA_I may have been converted into a different local
7046 variable, in which case we need to keep the assignment. */
7047 if (gimple_omp_taskreg_data_arg (entry_stmt))
7048 {
7049 basic_block entry_succ_bb
7050 = single_succ_p (entry_bb) ? single_succ (entry_bb)
7051 : FALLTHRU_EDGE (entry_bb)->dest;
7052 tree arg;
7053 gimple *parcopy_stmt = NULL;
7054
7055 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
7056 {
7057 gimple *stmt;
7058
7059 gcc_assert (!gsi_end_p (gsi));
7060 stmt = gsi_stmt (gsi);
7061 if (gimple_code (stmt) != GIMPLE_ASSIGN)
7062 continue;
7063
7064 if (gimple_num_ops (stmt) == 2)
7065 {
7066 tree arg = gimple_assign_rhs1 (stmt);
7067
7068 /* We're ignoring the subcode because we're
7069 effectively doing a STRIP_NOPS. */
7070
7071 if (TREE_CODE (arg) == ADDR_EXPR
7072 && TREE_OPERAND (arg, 0)
7073 == gimple_omp_taskreg_data_arg (entry_stmt))
7074 {
7075 parcopy_stmt = stmt;
7076 break;
7077 }
7078 }
7079 }
7080
7081 gcc_assert (parcopy_stmt != NULL);
7082 arg = DECL_ARGUMENTS (child_fn);
7083
7084 if (!gimple_in_ssa_p (cfun))
7085 {
7086 if (gimple_assign_lhs (parcopy_stmt) == arg)
7087 gsi_remove (&gsi, true);
7088 else
7089 {
7090 /* ?? Is setting the subcode really necessary ?? */
7091 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
7092 gimple_assign_set_rhs1 (parcopy_stmt, arg);
7093 }
7094 }
7095 else
7096 {
7097 tree lhs = gimple_assign_lhs (parcopy_stmt);
7098 gcc_assert (SSA_NAME_VAR (lhs) == arg);
7099 /* We'd like to set the rhs to the default def in the child_fn,
7100 but it's too early to create ssa names in the child_fn.
7101 Instead, we set the rhs to the parm. In
7102 move_sese_region_to_fn, we introduce a default def for the
7103 parm, map the parm to it's default def, and once we encounter
7104 this stmt, replace the parm with the default def. */
7105 gimple_assign_set_rhs1 (parcopy_stmt, arg);
7106 update_stmt (parcopy_stmt);
7107 }
7108 }
7109
7110 /* Declare local variables needed in CHILD_CFUN. */
7111 block = DECL_INITIAL (child_fn);
7112 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
7113 /* The gimplifier could record temporaries in the parallel/task block
7114 rather than in the containing function's local_decls chain,
7115 which would mean cgraph would miss finalizing them. Do it now. */
7116 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
7117 if (TREE_CODE (t) == VAR_DECL
7118 && TREE_STATIC (t)
7119 && !DECL_EXTERNAL (t))
7120 varpool_node::finalize_decl (t);
7121 DECL_SAVED_TREE (child_fn) = NULL;
7122 /* We'll create a CFG for child_fn, so no gimple body is needed. */
7123 gimple_set_body (child_fn, NULL);
7124 TREE_USED (block) = 1;
7125
7126 /* Reset DECL_CONTEXT on function arguments. */
7127 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
7128 DECL_CONTEXT (t) = child_fn;
7129
7130 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
7131 so that it can be moved to the child function. */
7132 gsi = gsi_last_bb (entry_bb);
7133 stmt = gsi_stmt (gsi);
7134 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
7135 || gimple_code (stmt) == GIMPLE_OMP_TASK));
7136 e = split_block (entry_bb, stmt);
7137 gsi_remove (&gsi, true);
7138 entry_bb = e->dest;
7139 edge e2 = NULL;
7140 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
7141 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7142 else
7143 {
7144 e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL);
7145 gcc_assert (e2->dest == region->exit);
7146 remove_edge (BRANCH_EDGE (entry_bb));
7147 set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src);
7148 gsi = gsi_last_bb (region->exit);
7149 gcc_assert (!gsi_end_p (gsi)
7150 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
7151 gsi_remove (&gsi, true);
7152 }
7153
7154 /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR. */
7155 if (exit_bb)
7156 {
7157 gsi = gsi_last_bb (exit_bb);
7158 gcc_assert (!gsi_end_p (gsi)
7159 && (gimple_code (gsi_stmt (gsi))
7160 == (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN)));
7161 stmt = gimple_build_return (NULL);
7162 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
7163 gsi_remove (&gsi, true);
7164 }
7165
7166 /* Move the parallel region into CHILD_CFUN. */
7167
7168 if (gimple_in_ssa_p (cfun))
7169 {
7170 init_tree_ssa (child_cfun);
7171 init_ssa_operands (child_cfun);
7172 child_cfun->gimple_df->in_ssa_p = true;
7173 block = NULL_TREE;
7174 }
7175 else
7176 block = gimple_block (entry_stmt);
7177
7178 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
7179 if (exit_bb)
7180 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
7181 if (e2)
7182 {
7183 basic_block dest_bb = e2->dest;
7184 if (!exit_bb)
7185 make_edge (new_bb, dest_bb, EDGE_FALLTHRU);
7186 remove_edge (e2);
7187 set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb);
7188 }
7189 /* When the OMP expansion process cannot guarantee an up-to-date
7190 loop tree, arrange for the child function to fix up loops. */
7191 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
7192 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
7193
7194 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
7195 num = vec_safe_length (child_cfun->local_decls);
7196 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
7197 {
7198 t = (*child_cfun->local_decls)[srcidx];
7199 if (DECL_CONTEXT (t) == cfun->decl)
7200 continue;
7201 if (srcidx != dstidx)
7202 (*child_cfun->local_decls)[dstidx] = t;
7203 dstidx++;
7204 }
7205 if (dstidx != num)
7206 vec_safe_truncate (child_cfun->local_decls, dstidx);
7207
7208 /* Inform the callgraph about the new function. */
7209 child_cfun->curr_properties = cfun->curr_properties;
7210 child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
7211 child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
7212 cgraph_node *node = cgraph_node::get_create (child_fn);
7213 node->parallelized_function = 1;
7214 cgraph_node::add_new_function (child_fn, true);
7215
7216 bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
7217 && !DECL_ASSEMBLER_NAME_SET_P (child_fn);
7218
7219 /* Fix the callgraph edges for child_cfun. Those for cfun will be
7220 fixed in a following pass. */
7221 push_cfun (child_cfun);
7222 if (need_asm)
7223 assign_assembler_name_if_neeeded (child_fn);
7224
7225 if (optimize)
7226 optimize_omp_library_calls (entry_stmt);
7227 cgraph_edge::rebuild_edges ();
7228
7229 /* Some EH regions might become dead, see PR34608. If
7230 pass_cleanup_cfg isn't the first pass to happen with the
7231 new child, these dead EH edges might cause problems.
7232 Clean them up now. */
7233 if (flag_exceptions)
7234 {
7235 basic_block bb;
7236 bool changed = false;
7237
7238 FOR_EACH_BB_FN (bb, cfun)
7239 changed |= gimple_purge_dead_eh_edges (bb);
7240 if (changed)
7241 cleanup_tree_cfg ();
7242 }
7243 if (gimple_in_ssa_p (cfun))
7244 update_ssa (TODO_update_ssa);
7245 if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
7246 verify_loop_structure ();
7247 pop_cfun ();
7248
7249 if (dump_file && !gimple_in_ssa_p (cfun))
7250 {
7251 omp_any_child_fn_dumped = true;
7252 dump_function_header (dump_file, child_fn, dump_flags);
7253 dump_function_to_file (child_fn, dump_file, dump_flags);
7254 }
7255 }
7256
7257 /* Emit a library call to launch the children threads. */
7258 if (is_cilk_for)
7259 expand_cilk_for_call (new_bb,
7260 as_a <gomp_parallel *> (entry_stmt), ws_args);
7261 else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
7262 expand_parallel_call (region, new_bb,
7263 as_a <gomp_parallel *> (entry_stmt), ws_args);
7264 else
7265 expand_task_call (region, new_bb, as_a <gomp_task *> (entry_stmt));
7266 if (gimple_in_ssa_p (cfun))
7267 update_ssa (TODO_update_ssa_only_virtuals);
7268 }
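
/* Editorial illustration (not part of the original source): at run time,
   the expand_parallel_call above results in a launch roughly equivalent
   to the hand-written code below.  GOMP_parallel is the real libgomp
   entry point; the struct and function names are invented for this
   sketch.  Guarded by #if 0 so it is never compiled as part of this
   file.  */
#if 0
struct omp_data_s_example { int counter; };  /* stand-in for .omp_data_s */

static void
child_fn_example (void *data)
{
  /* Every spawned thread runs this body, as for a bare
     #pragma omp parallel; DATA gives access to the variables
     described by the data-sharing clauses.  */
  struct omp_data_s_example *d = (struct omp_data_s_example *) data;
  __atomic_add_fetch (&d->counter, 1, __ATOMIC_RELAXED);
}

extern void GOMP_parallel (void (*) (void *), void *, unsigned, unsigned);

void
launch_example (void)
{
  struct omp_data_s_example d = { 0 };
  /* num_threads == 0 lets the runtime choose; flags == 0 means no
     proc_bind clause.  */
  GOMP_parallel (child_fn_example, &d, 0, 0);
}
#endif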
7269
7270 /* Information about members of an OpenACC collapsed loop nest. */
7271
7272 struct oacc_collapse
7273 {
7274 tree base; /* Base value. */
7275 tree iters; /* Number of steps. */
7276 tree step; /* Step size. */
7277 };
7278
7279 /* Helper for expand_oacc_for. Determine collapsed loop information.
7280 Fill in COUNTS array. Emit any initialization code before GSI.
7281 Return the calculated outer loop bound of BOUND_TYPE. */
7282
7283 static tree
7284 expand_oacc_collapse_init (const struct omp_for_data *fd,
7285 gimple_stmt_iterator *gsi,
7286 oacc_collapse *counts, tree bound_type)
7287 {
7288 tree total = build_int_cst (bound_type, 1);
7289 int ix;
7290
7291 gcc_assert (integer_onep (fd->loop.step));
7292 gcc_assert (integer_zerop (fd->loop.n1));
7293
7294 for (ix = 0; ix != fd->collapse; ix++)
7295 {
7296 const omp_for_data_loop *loop = &fd->loops[ix];
7297
7298 tree iter_type = TREE_TYPE (loop->v);
7299 tree diff_type = iter_type;
7300 tree plus_type = iter_type;
7301
7302 gcc_assert (loop->cond_code == fd->loop.cond_code);
7303
7304 if (POINTER_TYPE_P (iter_type))
7305 plus_type = sizetype;
7306 if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
7307 diff_type = signed_type_for (diff_type);
7308
7309 tree b = loop->n1;
7310 tree e = loop->n2;
7311 tree s = loop->step;
7312 bool up = loop->cond_code == LT_EXPR;
7313 tree dir = build_int_cst (diff_type, up ? +1 : -1);
7314 bool negating;
7315 tree expr;
7316
7317 b = force_gimple_operand_gsi (gsi, b, true, NULL_TREE,
7318 true, GSI_SAME_STMT);
7319 e = force_gimple_operand_gsi (gsi, e, true, NULL_TREE,
7320 true, GSI_SAME_STMT);
7321
7322 /* Convert the step, avoiding possible unsigned->signed overflow. */
7323 negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
7324 if (negating)
7325 s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
7326 s = fold_convert (diff_type, s);
7327 if (negating)
7328 s = fold_build1 (NEGATE_EXPR, diff_type, s);
7329 s = force_gimple_operand_gsi (gsi, s, true, NULL_TREE,
7330 true, GSI_SAME_STMT);
7331
7332 /* Determine the range, avoiding possible unsigned->signed overflow. */
7333 negating = !up && TYPE_UNSIGNED (iter_type);
7334 expr = fold_build2 (MINUS_EXPR, plus_type,
7335 fold_convert (plus_type, negating ? b : e),
7336 fold_convert (plus_type, negating ? e : b));
7337 expr = fold_convert (diff_type, expr);
7338 if (negating)
7339 expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
7340 tree range = force_gimple_operand_gsi
7341 (gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT);
7342
7343 /* Determine number of iterations. */
7344 expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
7345 expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
7346 expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);
7347
7348 tree iters = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
7349 true, GSI_SAME_STMT);
7350
7351 counts[ix].base = b;
7352 counts[ix].iters = iters;
7353 counts[ix].step = s;
7354
7355 total = fold_build2 (MULT_EXPR, bound_type, total,
7356 fold_convert (bound_type, iters));
7357 }
7358
7359 return total;
7360 }
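
/* Editorial illustration (not part of the original source): the
   iteration-count computation above, restated in plain C for a single
   loop with signed bounds.  DIR is +1 for "<" loops and -1 for ">"
   loops, matching the dir tree built above; the unsigned negation
   dance is omitted because the operands here are signed.  #if 0 keeps
   it out of this translation unit.  */
#if 0
static long
oacc_iters_example (long b, long e, long s, int dir)
{
  long range = e - b;            /* the MINUS_EXPR above */
  return (range - dir + s) / s;  /* (range - dir + step) / step */
}
/* oacc_iters_example (0, 10, 3, 1)   == 4   i = 0, 3, 6, 9
   oacc_iters_example (10, 0, -3, -1) == 4   i = 10, 7, 4, 1  */
#endif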
7361
7362 /* Emit initializers for collapsed loop members. IVAR is the outer
7363 loop iteration variable, from which collapsed loop iteration values
7364 are calculated. COUNTS array has been initialized by
7365 expand_oacc_collapse_init. */
7366
7367 static void
7368 expand_oacc_collapse_vars (const struct omp_for_data *fd,
7369 gimple_stmt_iterator *gsi,
7370 const oacc_collapse *counts, tree ivar)
7371 {
7372 tree ivar_type = TREE_TYPE (ivar);
7373
7374 /* The most rapidly changing iteration variable is the innermost
7375 one. */
7376 for (int ix = fd->collapse; ix--;)
7377 {
7378 const omp_for_data_loop *loop = &fd->loops[ix];
7379 const oacc_collapse *collapse = &counts[ix];
7380 tree iter_type = TREE_TYPE (loop->v);
7381 tree diff_type = TREE_TYPE (collapse->step);
7382 tree plus_type = iter_type;
7383 enum tree_code plus_code = PLUS_EXPR;
7384 tree expr;
7385
7386 if (POINTER_TYPE_P (iter_type))
7387 {
7388 plus_code = POINTER_PLUS_EXPR;
7389 plus_type = sizetype;
7390 }
7391
7392 expr = fold_build2 (TRUNC_MOD_EXPR, ivar_type, ivar,
7393 fold_convert (ivar_type, collapse->iters));
7394 expr = fold_build2 (MULT_EXPR, diff_type, fold_convert (diff_type, expr),
7395 collapse->step);
7396 expr = fold_build2 (plus_code, iter_type, collapse->base,
7397 fold_convert (plus_type, expr));
7398 expr = force_gimple_operand_gsi (gsi, expr, false, NULL_TREE,
7399 true, GSI_SAME_STMT);
7400 gassign *ass = gimple_build_assign (loop->v, expr);
7401 gsi_insert_before (gsi, ass, GSI_SAME_STMT);
7402
7403 if (ix)
7404 {
7405 expr = fold_build2 (TRUNC_DIV_EXPR, ivar_type, ivar,
7406 fold_convert (ivar_type, collapse->iters));
7407 ivar = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
7408 true, GSI_SAME_STMT);
7409 }
7410 }
7411 }
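
/* Editorial worked example (not part of the original source): for a
   collapse(2) nest with counts[0].iters == 3 and counts[1].iters == 4,
   the loop above decodes the flattened index IVAR in [0, 12)
   innermost-first:

     v2 = base2 + (ivar % 4) * step2;  ivar = ivar / 4;
     v1 = base1 + ivar * step1;

   so ivar == 7 yields v2 = base2 + 3 * step2 and v1 = base1 + step1.  */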
7412
7413
7414 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
7415 of the combined collapse > 1 loop constructs, generate code like:
7416 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
7417 if (cond3 is <)
7418 adj = STEP3 - 1;
7419 else
7420 adj = STEP3 + 1;
7421 count3 = (adj + N32 - N31) / STEP3;
7422 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
7423 if (cond2 is <)
7424 adj = STEP2 - 1;
7425 else
7426 adj = STEP2 + 1;
7427 count2 = (adj + N22 - N21) / STEP2;
7428 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
7429 if (cond1 is <)
7430 adj = STEP1 - 1;
7431 else
7432 adj = STEP1 + 1;
7433 count1 = (adj + N12 - N11) / STEP1;
7434 count = count1 * count2 * count3;
7435 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
7436 count = 0;
7437 and set ZERO_ITER_BB to that bb. If this isn't the outermost
7438 of the combined loop constructs, just initialize COUNTS array
7439 from the _looptemp_ clauses. */
7440
7441 /* NOTE: It *could* be better to moosh all of the BBs together,
7442 creating one larger BB with all the computation and the unexpected
7443 jump at the end. I.e.
7444
7445 bool zero3, zero2, zero1, zero;
7446
7447 zero3 = N32 c3 N31;
7448 count3 = (N32 - N31) /[cl] STEP3;
7449 zero2 = N22 c2 N21;
7450 count2 = (N22 - N21) /[cl] STEP2;
7451 zero1 = N12 c1 N11;
7452 count1 = (N12 - N11) /[cl] STEP1;
7453 zero = zero3 || zero2 || zero1;
7454 count = count1 * count2 * count3;
7455 if (__builtin_expect(zero, false)) goto zero_iter_bb;
7456
7457 After all, we expect zero to be false, and thus we expect to have to
7458 evaluate all of the comparison expressions, so short-circuiting
7459 oughtn't be a win. Since the condition isn't protecting a
7460 denominator, we're not concerned about divide-by-zero, so we can
7461 fully evaluate count even if a numerator turned out to be wrong.
7462
7463 It seems like putting this all together would create much better
7464 scheduling opportunities, and less pressure on the chip's branch
7465 predictor. */
7466
7467 static void
7468 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
7469 basic_block &entry_bb, tree *counts,
7470 basic_block &zero_iter1_bb, int &first_zero_iter1,
7471 basic_block &zero_iter2_bb, int &first_zero_iter2,
7472 basic_block &l2_dom_bb)
7473 {
7474 tree t, type = TREE_TYPE (fd->loop.v);
7475 edge e, ne;
7476 int i;
7477
7478 /* Collapsed loops need work for expansion into SSA form. */
7479 gcc_assert (!gimple_in_ssa_p (cfun));
7480
7481 if (gimple_omp_for_combined_into_p (fd->for_stmt)
7482 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
7483 {
7484 gcc_assert (fd->ordered == 0);
7485 /* The first two _looptemp_ clauses are for istart/iend; counts[0]
7486 isn't supposed to be handled, as the inner loop doesn't
7487 use it. */
7488 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7489 OMP_CLAUSE__LOOPTEMP_);
7490 gcc_assert (innerc);
7491 for (i = 0; i < fd->collapse; i++)
7492 {
7493 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7494 OMP_CLAUSE__LOOPTEMP_);
7495 gcc_assert (innerc);
7496 if (i)
7497 counts[i] = OMP_CLAUSE_DECL (innerc);
7498 else
7499 counts[0] = NULL_TREE;
7500 }
7501 return;
7502 }
7503
7504 for (i = fd->collapse; i < fd->ordered; i++)
7505 {
7506 tree itype = TREE_TYPE (fd->loops[i].v);
7507 counts[i] = NULL_TREE;
7508 t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
7509 fold_convert (itype, fd->loops[i].n1),
7510 fold_convert (itype, fd->loops[i].n2));
7511 if (t && integer_zerop (t))
7512 {
7513 for (i = fd->collapse; i < fd->ordered; i++)
7514 counts[i] = build_int_cst (type, 0);
7515 break;
7516 }
7517 }
7518 for (i = 0; i < (fd->ordered ? fd->ordered : fd->collapse); i++)
7519 {
7520 tree itype = TREE_TYPE (fd->loops[i].v);
7521
7522 if (i >= fd->collapse && counts[i])
7523 continue;
7524 if ((SSA_VAR_P (fd->loop.n2) || i >= fd->collapse)
7525 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
7526 fold_convert (itype, fd->loops[i].n1),
7527 fold_convert (itype, fd->loops[i].n2)))
7528 == NULL_TREE || !integer_onep (t)))
7529 {
7530 gcond *cond_stmt;
7531 tree n1, n2;
7532 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
7533 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
7534 true, GSI_SAME_STMT);
7535 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
7536 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
7537 true, GSI_SAME_STMT);
7538 cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
7539 NULL_TREE, NULL_TREE);
7540 gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
7541 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
7542 expand_omp_regimplify_p, NULL, NULL)
7543 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
7544 expand_omp_regimplify_p, NULL, NULL))
7545 {
7546 *gsi = gsi_for_stmt (cond_stmt);
7547 gimple_regimplify_operands (cond_stmt, gsi);
7548 }
7549 e = split_block (entry_bb, cond_stmt);
7550 basic_block &zero_iter_bb
7551 = i < fd->collapse ? zero_iter1_bb : zero_iter2_bb;
7552 int &first_zero_iter
7553 = i < fd->collapse ? first_zero_iter1 : first_zero_iter2;
7554 if (zero_iter_bb == NULL)
7555 {
7556 gassign *assign_stmt;
7557 first_zero_iter = i;
7558 zero_iter_bb = create_empty_bb (entry_bb);
7559 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
7560 *gsi = gsi_after_labels (zero_iter_bb);
7561 if (i < fd->collapse)
7562 assign_stmt = gimple_build_assign (fd->loop.n2,
7563 build_zero_cst (type));
7564 else
7565 {
7566 counts[i] = create_tmp_reg (type, ".count");
7567 assign_stmt
7568 = gimple_build_assign (counts[i], build_zero_cst (type));
7569 }
7570 gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
7571 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
7572 entry_bb);
7573 }
7574 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
7575 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
7576 e->flags = EDGE_TRUE_VALUE;
7577 e->probability = REG_BR_PROB_BASE - ne->probability;
7578 if (l2_dom_bb == NULL)
7579 l2_dom_bb = entry_bb;
7580 entry_bb = e->dest;
7581 *gsi = gsi_last_bb (entry_bb);
7582 }
7583
7584 if (POINTER_TYPE_P (itype))
7585 itype = signed_type_for (itype);
7586 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
7587 ? -1 : 1));
7588 t = fold_build2 (PLUS_EXPR, itype,
7589 fold_convert (itype, fd->loops[i].step), t);
7590 t = fold_build2 (PLUS_EXPR, itype, t,
7591 fold_convert (itype, fd->loops[i].n2));
7592 t = fold_build2 (MINUS_EXPR, itype, t,
7593 fold_convert (itype, fd->loops[i].n1));
7594 /* ?? We could probably use CEIL_DIV_EXPR instead of
7595 TRUNC_DIV_EXPR and adjust by hand. Unless we can't
7596 generate the same code in the end because generically we
7597 don't know that the values involved must be negative for
7598 GT?? */
7599 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
7600 t = fold_build2 (TRUNC_DIV_EXPR, itype,
7601 fold_build1 (NEGATE_EXPR, itype, t),
7602 fold_build1 (NEGATE_EXPR, itype,
7603 fold_convert (itype,
7604 fd->loops[i].step)));
7605 else
7606 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
7607 fold_convert (itype, fd->loops[i].step));
7608 t = fold_convert (type, t);
7609 if (TREE_CODE (t) == INTEGER_CST)
7610 counts[i] = t;
7611 else
7612 {
7613 if (i < fd->collapse || i != first_zero_iter2)
7614 counts[i] = create_tmp_reg (type, ".count");
7615 expand_omp_build_assign (gsi, counts[i], t);
7616 }
7617 if (SSA_VAR_P (fd->loop.n2) && i < fd->collapse)
7618 {
7619 if (i == 0)
7620 t = counts[0];
7621 else
7622 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
7623 expand_omp_build_assign (gsi, fd->loop.n2, t);
7624 }
7625 }
7626 }
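
/* Editorial worked example (not part of the original source): for

     #pragma omp for collapse(2)
     for (i = 0; i < 10; i += 3)     // cond1 is <, STEP1 == 3
       for (j = 20; j > 5; j -= 4)   // cond2 is >, STEP2 == -4

   the code built above computes
     adj1 = 3 - 1 = 2,    count1 = (2 + 10 - 0) / 3   == 4
     adj2 = -4 + 1 = -3,  count2 = (-3 + 5 - 20) / -4 == 4
     count = count1 * count2 = 16
   matching i in {0,3,6,9} and j in {20,16,12,8}.  */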
7627
7628
7629 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
7630 T = V;
7631 V3 = N31 + (T % count3) * STEP3;
7632 T = T / count3;
7633 V2 = N21 + (T % count2) * STEP2;
7634 T = T / count2;
7635 V1 = N11 + T * STEP1;
7636 if this loop doesn't have an inner loop construct combined with it.
7637 If it does have an inner loop construct combined with it and the
7638 iteration count isn't known constant, store values from counts array
7639 into its _looptemp_ temporaries instead. */
7640
7641 static void
7642 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
7643 tree *counts, gimple *inner_stmt, tree startvar)
7644 {
7645 int i;
7646 if (gimple_omp_for_combined_p (fd->for_stmt))
7647 {
7648 /* If fd->loop.n2 is constant, then no propagation of the counts
7649 is needed, they are constant. */
7650 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
7651 return;
7652
7653 tree clauses = gimple_code (inner_stmt) != GIMPLE_OMP_FOR
7654 ? gimple_omp_taskreg_clauses (inner_stmt)
7655 : gimple_omp_for_clauses (inner_stmt);
7656 /* The first two _looptemp_ clauses are for istart/iend; counts[0]
7657 isn't supposed to be handled, as the inner loop doesn't
7658 use it. */
7659 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
7660 gcc_assert (innerc);
7661 for (i = 0; i < fd->collapse; i++)
7662 {
7663 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7664 OMP_CLAUSE__LOOPTEMP_);
7665 gcc_assert (innerc);
7666 if (i)
7667 {
7668 tree tem = OMP_CLAUSE_DECL (innerc);
7669 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
7670 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
7671 false, GSI_CONTINUE_LINKING);
7672 gassign *stmt = gimple_build_assign (tem, t);
7673 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
7674 }
7675 }
7676 return;
7677 }
7678
7679 tree type = TREE_TYPE (fd->loop.v);
7680 tree tem = create_tmp_reg (type, ".tem");
7681 gassign *stmt = gimple_build_assign (tem, startvar);
7682 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
7683
7684 for (i = fd->collapse - 1; i >= 0; i--)
7685 {
7686 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
7687 itype = vtype;
7688 if (POINTER_TYPE_P (vtype))
7689 itype = signed_type_for (vtype);
7690 if (i != 0)
7691 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
7692 else
7693 t = tem;
7694 t = fold_convert (itype, t);
7695 t = fold_build2 (MULT_EXPR, itype, t,
7696 fold_convert (itype, fd->loops[i].step));
7697 if (POINTER_TYPE_P (vtype))
7698 t = fold_build_pointer_plus (fd->loops[i].n1, t);
7699 else
7700 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
7701 t = force_gimple_operand_gsi (gsi, t,
7702 DECL_P (fd->loops[i].v)
7703 && TREE_ADDRESSABLE (fd->loops[i].v),
7704 NULL_TREE, false,
7705 GSI_CONTINUE_LINKING);
7706 stmt = gimple_build_assign (fd->loops[i].v, t);
7707 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
7708 if (i != 0)
7709 {
7710 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
7711 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
7712 false, GSI_CONTINUE_LINKING);
7713 stmt = gimple_build_assign (tem, t);
7714 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
7715 }
7716 }
7717 }
7718
7719
7720 /* Helper function for expand_omp_for_*. Generate code like:
7721 L10:
7722 V3 += STEP3;
7723 if (V3 cond3 N32) goto BODY_BB; else goto L11;
7724 L11:
7725 V3 = N31;
7726 V2 += STEP2;
7727 if (V2 cond2 N22) goto BODY_BB; else goto L12;
7728 L12:
7729 V2 = N21;
7730 V1 += STEP1;
7731 goto BODY_BB; */
7732
7733 static basic_block
7734 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
7735 basic_block body_bb)
7736 {
7737 basic_block last_bb, bb, collapse_bb = NULL;
7738 int i;
7739 gimple_stmt_iterator gsi;
7740 edge e;
7741 tree t;
7742 gimple *stmt;
7743
7744 last_bb = cont_bb;
7745 for (i = fd->collapse - 1; i >= 0; i--)
7746 {
7747 tree vtype = TREE_TYPE (fd->loops[i].v);
7748
7749 bb = create_empty_bb (last_bb);
7750 add_bb_to_loop (bb, last_bb->loop_father);
7751 gsi = gsi_start_bb (bb);
7752
7753 if (i < fd->collapse - 1)
7754 {
7755 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
7756 e->probability = REG_BR_PROB_BASE / 8;
7757
7758 t = fd->loops[i + 1].n1;
7759 t = force_gimple_operand_gsi (&gsi, t,
7760 DECL_P (fd->loops[i + 1].v)
7761 && TREE_ADDRESSABLE (fd->loops[i
7762 + 1].v),
7763 NULL_TREE, false,
7764 GSI_CONTINUE_LINKING);
7765 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
7766 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7767 }
7768 else
7769 collapse_bb = bb;
7770
7771 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
7772
7773 if (POINTER_TYPE_P (vtype))
7774 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
7775 else
7776 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
7777 t = force_gimple_operand_gsi (&gsi, t,
7778 DECL_P (fd->loops[i].v)
7779 && TREE_ADDRESSABLE (fd->loops[i].v),
7780 NULL_TREE, false, GSI_CONTINUE_LINKING);
7781 stmt = gimple_build_assign (fd->loops[i].v, t);
7782 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7783
7784 if (i > 0)
7785 {
7786 t = fd->loops[i].n2;
7787 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7788 false, GSI_CONTINUE_LINKING);
7789 tree v = fd->loops[i].v;
7790 if (DECL_P (v) && TREE_ADDRESSABLE (v))
7791 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
7792 false, GSI_CONTINUE_LINKING);
7793 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
7794 stmt = gimple_build_cond_empty (t);
7795 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7796 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
7797 e->probability = REG_BR_PROB_BASE * 7 / 8;
7798 }
7799 else
7800 make_edge (bb, body_bb, EDGE_FALLTHRU);
7801 last_bb = bb;
7802 }
7803
7804 return collapse_bb;
7805 }
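
/* Editorial illustration (not part of the original source): the update
   blocks built above act as the odometer step of a collapse(2) nest;
   in plain C the control flow is roughly the sketch below.  The
   outermost IV gets no bounds check in the generated code because the
   driver loop terminates via the flattened iteration count; the v1
   test below exists only so this standalone sketch terminates.  #if 0
   keeps it out of this translation unit.  */
#if 0
static void
odometer_example (int n11, int n12, int s1,
                  int n21, int n22, int s2, void (*body) (int, int))
{
  int v1 = n11, v2 = n21;
  while (v1 < n12)        /* normally: flattened count check */
    {
      body (v1, v2);      /* BODY_BB */
      v2 += s2;           /* L10: innermost IV first */
      if (v2 < n22)
        continue;         /* goto BODY_BB */
      v2 = n21;           /* L11: wrap, carry outwards */
      v1 += s1;
    }
}
#endif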
7806
7807
7808 /* Expand #pragma omp ordered depend(source). */
7809
7810 static void
7811 expand_omp_ordered_source (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
7812 tree *counts, location_t loc)
7813 {
7814 enum built_in_function source_ix
7815 = fd->iter_type == long_integer_type_node
7816 ? BUILT_IN_GOMP_DOACROSS_POST : BUILT_IN_GOMP_DOACROSS_ULL_POST;
7817 gimple *g
7818 = gimple_build_call (builtin_decl_explicit (source_ix), 1,
7819 build_fold_addr_expr (counts[fd->ordered]));
7820 gimple_set_location (g, loc);
7821 gsi_insert_before (gsi, g, GSI_SAME_STMT);
7822 }
7823
7824 /* Expand a single depend from #pragma omp ordered depend(sink:...). */
7825
7826 static void
7827 expand_omp_ordered_sink (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
7828 tree *counts, tree c, location_t loc)
7829 {
7830 auto_vec<tree, 10> args;
7831 enum built_in_function sink_ix
7832 = fd->iter_type == long_integer_type_node
7833 ? BUILT_IN_GOMP_DOACROSS_WAIT : BUILT_IN_GOMP_DOACROSS_ULL_WAIT;
7834 tree t, off, coff = NULL_TREE, deps = OMP_CLAUSE_DECL (c), cond = NULL_TREE;
7835 int i;
7836 gimple_stmt_iterator gsi2 = *gsi;
7837 bool warned_step = false;
7838
7839 for (i = 0; i < fd->ordered; i++)
7840 {
7841 off = TREE_PURPOSE (deps);
7842 if (!integer_zerop (off))
7843 {
7844 gcc_assert (fd->loops[i].cond_code == LT_EXPR
7845 || fd->loops[i].cond_code == GT_EXPR);
7846 bool forward = fd->loops[i].cond_code == LT_EXPR;
7847 if (forward ^ OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
7848 warning_at (loc, 0, "%<depend(sink)%> clause waiting for "
7849 "lexically later iteration");
7850 break;
7851 }
7852 deps = TREE_CHAIN (deps);
7853 }
7854 /* If all offsets corresponding to the collapsed loops are zero,
7855 this depend clause can be ignored. FIXME: but there is still a
7856 flush needed. We need to emit one __sync_synchronize () for it
7857 though (perhaps conditionally)? Solve this together with the
7858 conservative dependence folding optimization.
7859 if (i >= fd->collapse)
7860 return; */
7861
7862 deps = OMP_CLAUSE_DECL (c);
7863 gsi_prev (&gsi2);
7864 edge e1 = split_block (gsi_bb (gsi2), gsi_stmt (gsi2));
7865 edge e2 = split_block_after_labels (e1->dest);
7866
7867 *gsi = gsi_after_labels (e1->dest);
7868 for (i = 0; i < fd->ordered; i++)
7869 {
7870 tree itype = TREE_TYPE (fd->loops[i].v);
7871 if (POINTER_TYPE_P (itype))
7872 itype = sizetype;
7873 if (i)
7874 deps = TREE_CHAIN (deps);
7875 off = TREE_PURPOSE (deps);
7876 tree s = fold_convert_loc (loc, itype, fd->loops[i].step);
7877
7878 if (integer_zerop (off))
7879 t = boolean_true_node;
7880 else
7881 {
7882 tree a;
7883 tree co = fold_convert_loc (loc, itype, off);
7884 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
7885 {
7886 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
7887 co = fold_build1_loc (loc, NEGATE_EXPR, itype, co);
7888 a = fold_build2_loc (loc, POINTER_PLUS_EXPR,
7889 TREE_TYPE (fd->loops[i].v), fd->loops[i].v,
7890 co);
7891 }
7892 else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
7893 a = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
7894 fd->loops[i].v, co);
7895 else
7896 a = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
7897 fd->loops[i].v, co);
7898 if (fd->loops[i].cond_code == LT_EXPR)
7899 {
7900 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
7901 t = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
7902 fd->loops[i].n1);
7903 else
7904 t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
7905 fd->loops[i].n2);
7906 }
7907 else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
7908 t = fold_build2_loc (loc, GT_EXPR, boolean_type_node, a,
7909 fd->loops[i].n2);
7910 else
7911 t = fold_build2_loc (loc, LE_EXPR, boolean_type_node, a,
7912 fd->loops[i].n1);
7913 }
7914 if (cond)
7915 cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, cond, t);
7916 else
7917 cond = t;
7918
7919 off = fold_convert_loc (loc, itype, off);
7920
7921 if (fd->loops[i].cond_code == LT_EXPR
7922 ? !integer_onep (fd->loops[i].step)
7923 : !integer_minus_onep (fd->loops[i].step))
7924 {
7925 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
7926 t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, off,
7927 fold_build1_loc (loc, NEGATE_EXPR, itype,
7928 s));
7929 else
7930 t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, off, s);
7931 t = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, t,
7932 build_int_cst (itype, 0));
7933 if (integer_zerop (t) && !warned_step)
7934 {
7935 warning_at (loc, 0, "%<depend(sink)%> refers to iteration never "
7936 "in the iteration space");
7937 warned_step = true;
7938 }
7939 cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node,
7940 cond, t);
7941 }
7942
7943 if (i <= fd->collapse - 1 && fd->collapse > 1)
7944 t = fd->loop.v;
7945 else if (counts[i])
7946 t = counts[i];
7947 else
7948 {
7949 t = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
7950 fd->loops[i].v, fd->loops[i].n1);
7951 t = fold_convert_loc (loc, fd->iter_type, t);
7952 }
7953 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
7954 off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off,
7955 fold_build1_loc (loc, NEGATE_EXPR, itype,
7956 s));
7957 else
7958 off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
7959 if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
7960 off = fold_build1_loc (loc, NEGATE_EXPR, itype, off);
7961 off = fold_convert_loc (loc, fd->iter_type, off);
7962 if (i <= fd->collapse - 1 && fd->collapse > 1)
7963 {
7964 if (i)
7965 off = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, coff,
7966 off);
7967 if (i < fd->collapse - 1)
7968 {
7969 coff = fold_build2_loc (loc, MULT_EXPR, fd->iter_type, off,
7970 counts[i]);
7971 continue;
7972 }
7973 }
7974 off = unshare_expr (off);
7975 t = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, t, off);
7976 t = force_gimple_operand_gsi (gsi, t, true, NULL_TREE,
7977 true, GSI_SAME_STMT);
7978 args.safe_push (t);
7979 }
7980 gimple *g = gimple_build_call_vec (builtin_decl_explicit (sink_ix), args);
7981 gimple_set_location (g, loc);
7982 gsi_insert_before (gsi, g, GSI_SAME_STMT);
7983
7984 *gsi = gsi_last_bb (e1->src);
7985 cond = unshare_expr (cond);
7986 cond = force_gimple_operand_gsi (gsi, cond, true, NULL_TREE, false,
7987 GSI_CONTINUE_LINKING);
7988 gsi_insert_after (gsi, gimple_build_cond_empty (cond), GSI_NEW_STMT);
7989 edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE);
7990 e3->probability = REG_BR_PROB_BASE / 8;
7991 e1->probability = REG_BR_PROB_BASE - e3->probability;
7992 e1->flags = EDGE_TRUE_VALUE;
7993 set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);
7994
7995 *gsi = gsi_after_labels (e2->dest);
7996 }
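
/* Editorial worked example (not part of the original source): for
   "#pragma omp for ordered(2)" with unit steps and collapse(1), a
   "#pragma omp ordered depend(sink: i - 1, j)" is lowered by the code
   above into, roughly,

     if (i - 1 >= n1_1)          // the offset iteration exists
       GOMP_doacross_wait (i - 1 - n1_1, j - n2_1);

   i.e. one argument per ordered dimension, each the zero-based number
   of the iteration being waited on.  GOMP_doacross_wait is the real
   libgomp entry point (the ULL variant serves unsigned long long
   iterators); n1_1/n2_1 stand for the loops' lower bounds.  */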
7997
7998 /* Expand all #pragma omp ordered depend(source) and
7999 #pragma omp ordered depend(sink:...) constructs in the current
8000 #pragma omp for ordered(n) region. */
8001
8002 static void
8003 expand_omp_ordered_source_sink (struct omp_region *region,
8004 struct omp_for_data *fd, tree *counts,
8005 basic_block cont_bb)
8006 {
8007 struct omp_region *inner;
8008 int i;
8009 for (i = fd->collapse - 1; i < fd->ordered; i++)
8010 if (i == fd->collapse - 1 && fd->collapse > 1)
8011 counts[i] = NULL_TREE;
8012 else if (i >= fd->collapse && !cont_bb)
8013 counts[i] = build_zero_cst (fd->iter_type);
8014 else if (!POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))
8015 && integer_onep (fd->loops[i].step))
8016 counts[i] = NULL_TREE;
8017 else
8018 counts[i] = create_tmp_var (fd->iter_type, ".orditer");
8019 tree atype
8020 = build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1);
8021 counts[fd->ordered] = create_tmp_var (atype, ".orditera");
8022 TREE_ADDRESSABLE (counts[fd->ordered]) = 1;
8023
8024 for (inner = region->inner; inner; inner = inner->next)
8025 if (inner->type == GIMPLE_OMP_ORDERED)
8026 {
8027 gomp_ordered *ord_stmt = inner->ord_stmt;
8028 gimple_stmt_iterator gsi = gsi_for_stmt (ord_stmt);
8029 location_t loc = gimple_location (ord_stmt);
8030 tree c;
8031 for (c = gimple_omp_ordered_clauses (ord_stmt);
8032 c; c = OMP_CLAUSE_CHAIN (c))
8033 if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
8034 break;
8035 if (c)
8036 expand_omp_ordered_source (&gsi, fd, counts, loc);
8037 for (c = gimple_omp_ordered_clauses (ord_stmt);
8038 c; c = OMP_CLAUSE_CHAIN (c))
8039 if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
8040 expand_omp_ordered_sink (&gsi, fd, counts, c, loc);
8041 gsi_remove (&gsi, true);
8042 }
8043 }
8044
8045 /* Wrap the body into fd->ordered - fd->collapse loops that aren't
8046 collapsed. */
8047
8048 static basic_block
8049 expand_omp_for_ordered_loops (struct omp_for_data *fd, tree *counts,
8050 basic_block cont_bb, basic_block body_bb,
8051 bool ordered_lastprivate)
8052 {
8053 if (fd->ordered == fd->collapse)
8054 return cont_bb;
8055
8056 if (!cont_bb)
8057 {
8058 gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
8059 for (int i = fd->collapse; i < fd->ordered; i++)
8060 {
8061 tree type = TREE_TYPE (fd->loops[i].v);
8062 tree n1 = fold_convert (type, fd->loops[i].n1);
8063 expand_omp_build_assign (&gsi, fd->loops[i].v, n1);
8064 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
8065 size_int (i - fd->collapse + 1),
8066 NULL_TREE, NULL_TREE);
8067 expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
8068 }
8069 return NULL;
8070 }
8071
8072 for (int i = fd->ordered - 1; i >= fd->collapse; i--)
8073 {
8074 tree t, type = TREE_TYPE (fd->loops[i].v);
8075 gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
8076 expand_omp_build_assign (&gsi, fd->loops[i].v,
8077 fold_convert (type, fd->loops[i].n1));
8078 if (counts[i])
8079 expand_omp_build_assign (&gsi, counts[i],
8080 build_zero_cst (fd->iter_type));
8081 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
8082 size_int (i - fd->collapse + 1),
8083 NULL_TREE, NULL_TREE);
8084 expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
8085 if (!gsi_end_p (gsi))
8086 gsi_prev (&gsi);
8087 else
8088 gsi = gsi_last_bb (body_bb);
8089 edge e1 = split_block (body_bb, gsi_stmt (gsi));
8090 basic_block new_body = e1->dest;
8091 if (body_bb == cont_bb)
8092 cont_bb = new_body;
8093 edge e2 = NULL;
8094 basic_block new_header;
8095 if (EDGE_COUNT (cont_bb->preds) > 0)
8096 {
8097 gsi = gsi_last_bb (cont_bb);
8098 if (POINTER_TYPE_P (type))
8099 t = fold_build_pointer_plus (fd->loops[i].v,
8100 fold_convert (sizetype,
8101 fd->loops[i].step));
8102 else
8103 t = fold_build2 (PLUS_EXPR, type, fd->loops[i].v,
8104 fold_convert (type, fd->loops[i].step));
8105 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
8106 if (counts[i])
8107 {
8108 t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[i],
8109 build_int_cst (fd->iter_type, 1));
8110 expand_omp_build_assign (&gsi, counts[i], t);
8111 t = counts[i];
8112 }
8113 else
8114 {
8115 t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
8116 fd->loops[i].v, fd->loops[i].n1);
8117 t = fold_convert (fd->iter_type, t);
8118 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
8119 true, GSI_SAME_STMT);
8120 }
8121 aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
8122 size_int (i - fd->collapse + 1),
8123 NULL_TREE, NULL_TREE);
8124 expand_omp_build_assign (&gsi, aref, t);
8125 gsi_prev (&gsi);
8126 e2 = split_block (cont_bb, gsi_stmt (gsi));
8127 new_header = e2->dest;
8128 }
8129 else
8130 new_header = cont_bb;
8131 gsi = gsi_after_labels (new_header);
8132 tree v = force_gimple_operand_gsi (&gsi, fd->loops[i].v, true, NULL_TREE,
8133 true, GSI_SAME_STMT);
8134 tree n2
8135 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loops[i].n2),
8136 true, NULL_TREE, true, GSI_SAME_STMT);
8137 t = build2 (fd->loops[i].cond_code, boolean_type_node, v, n2);
8138 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_NEW_STMT);
8139 edge e3 = split_block (new_header, gsi_stmt (gsi));
8140 cont_bb = e3->dest;
8141 remove_edge (e1);
8142 make_edge (body_bb, new_header, EDGE_FALLTHRU);
8143 e3->flags = EDGE_FALSE_VALUE;
8144 e3->probability = REG_BR_PROB_BASE / 8;
8145 e1 = make_edge (new_header, new_body, EDGE_TRUE_VALUE);
8146 e1->probability = REG_BR_PROB_BASE - e3->probability;
8147
8148 set_immediate_dominator (CDI_DOMINATORS, new_header, body_bb);
8149 set_immediate_dominator (CDI_DOMINATORS, new_body, new_header);
8150
8151 if (e2)
8152 {
8153 struct loop *loop = alloc_loop ();
8154 loop->header = new_header;
8155 loop->latch = e2->src;
8156 add_loop (loop, body_bb->loop_father);
8157 }
8158 }
8159
8160 /* If there are any lastprivate clauses and it is possible some loops
8161 might have zero iterations, ensure all the decls are initialized,
8162 otherwise we could crash evaluating C++ class iterators with lastprivate
8163 clauses. */
8164 bool need_inits = false;
8165 for (int i = fd->collapse; ordered_lastprivate && i < fd->ordered; i++)
8166 if (need_inits)
8167 {
8168 tree type = TREE_TYPE (fd->loops[i].v);
8169 gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
8170 expand_omp_build_assign (&gsi, fd->loops[i].v,
8171 fold_convert (type, fd->loops[i].n1));
8172 }
8173 else
8174 {
8175 tree type = TREE_TYPE (fd->loops[i].v);
8176 tree this_cond = fold_build2 (fd->loops[i].cond_code,
8177 boolean_type_node,
8178 fold_convert (type, fd->loops[i].n1),
8179 fold_convert (type, fd->loops[i].n2));
8180 if (!integer_onep (this_cond))
8181 need_inits = true;
8182 }
8183
8184 return cont_bb;
8185 }
8186
8187
8188 /* A subroutine of expand_omp_for. Generate code for a parallel
8189 loop with any schedule. Given parameters:
8190
8191 for (V = N1; V cond N2; V += STEP) BODY;
8192
8193 where COND is "<" or ">", we generate pseudocode
8194
8195 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
8196 if (more) goto L0; else goto L3;
8197 L0:
8198 V = istart0;
8199 iend = iend0;
8200 L1:
8201 BODY;
8202 V += STEP;
8203 if (V cond iend) goto L1; else goto L2;
8204 L2:
8205 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
8206 L3:
8207
8208 If this is a combined omp parallel loop, instead of the call to
8209 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
8210 If this is gimple_omp_for_combined_p loop, then instead of assigning
8211 V and iend in L0 we assign the first two _looptemp_ clause decls of the
8212 inner GIMPLE_OMP_FOR and V += STEP; and
8213 if (V cond iend) goto L1; else goto L2; are removed.
8214
8215 For collapsed loops, given parameters:
8216 collapse(3)
8217 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
8218 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
8219 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
8220 BODY;
8221
8222 we generate pseudocode
8223
8224 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
8225 if (cond3 is <)
8226 adj = STEP3 - 1;
8227 else
8228 adj = STEP3 + 1;
8229 count3 = (adj + N32 - N31) / STEP3;
8230 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
8231 if (cond2 is <)
8232 adj = STEP2 - 1;
8233 else
8234 adj = STEP2 + 1;
8235 count2 = (adj + N22 - N21) / STEP2;
8236 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
8237 if (cond1 is <)
8238 adj = STEP1 - 1;
8239 else
8240 adj = STEP1 + 1;
8241 count1 = (adj + N12 - N11) / STEP1;
8242 count = count1 * count2 * count3;
8243 goto Z1;
8244 Z0:
8245 count = 0;
8246 Z1:
8247 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
8248 if (more) goto L0; else goto L3;
8249 L0:
8250 V = istart0;
8251 T = V;
8252 V3 = N31 + (T % count3) * STEP3;
8253 T = T / count3;
8254 V2 = N21 + (T % count2) * STEP2;
8255 T = T / count2;
8256 V1 = N11 + T * STEP1;
8257 iend = iend0;
8258 L1:
8259 BODY;
8260 V += 1;
8261 if (V < iend) goto L10; else goto L2;
8262 L10:
8263 V3 += STEP3;
8264 if (V3 cond3 N32) goto L1; else goto L11;
8265 L11:
8266 V3 = N31;
8267 V2 += STEP2;
8268 if (V2 cond2 N22) goto L1; else goto L12;
8269 L12:
8270 V2 = N21;
8271 V1 += STEP1;
8272 goto L1;
8273 L2:
8274 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
8275 L3:
8276
8277 */
8278
8279 static void
8280 expand_omp_for_generic (struct omp_region *region,
8281 struct omp_for_data *fd,
8282 enum built_in_function start_fn,
8283 enum built_in_function next_fn,
8284 gimple *inner_stmt)
8285 {
8286 tree type, istart0, iend0, iend;
8287 tree t, vmain, vback, bias = NULL_TREE;
8288 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
8289 basic_block l2_bb = NULL, l3_bb = NULL;
8290 gimple_stmt_iterator gsi;
8291 gassign *assign_stmt;
8292 bool in_combined_parallel = is_combined_parallel (region);
8293 bool broken_loop = region->cont == NULL;
8294 edge e, ne;
8295 tree *counts = NULL;
8296 int i;
8297 bool ordered_lastprivate = false;
8298
8299 gcc_assert (!broken_loop || !in_combined_parallel);
8300 gcc_assert (fd->iter_type == long_integer_type_node
8301 || !in_combined_parallel);
8302
8303 entry_bb = region->entry;
8304 cont_bb = region->cont;
8305 collapse_bb = NULL;
8306 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
8307 gcc_assert (broken_loop
8308 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
8309 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
8310 l1_bb = single_succ (l0_bb);
8311 if (!broken_loop)
8312 {
8313 l2_bb = create_empty_bb (cont_bb);
8314 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb
8315 || (single_succ_edge (BRANCH_EDGE (cont_bb)->dest)->dest
8316 == l1_bb));
8317 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
8318 }
8319 else
8320 l2_bb = NULL;
8321 l3_bb = BRANCH_EDGE (entry_bb)->dest;
8322 exit_bb = region->exit;
8323
8324 gsi = gsi_last_bb (entry_bb);
8325
8326 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
8327 if (fd->ordered
8328 && find_omp_clause (gimple_omp_for_clauses (gsi_stmt (gsi)),
8329 OMP_CLAUSE_LASTPRIVATE))
8330 ordered_lastprivate = true;
8331 if (fd->collapse > 1 || fd->ordered)
8332 {
8333 int first_zero_iter1 = -1, first_zero_iter2 = -1;
8334 basic_block zero_iter1_bb = NULL, zero_iter2_bb = NULL, l2_dom_bb = NULL;
8335
8336 counts = XALLOCAVEC (tree, fd->ordered ? fd->ordered + 1 : fd->collapse);
8337 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
8338 zero_iter1_bb, first_zero_iter1,
8339 zero_iter2_bb, first_zero_iter2, l2_dom_bb);
8340
8341 if (zero_iter1_bb)
8342 {
8343 /* Some counts[i] vars might be uninitialized if
8344 some loop has zero iterations. But the body shouldn't
8345 be executed in that case, so just avoid uninit warnings. */
8346 for (i = first_zero_iter1;
8347 i < (fd->ordered ? fd->ordered : fd->collapse); i++)
8348 if (SSA_VAR_P (counts[i]))
8349 TREE_NO_WARNING (counts[i]) = 1;
8350 gsi_prev (&gsi);
8351 e = split_block (entry_bb, gsi_stmt (gsi));
8352 entry_bb = e->dest;
8353 make_edge (zero_iter1_bb, entry_bb, EDGE_FALLTHRU);
8354 gsi = gsi_last_bb (entry_bb);
8355 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
8356 get_immediate_dominator (CDI_DOMINATORS,
8357 zero_iter1_bb));
8358 }
8359 if (zero_iter2_bb)
8360 {
8361 /* Some counts[i] vars might be uninitialized if
8362 some loop has zero iterations. But the body shouldn't
8363 be executed in that case, so just avoid uninit warnings. */
8364 for (i = first_zero_iter2; i < fd->ordered; i++)
8365 if (SSA_VAR_P (counts[i]))
8366 TREE_NO_WARNING (counts[i]) = 1;
8367 if (zero_iter1_bb)
8368 make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
8369 else
8370 {
8371 gsi_prev (&gsi);
8372 e = split_block (entry_bb, gsi_stmt (gsi));
8373 entry_bb = e->dest;
8374 make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
8375 gsi = gsi_last_bb (entry_bb);
8376 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
8377 get_immediate_dominator
8378 (CDI_DOMINATORS, zero_iter2_bb));
8379 }
8380 }
8381 if (fd->collapse == 1)
8382 {
8383 counts[0] = fd->loop.n2;
8384 fd->loop = fd->loops[0];
8385 }
8386 }
8387
8388 type = TREE_TYPE (fd->loop.v);
8389 istart0 = create_tmp_var (fd->iter_type, ".istart0");
8390 iend0 = create_tmp_var (fd->iter_type, ".iend0");
8391 TREE_ADDRESSABLE (istart0) = 1;
8392 TREE_ADDRESSABLE (iend0) = 1;
8393
8394 /* See if we need to bias by LLONG_MIN. */
8395 if (fd->iter_type == long_long_unsigned_type_node
8396 && TREE_CODE (type) == INTEGER_TYPE
8397 && !TYPE_UNSIGNED (type)
8398 && fd->ordered == 0)
8399 {
8400 tree n1, n2;
8401
8402 if (fd->loop.cond_code == LT_EXPR)
8403 {
8404 n1 = fd->loop.n1;
8405 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
8406 }
8407 else
8408 {
8409 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
8410 n2 = fd->loop.n1;
8411 }
8412 if (TREE_CODE (n1) != INTEGER_CST
8413 || TREE_CODE (n2) != INTEGER_CST
8414 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
8415 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
8416 }
8417
8418 gimple_stmt_iterator gsif = gsi;
8419 gsi_prev (&gsif);
8420
8421 tree arr = NULL_TREE;
8422 if (in_combined_parallel)
8423 {
8424 gcc_assert (fd->ordered == 0);
8425 /* In a combined parallel loop, emit a call to
8426 GOMP_loop_foo_next. */
8427 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
8428 build_fold_addr_expr (istart0),
8429 build_fold_addr_expr (iend0));
8430 }
8431 else
8432 {
8433 tree t0, t1, t2, t3, t4;
8434 /* If this is not a combined parallel loop, emit a call to
8435 GOMP_loop_foo_start in ENTRY_BB. */
8436 t4 = build_fold_addr_expr (iend0);
8437 t3 = build_fold_addr_expr (istart0);
8438 if (fd->ordered)
8439 {
8440 t0 = build_int_cst (unsigned_type_node,
8441 fd->ordered - fd->collapse + 1);
8442 arr = create_tmp_var (build_array_type_nelts (fd->iter_type,
8443 fd->ordered
8444 - fd->collapse + 1),
8445 ".omp_counts");
8446 DECL_NAMELESS (arr) = 1;
8447 TREE_ADDRESSABLE (arr) = 1;
8448 TREE_STATIC (arr) = 1;
8449 vec<constructor_elt, va_gc> *v;
8450 vec_alloc (v, fd->ordered - fd->collapse + 1);
8451 int idx;
8452
8453 for (idx = 0; idx < fd->ordered - fd->collapse + 1; idx++)
8454 {
8455 tree c;
8456 if (idx == 0 && fd->collapse > 1)
8457 c = fd->loop.n2;
8458 else
8459 c = counts[idx + fd->collapse - 1];
8460 tree purpose = size_int (idx);
8461 CONSTRUCTOR_APPEND_ELT (v, purpose, c);
8462 if (TREE_CODE (c) != INTEGER_CST)
8463 TREE_STATIC (arr) = 0;
8464 }
8465
8466 DECL_INITIAL (arr) = build_constructor (TREE_TYPE (arr), v);
8467 if (!TREE_STATIC (arr))
8468 force_gimple_operand_gsi (&gsi, build1 (DECL_EXPR,
8469 void_type_node, arr),
8470 true, NULL_TREE, true, GSI_SAME_STMT);
8471 t1 = build_fold_addr_expr (arr);
8472 t2 = NULL_TREE;
8473 }
8474 else
8475 {
8476 t2 = fold_convert (fd->iter_type, fd->loop.step);
8477 t1 = fd->loop.n2;
8478 t0 = fd->loop.n1;
8479 if (gimple_omp_for_combined_into_p (fd->for_stmt))
8480 {
8481 tree innerc
8482 = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
8483 OMP_CLAUSE__LOOPTEMP_);
8484 gcc_assert (innerc);
8485 t0 = OMP_CLAUSE_DECL (innerc);
8486 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
8487 OMP_CLAUSE__LOOPTEMP_);
8488 gcc_assert (innerc);
8489 t1 = OMP_CLAUSE_DECL (innerc);
8490 }
8491 if (POINTER_TYPE_P (TREE_TYPE (t0))
8492 && TYPE_PRECISION (TREE_TYPE (t0))
8493 != TYPE_PRECISION (fd->iter_type))
8494 {
8495 /* Avoid casting pointers to integer of a different size. */
8496 tree itype = signed_type_for (type);
8497 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
8498 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
8499 }
8500 else
8501 {
8502 t1 = fold_convert (fd->iter_type, t1);
8503 t0 = fold_convert (fd->iter_type, t0);
8504 }
8505 if (bias)
8506 {
8507 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
8508 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
8509 }
8510 }
8511 if (fd->iter_type == long_integer_type_node || fd->ordered)
8512 {
8513 if (fd->chunk_size)
8514 {
8515 t = fold_convert (fd->iter_type, fd->chunk_size);
8516 t = omp_adjust_chunk_size (t, fd->simd_schedule);
8517 if (fd->ordered)
8518 t = build_call_expr (builtin_decl_explicit (start_fn),
8519 5, t0, t1, t, t3, t4);
8520 else
8521 t = build_call_expr (builtin_decl_explicit (start_fn),
8522 6, t0, t1, t2, t, t3, t4);
8523 }
8524 else if (fd->ordered)
8525 t = build_call_expr (builtin_decl_explicit (start_fn),
8526 4, t0, t1, t3, t4);
8527 else
8528 t = build_call_expr (builtin_decl_explicit (start_fn),
8529 5, t0, t1, t2, t3, t4);
8530 }
8531 else
8532 {
8533 tree t5;
8534 tree c_bool_type;
8535 tree bfn_decl;
8536
8537 /* The GOMP_loop_ull_*start functions have an additional boolean
8538 argument, true for < loops and false for > loops.
8539 In Fortran, the C bool type can be different from
8540 boolean_type_node. */
8541 bfn_decl = builtin_decl_explicit (start_fn);
8542 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
8543 t5 = build_int_cst (c_bool_type,
8544 fd->loop.cond_code == LT_EXPR ? 1 : 0);
8545 if (fd->chunk_size)
8546 {
8547 tree bfn_decl = builtin_decl_explicit (start_fn);
8548 t = fold_convert (fd->iter_type, fd->chunk_size);
8549 t = omp_adjust_chunk_size (t, fd->simd_schedule);
8550 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
8551 }
8552 else
8553 t = build_call_expr (builtin_decl_explicit (start_fn),
8554 6, t5, t0, t1, t2, t3, t4);
8555 }
8556 }
8557 if (TREE_TYPE (t) != boolean_type_node)
8558 t = fold_build2 (NE_EXPR, boolean_type_node,
8559 t, build_int_cst (TREE_TYPE (t), 0));
8560 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
8561 true, GSI_SAME_STMT);
8562 if (arr && !TREE_STATIC (arr))
8563 {
8564 tree clobber = build_constructor (TREE_TYPE (arr), NULL);
8565 TREE_THIS_VOLATILE (clobber) = 1;
8566 gsi_insert_before (&gsi, gimple_build_assign (arr, clobber),
8567 GSI_SAME_STMT);
8568 }
8569 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
8570
8571 /* Remove the GIMPLE_OMP_FOR statement. */
8572 gsi_remove (&gsi, true);
8573
8574 if (gsi_end_p (gsif))
8575 gsif = gsi_after_labels (gsi_bb (gsif));
8576 else gsi_next (&gsif);
8577
8578 /* Iteration setup for sequential loop goes in L0_BB. */
8579 tree startvar = fd->loop.v;
8580 tree endvar = NULL_TREE;
8581
8582 if (gimple_omp_for_combined_p (fd->for_stmt))
8583 {
8584 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
8585 && gimple_omp_for_kind (inner_stmt)
8586 == GF_OMP_FOR_KIND_SIMD);
8587 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
8588 OMP_CLAUSE__LOOPTEMP_);
8589 gcc_assert (innerc);
8590 startvar = OMP_CLAUSE_DECL (innerc);
8591 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
8592 OMP_CLAUSE__LOOPTEMP_);
8593 gcc_assert (innerc);
8594 endvar = OMP_CLAUSE_DECL (innerc);
8595 }
8596
8597 gsi = gsi_start_bb (l0_bb);
8598 t = istart0;
8599 if (fd->ordered && fd->collapse == 1)
8600 t = fold_build2 (MULT_EXPR, fd->iter_type, t,
8601 fold_convert (fd->iter_type, fd->loop.step));
8602 else if (bias)
8603 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
8604 if (fd->ordered && fd->collapse == 1)
8605 {
8606 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
8607 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar),
8608 fd->loop.n1, fold_convert (sizetype, t));
8609 else
8610 {
8611 t = fold_convert (TREE_TYPE (startvar), t);
8612 t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar),
8613 fd->loop.n1, t);
8614 }
8615 }
8616 else
8617 {
8618 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
8619 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
8620 t = fold_convert (TREE_TYPE (startvar), t);
8621 }
8622 t = force_gimple_operand_gsi (&gsi, t,
8623 DECL_P (startvar)
8624 && TREE_ADDRESSABLE (startvar),
8625 NULL_TREE, false, GSI_CONTINUE_LINKING);
8626 assign_stmt = gimple_build_assign (startvar, t);
8627 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
8628
8629 t = iend0;
8630 if (fd->ordered && fd->collapse == 1)
8631 t = fold_build2 (MULT_EXPR, fd->iter_type, t,
8632 fold_convert (fd->iter_type, fd->loop.step));
8633 else if (bias)
8634 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
8635 if (fd->ordered && fd->collapse == 1)
8636 {
8637 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
8638 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar),
8639 fd->loop.n1, fold_convert (sizetype, t));
8640 else
8641 {
8642 t = fold_convert (TREE_TYPE (startvar), t);
8643 t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar),
8644 fd->loop.n1, t);
8645 }
8646 }
8647 else
8648 {
8649 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
8650 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
8651 t = fold_convert (TREE_TYPE (startvar), t);
8652 }
8653 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
8654 false, GSI_CONTINUE_LINKING);
8655 if (endvar)
8656 {
8657 assign_stmt = gimple_build_assign (endvar, iend);
8658 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
8659 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
8660 assign_stmt = gimple_build_assign (fd->loop.v, iend);
8661 else
8662 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend);
8663 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
8664 }
8665 /* Handle linear clause adjustments. */
8666 tree itercnt = NULL_TREE;
8667 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
8668 for (tree c = gimple_omp_for_clauses (fd->for_stmt);
8669 c; c = OMP_CLAUSE_CHAIN (c))
8670 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
8671 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
8672 {
8673 tree d = OMP_CLAUSE_DECL (c);
8674 bool is_ref = is_reference (d);
8675 tree t = d, a, dest;
8676 if (is_ref)
8677 t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
8678 tree type = TREE_TYPE (t);
8679 if (POINTER_TYPE_P (type))
8680 type = sizetype;
8681 dest = unshare_expr (t);
8682 tree v = create_tmp_var (TREE_TYPE (t), NULL);
8683 expand_omp_build_assign (&gsif, v, t);
8684 if (itercnt == NULL_TREE)
8685 {
8686 itercnt = startvar;
8687 tree n1 = fd->loop.n1;
8688 if (POINTER_TYPE_P (TREE_TYPE (itercnt)))
8689 {
8690 itercnt
8691 = fold_convert (signed_type_for (TREE_TYPE (itercnt)),
8692 itercnt);
8693 n1 = fold_convert (TREE_TYPE (itercnt), n1);
8694 }
8695 itercnt = fold_build2 (MINUS_EXPR, TREE_TYPE (itercnt),
8696 itercnt, n1);
8697 itercnt = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (itercnt),
8698 itercnt, fd->loop.step);
8699 itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
8700 NULL_TREE, false,
8701 GSI_CONTINUE_LINKING);
8702 }
8703 a = fold_build2 (MULT_EXPR, type,
8704 fold_convert (type, itercnt),
8705 fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
8706 t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
8707 : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a);
8708 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
8709 false, GSI_CONTINUE_LINKING);
8710 assign_stmt = gimple_build_assign (dest, t);
8711 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
8712 }
8713 if (fd->collapse > 1)
8714 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
8715
8716 if (fd->ordered)
8717 {
8718 /* Until now, the counts array contained the number of iterations,
8719 or a variable holding it, for the ith loop. From now on we need
8720 those counts only for the collapsed loops, and only for the 2nd
8721 through the last collapsed one. Move them one element earlier;
8722 we'll use counts[fd->collapse - 1] for the first source/sink
8723 iteration counter and so on, and counts[fd->ordered]
8724 as the array holding the current counter values for
8725 depend(source). */
8726 if (fd->collapse > 1)
8727 memmove (counts, counts + 1, (fd->collapse - 1) * sizeof (counts[0]));
8728 if (broken_loop)
8729 {
8730 int i;
8731 for (i = fd->collapse; i < fd->ordered; i++)
8732 {
8733 tree type = TREE_TYPE (fd->loops[i].v);
8734 tree this_cond
8735 = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
8736 fold_convert (type, fd->loops[i].n1),
8737 fold_convert (type, fd->loops[i].n2));
8738 if (!integer_onep (this_cond))
8739 break;
8740 }
8741 if (i < fd->ordered)
8742 {
8743 cont_bb
8744 = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
8745 add_bb_to_loop (cont_bb, l1_bb->loop_father);
8746 gimple_stmt_iterator gsi = gsi_after_labels (cont_bb);
8747 gimple *g = gimple_build_omp_continue (fd->loop.v, fd->loop.v);
8748 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
8749 make_edge (cont_bb, l3_bb, EDGE_FALLTHRU);
8750 make_edge (cont_bb, l1_bb, 0);
8751 l2_bb = create_empty_bb (cont_bb);
8752 broken_loop = false;
8753 }
8754 }
8755 expand_omp_ordered_source_sink (region, fd, counts, cont_bb);
8756 cont_bb = expand_omp_for_ordered_loops (fd, counts, cont_bb, l1_bb,
8757 ordered_lastprivate);
8758 if (counts[fd->collapse - 1])
8759 {
8760 gcc_assert (fd->collapse == 1);
8761 gsi = gsi_last_bb (l0_bb);
8762 expand_omp_build_assign (&gsi, counts[fd->collapse - 1],
8763 istart0, true);
8764 gsi = gsi_last_bb (cont_bb);
8765 t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[fd->collapse - 1],
8766 build_int_cst (fd->iter_type, 1));
8767 expand_omp_build_assign (&gsi, counts[fd->collapse - 1], t);
8768 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
8769 size_zero_node, NULL_TREE, NULL_TREE);
8770 expand_omp_build_assign (&gsi, aref, counts[fd->collapse - 1]);
8771 t = counts[fd->collapse - 1];
8772 }
8773 else if (fd->collapse > 1)
8774 t = fd->loop.v;
8775 else
8776 {
8777 t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v),
8778 fd->loops[0].v, fd->loops[0].n1);
8779 t = fold_convert (fd->iter_type, t);
8780 }
8781 gsi = gsi_last_bb (l0_bb);
8782 tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
8783 size_zero_node, NULL_TREE, NULL_TREE);
8784 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
8785 false, GSI_CONTINUE_LINKING);
8786 expand_omp_build_assign (&gsi, aref, t, true);
8787 }
8788
8789 if (!broken_loop)
8790 {
8791 /* Code to control the increment and predicate for the sequential
8792 loop goes in the CONT_BB. */
8793 gsi = gsi_last_bb (cont_bb);
8794 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
8795 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
8796 vmain = gimple_omp_continue_control_use (cont_stmt);
8797 vback = gimple_omp_continue_control_def (cont_stmt);
8798
8799 if (!gimple_omp_for_combined_p (fd->for_stmt))
8800 {
8801 if (POINTER_TYPE_P (type))
8802 t = fold_build_pointer_plus (vmain, fd->loop.step);
8803 else
8804 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
8805 t = force_gimple_operand_gsi (&gsi, t,
8806 DECL_P (vback)
8807 && TREE_ADDRESSABLE (vback),
8808 NULL_TREE, true, GSI_SAME_STMT);
8809 assign_stmt = gimple_build_assign (vback, t);
8810 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
8811
8812 if (fd->ordered && counts[fd->collapse - 1] == NULL_TREE)
8813 {
8814 if (fd->collapse > 1)
8815 t = fd->loop.v;
8816 else
8817 {
8818 t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v),
8819 fd->loops[0].v, fd->loops[0].n1);
8820 t = fold_convert (fd->iter_type, t);
8821 }
8822 tree aref = build4 (ARRAY_REF, fd->iter_type,
8823 counts[fd->ordered], size_zero_node,
8824 NULL_TREE, NULL_TREE);
8825 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
8826 true, GSI_SAME_STMT);
8827 expand_omp_build_assign (&gsi, aref, t);
8828 }
8829
8830 t = build2 (fd->loop.cond_code, boolean_type_node,
8831 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
8832 iend);
8833 gcond *cond_stmt = gimple_build_cond_empty (t);
8834 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
8835 }
8836
8837 /* Remove GIMPLE_OMP_CONTINUE. */
8838 gsi_remove (&gsi, true);
8839
8840 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
8841 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
8842
8843 /* Emit code to get the next parallel iteration in L2_BB. */
8844 gsi = gsi_start_bb (l2_bb);
8845
8846 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
8847 build_fold_addr_expr (istart0),
8848 build_fold_addr_expr (iend0));
8849 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
8850 false, GSI_CONTINUE_LINKING);
8851 if (TREE_TYPE (t) != boolean_type_node)
8852 t = fold_build2 (NE_EXPR, boolean_type_node,
8853 t, build_int_cst (TREE_TYPE (t), 0));
8854 gcond *cond_stmt = gimple_build_cond_empty (t);
8855 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
8856 }
8857
8858 /* Add the loop cleanup function. */
8859 gsi = gsi_last_bb (exit_bb);
8860 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
8861 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
8862 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
8863 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
8864 else
8865 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
8866 gcall *call_stmt = gimple_build_call (t, 0);
8867 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
8868 gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
8869 gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
8870 if (fd->ordered)
8871 {
8872 tree arr = counts[fd->ordered];
8873 tree clobber = build_constructor (TREE_TYPE (arr), NULL);
8874 TREE_THIS_VOLATILE (clobber) = 1;
8875 gsi_insert_after (&gsi, gimple_build_assign (arr, clobber),
8876 GSI_SAME_STMT);
8877 }
8878 gsi_remove (&gsi, true);
8879
8880 /* Connect the new blocks. */
8881 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
8882 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
8883
8884 if (!broken_loop)
8885 {
8886 gimple_seq phis;
8887
8888 e = find_edge (cont_bb, l3_bb);
8889 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
8890
8891 phis = phi_nodes (l3_bb);
8892 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
8893 {
8894 gimple *phi = gsi_stmt (gsi);
8895 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
8896 PHI_ARG_DEF_FROM_EDGE (phi, e));
8897 }
8898 remove_edge (e);
8899
8900 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
8901 e = find_edge (cont_bb, l1_bb);
8902 if (e == NULL)
8903 {
8904 e = BRANCH_EDGE (cont_bb);
8905 gcc_assert (single_succ (e->dest) == l1_bb);
8906 }
8907 if (gimple_omp_for_combined_p (fd->for_stmt))
8908 {
8909 remove_edge (e);
8910 e = NULL;
8911 }
8912 else if (fd->collapse > 1)
8913 {
8914 remove_edge (e);
8915 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
8916 }
8917 else
8918 e->flags = EDGE_TRUE_VALUE;
8919 if (e)
8920 {
8921 e->probability = REG_BR_PROB_BASE * 7 / 8;
8922 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
8923 }
8924 else
8925 {
8926 e = find_edge (cont_bb, l2_bb);
8927 e->flags = EDGE_FALLTHRU;
8928 }
8929 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
8930
8931 if (gimple_in_ssa_p (cfun))
8932 {
8933 /* Add phis to the outer loop that connect to the phis in the inner,
8934 original loop, and move the loop entry value of the inner phi to
8935 the loop entry value of the outer phi. */
8936 gphi_iterator psi;
8937 for (psi = gsi_start_phis (l3_bb); !gsi_end_p (psi); gsi_next (&psi))
8938 {
8939 source_location locus;
8940 gphi *nphi;
8941 gphi *exit_phi = psi.phi ();
8942
8943 edge l2_to_l3 = find_edge (l2_bb, l3_bb);
8944 tree exit_res = PHI_ARG_DEF_FROM_EDGE (exit_phi, l2_to_l3);
8945
8946 basic_block latch = BRANCH_EDGE (cont_bb)->dest;
8947 edge latch_to_l1 = find_edge (latch, l1_bb);
8948 gphi *inner_phi
8949 = find_phi_with_arg_on_edge (exit_res, latch_to_l1);
8950
8951 tree t = gimple_phi_result (exit_phi);
8952 tree new_res = copy_ssa_name (t, NULL);
8953 nphi = create_phi_node (new_res, l0_bb);
8954
8955 edge l0_to_l1 = find_edge (l0_bb, l1_bb);
8956 t = PHI_ARG_DEF_FROM_EDGE (inner_phi, l0_to_l1);
8957 locus = gimple_phi_arg_location_from_edge (inner_phi, l0_to_l1);
8958 edge entry_to_l0 = find_edge (entry_bb, l0_bb);
8959 add_phi_arg (nphi, t, entry_to_l0, locus);
8960
8961 edge l2_to_l0 = find_edge (l2_bb, l0_bb);
8962 add_phi_arg (nphi, exit_res, l2_to_l0, UNKNOWN_LOCATION);
8963
8964 add_phi_arg (inner_phi, new_res, l0_to_l1, UNKNOWN_LOCATION);
8965 }
8966 }
8967
8968 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
8969 recompute_dominator (CDI_DOMINATORS, l2_bb));
8970 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
8971 recompute_dominator (CDI_DOMINATORS, l3_bb));
8972 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
8973 recompute_dominator (CDI_DOMINATORS, l0_bb));
8974 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
8975 recompute_dominator (CDI_DOMINATORS, l1_bb));
8976
8977 /* We enter expand_omp_for_generic with a loop. This original loop may
8978 have its own loop struct, or it may be part of an outer loop struct
8979 (which may be the fake loop). */
8980 struct loop *outer_loop = entry_bb->loop_father;
8981 bool orig_loop_has_loop_struct = l1_bb->loop_father != outer_loop;
8982
8983 add_bb_to_loop (l2_bb, outer_loop);
8984
8985 /* We've added a new loop around the original loop. Allocate the
8986 corresponding loop struct. */
8987 struct loop *new_loop = alloc_loop ();
8988 new_loop->header = l0_bb;
8989 new_loop->latch = l2_bb;
8990 add_loop (new_loop, outer_loop);
8991
8992 /* Allocate a loop structure for the original loop unless we already
8993 had one. */
8994 if (!orig_loop_has_loop_struct
8995 && !gimple_omp_for_combined_p (fd->for_stmt))
8996 {
8997 struct loop *orig_loop = alloc_loop ();
8998 orig_loop->header = l1_bb;
8999 /* The loop may have multiple latches. */
9000 add_loop (orig_loop, new_loop);
9001 }
9002 }
9003 }
9004
9005
9006 /* A subroutine of expand_omp_for. Generate code for a parallel
9007 loop with static schedule and no specified chunk size. Given
9008 parameters:
9009
9010 for (V = N1; V cond N2; V += STEP) BODY;
9011
9012 where COND is "<" or ">", we generate pseudocode
9013
9014 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
9015 if (cond is <)
9016 adj = STEP - 1;
9017 else
9018 adj = STEP + 1;
9019 if ((__typeof (V)) -1 > 0 && cond is >)
9020 n = -(adj + N2 - N1) / -STEP;
9021 else
9022 n = (adj + N2 - N1) / STEP;
9023 q = n / nthreads;
9024 tt = n % nthreads;
9025 if (threadid < tt) goto L3; else goto L4;
9026 L3:
9027 tt = 0;
9028 q = q + 1;
9029 L4:
9030 s0 = q * threadid + tt;
9031 e0 = s0 + q;
9032 V = s0 * STEP + N1;
9033 if (s0 >= e0) goto L2; else goto L0;
9034 L0:
9035 e = e0 * STEP + N1;
9036 L1:
9037 BODY;
9038 V += STEP;
9039 if (V cond e) goto L1;
9040 L2:
9041 */
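/* Worked example (illustrative only): for n = 10 iterations and
   nthreads = 4, q = 10 / 4 = 2 and tt = 10 % 4 = 2.  Threads 0 and 1
   satisfy threadid < tt, so they take q = 3 iterations each (with tt
   reset to 0), covering [0,3) and [3,6); threads 2 and 3 keep q = 2
   and tt = 2, covering [6,8) and [8,10).  The remainder is thus
   spread over the first tt threads.  */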
9042
9043 static void
9044 expand_omp_for_static_nochunk (struct omp_region *region,
9045 struct omp_for_data *fd,
9046 gimple *inner_stmt)
9047 {
9048 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
9049 tree type, itype, vmain, vback;
9050 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
9051 basic_block body_bb, cont_bb, collapse_bb = NULL;
9052 basic_block fin_bb;
9053 gimple_stmt_iterator gsi;
9054 edge ep;
9055 bool broken_loop = region->cont == NULL;
9056 tree *counts = NULL;
9057 tree n1, n2, step;
9058
9059 itype = type = TREE_TYPE (fd->loop.v);
9060 if (POINTER_TYPE_P (type))
9061 itype = signed_type_for (type);
9062
9063 entry_bb = region->entry;
9064 cont_bb = region->cont;
9065 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
9066 fin_bb = BRANCH_EDGE (entry_bb)->dest;
9067 gcc_assert (broken_loop
9068 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
9069 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
9070 body_bb = single_succ (seq_start_bb);
9071 if (!broken_loop)
9072 {
9073 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb
9074 || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb);
9075 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
9076 }
9077 exit_bb = region->exit;
9078
9079 /* Iteration space partitioning goes in ENTRY_BB. */
9080 gsi = gsi_last_bb (entry_bb);
9081 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
9082
9083 if (fd->collapse > 1)
9084 {
9085 int first_zero_iter = -1, dummy = -1;
9086 basic_block l2_dom_bb = NULL, dummy_bb = NULL;
9087
9088 counts = XALLOCAVEC (tree, fd->collapse);
9089 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
9090 fin_bb, first_zero_iter,
9091 dummy_bb, dummy, l2_dom_bb);
9092 t = NULL_TREE;
9093 }
9094 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
9095 t = integer_one_node;
9096 else
9097 t = fold_binary (fd->loop.cond_code, boolean_type_node,
9098 fold_convert (type, fd->loop.n1),
9099 fold_convert (type, fd->loop.n2));
9100 if (fd->collapse == 1
9101 && TYPE_UNSIGNED (type)
9102 && (t == NULL_TREE || !integer_onep (t)))
9103 {
9104 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
9105 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
9106 true, GSI_SAME_STMT);
9107 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
9108 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
9109 true, GSI_SAME_STMT);
9110 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
9111 NULL_TREE, NULL_TREE);
9112 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
9113 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
9114 expand_omp_regimplify_p, NULL, NULL)
9115 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
9116 expand_omp_regimplify_p, NULL, NULL))
9117 {
9118 gsi = gsi_for_stmt (cond_stmt);
9119 gimple_regimplify_operands (cond_stmt, &gsi);
9120 }
9121 ep = split_block (entry_bb, cond_stmt);
9122 ep->flags = EDGE_TRUE_VALUE;
9123 entry_bb = ep->dest;
9124 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
9125 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
9126 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
9127 if (gimple_in_ssa_p (cfun))
9128 {
9129 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
9130 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
9131 !gsi_end_p (gpi); gsi_next (&gpi))
9132 {
9133 gphi *phi = gpi.phi ();
9134 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
9135 ep, UNKNOWN_LOCATION);
9136 }
9137 }
9138 gsi = gsi_last_bb (entry_bb);
9139 }
9140
9141 switch (gimple_omp_for_kind (fd->for_stmt))
9142 {
9143 case GF_OMP_FOR_KIND_FOR:
9144 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
9145 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
9146 break;
9147 case GF_OMP_FOR_KIND_DISTRIBUTE:
9148 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
9149 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
9150 break;
9151 default:
9152 gcc_unreachable ();
9153 }
9154 nthreads = build_call_expr (nthreads, 0);
9155 nthreads = fold_convert (itype, nthreads);
9156 nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
9157 true, GSI_SAME_STMT);
9158 threadid = build_call_expr (threadid, 0);
9159 threadid = fold_convert (itype, threadid);
9160 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
9161 true, GSI_SAME_STMT);
9162
9163 n1 = fd->loop.n1;
9164 n2 = fd->loop.n2;
9165 step = fd->loop.step;
9166 if (gimple_omp_for_combined_into_p (fd->for_stmt))
9167 {
9168 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
9169 OMP_CLAUSE__LOOPTEMP_);
9170 gcc_assert (innerc);
9171 n1 = OMP_CLAUSE_DECL (innerc);
9172 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9173 OMP_CLAUSE__LOOPTEMP_);
9174 gcc_assert (innerc);
9175 n2 = OMP_CLAUSE_DECL (innerc);
9176 }
9177 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
9178 true, NULL_TREE, true, GSI_SAME_STMT);
9179 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
9180 true, NULL_TREE, true, GSI_SAME_STMT);
9181 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
9182 true, NULL_TREE, true, GSI_SAME_STMT);
9183
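/* Compute the iteration count n = (adj + N2 - N1) / STEP from the
   pseudocode above.  E.g. (illustrative) for (V = 0; V < 10; V += 3):
   adj = 3 - 1 = 2, so n = (2 + 10 - 0) / 3 = 4, matching the four
   iterations V = 0, 3, 6, 9.  */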
9184 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
9185 t = fold_build2 (PLUS_EXPR, itype, step, t);
9186 t = fold_build2 (PLUS_EXPR, itype, t, n2);
9187 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
9188 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
9189 t = fold_build2 (TRUNC_DIV_EXPR, itype,
9190 fold_build1 (NEGATE_EXPR, itype, t),
9191 fold_build1 (NEGATE_EXPR, itype, step));
9192 else
9193 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
9194 t = fold_convert (itype, t);
9195 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
9196
9197 q = create_tmp_reg (itype, "q");
9198 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
9199 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
9200 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
9201
9202 tt = create_tmp_reg (itype, "tt");
9203 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
9204 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
9205 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
9206
9207 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
9208 gcond *cond_stmt = gimple_build_cond_empty (t);
9209 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
9210
9211 second_bb = split_block (entry_bb, cond_stmt)->dest;
9212 gsi = gsi_last_bb (second_bb);
9213 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
9214
9215 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
9216 GSI_SAME_STMT);
9217 gassign *assign_stmt
9218 = gimple_build_assign (q, PLUS_EXPR, q, build_int_cst (itype, 1));
9219 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
9220
9221 third_bb = split_block (second_bb, assign_stmt)->dest;
9222 gsi = gsi_last_bb (third_bb);
9223 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
9224
9225 t = build2 (MULT_EXPR, itype, q, threadid);
9226 t = build2 (PLUS_EXPR, itype, t, tt);
9227 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
9228
9229 t = fold_build2 (PLUS_EXPR, itype, s0, q);
9230 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
9231
9232 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
9233 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
9234
9235 /* Remove the GIMPLE_OMP_FOR statement. */
9236 gsi_remove (&gsi, true);
9237
9238 /* Setup code for sequential iteration goes in SEQ_START_BB. */
9239 gsi = gsi_start_bb (seq_start_bb);
9240
9241 tree startvar = fd->loop.v;
9242 tree endvar = NULL_TREE;
9243
9244 if (gimple_omp_for_combined_p (fd->for_stmt))
9245 {
9246 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
9247 ? gimple_omp_parallel_clauses (inner_stmt)
9248 : gimple_omp_for_clauses (inner_stmt);
9249 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
9250 gcc_assert (innerc);
9251 startvar = OMP_CLAUSE_DECL (innerc);
9252 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9253 OMP_CLAUSE__LOOPTEMP_);
9254 gcc_assert (innerc);
9255 endvar = OMP_CLAUSE_DECL (innerc);
9256 if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST
9257 && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
9258 {
9259 int i;
9260 for (i = 1; i < fd->collapse; i++)
9261 {
9262 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9263 OMP_CLAUSE__LOOPTEMP_);
9264 gcc_assert (innerc);
9265 }
9266 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9267 OMP_CLAUSE__LOOPTEMP_);
9268 if (innerc)
9269 {
9270 /* If needed (distribute parallel for with lastprivate),
9271 propagate down the total number of iterations. */
9272 tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)),
9273 fd->loop.n2);
9274 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false,
9275 GSI_CONTINUE_LINKING);
9276 assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
9277 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9278 }
9279 }
9280 }
9281 t = fold_convert (itype, s0);
9282 t = fold_build2 (MULT_EXPR, itype, t, step);
9283 if (POINTER_TYPE_P (type))
9284 t = fold_build_pointer_plus (n1, t);
9285 else
9286 t = fold_build2 (PLUS_EXPR, type, t, n1);
9287 t = fold_convert (TREE_TYPE (startvar), t);
9288 t = force_gimple_operand_gsi (&gsi, t,
9289 DECL_P (startvar)
9290 && TREE_ADDRESSABLE (startvar),
9291 NULL_TREE, false, GSI_CONTINUE_LINKING);
9292 assign_stmt = gimple_build_assign (startvar, t);
9293 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9294
9295 t = fold_convert (itype, e0);
9296 t = fold_build2 (MULT_EXPR, itype, t, step);
9297 if (POINTER_TYPE_P (type))
9298 t = fold_build_pointer_plus (n1, t);
9299 else
9300 t = fold_build2 (PLUS_EXPR, type, t, n1);
9301 t = fold_convert (TREE_TYPE (startvar), t);
9302 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9303 false, GSI_CONTINUE_LINKING);
9304 if (endvar)
9305 {
9306 assign_stmt = gimple_build_assign (endvar, e);
9307 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9308 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
9309 assign_stmt = gimple_build_assign (fd->loop.v, e);
9310 else
9311 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
9312 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9313 }
9314 /* Handle linear clause adjustments. */
9315 tree itercnt = NULL_TREE;
9316 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
9317 for (tree c = gimple_omp_for_clauses (fd->for_stmt);
9318 c; c = OMP_CLAUSE_CHAIN (c))
9319 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
9320 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
9321 {
9322 tree d = OMP_CLAUSE_DECL (c);
9323 bool is_ref = is_reference (d);
9324 tree t = d, a, dest;
9325 if (is_ref)
9326 t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
9327 if (itercnt == NULL_TREE)
9328 {
9329 if (gimple_omp_for_combined_into_p (fd->for_stmt))
9330 {
9331 itercnt = fold_build2 (MINUS_EXPR, itype,
9332 fold_convert (itype, n1),
9333 fold_convert (itype, fd->loop.n1));
9334 itercnt = fold_build2 (EXACT_DIV_EXPR, itype, itercnt, step);
9335 itercnt = fold_build2 (PLUS_EXPR, itype, itercnt, s0);
9336 itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
9337 NULL_TREE, false,
9338 GSI_CONTINUE_LINKING);
9339 }
9340 else
9341 itercnt = s0;
9342 }
9343 tree type = TREE_TYPE (t);
9344 if (POINTER_TYPE_P (type))
9345 type = sizetype;
9346 a = fold_build2 (MULT_EXPR, type,
9347 fold_convert (type, itercnt),
9348 fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
9349 dest = unshare_expr (t);
9350 t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
9351 : POINTER_PLUS_EXPR, TREE_TYPE (t), t, a);
9352 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9353 false, GSI_CONTINUE_LINKING);
9354 assign_stmt = gimple_build_assign (dest, t);
9355 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9356 }
9357 if (fd->collapse > 1)
9358 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
9359
9360 if (!broken_loop)
9361 {
9362 /* The code controlling the sequential loop replaces the
9363 GIMPLE_OMP_CONTINUE. */
9364 gsi = gsi_last_bb (cont_bb);
9365 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
9366 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
9367 vmain = gimple_omp_continue_control_use (cont_stmt);
9368 vback = gimple_omp_continue_control_def (cont_stmt);
9369
9370 if (!gimple_omp_for_combined_p (fd->for_stmt))
9371 {
9372 if (POINTER_TYPE_P (type))
9373 t = fold_build_pointer_plus (vmain, step);
9374 else
9375 t = fold_build2 (PLUS_EXPR, type, vmain, step);
9376 t = force_gimple_operand_gsi (&gsi, t,
9377 DECL_P (vback)
9378 && TREE_ADDRESSABLE (vback),
9379 NULL_TREE, true, GSI_SAME_STMT);
9380 assign_stmt = gimple_build_assign (vback, t);
9381 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
9382
9383 t = build2 (fd->loop.cond_code, boolean_type_node,
9384 DECL_P (vback) && TREE_ADDRESSABLE (vback)
9385 ? t : vback, e);
9386 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
9387 }
9388
9389 /* Remove the GIMPLE_OMP_CONTINUE statement. */
9390 gsi_remove (&gsi, true);
9391
9392 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
9393 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
9394 }
9395
9396 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
9397 gsi = gsi_last_bb (exit_bb);
9398 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
9399 {
9400 t = gimple_omp_return_lhs (gsi_stmt (gsi));
9401 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
9402 }
9403 gsi_remove (&gsi, true);
9404
9405 /* Connect all the blocks. */
9406 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
9407 ep->probability = REG_BR_PROB_BASE / 4 * 3;
9408 ep = find_edge (entry_bb, second_bb);
9409 ep->flags = EDGE_TRUE_VALUE;
9410 ep->probability = REG_BR_PROB_BASE / 4;
9411 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
9412 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
9413
9414 if (!broken_loop)
9415 {
9416 ep = find_edge (cont_bb, body_bb);
9417 if (ep == NULL)
9418 {
9419 ep = BRANCH_EDGE (cont_bb);
9420 gcc_assert (single_succ (ep->dest) == body_bb);
9421 }
9422 if (gimple_omp_for_combined_p (fd->for_stmt))
9423 {
9424 remove_edge (ep);
9425 ep = NULL;
9426 }
9427 else if (fd->collapse > 1)
9428 {
9429 remove_edge (ep);
9430 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
9431 }
9432 else
9433 ep->flags = EDGE_TRUE_VALUE;
9434 find_edge (cont_bb, fin_bb)->flags
9435 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
9436 }
9437
9438 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
9439 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
9440 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
9441
9442 set_immediate_dominator (CDI_DOMINATORS, body_bb,
9443 recompute_dominator (CDI_DOMINATORS, body_bb));
9444 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
9445 recompute_dominator (CDI_DOMINATORS, fin_bb));
9446
9447 struct loop *loop = body_bb->loop_father;
9448 if (loop != entry_bb->loop_father)
9449 {
9450 gcc_assert (loop->header == body_bb);
9451 gcc_assert (broken_loop
9452 || loop->latch == region->cont
9453 || single_pred (loop->latch) == region->cont);
9454 return;
9455 }
9456
9457 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
9458 {
9459 loop = alloc_loop ();
9460 loop->header = body_bb;
9461 if (collapse_bb == NULL)
9462 loop->latch = cont_bb;
9463 add_loop (loop, body_bb->loop_father);
9464 }
9465 }
9466
9467 /* Return phi in E->DEST with ARG on edge E. */
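/* E.g. (illustrative) if E->DEST contains x_3 = PHI <x_1 (E), x_2 (E2)>,
   find_phi_with_arg_on_edge (x_1, E) returns that phi; NULL is
   returned if no phi in E->DEST has ARG on E.  */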
9468
9469 static gphi *
9470 find_phi_with_arg_on_edge (tree arg, edge e)
9471 {
9472 basic_block bb = e->dest;
9473
9474 for (gphi_iterator gpi = gsi_start_phis (bb);
9475 !gsi_end_p (gpi);
9476 gsi_next (&gpi))
9477 {
9478 gphi *phi = gpi.phi ();
9479 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == arg)
9480 return phi;
9481 }
9482
9483 return NULL;
9484 }
9485
9486 /* A subroutine of expand_omp_for. Generate code for a parallel
9487 loop with static schedule and a specified chunk size. Given
9488 parameters:
9489
9490 for (V = N1; V cond N2; V += STEP) BODY;
9491
9492 where COND is "<" or ">", we generate pseudocode
9493
9494 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
9495 if (cond is <)
9496 adj = STEP - 1;
9497 else
9498 adj = STEP + 1;
9499 if ((__typeof (V)) -1 > 0 && cond is >)
9500 n = -(adj + N2 - N1) / -STEP;
9501 else
9502 n = (adj + N2 - N1) / STEP;
9503 trip = 0;
9504 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
9505 here so that V is defined
9506 if the loop is not entered
9507 L0:
9508 s0 = (trip * nthreads + threadid) * CHUNK;
9509 e0 = min(s0 + CHUNK, n);
9510 if (s0 < n) goto L1; else goto L4;
9511 L1:
9512 V = s0 * STEP + N1;
9513 e = e0 * STEP + N1;
9514 L2:
9515 BODY;
9516 V += STEP;
9517 if (V cond e) goto L2; else goto L3;
9518 L3:
9519 trip += 1;
9520 goto L0;
9521 L4:
9522 */
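/* Worked example (illustrative only): for n = 10, nthreads = 2 and
   CHUNK = 3, s0 = (trip * nthreads + threadid) * CHUNK yields [0,3)
   and [3,6) on trip 0, then [6,9) and [9,10) on trip 1 (e0 is clamped
   to n by the MIN), and on trip 2 thread 0 computes s0 = 12 >= n, so
   both threads exit via L4.  */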
9523
9524 static void
9525 expand_omp_for_static_chunk (struct omp_region *region,
9526 struct omp_for_data *fd, gimple *inner_stmt)
9527 {
9528 tree n, s0, e0, e, t;
9529 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
9530 tree type, itype, vmain, vback, vextra;
9531 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
9532 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
9533 gimple_stmt_iterator gsi;
9534 edge se;
9535 bool broken_loop = region->cont == NULL;
9536 tree *counts = NULL;
9537 tree n1, n2, step;
9538
9539 itype = type = TREE_TYPE (fd->loop.v);
9540 if (POINTER_TYPE_P (type))
9541 itype = signed_type_for (type);
9542
9543 entry_bb = region->entry;
9544 se = split_block (entry_bb, last_stmt (entry_bb));
9545 entry_bb = se->src;
9546 iter_part_bb = se->dest;
9547 cont_bb = region->cont;
9548 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
9549 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
9550 gcc_assert (broken_loop
9551 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
9552 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
9553 body_bb = single_succ (seq_start_bb);
9554 if (!broken_loop)
9555 {
9556 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb
9557 || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb);
9558 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
9559 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
9560 }
9561 exit_bb = region->exit;
9562
9563 /* Trip and adjustment setup goes in ENTRY_BB. */
9564 gsi = gsi_last_bb (entry_bb);
9565 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
9566
9567 if (fd->collapse > 1)
9568 {
9569 int first_zero_iter = -1, dummy = -1;
9570 basic_block l2_dom_bb = NULL, dummy_bb = NULL;
9571
9572 counts = XALLOCAVEC (tree, fd->collapse);
9573 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
9574 fin_bb, first_zero_iter,
9575 dummy_bb, dummy, l2_dom_bb);
9576 t = NULL_TREE;
9577 }
9578 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
9579 t = integer_one_node;
9580 else
9581 t = fold_binary (fd->loop.cond_code, boolean_type_node,
9582 fold_convert (type, fd->loop.n1),
9583 fold_convert (type, fd->loop.n2));
9584 if (fd->collapse == 1
9585 && TYPE_UNSIGNED (type)
9586 && (t == NULL_TREE || !integer_onep (t)))
9587 {
9588 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
9589 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
9590 true, GSI_SAME_STMT);
9591 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
9592 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
9593 true, GSI_SAME_STMT);
9594 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
9595 NULL_TREE, NULL_TREE);
9596 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
9597 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
9598 expand_omp_regimplify_p, NULL, NULL)
9599 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
9600 expand_omp_regimplify_p, NULL, NULL))
9601 {
9602 gsi = gsi_for_stmt (cond_stmt);
9603 gimple_regimplify_operands (cond_stmt, &gsi);
9604 }
9605 se = split_block (entry_bb, cond_stmt);
9606 se->flags = EDGE_TRUE_VALUE;
9607 entry_bb = se->dest;
9608 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
9609 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
9610 se->probability = REG_BR_PROB_BASE / 2000 - 1;
9611 if (gimple_in_ssa_p (cfun))
9612 {
9613 int dest_idx = find_edge (iter_part_bb, fin_bb)->dest_idx;
9614 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
9615 !gsi_end_p (gpi); gsi_next (&gpi))
9616 {
9617 gphi *phi = gpi.phi ();
9618 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
9619 se, UNKNOWN_LOCATION);
9620 }
9621 }
9622 gsi = gsi_last_bb (entry_bb);
9623 }
9624
9625 switch (gimple_omp_for_kind (fd->for_stmt))
9626 {
9627 case GF_OMP_FOR_KIND_FOR:
9628 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
9629 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
9630 break;
9631 case GF_OMP_FOR_KIND_DISTRIBUTE:
9632 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
9633 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
9634 break;
9635 default:
9636 gcc_unreachable ();
9637 }
9638 nthreads = build_call_expr (nthreads, 0);
9639 nthreads = fold_convert (itype, nthreads);
9640 nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
9641 true, GSI_SAME_STMT);
9642 threadid = build_call_expr (threadid, 0);
9643 threadid = fold_convert (itype, threadid);
9644 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
9645 true, GSI_SAME_STMT);
9646
9647 n1 = fd->loop.n1;
9648 n2 = fd->loop.n2;
9649 step = fd->loop.step;
9650 if (gimple_omp_for_combined_into_p (fd->for_stmt))
9651 {
9652 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
9653 OMP_CLAUSE__LOOPTEMP_);
9654 gcc_assert (innerc);
9655 n1 = OMP_CLAUSE_DECL (innerc);
9656 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9657 OMP_CLAUSE__LOOPTEMP_);
9658 gcc_assert (innerc);
9659 n2 = OMP_CLAUSE_DECL (innerc);
9660 }
9661 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
9662 true, NULL_TREE, true, GSI_SAME_STMT);
9663 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
9664 true, NULL_TREE, true, GSI_SAME_STMT);
9665 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
9666 true, NULL_TREE, true, GSI_SAME_STMT);
9667 tree chunk_size = fold_convert (itype, fd->chunk_size);
9668 chunk_size = omp_adjust_chunk_size (chunk_size, fd->simd_schedule);
9669 chunk_size
9670 = force_gimple_operand_gsi (&gsi, chunk_size, true, NULL_TREE, true,
9671 GSI_SAME_STMT);
9672
9673 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
9674 t = fold_build2 (PLUS_EXPR, itype, step, t);
9675 t = fold_build2 (PLUS_EXPR, itype, t, n2);
9676 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
9677 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
9678 t = fold_build2 (TRUNC_DIV_EXPR, itype,
9679 fold_build1 (NEGATE_EXPR, itype, t),
9680 fold_build1 (NEGATE_EXPR, itype, step));
9681 else
9682 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
9683 t = fold_convert (itype, t);
9684 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9685 true, GSI_SAME_STMT);
9686
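/* In SSA form the trip counter needs three names: TRIP_INIT for the
   zero-initialization in ENTRY_BB, TRIP_MAIN for the phi result in
   ITER_PART_BB, and TRIP_BACK for the incremented value assigned in
   TRIP_UPDATE_BB; the phi tying them together is created near the
   end of this function.  */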
9687 trip_var = create_tmp_reg (itype, ".trip");
9688 if (gimple_in_ssa_p (cfun))
9689 {
9690 trip_init = make_ssa_name (trip_var);
9691 trip_main = make_ssa_name (trip_var);
9692 trip_back = make_ssa_name (trip_var);
9693 }
9694 else
9695 {
9696 trip_init = trip_var;
9697 trip_main = trip_var;
9698 trip_back = trip_var;
9699 }
9700
9701 gassign *assign_stmt
9702 = gimple_build_assign (trip_init, build_int_cst (itype, 0));
9703 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
9704
9705 t = fold_build2 (MULT_EXPR, itype, threadid, chunk_size);
9706 t = fold_build2 (MULT_EXPR, itype, t, step);
9707 if (POINTER_TYPE_P (type))
9708 t = fold_build_pointer_plus (n1, t);
9709 else
9710 t = fold_build2 (PLUS_EXPR, type, t, n1);
9711 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9712 true, GSI_SAME_STMT);
9713
9714 /* Remove the GIMPLE_OMP_FOR. */
9715 gsi_remove (&gsi, true);
9716
9717 gimple_stmt_iterator gsif = gsi;
9718
9719 /* Iteration space partitioning goes in ITER_PART_BB. */
9720 gsi = gsi_last_bb (iter_part_bb);
9721
9722 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
9723 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
9724 t = fold_build2 (MULT_EXPR, itype, t, chunk_size);
9725 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9726 false, GSI_CONTINUE_LINKING);
9727
9728 t = fold_build2 (PLUS_EXPR, itype, s0, chunk_size);
9729 t = fold_build2 (MIN_EXPR, itype, t, n);
9730 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9731 false, GSI_CONTINUE_LINKING);
9732
9733 t = build2 (LT_EXPR, boolean_type_node, s0, n);
9734 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
9735
9736 /* Setup code for sequential iteration goes in SEQ_START_BB. */
9737 gsi = gsi_start_bb (seq_start_bb);
9738
9739 tree startvar = fd->loop.v;
9740 tree endvar = NULL_TREE;
9741
9742 if (gimple_omp_for_combined_p (fd->for_stmt))
9743 {
9744 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
9745 ? gimple_omp_parallel_clauses (inner_stmt)
9746 : gimple_omp_for_clauses (inner_stmt);
9747 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
9748 gcc_assert (innerc);
9749 startvar = OMP_CLAUSE_DECL (innerc);
9750 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9751 OMP_CLAUSE__LOOPTEMP_);
9752 gcc_assert (innerc);
9753 endvar = OMP_CLAUSE_DECL (innerc);
9754 if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST
9755 && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
9756 {
9757 int i;
9758 for (i = 1; i < fd->collapse; i++)
9759 {
9760 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9761 OMP_CLAUSE__LOOPTEMP_);
9762 gcc_assert (innerc);
9763 }
9764 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
9765 OMP_CLAUSE__LOOPTEMP_);
9766 if (innerc)
9767 {
9768 /* If needed (distribute parallel for with lastprivate),
9769 propagate down the total number of iterations. */
9770 tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)),
9771 fd->loop.n2);
9772 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false,
9773 GSI_CONTINUE_LINKING);
9774 assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
9775 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9776 }
9777 }
9778 }
9779
9780 t = fold_convert (itype, s0);
9781 t = fold_build2 (MULT_EXPR, itype, t, step);
9782 if (POINTER_TYPE_P (type))
9783 t = fold_build_pointer_plus (n1, t);
9784 else
9785 t = fold_build2 (PLUS_EXPR, type, t, n1);
9786 t = fold_convert (TREE_TYPE (startvar), t);
9787 t = force_gimple_operand_gsi (&gsi, t,
9788 DECL_P (startvar)
9789 && TREE_ADDRESSABLE (startvar),
9790 NULL_TREE, false, GSI_CONTINUE_LINKING);
9791 assign_stmt = gimple_build_assign (startvar, t);
9792 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9793
9794 t = fold_convert (itype, e0);
9795 t = fold_build2 (MULT_EXPR, itype, t, step);
9796 if (POINTER_TYPE_P (type))
9797 t = fold_build_pointer_plus (n1, t);
9798 else
9799 t = fold_build2 (PLUS_EXPR, type, t, n1);
9800 t = fold_convert (TREE_TYPE (startvar), t);
9801 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9802 false, GSI_CONTINUE_LINKING);
9803 if (endvar)
9804 {
9805 assign_stmt = gimple_build_assign (endvar, e);
9806 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9807 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
9808 assign_stmt = gimple_build_assign (fd->loop.v, e);
9809 else
9810 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
9811 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9812 }
9813 /* Handle linear clause adjustments. */
9814 tree itercnt = NULL_TREE, itercntbias = NULL_TREE;
9815 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
9816 for (tree c = gimple_omp_for_clauses (fd->for_stmt);
9817 c; c = OMP_CLAUSE_CHAIN (c))
9818 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
9819 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
9820 {
9821 tree d = OMP_CLAUSE_DECL (c);
9822 bool is_ref = is_reference (d);
9823 tree t = d, a, dest;
9824 if (is_ref)
9825 t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
9826 tree type = TREE_TYPE (t);
9827 if (POINTER_TYPE_P (type))
9828 type = sizetype;
9829 dest = unshare_expr (t);
9830 tree v = create_tmp_var (TREE_TYPE (t), NULL);
9831 expand_omp_build_assign (&gsif, v, t);
9832 if (itercnt == NULL_TREE)
9833 {
9834 if (gimple_omp_for_combined_into_p (fd->for_stmt))
9835 {
9836 itercntbias
9837 = fold_build2 (MINUS_EXPR, itype, fold_convert (itype, n1),
9838 fold_convert (itype, fd->loop.n1));
9839 itercntbias = fold_build2 (EXACT_DIV_EXPR, itype,
9840 itercntbias, step);
9841 itercntbias
9842 = force_gimple_operand_gsi (&gsif, itercntbias, true,
9843 NULL_TREE, true,
9844 GSI_SAME_STMT);
9845 itercnt = fold_build2 (PLUS_EXPR, itype, itercntbias, s0);
9846 itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
9847 NULL_TREE, false,
9848 GSI_CONTINUE_LINKING);
9849 }
9850 else
9851 itercnt = s0;
9852 }
9853 a = fold_build2 (MULT_EXPR, type,
9854 fold_convert (type, itercnt),
9855 fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
9856 t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
9857 : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a);
9858 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9859 false, GSI_CONTINUE_LINKING);
9860 assign_stmt = gimple_build_assign (dest, t);
9861 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9862 }
9863 if (fd->collapse > 1)
9864 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
9865
9866 if (!broken_loop)
9867 {
9868 /* The code controlling the sequential loop goes in CONT_BB,
9869 replacing the GIMPLE_OMP_CONTINUE. */
9870 gsi = gsi_last_bb (cont_bb);
9871 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
9872 vmain = gimple_omp_continue_control_use (cont_stmt);
9873 vback = gimple_omp_continue_control_def (cont_stmt);
9874
9875 if (!gimple_omp_for_combined_p (fd->for_stmt))
9876 {
9877 if (POINTER_TYPE_P (type))
9878 t = fold_build_pointer_plus (vmain, step);
9879 else
9880 t = fold_build2 (PLUS_EXPR, type, vmain, step);
9881 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
9882 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
9883 true, GSI_SAME_STMT);
9884 assign_stmt = gimple_build_assign (vback, t);
9885 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
9886
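/* With a chunk size of 1 each chunk is a single iteration, so the
   sequential loop never takes its back edge; emit a constant-false
   condition (0 == 1) instead of the V cond E test.  */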
9887 if (tree_int_cst_equal (fd->chunk_size, integer_one_node))
9888 t = build2 (EQ_EXPR, boolean_type_node,
9889 build_int_cst (itype, 0),
9890 build_int_cst (itype, 1));
9891 else
9892 t = build2 (fd->loop.cond_code, boolean_type_node,
9893 DECL_P (vback) && TREE_ADDRESSABLE (vback)
9894 ? t : vback, e);
9895 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
9896 }
9897
9898 /* Remove GIMPLE_OMP_CONTINUE. */
9899 gsi_remove (&gsi, true);
9900
9901 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
9902 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
9903
9904 /* Trip update code goes into TRIP_UPDATE_BB. */
9905 gsi = gsi_start_bb (trip_update_bb);
9906
9907 t = build_int_cst (itype, 1);
9908 t = build2 (PLUS_EXPR, itype, trip_main, t);
9909 assign_stmt = gimple_build_assign (trip_back, t);
9910 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
9911 }
9912
9913 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
9914 gsi = gsi_last_bb (exit_bb);
9915 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
9916 {
9917 t = gimple_omp_return_lhs (gsi_stmt (gsi));
9918 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
9919 }
9920 gsi_remove (&gsi, true);
9921
9922 /* Connect the new blocks. */
9923 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
9924 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
9925
9926 if (!broken_loop)
9927 {
9928 se = find_edge (cont_bb, body_bb);
9929 if (se == NULL)
9930 {
9931 se = BRANCH_EDGE (cont_bb);
9932 gcc_assert (single_succ (se->dest) == body_bb);
9933 }
9934 if (gimple_omp_for_combined_p (fd->for_stmt))
9935 {
9936 remove_edge (se);
9937 se = NULL;
9938 }
9939 else if (fd->collapse > 1)
9940 {
9941 remove_edge (se);
9942 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
9943 }
9944 else
9945 se->flags = EDGE_TRUE_VALUE;
9946 find_edge (cont_bb, trip_update_bb)->flags
9947 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
9948
9949 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
9950 }
9951
9952 if (gimple_in_ssa_p (cfun))
9953 {
9954 gphi_iterator psi;
9955 gphi *phi;
9956 edge re, ene;
9957 edge_var_map *vm;
9958 size_t i;
9959
9960 gcc_assert (fd->collapse == 1 && !broken_loop);
9961
9962 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
9963 remove arguments of the phi nodes in fin_bb. We need to create
9964 appropriate phi nodes in iter_part_bb instead. */
9965 se = find_edge (iter_part_bb, fin_bb);
9966 re = single_succ_edge (trip_update_bb);
9967 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
9968 ene = single_succ_edge (entry_bb);
9969
9970 psi = gsi_start_phis (fin_bb);
9971 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
9972 gsi_next (&psi), ++i)
9973 {
9974 gphi *nphi;
9975 source_location locus;
9976
9977 phi = psi.phi ();
9978 t = gimple_phi_result (phi);
9979 gcc_assert (t == redirect_edge_var_map_result (vm));
9980
9981 if (!single_pred_p (fin_bb))
9982 t = copy_ssa_name (t, phi);
9983
9984 nphi = create_phi_node (t, iter_part_bb);
9985
9986 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
9987 locus = gimple_phi_arg_location_from_edge (phi, se);
9988
9989 /* A special case -- fd->loop.v is not yet computed in
9990 iter_part_bb; we need to use vextra instead. */
9991 if (t == fd->loop.v)
9992 t = vextra;
9993 add_phi_arg (nphi, t, ene, locus);
9994 locus = redirect_edge_var_map_location (vm);
9995 tree back_arg = redirect_edge_var_map_def (vm);
9996 add_phi_arg (nphi, back_arg, re, locus);
9997 edge ce = find_edge (cont_bb, body_bb);
9998 if (ce == NULL)
9999 {
10000 ce = BRANCH_EDGE (cont_bb);
10001 gcc_assert (single_succ (ce->dest) == body_bb);
10002 ce = single_succ_edge (ce->dest);
10003 }
10004 gphi *inner_loop_phi = find_phi_with_arg_on_edge (back_arg, ce);
10005 gcc_assert (inner_loop_phi != NULL);
10006 add_phi_arg (inner_loop_phi, gimple_phi_result (nphi),
10007 find_edge (seq_start_bb, body_bb), locus);
10008
10009 if (!single_pred_p (fin_bb))
10010 add_phi_arg (phi, gimple_phi_result (nphi), se, locus);
10011 }
10012 gcc_assert (gsi_end_p (psi) && (head == NULL || i == head->length ()));
10013 redirect_edge_var_map_clear (re);
10014 if (single_pred_p (fin_bb))
10015 while (1)
10016 {
10017 psi = gsi_start_phis (fin_bb);
10018 if (gsi_end_p (psi))
10019 break;
10020 remove_phi_node (&psi, false);
10021 }
10022
10023 /* Make phi node for trip. */
10024 phi = create_phi_node (trip_main, iter_part_bb);
10025 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
10026 UNKNOWN_LOCATION);
10027 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
10028 UNKNOWN_LOCATION);
10029 }
10030
10031 if (!broken_loop)
10032 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
10033 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
10034 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
10035 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
10036 recompute_dominator (CDI_DOMINATORS, fin_bb));
10037 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
10038 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
10039 set_immediate_dominator (CDI_DOMINATORS, body_bb,
10040 recompute_dominator (CDI_DOMINATORS, body_bb));
10041
10042 if (!broken_loop)
10043 {
10044 struct loop *loop = body_bb->loop_father;
10045 struct loop *trip_loop = alloc_loop ();
10046 trip_loop->header = iter_part_bb;
10047 trip_loop->latch = trip_update_bb;
10048 add_loop (trip_loop, iter_part_bb->loop_father);
10049
10050 if (loop != entry_bb->loop_father)
10051 {
10052 gcc_assert (loop->header == body_bb);
10053 gcc_assert (loop->latch == region->cont
10054 || single_pred (loop->latch) == region->cont);
10055 trip_loop->inner = loop;
10056 return;
10057 }
10058
10059 if (!gimple_omp_for_combined_p (fd->for_stmt))
10060 {
10061 loop = alloc_loop ();
10062 loop->header = body_bb;
10063 if (collapse_bb == NULL)
10064 loop->latch = cont_bb;
10065 add_loop (loop, trip_loop);
10066 }
10067 }
10068 }
10069
10070 /* A subroutine of expand_omp_for. Generate code for a _Cilk_for loop.
10071 Given parameters:
10072 for (V = N1; V cond N2; V += STEP) BODY;
10073
10074 where COND is "<" or ">" or "!=", we generate pseudocode
10075
10076 for (ind_var = low; ind_var < high; ind_var++)
10077 {
10078 V = n1 + (ind_var * STEP)
10079
10080 <BODY>
10081 }
10082
10083 In the above pseudocode, low and high are function parameters of the
10084 child function. In the function below, we insert a temporary
10085 variable and calls to two OMP functions that will not otherwise be
10086 found in the body of a _Cilk_for (since OMP_FOR cannot be mixed
10087 with _Cilk_for). These calls are replaced with low and high
10088 by the function that handles taskreg. */
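/* Illustrative call structure (assumed from the description above,
   not verbatim from the Cilk runtime): __libcilkrts_cilk_for_64/_32
   repeatedly invokes the child function with [__low, __high)
   subranges of the iteration count, and the child reconstructs
   V = N1 + ind_var * STEP for each ind_var in that subrange.  */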
10089
10090
10091 static void
10092 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
10093 {
10094 bool broken_loop = region->cont == NULL;
10095 basic_block entry_bb = region->entry;
10096 basic_block cont_bb = region->cont;
10097
10098 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
10099 gcc_assert (broken_loop
10100 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
10101 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
10102 basic_block l1_bb, l2_bb;
10103
10104 if (!broken_loop)
10105 {
10106 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
10107 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
10108 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
10109 l2_bb = BRANCH_EDGE (entry_bb)->dest;
10110 }
10111 else
10112 {
10113 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
10114 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
10115 l2_bb = single_succ (l1_bb);
10116 }
10117 basic_block exit_bb = region->exit;
10118 basic_block l2_dom_bb = NULL;
10119
10120 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
10121
10122 /* The statements below, up to the "tree high_val = ..." line, are
10123 pseudo statements used to pass information to expand_omp_taskreg.
10124 low_val and high_val will be replaced by the __low and __high
10125 parameters of the child function.
10126
10127 The call_exprs part is a place-holder; it is mainly used
10128 to indicate to the top-level part that this is where we
10129 should put low and high (reasoning given in the header
10130 comment). */
10131
10132 tree child_fndecl
10133 = gimple_omp_parallel_child_fn (
10134 as_a <gomp_parallel *> (last_stmt (region->outer->entry)));
10135 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
10136 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
10137 {
10138 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high"))
10139 high_val = t;
10140 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low"))
10141 low_val = t;
10142 }
10143 gcc_assert (low_val && high_val);
10144
10145 tree type = TREE_TYPE (low_val);
10146 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
10147 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
10148
10149 /* Not needed in SSA form right now. */
10150 gcc_assert (!gimple_in_ssa_p (cfun));
10151 if (l2_dom_bb == NULL)
10152 l2_dom_bb = l1_bb;
10153
10154 tree n1 = low_val;
10155 tree n2 = high_val;
10156
10157 gimple *stmt = gimple_build_assign (ind_var, n1);
10158
10159 /* Replace the GIMPLE_OMP_FOR statement. */
10160 gsi_replace (&gsi, stmt, true);
10161
10162 if (!broken_loop)
10163 {
10164 /* Code to control the increment goes in the CONT_BB. */
10165 gsi = gsi_last_bb (cont_bb);
10166 stmt = gsi_stmt (gsi);
10167 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
10168 stmt = gimple_build_assign (ind_var, PLUS_EXPR, ind_var,
10169 build_one_cst (type));
10170
10171 /* Replace GIMPLE_OMP_CONTINUE. */
10172 gsi_replace (&gsi, stmt, true);
10173 }
10174
10175 /* Emit the condition in L1_BB. */
10176 gsi = gsi_after_labels (l1_bb);
10177 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
10178 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
10179 fd->loop.step);
10180 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
10181 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
10182 fd->loop.n1, fold_convert (sizetype, t));
10183 else
10184 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
10185 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
10186 t = fold_convert (TREE_TYPE (fd->loop.v), t);
10187 expand_omp_build_assign (&gsi, fd->loop.v, t);
10188
10189 /* The condition is always '<' since the runtime will fill in the low
10190 and high values. */
10191 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
10192 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
10193
10194 /* Remove GIMPLE_OMP_RETURN. */
10195 gsi = gsi_last_bb (exit_bb);
10196 gsi_remove (&gsi, true);
10197
10198 /* Connect the new blocks. */
10199 remove_edge (FALLTHRU_EDGE (entry_bb));
10200
10201 edge e, ne;
10202 if (!broken_loop)
10203 {
10204 remove_edge (BRANCH_EDGE (entry_bb));
10205 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
10206
10207 e = BRANCH_EDGE (l1_bb);
10208 ne = FALLTHRU_EDGE (l1_bb);
10209 e->flags = EDGE_TRUE_VALUE;
10210 }
10211 else
10212 {
10213 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
10214
10215 ne = single_succ_edge (l1_bb);
10216 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
10217
10218 }
10219 ne->flags = EDGE_FALSE_VALUE;
10220 e->probability = REG_BR_PROB_BASE * 7 / 8;
10221 ne->probability = REG_BR_PROB_BASE / 8;
10222
10223 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
10224 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
10225 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
10226
10227 if (!broken_loop)
10228 {
10229 struct loop *loop = alloc_loop ();
10230 loop->header = l1_bb;
10231 loop->latch = cont_bb;
10232 add_loop (loop, l1_bb->loop_father);
10233 loop->safelen = INT_MAX;
10234 }
10235
10236 /* Pick the correct library function based on the precision of the
10237 induction variable type. */
10238 tree lib_fun = NULL_TREE;
10239 if (TYPE_PRECISION (type) == 32)
10240 lib_fun = cilk_for_32_fndecl;
10241 else if (TYPE_PRECISION (type) == 64)
10242 lib_fun = cilk_for_64_fndecl;
10243 else
10244 gcc_unreachable ();
10245
10246 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
10247
10248 /* WS_ARGS contains the library function flavor to call:
10249 __libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32, and the
10250 user-defined grain value. If the user does not define one, then zero
10251 is passed in by the parser. */
10252 vec_alloc (region->ws_args, 2);
10253 region->ws_args->quick_push (lib_fun);
10254 region->ws_args->quick_push (fd->chunk_size);
10255 }
10256
10257 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
10258 loop. Given parameters:
10259
10260 for (V = N1; V cond N2; V += STEP) BODY;
10261
10262 where COND is "<" or ">", we generate pseudocode
10263
10264 V = N1;
10265 goto L1;
10266 L0:
10267 BODY;
10268 V += STEP;
10269 L1:
10270 if (V cond N2) goto L0; else goto L2;
10271 L2:
10272
10273 For collapsed loops, given parameters:
10274 collapse(3)
10275 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
10276 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
10277 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
10278 BODY;
10279
10280 we generate pseudocode
10281
10282 if (cond3 is <)
10283 adj = STEP3 - 1;
10284 else
10285 adj = STEP3 + 1;
10286 count3 = (adj + N32 - N31) / STEP3;
10287 if (cond2 is <)
10288 adj = STEP2 - 1;
10289 else
10290 adj = STEP2 + 1;
10291 count2 = (adj + N22 - N21) / STEP2;
10292 if (cond1 is <)
10293 adj = STEP1 - 1;
10294 else
10295 adj = STEP1 + 1;
10296 count1 = (adj + N12 - N11) / STEP1;
10297 count = count1 * count2 * count3;
10298 V = 0;
10299 V1 = N11;
10300 V2 = N21;
10301 V3 = N31;
10302 goto L1;
10303 L0:
10304 BODY;
10305 V += 1;
10306 V3 += STEP3;
10307 V2 += (V3 cond3 N32) ? 0 : STEP2;
10308 V3 = (V3 cond3 N32) ? V3 : N31;
10309 V1 += (V2 cond2 N22) ? 0 : STEP1;
10310 V2 = (V2 cond2 N22) ? V2 : N21;
10311 L1:
10312 if (V < count) goto L0; else goto L2;
10313 L2:
10314
10315 */
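/* Worked trace (illustrative only) for a two-level collapse with
   V1 in [0,2) step 1 and V2 in [0,3) step 1: count = 2 * 3 = 6 and V
   runs 0..5.  After the body with V2 = 2, the update gives V2 = 3,
   the test V2 cond2 N22 fails, so V1 += 1 and V2 resets to N21 = 0;
   the linear counter V alone decides loop exit.  */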
10316
10317 static void
10318 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
10319 {
10320 tree type, t;
10321 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
10322 gimple_stmt_iterator gsi;
10323 gimple *stmt;
10324 gcond *cond_stmt;
10325 bool broken_loop = region->cont == NULL;
10326 edge e, ne;
10327 tree *counts = NULL;
10328 int i;
10329 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
10330 OMP_CLAUSE_SAFELEN);
10331 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
10332 OMP_CLAUSE__SIMDUID_);
10333 tree n1, n2;
10334
10335 type = TREE_TYPE (fd->loop.v);
10336 entry_bb = region->entry;
10337 cont_bb = region->cont;
10338 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
10339 gcc_assert (broken_loop
10340 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
10341 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
10342 if (!broken_loop)
10343 {
10344 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
10345 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
10346 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
10347 l2_bb = BRANCH_EDGE (entry_bb)->dest;
10348 }
10349 else
10350 {
10351 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
10352 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
10353 l2_bb = single_succ (l1_bb);
10354 }
10355 exit_bb = region->exit;
10356 l2_dom_bb = NULL;
10357
10358 gsi = gsi_last_bb (entry_bb);
10359
10360 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
10361 /* Not needed in SSA form right now. */
10362 gcc_assert (!gimple_in_ssa_p (cfun));
10363 if (fd->collapse > 1)
10364 {
10365 int first_zero_iter = -1, dummy = -1;
10366 basic_block zero_iter_bb = l2_bb, dummy_bb = NULL;
10367
10368 counts = XALLOCAVEC (tree, fd->collapse);
10369 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
10370 zero_iter_bb, first_zero_iter,
10371 dummy_bb, dummy, l2_dom_bb);
10372 }
10373 if (l2_dom_bb == NULL)
10374 l2_dom_bb = l1_bb;
10375
10376 n1 = fd->loop.n1;
10377 n2 = fd->loop.n2;
10378 if (gimple_omp_for_combined_into_p (fd->for_stmt))
10379 {
10380 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
10381 OMP_CLAUSE__LOOPTEMP_);
10382 gcc_assert (innerc);
10383 n1 = OMP_CLAUSE_DECL (innerc);
10384 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
10385 OMP_CLAUSE__LOOPTEMP_);
10386 gcc_assert (innerc);
10387 n2 = OMP_CLAUSE_DECL (innerc);
10388 expand_omp_build_assign (&gsi, fd->loop.v,
10389 fold_convert (type, n1));
10390 if (fd->collapse > 1)
10391 {
10392 gsi_prev (&gsi);
10393 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
10394 gsi_next (&gsi);
10395 }
10396 }
10397 else
10398 {
10399 expand_omp_build_assign (&gsi, fd->loop.v,
10400 fold_convert (type, fd->loop.n1));
10401 if (fd->collapse > 1)
10402 for (i = 0; i < fd->collapse; i++)
10403 {
10404 tree itype = TREE_TYPE (fd->loops[i].v);
10405 if (POINTER_TYPE_P (itype))
10406 itype = signed_type_for (itype);
10407 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
10408 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
10409 }
10410 }
10411
10412 /* Remove the GIMPLE_OMP_FOR statement. */
10413 gsi_remove (&gsi, true);
10414
10415 if (!broken_loop)
10416 {
10417 /* Code to control the increment goes in the CONT_BB. */
10418 gsi = gsi_last_bb (cont_bb);
10419 stmt = gsi_stmt (gsi);
10420 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
10421
10422 if (POINTER_TYPE_P (type))
10423 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
10424 else
10425 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
10426 expand_omp_build_assign (&gsi, fd->loop.v, t);
10427
10428 if (fd->collapse > 1)
10429 {
10430 i = fd->collapse - 1;
10431 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
10432 {
10433 t = fold_convert (sizetype, fd->loops[i].step);
10434 t = fold_build_pointer_plus (fd->loops[i].v, t);
10435 }
10436 else
10437 {
10438 t = fold_convert (TREE_TYPE (fd->loops[i].v),
10439 fd->loops[i].step);
10440 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
10441 fd->loops[i].v, t);
10442 }
10443 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
10444
10445 for (i = fd->collapse - 1; i > 0; i--)
10446 {
10447 tree itype = TREE_TYPE (fd->loops[i].v);
10448 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
10449 if (POINTER_TYPE_P (itype2))
10450 itype2 = signed_type_for (itype2);
10451 t = build3 (COND_EXPR, itype2,
10452 build2 (fd->loops[i].cond_code, boolean_type_node,
10453 fd->loops[i].v,
10454 fold_convert (itype, fd->loops[i].n2)),
10455 build_int_cst (itype2, 0),
10456 fold_convert (itype2, fd->loops[i - 1].step));
10457 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
10458 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
10459 else
10460 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
10461 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
10462
10463 t = build3 (COND_EXPR, itype,
10464 build2 (fd->loops[i].cond_code, boolean_type_node,
10465 fd->loops[i].v,
10466 fold_convert (itype, fd->loops[i].n2)),
10467 fd->loops[i].v,
10468 fold_convert (itype, fd->loops[i].n1));
10469 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
10470 }
10471 }
10472
10473 /* Remove GIMPLE_OMP_CONTINUE. */
10474 gsi_remove (&gsi, true);
10475 }
10476
10477 /* Emit the condition in L1_BB. */
10478 gsi = gsi_start_bb (l1_bb);
10479
10480 t = fold_convert (type, n2);
10481 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
10482 false, GSI_CONTINUE_LINKING);
10483 tree v = fd->loop.v;
10484 if (DECL_P (v) && TREE_ADDRESSABLE (v))
10485 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
10486 false, GSI_CONTINUE_LINKING);
10487 t = build2 (fd->loop.cond_code, boolean_type_node, v, t);
10488 cond_stmt = gimple_build_cond_empty (t);
10489 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
10490 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p,
10491 NULL, NULL)
10492 || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p,
10493 NULL, NULL))
10494 {
10495 gsi = gsi_for_stmt (cond_stmt);
10496 gimple_regimplify_operands (cond_stmt, &gsi);
10497 }
10498
10499 /* Remove GIMPLE_OMP_RETURN. */
10500 gsi = gsi_last_bb (exit_bb);
10501 gsi_remove (&gsi, true);
10502
10503 /* Connect the new blocks. */
10504 remove_edge (FALLTHRU_EDGE (entry_bb));
10505
10506 if (!broken_loop)
10507 {
10508 remove_edge (BRANCH_EDGE (entry_bb));
10509 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
10510
10511 e = BRANCH_EDGE (l1_bb);
10512 ne = FALLTHRU_EDGE (l1_bb);
10513 e->flags = EDGE_TRUE_VALUE;
10514 }
10515 else
10516 {
10517 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
10518
10519 ne = single_succ_edge (l1_bb);
10520 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
10521
10522 }
10523 ne->flags = EDGE_FALSE_VALUE;
10524 e->probability = REG_BR_PROB_BASE * 7 / 8;
10525 ne->probability = REG_BR_PROB_BASE / 8;
10526
10527 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
10528 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
10529 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
10530
10531 if (!broken_loop)
10532 {
10533 struct loop *loop = alloc_loop ();
10534 loop->header = l1_bb;
10535 loop->latch = cont_bb;
10536 add_loop (loop, l1_bb->loop_father);
10537 if (safelen == NULL_TREE)
10538 loop->safelen = INT_MAX;
10539 else
10540 {
10541 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
10542 if (TREE_CODE (safelen) != INTEGER_CST)
10543 loop->safelen = 0;
10544 else if (!tree_fits_uhwi_p (safelen)
10545 || tree_to_uhwi (safelen) > INT_MAX)
10546 loop->safelen = INT_MAX;
10547 else
10548 loop->safelen = tree_to_uhwi (safelen);
10549 if (loop->safelen == 1)
10550 loop->safelen = 0;
10551 }
10552 if (simduid)
10553 {
10554 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
10555 cfun->has_simduid_loops = true;
10556 }
10557 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
10558 the loop. */
10559 if ((flag_tree_loop_vectorize
10560 || (!global_options_set.x_flag_tree_loop_vectorize
10561 && !global_options_set.x_flag_tree_vectorize))
10562 && flag_tree_loop_optimize
10563 && loop->safelen > 1)
10564 {
10565 loop->force_vectorize = true;
10566 cfun->has_force_vectorize_loops = true;
10567 }
10568 }
10569 else if (simduid)
10570 cfun->has_simduid_loops = true;
10571 }
10572
10573 /* Taskloop construct is represented after gimplification with
10574 two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched
10575 in between them. This routine expands the outer GIMPLE_OMP_FOR,
10576 which should just compute all the needed loop temporaries
10577 for GIMPLE_OMP_TASK. */
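/* Illustrative shape (restating the comment above):

     GIMPLE_OMP_FOR       <-- expanded here; computes the _looptemp_
       GIMPLE_OMP_TASK        bounds for the task
         GIMPLE_OMP_FOR   <-- the taskloop proper
           BODY  */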
10578
10579 static void
10580 expand_omp_taskloop_for_outer (struct omp_region *region,
10581 struct omp_for_data *fd,
10582 gimple *inner_stmt)
10583 {
10584 tree type, bias = NULL_TREE;
10585 basic_block entry_bb, cont_bb, exit_bb;
10586 gimple_stmt_iterator gsi;
10587 gassign *assign_stmt;
10588 tree *counts = NULL;
10589 int i;
10590
10591 gcc_assert (inner_stmt);
10592 gcc_assert (region->cont);
10593 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_TASK
10594 && gimple_omp_task_taskloop_p (inner_stmt));
10595 type = TREE_TYPE (fd->loop.v);
10596
10597 /* See if we need to bias by LLONG_MIN. */
10598 if (fd->iter_type == long_long_unsigned_type_node
10599 && TREE_CODE (type) == INTEGER_TYPE
10600 && !TYPE_UNSIGNED (type))
10601 {
10602 tree n1, n2;
10603
10604 if (fd->loop.cond_code == LT_EXPR)
10605 {
10606 n1 = fd->loop.n1;
10607 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
10608 }
10609 else
10610 {
10611 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
10612 n2 = fd->loop.n1;
10613 }
10614 if (TREE_CODE (n1) != INTEGER_CST
10615 || TREE_CODE (n2) != INTEGER_CST
10616 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
10617 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
10618 }
10619
10620 entry_bb = region->entry;
10621 cont_bb = region->cont;
10622 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
10623 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
10624 exit_bb = region->exit;
10625
10626 gsi = gsi_last_bb (entry_bb);
10627 gimple *for_stmt = gsi_stmt (gsi);
10628 gcc_assert (gimple_code (for_stmt) == GIMPLE_OMP_FOR);
10629 if (fd->collapse > 1)
10630 {
10631 int first_zero_iter = -1, dummy = -1;
10632 basic_block zero_iter_bb = NULL, dummy_bb = NULL, l2_dom_bb = NULL;
10633
10634 counts = XALLOCAVEC (tree, fd->collapse);
10635 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
10636 zero_iter_bb, first_zero_iter,
10637 dummy_bb, dummy, l2_dom_bb);
10638
10639 if (zero_iter_bb)
10640 {
10641 /* Some counts[i] vars might be uninitialized if
10642 some loop has zero iterations. But the body shouldn't
10643 be executed in that case, so just avoid uninit warnings. */
10644 for (i = first_zero_iter; i < fd->collapse; i++)
10645 if (SSA_VAR_P (counts[i]))
10646 TREE_NO_WARNING (counts[i]) = 1;
10647 gsi_prev (&gsi);
10648 edge e = split_block (entry_bb, gsi_stmt (gsi));
10649 entry_bb = e->dest;
10650 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
10651 gsi = gsi_last_bb (entry_bb);
10652 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
10653 get_immediate_dominator (CDI_DOMINATORS,
10654 zero_iter_bb));
10655 }
10656 }
10657
10658 tree t0, t1;
10659 t1 = fd->loop.n2;
10660 t0 = fd->loop.n1;
10661 if (POINTER_TYPE_P (TREE_TYPE (t0))
10662 && TYPE_PRECISION (TREE_TYPE (t0))
10663 != TYPE_PRECISION (fd->iter_type))
10664 {
10665 /* Avoid casting pointers to integers of a different size. */
10666 tree itype = signed_type_for (type);
10667 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
10668 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
10669 }
10670 else
10671 {
10672 t1 = fold_convert (fd->iter_type, t1);
10673 t0 = fold_convert (fd->iter_type, t0);
10674 }
10675 if (bias)
10676 {
10677 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
10678 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
10679 }
10680
10681 tree innerc = find_omp_clause (gimple_omp_task_clauses (inner_stmt),
10682 OMP_CLAUSE__LOOPTEMP_);
10683 gcc_assert (innerc);
10684 tree startvar = OMP_CLAUSE_DECL (innerc);
10685 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_);
10686 gcc_assert (innerc);
10687 tree endvar = OMP_CLAUSE_DECL (innerc);
10688 if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
10689 {
10690 gcc_assert (innerc);
10691 for (i = 1; i < fd->collapse; i++)
10692 {
10693 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
10694 OMP_CLAUSE__LOOPTEMP_);
10695 gcc_assert (innerc);
10696 }
10697 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
10698 OMP_CLAUSE__LOOPTEMP_);
10699 if (innerc)
10700 {
10701 /* If needed (inner taskloop has lastprivate clause), propagate
10702 down the total number of iterations. */
10703 tree t = force_gimple_operand_gsi (&gsi, fd->loop.n2, false,
10704 NULL_TREE, false,
10705 GSI_CONTINUE_LINKING);
10706 assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t);
10707 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10708 }
10709 }
10710
10711 t0 = force_gimple_operand_gsi (&gsi, t0, false, NULL_TREE, false,
10712 GSI_CONTINUE_LINKING);
10713 assign_stmt = gimple_build_assign (startvar, t0);
10714 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10715
10716 t1 = force_gimple_operand_gsi (&gsi, t1, false, NULL_TREE, false,
10717 GSI_CONTINUE_LINKING);
10718 assign_stmt = gimple_build_assign (endvar, t1);
10719 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10720 if (fd->collapse > 1)
10721 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
10722
10723 /* Remove the GIMPLE_OMP_FOR statement. */
10724 gsi = gsi_for_stmt (for_stmt);
10725 gsi_remove (&gsi, true);
10726
10727 gsi = gsi_last_bb (cont_bb);
10728 gsi_remove (&gsi, true);
10729
10730 gsi = gsi_last_bb (exit_bb);
10731 gsi_remove (&gsi, true);
10732
10733 FALLTHRU_EDGE (entry_bb)->probability = REG_BR_PROB_BASE;
10734 remove_edge (BRANCH_EDGE (entry_bb));
10735 FALLTHRU_EDGE (cont_bb)->probability = REG_BR_PROB_BASE;
10736 remove_edge (BRANCH_EDGE (cont_bb));
10737 set_immediate_dominator (CDI_DOMINATORS, exit_bb, cont_bb);
10738 set_immediate_dominator (CDI_DOMINATORS, region->entry,
10739 recompute_dominator (CDI_DOMINATORS, region->entry));
10740 }
10741
10742 /* Taskloop construct is represented after gimplification with
10743 two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched
10744 in between them. This routine expands the inner GIMPLE_OMP_FOR.
10745 GOMP_taskloop{,_ull} function arranges for each task to be given just
10746 a single range of iterations. */
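/* An editorial sketch (illustration only): with the per-task bounds
   N1 and N2 delivered through the two _looptemp_ clauses, the inner
   loop effectively executes

     for (V = N1; V cond N2; V += STEP)
       body;

   where cond is the original loop's comparison. */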
10747
10748 static void
10749 expand_omp_taskloop_for_inner (struct omp_region *region,
10750 struct omp_for_data *fd,
10751 gimple *inner_stmt)
10752 {
10753 tree e, t, type, itype, vmain, vback, bias = NULL_TREE;
10754 basic_block entry_bb, exit_bb, body_bb, cont_bb, collapse_bb = NULL;
10755 basic_block fin_bb;
10756 gimple_stmt_iterator gsi;
10757 edge ep;
10758 bool broken_loop = region->cont == NULL;
10759 tree *counts = NULL;
10760 tree n1, n2, step;
10761
10762 itype = type = TREE_TYPE (fd->loop.v);
10763 if (POINTER_TYPE_P (type))
10764 itype = signed_type_for (type);
10765
10766 /* See if we need to bias by LLONG_MIN. */
10767 if (fd->iter_type == long_long_unsigned_type_node
10768 && TREE_CODE (type) == INTEGER_TYPE
10769 && !TYPE_UNSIGNED (type))
10770 {
10771 tree n1, n2;
10772
10773 if (fd->loop.cond_code == LT_EXPR)
10774 {
10775 n1 = fd->loop.n1;
10776 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
10777 }
10778 else
10779 {
10780 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
10781 n2 = fd->loop.n1;
10782 }
10783 if (TREE_CODE (n1) != INTEGER_CST
10784 || TREE_CODE (n2) != INTEGER_CST
10785 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
10786 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
10787 }
10788
10789 entry_bb = region->entry;
10790 cont_bb = region->cont;
10791 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
10792 fin_bb = BRANCH_EDGE (entry_bb)->dest;
10793 gcc_assert (broken_loop
10794 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
10795 body_bb = FALLTHRU_EDGE (entry_bb)->dest;
10796 if (!broken_loop)
10797 {
10798 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
10799 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
10800 }
10801 exit_bb = region->exit;
10802
10803 /* Iteration space partitioning goes in ENTRY_BB. */
10804 gsi = gsi_last_bb (entry_bb);
10805 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
10806
10807 if (fd->collapse > 1)
10808 {
10809 int first_zero_iter = -1, dummy = -1;
10810 basic_block l2_dom_bb = NULL, dummy_bb = NULL;
10811
10812 counts = XALLOCAVEC (tree, fd->collapse);
10813 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
10814 fin_bb, first_zero_iter,
10815 dummy_bb, dummy, l2_dom_bb);
10816 t = NULL_TREE;
10817 }
10818 else
10819 t = integer_one_node;
10820
10821 step = fd->loop.step;
10822 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
10823 OMP_CLAUSE__LOOPTEMP_);
10824 gcc_assert (innerc);
10825 n1 = OMP_CLAUSE_DECL (innerc);
10826 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_);
10827 gcc_assert (innerc);
10828 n2 = OMP_CLAUSE_DECL (innerc);
10829 if (bias)
10830 {
10831 n1 = fold_build2 (PLUS_EXPR, fd->iter_type, n1, bias);
10832 n2 = fold_build2 (PLUS_EXPR, fd->iter_type, n2, bias);
10833 }
10834 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
10835 true, NULL_TREE, true, GSI_SAME_STMT);
10836 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
10837 true, NULL_TREE, true, GSI_SAME_STMT);
10838 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
10839 true, NULL_TREE, true, GSI_SAME_STMT);
10840
10841 tree startvar = fd->loop.v;
10842 tree endvar = NULL_TREE;
10843
10844 if (gimple_omp_for_combined_p (fd->for_stmt))
10845 {
10846 tree clauses = gimple_omp_for_clauses (inner_stmt);
10847 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
10848 gcc_assert (innerc);
10849 startvar = OMP_CLAUSE_DECL (innerc);
10850 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
10851 OMP_CLAUSE__LOOPTEMP_);
10852 gcc_assert (innerc);
10853 endvar = OMP_CLAUSE_DECL (innerc);
10854 }
10855 t = fold_convert (TREE_TYPE (startvar), n1);
10856 t = force_gimple_operand_gsi (&gsi, t,
10857 DECL_P (startvar)
10858 && TREE_ADDRESSABLE (startvar),
10859 NULL_TREE, false, GSI_CONTINUE_LINKING);
10860 gimple *assign_stmt = gimple_build_assign (startvar, t);
10861 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10862
10863 t = fold_convert (TREE_TYPE (startvar), n2);
10864 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
10865 false, GSI_CONTINUE_LINKING);
10866 if (endvar)
10867 {
10868 assign_stmt = gimple_build_assign (endvar, e);
10869 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10870 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
10871 assign_stmt = gimple_build_assign (fd->loop.v, e);
10872 else
10873 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
10874 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
10875 }
10876 if (fd->collapse > 1)
10877 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
10878
10879 if (!broken_loop)
10880 {
10881 /* The code controlling the sequential loop replaces the
10882 GIMPLE_OMP_CONTINUE. */
10883 gsi = gsi_last_bb (cont_bb);
10884 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
10885 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
10886 vmain = gimple_omp_continue_control_use (cont_stmt);
10887 vback = gimple_omp_continue_control_def (cont_stmt);
10888
10889 if (!gimple_omp_for_combined_p (fd->for_stmt))
10890 {
10891 if (POINTER_TYPE_P (type))
10892 t = fold_build_pointer_plus (vmain, step);
10893 else
10894 t = fold_build2 (PLUS_EXPR, type, vmain, step);
10895 t = force_gimple_operand_gsi (&gsi, t,
10896 DECL_P (vback)
10897 && TREE_ADDRESSABLE (vback),
10898 NULL_TREE, true, GSI_SAME_STMT);
10899 assign_stmt = gimple_build_assign (vback, t);
10900 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
10901
10902 t = build2 (fd->loop.cond_code, boolean_type_node,
10903 DECL_P (vback) && TREE_ADDRESSABLE (vback)
10904 ? t : vback, e);
10905 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
10906 }
10907
10908 /* Remove the GIMPLE_OMP_CONTINUE statement. */
10909 gsi_remove (&gsi, true);
10910
10911 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
10912 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
10913 }
10914
10915 /* Remove the GIMPLE_OMP_FOR statement. */
10916 gsi = gsi_for_stmt (fd->for_stmt);
10917 gsi_remove (&gsi, true);
10918
10919 /* Remove the GIMPLE_OMP_RETURN statement. */
10920 gsi = gsi_last_bb (exit_bb);
10921 gsi_remove (&gsi, true);
10922
10923 FALLTHRU_EDGE (entry_bb)->probability = REG_BR_PROB_BASE;
10924 if (!broken_loop)
10925 remove_edge (BRANCH_EDGE (entry_bb));
10926 else
10927 {
10928 remove_edge_and_dominated_blocks (BRANCH_EDGE (entry_bb));
10929 region->outer->cont = NULL;
10930 }
10931
10932 /* Connect all the blocks. */
10933 if (!broken_loop)
10934 {
10935 ep = find_edge (cont_bb, body_bb);
10936 if (gimple_omp_for_combined_p (fd->for_stmt))
10937 {
10938 remove_edge (ep);
10939 ep = NULL;
10940 }
10941 else if (fd->collapse > 1)
10942 {
10943 remove_edge (ep);
10944 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
10945 }
10946 else
10947 ep->flags = EDGE_TRUE_VALUE;
10948 find_edge (cont_bb, fin_bb)->flags
10949 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
10950 }
10951
10952 set_immediate_dominator (CDI_DOMINATORS, body_bb,
10953 recompute_dominator (CDI_DOMINATORS, body_bb));
10954 if (!broken_loop)
10955 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
10956 recompute_dominator (CDI_DOMINATORS, fin_bb));
10957
10958 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
10959 {
10960 struct loop *loop = alloc_loop ();
10961 loop->header = body_bb;
10962 if (collapse_bb == NULL)
10963 loop->latch = cont_bb;
10964 add_loop (loop, body_bb->loop_father);
10965 }
10966 }
10967
10968 /* A subroutine of expand_omp_for. Generate code for an OpenACC
10969 partitioned loop. The lowering here is abstracted, in that the
10970 loop parameters are passed through internal functions, which are
10971 further lowered by oacc_device_lower, once we get to the target
10972 compiler. The loop is of the form:
10973
10974 for (V = B; V LTGT E; V += S) {BODY}
10975
10976 where LTGT is < or >. We may have a specified chunking size, CHUNK_SIZE
10977 (constant 0 for no chunking), and we will have a GWV partitioning
10978 mask, specifying dimensions over which the loop is to be
10979 partitioned (see note below). We generate code that looks like:
10980
10981 <entry_bb> [incoming FALL->body, BRANCH->exit]
10982 typedef signedintify (typeof (V)) T; // underlying signed integral type
10983 T range = E - B;
10984 T chunk_no = 0;
10985 T DIR = LTGT == '<' ? +1 : -1;
10986 T chunk_max = GOACC_LOOP_CHUNK (dir, range, S, CHUNK_SIZE, GWV);
10987 T step = GOACC_LOOP_STEP (dir, range, S, CHUNK_SIZE, GWV);
10988
10989 <head_bb> [created by splitting end of entry_bb]
10990 T offset = GOACC_LOOP_OFFSET (dir, range, S, CHUNK_SIZE, GWV, chunk_no);
10991 T bound = GOACC_LOOP_BOUND (dir, range, S, CHUNK_SIZE, GWV, offset);
10992 if (!(offset LTGT bound)) goto bottom_bb;
10993
10994 <body_bb> [incoming]
10995 V = B + offset;
10996 {BODY}
10997
10998 <cont_bb> [incoming, may == body_bb FALL->exit_bb, BRANCH->body_bb]
10999 offset += step;
11000 if (offset LTGT bound) goto body_bb; [*]
11001
11002 <bottom_bb> [created by splitting start of exit_bb] insert BRANCH->head_bb
11003 chunk_no++;
11004 if (chunk_no < chunk_max) goto head_bb;
11005
11006 <exit_bb> [incoming]
11007 V = B + ((range -/+ 1) / S +/- 1) * S [*]
11008
11009 [*] Needed if V live at end of loop
11010
11011 Note: CHUNK_SIZE & GWV mask are specified explicitly here. This is a
11012 transition, and will be specified by a more general mechanism shortly.
11013 */
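/* Editorial sketch of the GWV mask: it is a bitmask over the OpenACC
   partitioning dimensions, e.g.

     gwv = GOMP_DIM_MASK (GOMP_DIM_GANG)
           | GOMP_DIM_MASK (GOMP_DIM_VECTOR);

   would request gang+vector partitioning; the SSA parallelizer below
   passes just GOMP_DIM_MASK (GOMP_DIM_GANG). */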
11014
11015 static void
11016 expand_oacc_for (struct omp_region *region, struct omp_for_data *fd)
11017 {
11018 tree v = fd->loop.v;
11019 enum tree_code cond_code = fd->loop.cond_code;
11020 enum tree_code plus_code = PLUS_EXPR;
11021
11022 tree chunk_size = integer_minus_one_node;
11023 tree gwv = integer_zero_node;
11024 tree iter_type = TREE_TYPE (v);
11025 tree diff_type = iter_type;
11026 tree plus_type = iter_type;
11027 struct oacc_collapse *counts = NULL;
11028
11029 gcc_checking_assert (gimple_omp_for_kind (fd->for_stmt)
11030 == GF_OMP_FOR_KIND_OACC_LOOP);
11031 gcc_assert (!gimple_omp_for_combined_into_p (fd->for_stmt));
11032 gcc_assert (cond_code == LT_EXPR || cond_code == GT_EXPR);
11033
11034 if (POINTER_TYPE_P (iter_type))
11035 {
11036 plus_code = POINTER_PLUS_EXPR;
11037 plus_type = sizetype;
11038 }
11039 if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
11040 diff_type = signed_type_for (diff_type);
11041
11042 basic_block entry_bb = region->entry; /* BB ending in OMP_FOR */
11043 basic_block exit_bb = region->exit; /* BB ending in OMP_RETURN */
11044 basic_block cont_bb = region->cont; /* BB ending in OMP_CONTINUE */
11045 basic_block bottom_bb = NULL;
11046
11047 /* entry_bb has two successors; the branch edge is to the exit
11048 block, the fallthrough edge to the body. */
11049 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2
11050 && BRANCH_EDGE (entry_bb)->dest == exit_bb);
11051
11052 /* If cont_bb is non-NULL, it has 2 successors. The branch successor is
11053 body_bb, or a block whose only successor is body_bb. Its
11054 fallthrough successor is the final block (same as the branch
11055 successor of the entry_bb). */
11056 if (cont_bb)
11057 {
11058 basic_block body_bb = FALLTHRU_EDGE (entry_bb)->dest;
11059 basic_block bed = BRANCH_EDGE (cont_bb)->dest;
11060
11061 gcc_assert (FALLTHRU_EDGE (cont_bb)->dest == exit_bb);
11062 gcc_assert (bed == body_bb || single_succ_edge (bed)->dest == body_bb);
11063 }
11064 else
11065 gcc_assert (!gimple_in_ssa_p (cfun));
11066
11067 /* The exit block only has entry_bb and cont_bb as predecessors. */
11068 gcc_assert (EDGE_COUNT (exit_bb->preds) == 1 + (cont_bb != NULL));
11069
11070 tree chunk_no;
11071 tree chunk_max = NULL_TREE;
11072 tree bound, offset;
11073 tree step = create_tmp_var (diff_type, ".step");
11074 bool up = cond_code == LT_EXPR;
11075 tree dir = build_int_cst (diff_type, up ? +1 : -1);
11076 bool chunking = !gimple_in_ssa_p (cfun);
11077 bool negating;
11078
11079 /* SSA instances. */
11080 tree offset_incr = NULL_TREE;
11081 tree offset_init = NULL_TREE;
11082
11083 gimple_stmt_iterator gsi;
11084 gassign *ass;
11085 gcall *call;
11086 gimple *stmt;
11087 tree expr;
11088 location_t loc;
11089 edge split, be, fte;
11090
11091 /* Split the end of entry_bb to create head_bb. */
11092 split = split_block (entry_bb, last_stmt (entry_bb));
11093 basic_block head_bb = split->dest;
11094 entry_bb = split->src;
11095
11096 /* Chunk setup goes at end of entry_bb, replacing the omp_for. */
11097 gsi = gsi_last_bb (entry_bb);
11098 gomp_for *for_stmt = as_a <gomp_for *> (gsi_stmt (gsi));
11099 loc = gimple_location (for_stmt);
11100
11101 if (gimple_in_ssa_p (cfun))
11102 {
11103 offset_init = gimple_omp_for_index (for_stmt, 0);
11104 gcc_assert (integer_zerop (fd->loop.n1));
11105 /* The SSA parallelizer does gang parallelism. */
11106 gwv = build_int_cst (integer_type_node, GOMP_DIM_MASK (GOMP_DIM_GANG));
11107 }
11108
11109 if (fd->collapse > 1)
11110 {
11111 counts = XALLOCAVEC (struct oacc_collapse, fd->collapse);
11112 tree total = expand_oacc_collapse_init (fd, &gsi, counts,
11113 TREE_TYPE (fd->loop.n2));
11114
11115 if (SSA_VAR_P (fd->loop.n2))
11116 {
11117 total = force_gimple_operand_gsi (&gsi, total, false, NULL_TREE,
11118 true, GSI_SAME_STMT);
11119 ass = gimple_build_assign (fd->loop.n2, total);
11120 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
11121 }
11122
11123 }
11124
11125 tree b = fd->loop.n1;
11126 tree e = fd->loop.n2;
11127 tree s = fd->loop.step;
11128
11129 b = force_gimple_operand_gsi (&gsi, b, true, NULL_TREE, true, GSI_SAME_STMT);
11130 e = force_gimple_operand_gsi (&gsi, e, true, NULL_TREE, true, GSI_SAME_STMT);
11131
11132 /* Convert the step, avoiding possible unsigned->signed overflow. */
11133 negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
11134 if (negating)
11135 s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
11136 s = fold_convert (diff_type, s);
11137 if (negating)
11138 s = fold_build1 (NEGATE_EXPR, diff_type, s);
11139 s = force_gimple_operand_gsi (&gsi, s, true, NULL_TREE, true, GSI_SAME_STMT);
11140
11141 if (!chunking)
11142 chunk_size = integer_zero_node;
11143 expr = fold_convert (diff_type, chunk_size);
11144 chunk_size = force_gimple_operand_gsi (&gsi, expr, true,
11145 NULL_TREE, true, GSI_SAME_STMT);
11146 /* Determine the range, avoiding possible unsigned->signed overflow. */
11147 negating = !up && TYPE_UNSIGNED (iter_type);
11148 expr = fold_build2 (MINUS_EXPR, plus_type,
11149 fold_convert (plus_type, negating ? b : e),
11150 fold_convert (plus_type, negating ? e : b));
11151 expr = fold_convert (diff_type, expr);
11152 if (negating)
11153 expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
11154 tree range = force_gimple_operand_gsi (&gsi, expr, true,
11155 NULL_TREE, true, GSI_SAME_STMT);
11156
11157 chunk_no = build_int_cst (diff_type, 0);
11158 if (chunking)
11159 {
11160 gcc_assert (!gimple_in_ssa_p (cfun));
11161
11162 expr = chunk_no;
11163 chunk_max = create_tmp_var (diff_type, ".chunk_max");
11164 chunk_no = create_tmp_var (diff_type, ".chunk_no");
11165
11166 ass = gimple_build_assign (chunk_no, expr);
11167 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
11168
11169 call = gimple_build_call_internal (IFN_GOACC_LOOP, 6,
11170 build_int_cst (integer_type_node,
11171 IFN_GOACC_LOOP_CHUNKS),
11172 dir, range, s, chunk_size, gwv);
11173 gimple_call_set_lhs (call, chunk_max);
11174 gimple_set_location (call, loc);
11175 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
11176 }
11177 else
11178 chunk_size = chunk_no;
11179
11180 call = gimple_build_call_internal (IFN_GOACC_LOOP, 6,
11181 build_int_cst (integer_type_node,
11182 IFN_GOACC_LOOP_STEP),
11183 dir, range, s, chunk_size, gwv);
11184 gimple_call_set_lhs (call, step);
11185 gimple_set_location (call, loc);
11186 gsi_insert_before (&gsi, call, GSI_SAME_STMT);
11187
11188 /* Remove the GIMPLE_OMP_FOR. */
11189 gsi_remove (&gsi, true);
11190
11191 /* Fix up edges from head_bb. */
11192 be = BRANCH_EDGE (head_bb);
11193 fte = FALLTHRU_EDGE (head_bb);
11194 be->flags |= EDGE_FALSE_VALUE;
11195 fte->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE;
11196
11197 basic_block body_bb = fte->dest;
11198
11199 if (gimple_in_ssa_p (cfun))
11200 {
11201 gsi = gsi_last_bb (cont_bb);
11202 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
11203
11204 offset = gimple_omp_continue_control_use (cont_stmt);
11205 offset_incr = gimple_omp_continue_control_def (cont_stmt);
11206 }
11207 else
11208 {
11209 offset = create_tmp_var (diff_type, ".offset");
11210 offset_init = offset_incr = offset;
11211 }
11212 bound = create_tmp_var (TREE_TYPE (offset), ".bound");
11213
11214 /* Loop offset & bound go into head_bb. */
11215 gsi = gsi_start_bb (head_bb);
11216
11217 call = gimple_build_call_internal (IFN_GOACC_LOOP, 7,
11218 build_int_cst (integer_type_node,
11219 IFN_GOACC_LOOP_OFFSET),
11220 dir, range, s,
11221 chunk_size, gwv, chunk_no);
11222 gimple_call_set_lhs (call, offset_init);
11223 gimple_set_location (call, loc);
11224 gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING);
11225
11226 call = gimple_build_call_internal (IFN_GOACC_LOOP, 7,
11227 build_int_cst (integer_type_node,
11228 IFN_GOACC_LOOP_BOUND),
11229 dir, range, s,
11230 chunk_size, gwv, offset_init);
11231 gimple_call_set_lhs (call, bound);
11232 gimple_set_location (call, loc);
11233 gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING);
11234
11235 expr = build2 (cond_code, boolean_type_node, offset_init, bound);
11236 gsi_insert_after (&gsi, gimple_build_cond_empty (expr),
11237 GSI_CONTINUE_LINKING);
11238
11239 /* V assignment goes into body_bb. */
11240 if (!gimple_in_ssa_p (cfun))
11241 {
11242 gsi = gsi_start_bb (body_bb);
11243
11244 expr = build2 (plus_code, iter_type, b,
11245 fold_convert (plus_type, offset));
11246 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
11247 true, GSI_SAME_STMT);
11248 ass = gimple_build_assign (v, expr);
11249 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
11250 if (fd->collapse > 1)
11251 expand_oacc_collapse_vars (fd, &gsi, counts, v);
11252 }
11253
11254 /* Loop increment goes into cont_bb. If this is not a loop, we
11255 will have spawned threads as if it was, and each one will
11256 execute one iteration. The specification is not explicit about
11257 whether such constructs are ill-formed or not, and they can
11258 occur, especially when noreturn routines are involved. */
11259 if (cont_bb)
11260 {
11261 gsi = gsi_last_bb (cont_bb);
11262 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
11263 loc = gimple_location (cont_stmt);
11264
11265 /* Increment offset. */
11266 if (gimple_in_ssa_p (cfun))
11267 expr = build2 (plus_code, iter_type, offset,
11268 fold_convert (plus_type, step));
11269 else
11270 expr = build2 (PLUS_EXPR, diff_type, offset, step);
11271 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
11272 true, GSI_SAME_STMT);
11273 ass = gimple_build_assign (offset_incr, expr);
11274 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
11275 expr = build2 (cond_code, boolean_type_node, offset_incr, bound);
11276 gsi_insert_before (&gsi, gimple_build_cond_empty (expr), GSI_SAME_STMT);
11277
11278 /* Remove the GIMPLE_OMP_CONTINUE. */
11279 gsi_remove (&gsi, true);
11280
11281 /* Fix up edges from cont_bb. */
11282 be = BRANCH_EDGE (cont_bb);
11283 fte = FALLTHRU_EDGE (cont_bb);
11284 be->flags |= EDGE_TRUE_VALUE;
11285 fte->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
11286
11287 if (chunking)
11288 {
11289 /* Split the beginning of exit_bb to make bottom_bb. We
11290 need to insert a nop at the start, because splitting is
11291 after a stmt, not before. */
11292 gsi = gsi_start_bb (exit_bb);
11293 stmt = gimple_build_nop ();
11294 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
11295 split = split_block (exit_bb, stmt);
11296 bottom_bb = split->src;
11297 exit_bb = split->dest;
11298 gsi = gsi_last_bb (bottom_bb);
11299
11300 /* Chunk increment and test goes into bottom_bb. */
11301 expr = build2 (PLUS_EXPR, diff_type, chunk_no,
11302 build_int_cst (diff_type, 1));
11303 ass = gimple_build_assign (chunk_no, expr);
11304 gsi_insert_after (&gsi, ass, GSI_CONTINUE_LINKING);
11305
11306 /* Chunk test at end of bottom_bb. */
11307 expr = build2 (LT_EXPR, boolean_type_node, chunk_no, chunk_max);
11308 gsi_insert_after (&gsi, gimple_build_cond_empty (expr),
11309 GSI_CONTINUE_LINKING);
11310
11311 /* Fixup edges from bottom_bb. */
11312 split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE;
11313 make_edge (bottom_bb, head_bb, EDGE_TRUE_VALUE);
11314 }
11315 }
11316
11317 gsi = gsi_last_bb (exit_bb);
11318 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
11319 loc = gimple_location (gsi_stmt (gsi));
11320
11321 if (!gimple_in_ssa_p (cfun))
11322 {
11323 /* Insert the final value of V, in case it is live. This is the
11324 value for the only thread that survives past the join. */
11325 expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
11326 expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
11327 expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);
11328 expr = fold_build2 (MULT_EXPR, diff_type, expr, s);
11329 expr = build2 (plus_code, iter_type, b, fold_convert (plus_type, expr));
11330 expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE,
11331 true, GSI_SAME_STMT);
11332 ass = gimple_build_assign (v, expr);
11333 gsi_insert_before (&gsi, ass, GSI_SAME_STMT);
11334 }
11335
11336 /* Remove the OMP_RETURN. */
11337 gsi_remove (&gsi, true);
11338
11339 if (cont_bb)
11340 {
11341 /* We now have one or two nested loops. Update the loop
11342 structures. */
11343 struct loop *parent = entry_bb->loop_father;
11344 struct loop *body = body_bb->loop_father;
11345
11346 if (chunking)
11347 {
11348 struct loop *chunk_loop = alloc_loop ();
11349 chunk_loop->header = head_bb;
11350 chunk_loop->latch = bottom_bb;
11351 add_loop (chunk_loop, parent);
11352 parent = chunk_loop;
11353 }
11354 else if (parent != body)
11355 {
11356 gcc_assert (body->header == body_bb);
11357 gcc_assert (body->latch == cont_bb
11358 || single_pred (body->latch) == cont_bb);
11359 parent = NULL;
11360 }
11361
11362 if (parent)
11363 {
11364 struct loop *body_loop = alloc_loop ();
11365 body_loop->header = body_bb;
11366 body_loop->latch = cont_bb;
11367 add_loop (body_loop, parent);
11368 }
11369 }
11370 }
11371
11372 /* Expand the OMP loop defined by REGION. */
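/* Editorial sketch of the dispatch below (assuming the libgomp loop
   builtins are enumerated in their declaration order): a plain

     #pragma omp for schedule(dynamic)

   without ordered takes the generic path with fn_index 1, so
   start_ix/next_ix resolve to BUILT_IN_GOMP_LOOP_DYNAMIC_START/_NEXT,
   shifted to the _ULL_ variants for unsigned long long iterators. */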
11373
11374 static void
11375 expand_omp_for (struct omp_region *region, gimple *inner_stmt)
11376 {
11377 struct omp_for_data fd;
11378 struct omp_for_data_loop *loops;
11379
11380 loops
11381 = (struct omp_for_data_loop *)
11382 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
11383 * sizeof (struct omp_for_data_loop));
11384 extract_omp_for_data (as_a <gomp_for *> (last_stmt (region->entry)),
11385 &fd, loops);
11386 region->sched_kind = fd.sched_kind;
11387 region->sched_modifiers = fd.sched_modifiers;
11388
11389 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
11390 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
11391 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
11392 if (region->cont)
11393 {
11394 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
11395 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
11396 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
11397 }
11398 else
11399 /* If there isn't a continue then this is a degenerate case where
11400 the introduction of abnormal edges during lowering will prevent
11401 original loops from being detected. Fix that up. */
11402 loops_state_set (LOOPS_NEED_FIXUP);
11403
11404 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
11405 expand_omp_simd (region, &fd);
11406 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
11407 expand_cilk_for (region, &fd);
11408 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
11409 {
11410 gcc_assert (!inner_stmt);
11411 expand_oacc_for (region, &fd);
11412 }
11413 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_TASKLOOP)
11414 {
11415 if (gimple_omp_for_combined_into_p (fd.for_stmt))
11416 expand_omp_taskloop_for_inner (region, &fd, inner_stmt);
11417 else
11418 expand_omp_taskloop_for_outer (region, &fd, inner_stmt);
11419 }
11420 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
11421 && !fd.have_ordered)
11422 {
11423 if (fd.chunk_size == NULL)
11424 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
11425 else
11426 expand_omp_for_static_chunk (region, &fd, inner_stmt);
11427 }
11428 else
11429 {
11430 int fn_index, start_ix, next_ix;
11431
11432 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
11433 == GF_OMP_FOR_KIND_FOR);
11434 if (fd.chunk_size == NULL
11435 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
11436 fd.chunk_size = integer_zero_node;
11437 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
11438 switch (fd.sched_kind)
11439 {
11440 case OMP_CLAUSE_SCHEDULE_RUNTIME:
11441 fn_index = 3;
11442 break;
11443 case OMP_CLAUSE_SCHEDULE_DYNAMIC:
11444 case OMP_CLAUSE_SCHEDULE_GUIDED:
11445 if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)
11446 && !fd.ordered
11447 && !fd.have_ordered)
11448 {
11449 fn_index = 3 + fd.sched_kind;
11450 break;
11451 }
11452 /* FALLTHRU */
11453 default:
11454 fn_index = fd.sched_kind;
11455 break;
11456 }
11457 if (!fd.ordered)
11458 fn_index += fd.have_ordered * 6;
11459 if (fd.ordered)
11460 start_ix = ((int)BUILT_IN_GOMP_LOOP_DOACROSS_STATIC_START) + fn_index;
11461 else
11462 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
11463 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
11464 if (fd.iter_type == long_long_unsigned_type_node)
11465 {
11466 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
11467 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
11468 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
11469 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
11470 }
11471 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
11472 (enum built_in_function) next_ix, inner_stmt);
11473 }
11474
11475 if (gimple_in_ssa_p (cfun))
11476 update_ssa (TODO_update_ssa_only_virtuals);
11477 }
11478
11479
11480 /* Expand code for an OpenMP sections directive. In pseudocode, we generate
11481
11482 v = GOMP_sections_start (n);
11483 L0:
11484 switch (v)
11485 {
11486 case 0:
11487 goto L2;
11488 case 1:
11489 section 1;
11490 goto L1;
11491 case 2:
11492 ...
11493 case n:
11494 ...
11495 default:
11496 abort ();
11497 }
11498 L1:
11499 v = GOMP_sections_next ();
11500 goto L0;
11501 L2:
11502 reduction;
11503
11504 If this is a combined parallel sections, replace the call to
11505 GOMP_sections_start with call to GOMP_sections_next. */
11506
11507 static void
11508 expand_omp_sections (struct omp_region *region)
11509 {
11510 tree t, u, vin = NULL, vmain, vnext, l2;
11511 unsigned len;
11512 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
11513 gimple_stmt_iterator si, switch_si;
11514 gomp_sections *sections_stmt;
11515 gimple *stmt;
11516 gomp_continue *cont;
11517 edge_iterator ei;
11518 edge e;
11519 struct omp_region *inner;
11520 unsigned i, casei;
11521 bool exit_reachable = region->cont != NULL;
11522
11523 gcc_assert (region->exit != NULL);
11524 entry_bb = region->entry;
11525 l0_bb = single_succ (entry_bb);
11526 l1_bb = region->cont;
11527 l2_bb = region->exit;
11528 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
11529 l2 = gimple_block_label (l2_bb);
11530 else
11531 {
11532 /* This can happen if there are reductions. */
11533 len = EDGE_COUNT (l0_bb->succs);
11534 gcc_assert (len > 0);
11535 e = EDGE_SUCC (l0_bb, len - 1);
11536 si = gsi_last_bb (e->dest);
11537 l2 = NULL_TREE;
11538 if (gsi_end_p (si)
11539 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
11540 l2 = gimple_block_label (e->dest);
11541 else
11542 FOR_EACH_EDGE (e, ei, l0_bb->succs)
11543 {
11544 si = gsi_last_bb (e->dest);
11545 if (gsi_end_p (si)
11546 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
11547 {
11548 l2 = gimple_block_label (e->dest);
11549 break;
11550 }
11551 }
11552 }
11553 if (exit_reachable)
11554 default_bb = create_empty_bb (l1_bb->prev_bb);
11555 else
11556 default_bb = create_empty_bb (l0_bb);
11557
11558 /* We will build a switch() with enough cases for all the
11559 GIMPLE_OMP_SECTION regions, a '0' case to signal that no more work
11560 remains, and a default case to abort if something goes wrong. */
11561 len = EDGE_COUNT (l0_bb->succs);
11562
11563 /* Use vec::quick_push on label_vec throughout, since we know the size
11564 in advance. */
11565 auto_vec<tree> label_vec (len);
11566
11567 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
11568 GIMPLE_OMP_SECTIONS statement. */
11569 si = gsi_last_bb (entry_bb);
11570 sections_stmt = as_a <gomp_sections *> (gsi_stmt (si));
11571 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
11572 vin = gimple_omp_sections_control (sections_stmt);
11573 if (!is_combined_parallel (region))
11574 {
11575 /* If we are not inside a combined parallel+sections region,
11576 call GOMP_sections_start. */
11577 t = build_int_cst (unsigned_type_node, len - 1);
11578 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
11579 stmt = gimple_build_call (u, 1, t);
11580 }
11581 else
11582 {
11583 /* Otherwise, call GOMP_sections_next. */
11584 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
11585 stmt = gimple_build_call (u, 0);
11586 }
11587 gimple_call_set_lhs (stmt, vin);
11588 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
11589 gsi_remove (&si, true);
11590
11591 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
11592 L0_BB. */
11593 switch_si = gsi_last_bb (l0_bb);
11594 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
11595 if (exit_reachable)
11596 {
11597 cont = as_a <gomp_continue *> (last_stmt (l1_bb));
11598 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
11599 vmain = gimple_omp_continue_control_use (cont);
11600 vnext = gimple_omp_continue_control_def (cont);
11601 }
11602 else
11603 {
11604 vmain = vin;
11605 vnext = NULL_TREE;
11606 }
11607
11608 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
11609 label_vec.quick_push (t);
11610 i = 1;
11611
11612 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
11613 for (inner = region->inner, casei = 1;
11614 inner;
11615 inner = inner->next, i++, casei++)
11616 {
11617 basic_block s_entry_bb, s_exit_bb;
11618
11619 /* Skip optional reduction region. */
11620 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
11621 {
11622 --i;
11623 --casei;
11624 continue;
11625 }
11626
11627 s_entry_bb = inner->entry;
11628 s_exit_bb = inner->exit;
11629
11630 t = gimple_block_label (s_entry_bb);
11631 u = build_int_cst (unsigned_type_node, casei);
11632 u = build_case_label (u, NULL, t);
11633 label_vec.quick_push (u);
11634
11635 si = gsi_last_bb (s_entry_bb);
11636 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
11637 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
11638 gsi_remove (&si, true);
11639 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
11640
11641 if (s_exit_bb == NULL)
11642 continue;
11643
11644 si = gsi_last_bb (s_exit_bb);
11645 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
11646 gsi_remove (&si, true);
11647
11648 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
11649 }
11650
11651 /* Error handling code goes in DEFAULT_BB. */
11652 t = gimple_block_label (default_bb);
11653 u = build_case_label (NULL, NULL, t);
11654 make_edge (l0_bb, default_bb, 0);
11655 add_bb_to_loop (default_bb, current_loops->tree_root);
11656
11657 stmt = gimple_build_switch (vmain, u, label_vec);
11658 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
11659 gsi_remove (&switch_si, true);
11660
11661 si = gsi_start_bb (default_bb);
11662 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
11663 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
11664
11665 if (exit_reachable)
11666 {
11667 tree bfn_decl;
11668
11669 /* Code to get the next section goes in L1_BB. */
11670 si = gsi_last_bb (l1_bb);
11671 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
11672
11673 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
11674 stmt = gimple_build_call (bfn_decl, 0);
11675 gimple_call_set_lhs (stmt, vnext);
11676 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
11677 gsi_remove (&si, true);
11678
11679 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
11680 }
11681
11682 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
11683 si = gsi_last_bb (l2_bb);
11684 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
11685 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
11686 else if (gimple_omp_return_lhs (gsi_stmt (si)))
11687 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
11688 else
11689 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
11690 stmt = gimple_build_call (t, 0);
11691 if (gimple_omp_return_lhs (gsi_stmt (si)))
11692 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
11693 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
11694 gsi_remove (&si, true);
11695
11696 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
11697 }
11698
11699
11700 /* Expand code for an OpenMP single directive. We've already expanded
11701 much of the code; here we simply place the GOMP_barrier call. */
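/* An editorial sketch: after expansion,

     #pragma omp single
     body;

   leaves the already-lowered guarded body followed by roughly

     GOMP_barrier ();

   at the region exit; the barrier is omitted under nowait. */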
11702
11703 static void
11704 expand_omp_single (struct omp_region *region)
11705 {
11706 basic_block entry_bb, exit_bb;
11707 gimple_stmt_iterator si;
11708
11709 entry_bb = region->entry;
11710 exit_bb = region->exit;
11711
11712 si = gsi_last_bb (entry_bb);
11713 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
11714 gsi_remove (&si, true);
11715 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
11716
11717 si = gsi_last_bb (exit_bb);
11718 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
11719 {
11720 tree t = gimple_omp_return_lhs (gsi_stmt (si));
11721 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
11722 }
11723 gsi_remove (&si, true);
11724 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
11725 }
11726
11727
11728 /* Generic expansion for OpenMP synchronization directives: master,
11729 ordered and critical. All we need to do here is remove the entry
11730 and exit markers for REGION. */
11731
11732 static void
11733 expand_omp_synch (struct omp_region *region)
11734 {
11735 basic_block entry_bb, exit_bb;
11736 gimple_stmt_iterator si;
11737
11738 entry_bb = region->entry;
11739 exit_bb = region->exit;
11740
11741 si = gsi_last_bb (entry_bb);
11742 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
11743 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
11744 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
11745 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
11746 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
11747 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
11748 gsi_remove (&si, true);
11749 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
11750
11751 if (exit_bb)
11752 {
11753 si = gsi_last_bb (exit_bb);
11754 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
11755 gsi_remove (&si, true);
11756 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
11757 }
11758 }
11759
11760 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
11761 operation as a normal volatile load. */
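/* An editorial sketch: for a 4-byte int (index 2) the code below
   emits the equivalent of

     loaded_val = __atomic_load_4 (addr, MEMMODEL_RELAXED);

   using MEMMODEL_SEQ_CST instead when the directive is seq_cst. */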
11762
11763 static bool
11764 expand_omp_atomic_load (basic_block load_bb, tree addr,
11765 tree loaded_val, int index)
11766 {
11767 enum built_in_function tmpbase;
11768 gimple_stmt_iterator gsi;
11769 basic_block store_bb;
11770 location_t loc;
11771 gimple *stmt;
11772 tree decl, call, type, itype;
11773
11774 gsi = gsi_last_bb (load_bb);
11775 stmt = gsi_stmt (gsi);
11776 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
11777 loc = gimple_location (stmt);
11778
11779 /* ??? If the target does not implement atomic_load_optab[mode], and mode
11780 is smaller than word size, then expand_atomic_load assumes that the load
11781 is atomic. We could avoid the builtin entirely in this case. */
11782
11783 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
11784 decl = builtin_decl_explicit (tmpbase);
11785 if (decl == NULL_TREE)
11786 return false;
11787
11788 type = TREE_TYPE (loaded_val);
11789 itype = TREE_TYPE (TREE_TYPE (decl));
11790
11791 call = build_call_expr_loc (loc, decl, 2, addr,
11792 build_int_cst (NULL,
11793 gimple_omp_atomic_seq_cst_p (stmt)
11794 ? MEMMODEL_SEQ_CST
11795 : MEMMODEL_RELAXED));
11796 if (!useless_type_conversion_p (type, itype))
11797 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
11798 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
11799
11800 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
11801 gsi_remove (&gsi, true);
11802
11803 store_bb = single_succ (load_bb);
11804 gsi = gsi_last_bb (store_bb);
11805 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
11806 gsi_remove (&gsi, true);
11807
11808 if (gimple_in_ssa_p (cfun))
11809 update_ssa (TODO_update_ssa_no_phi);
11810
11811 return true;
11812 }
11813
11814 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
11815 operation as a normal volatile store. */
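/* An editorial sketch: for a 4-byte int this becomes roughly

     __atomic_store_4 (addr, stored_val, MEMMODEL_RELAXED);

   or, when the old value is live (an exchange) and the target
   supports it,

     loaded_val = __atomic_exchange_4 (addr, stored_val,
                                       MEMMODEL_RELAXED); */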
11816
11817 static bool
11818 expand_omp_atomic_store (basic_block load_bb, tree addr,
11819 tree loaded_val, tree stored_val, int index)
11820 {
11821 enum built_in_function tmpbase;
11822 gimple_stmt_iterator gsi;
11823 basic_block store_bb = single_succ (load_bb);
11824 location_t loc;
11825 gimple *stmt;
11826 tree decl, call, type, itype;
11827 machine_mode imode;
11828 bool exchange;
11829
11830 gsi = gsi_last_bb (load_bb);
11831 stmt = gsi_stmt (gsi);
11832 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
11833
11834 /* If the load value is needed, then this isn't a store but an exchange. */
11835 exchange = gimple_omp_atomic_need_value_p (stmt);
11836
11837 gsi = gsi_last_bb (store_bb);
11838 stmt = gsi_stmt (gsi);
11839 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
11840 loc = gimple_location (stmt);
11841
11842 /* ??? If the target does not implement atomic_store_optab[mode], and mode
11843 is smaller than word size, then expand_atomic_store assumes that the store
11844 is atomic. We could avoid the builtin entirely in this case. */
11845
11846 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
11847 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
11848 decl = builtin_decl_explicit (tmpbase);
11849 if (decl == NULL_TREE)
11850 return false;
11851
11852 type = TREE_TYPE (stored_val);
11853
11854 /* Dig out the type of the function's second argument. */
11855 itype = TREE_TYPE (decl);
11856 itype = TYPE_ARG_TYPES (itype);
11857 itype = TREE_CHAIN (itype);
11858 itype = TREE_VALUE (itype);
11859 imode = TYPE_MODE (itype);
11860
11861 if (exchange && !can_atomic_exchange_p (imode, true))
11862 return false;
11863
11864 if (!useless_type_conversion_p (itype, type))
11865 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
11866 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
11867 build_int_cst (NULL,
11868 gimple_omp_atomic_seq_cst_p (stmt)
11869 ? MEMMODEL_SEQ_CST
11870 : MEMMODEL_RELAXED));
11871 if (exchange)
11872 {
11873 if (!useless_type_conversion_p (type, itype))
11874 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
11875 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
11876 }
11877
11878 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
11879 gsi_remove (&gsi, true);
11880
11881 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
11882 gsi = gsi_last_bb (load_bb);
11883 gsi_remove (&gsi, true);
11884
11885 if (gimple_in_ssa_p (cfun))
11886 update_ssa (TODO_update_ssa_no_phi);
11887
11888 return true;
11889 }
11890
11891 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
11892 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
11893 size of the data type, and thus usable to find the index of the builtin
11894 decl. Returns false if the expression is not of the proper form. */
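/* An editorial sketch: the supported shape

     GIMPLE_OMP_ATOMIC_LOAD (tmp, &x)
     val = tmp + expr;
     GIMPLE_OMP_ATOMIC_STORE (val)

   collapses, for a 4-byte int (index 2), into roughly

     __atomic_fetch_add_4 (&x, expr, MEMMODEL_RELAXED);

   with the ADD_FETCH_4 form chosen instead when the updated value is
   needed. */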
11895
11896 static bool
11897 expand_omp_atomic_fetch_op (basic_block load_bb,
11898 tree addr, tree loaded_val,
11899 tree stored_val, int index)
11900 {
11901 enum built_in_function oldbase, newbase, tmpbase;
11902 tree decl, itype, call;
11903 tree lhs, rhs;
11904 basic_block store_bb = single_succ (load_bb);
11905 gimple_stmt_iterator gsi;
11906 gimple *stmt;
11907 location_t loc;
11908 enum tree_code code;
11909 bool need_old, need_new;
11910 machine_mode imode;
11911 bool seq_cst;
11912
11913 /* We expect to find the following sequences:
11914
11915 load_bb:
11916 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
11917
11918 store_bb:
11919 val = tmp OP something; (or: something OP tmp)
11920 GIMPLE_OMP_ATOMIC_STORE (val)
11921
11922 ???FIXME: Allow a more flexible sequence.
11923 Perhaps use data flow to pick the statements.
11924
11925 */
11926
11927 gsi = gsi_after_labels (store_bb);
11928 stmt = gsi_stmt (gsi);
11929 loc = gimple_location (stmt);
11930 if (!is_gimple_assign (stmt))
11931 return false;
11932 gsi_next (&gsi);
11933 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
11934 return false;
11935 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
11936 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
11937 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
11938 gcc_checking_assert (!need_old || !need_new);
11939
11940 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
11941 return false;
11942
11943 /* Check for one of the supported fetch-op operations. */
11944 code = gimple_assign_rhs_code (stmt);
11945 switch (code)
11946 {
11947 case PLUS_EXPR:
11948 case POINTER_PLUS_EXPR:
11949 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
11950 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
11951 break;
11952 case MINUS_EXPR:
11953 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
11954 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
11955 break;
11956 case BIT_AND_EXPR:
11957 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
11958 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
11959 break;
11960 case BIT_IOR_EXPR:
11961 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
11962 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
11963 break;
11964 case BIT_XOR_EXPR:
11965 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
11966 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
11967 break;
11968 default:
11969 return false;
11970 }
11971
11972 /* Make sure the expression is of the proper form. */
11973 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
11974 rhs = gimple_assign_rhs2 (stmt);
11975 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
11976 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
11977 rhs = gimple_assign_rhs1 (stmt);
11978 else
11979 return false;
11980
11981 tmpbase = ((enum built_in_function)
11982 ((need_new ? newbase : oldbase) + index + 1));
11983 decl = builtin_decl_explicit (tmpbase);
11984 if (decl == NULL_TREE)
11985 return false;
11986 itype = TREE_TYPE (TREE_TYPE (decl));
11987 imode = TYPE_MODE (itype);
11988
11989 /* We could test all of the various optabs involved, but the fact of the
11990 matter is that (with the exception of i486 vs i586 and xadd) all targets
11991 that support any atomic operation optab also implement compare-and-swap.
11992 Let optabs.c take care of expanding any compare-and-swap loop. */
11993 if (!can_compare_and_swap_p (imode, true))
11994 return false;
11995
11996 gsi = gsi_last_bb (load_bb);
11997 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
11998
11999 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
12000 It only requires that the operation happen atomically. Thus we can
12001 use the RELAXED memory model, unless seq_cst was requested. */
12002 call = build_call_expr_loc (loc, decl, 3, addr,
12003 fold_convert_loc (loc, itype, rhs),
12004 build_int_cst (NULL,
12005 seq_cst ? MEMMODEL_SEQ_CST
12006 : MEMMODEL_RELAXED));
12007
12008 if (need_old || need_new)
12009 {
12010 lhs = need_old ? loaded_val : stored_val;
12011 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
12012 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
12013 }
12014 else
12015 call = fold_convert_loc (loc, void_type_node, call);
12016 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
12017 gsi_remove (&gsi, true);
12018
12019 gsi = gsi_last_bb (store_bb);
12020 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
12021 gsi_remove (&gsi, true);
12022 gsi = gsi_last_bb (store_bb);
12023 gsi_remove (&gsi, true);
12024
12025 if (gimple_in_ssa_p (cfun))
12026 update_ssa (TODO_update_ssa_no_phi);
12027
12028 return true;
12029 }
12030
12031 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
12032
12033 oldval = *addr;
12034 repeat:
12035 newval = rhs; // with oldval replacing *addr in rhs
12036 prev = __sync_val_compare_and_swap (addr, oldval, newval);
12037 if (prev != oldval)
12038 { oldval = prev; goto repeat; }
12039
12040 INDEX is log2 of the size of the data type, and thus usable to find the
12041 index of the builtin decl. */
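/* An editorial sketch for a float (view-converted to a 4-byte
   integer, as done below):

     cur = VIEW_CONVERT_EXPR<int> (*addr);
     do
       {
         old = cur;
         newi = VIEW_CONVERT_EXPR<int> (rhs recomputed from old);
         cur = __sync_val_compare_and_swap_4 (iaddr, old, newi);
       }
     while (cur != old);

   keeping the retry comparison integral (see the NaN/-0.0 note in
   the body). */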
12042
12043 static bool
12044 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
12045 tree addr, tree loaded_val, tree stored_val,
12046 int index)
12047 {
12048 tree loadedi, storedi, initial, new_storedi, old_vali;
12049 tree type, itype, cmpxchg, iaddr;
12050 gimple_stmt_iterator si;
12051 basic_block loop_header = single_succ (load_bb);
12052 gimple *phi, *stmt;
12053 edge e;
12054 enum built_in_function fncode;
12055
12056 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
12057 order to use the RELAXED memory model effectively. */
12058 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
12059 + index + 1);
12060 cmpxchg = builtin_decl_explicit (fncode);
12061 if (cmpxchg == NULL_TREE)
12062 return false;
12063 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
12064 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
12065
12066 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
12067 return false;
12068
12069 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
12070 si = gsi_last_bb (load_bb);
12071 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
12072
12073 /* For floating-point values, we'll need to view-convert them to integers
12074 so that we can perform the atomic compare and swap. Simplify the
12075 following code by always setting up the "i"ntegral variables. */
12076 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
12077 {
12078 tree iaddr_val;
12079
12080 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
12081 true));
12082 iaddr_val
12083 = force_gimple_operand_gsi (&si,
12084 fold_convert (TREE_TYPE (iaddr), addr),
12085 false, NULL_TREE, true, GSI_SAME_STMT);
12086 stmt = gimple_build_assign (iaddr, iaddr_val);
12087 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
12088 loadedi = create_tmp_var (itype);
12089 if (gimple_in_ssa_p (cfun))
12090 loadedi = make_ssa_name (loadedi);
12091 }
12092 else
12093 {
12094 iaddr = addr;
12095 loadedi = loaded_val;
12096 }
12097
12098 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
12099 tree loaddecl = builtin_decl_explicit (fncode);
12100 if (loaddecl)
12101 initial
12102 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
12103 build_call_expr (loaddecl, 2, iaddr,
12104 build_int_cst (NULL_TREE,
12105 MEMMODEL_RELAXED)));
12106 else
12107 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
12108 build_int_cst (TREE_TYPE (iaddr), 0));
12109
12110 initial
12111 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
12112 GSI_SAME_STMT);
12113
12114 /* Move the value to the LOADEDI temporary. */
12115 if (gimple_in_ssa_p (cfun))
12116 {
12117 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
12118 phi = create_phi_node (loadedi, loop_header);
12119 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
12120 initial);
12121 }
12122 else
12123 gsi_insert_before (&si,
12124 gimple_build_assign (loadedi, initial),
12125 GSI_SAME_STMT);
12126 if (loadedi != loaded_val)
12127 {
12128 gimple_stmt_iterator gsi2;
12129 tree x;
12130
12131 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
12132 gsi2 = gsi_start_bb (loop_header);
12133 if (gimple_in_ssa_p (cfun))
12134 {
12135 gassign *stmt;
12136 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
12137 true, GSI_SAME_STMT);
12138 stmt = gimple_build_assign (loaded_val, x);
12139 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
12140 }
12141 else
12142 {
12143 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
12144 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
12145 true, GSI_SAME_STMT);
12146 }
12147 }
12148 gsi_remove (&si, true);
12149
12150 si = gsi_last_bb (store_bb);
12151 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
12152
12153 if (iaddr == addr)
12154 storedi = stored_val;
12155 else
12156 storedi
12157 = force_gimple_operand_gsi (&si,
12158 build1 (VIEW_CONVERT_EXPR, itype,
12159 stored_val), true, NULL_TREE, true,
12160 GSI_SAME_STMT);
12161
12162 /* Build the compare&swap statement. */
12163 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
12164 new_storedi = force_gimple_operand_gsi (&si,
12165 fold_convert (TREE_TYPE (loadedi),
12166 new_storedi),
12167 true, NULL_TREE,
12168 true, GSI_SAME_STMT);
12169
12170 if (gimple_in_ssa_p (cfun))
12171 old_vali = loadedi;
12172 else
12173 {
12174 old_vali = create_tmp_var (TREE_TYPE (loadedi));
12175 stmt = gimple_build_assign (old_vali, loadedi);
12176 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
12177
12178 stmt = gimple_build_assign (loadedi, new_storedi);
12179 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
12180 }
12181
12182 /* Note that we always perform the comparison as an integer, even for
12183 floating point. This allows the atomic operation to properly
12184 succeed even with NaNs and -0.0. */
12185 stmt = gimple_build_cond_empty
12186 (build2 (NE_EXPR, boolean_type_node,
12187 new_storedi, old_vali));
12188 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
12189
12190 /* Update cfg. */
12191 e = single_succ_edge (store_bb);
12192 e->flags &= ~EDGE_FALLTHRU;
12193 e->flags |= EDGE_FALSE_VALUE;
12194
12195 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
12196
12197 /* Copy the new value to loadedi (we already did that before the condition
12198 if we are not in SSA). */
12199 if (gimple_in_ssa_p (cfun))
12200 {
12201 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
12202 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
12203 }
12204
12205 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
12206 gsi_remove (&si, true);
12207
12208 struct loop *loop = alloc_loop ();
12209 loop->header = loop_header;
12210 loop->latch = store_bb;
12211 add_loop (loop, loop_header->loop_father);
12212
12213 if (gimple_in_ssa_p (cfun))
12214 update_ssa (TODO_update_ssa_no_phi);
12215
12216 return true;
12217 }
12218
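/* Illustrative sketch only, not compiler code: for a 4-byte float
   operand, the compare-and-swap loop built above behaves roughly like
   the following user-level C (names are hypothetical):

     int *ip = (int *) addr;                  <- IADDR, the integer view
     int loaded = __atomic_load_4 (ip, __ATOMIC_RELAXED);
     int old, stored;
     do
       {
	 old = loaded;
	 float tmp;
	 __builtin_memcpy (&tmp, &old, 4);     <- the VIEW_CONVERT_EXPR
	 tmp = <the atomic statement's RHS>;
	 __builtin_memcpy (&stored, &tmp, 4);
	 loaded = __sync_val_compare_and_swap (ip, old, stored);
       }
     while (loaded != old);

   The retry test compares the integer views, which is why NaNs and
   -0.0 do not make the loop spin (see the comment above).  */
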
12219 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
12220
12221 GOMP_atomic_start ();
12222 *addr = rhs;
12223 GOMP_atomic_end ();
12224
12225 The result is not globally atomic, but works so long as all parallel
12226 references are within #pragma omp atomic directives. According to
12227    responses received from omp@openmp.org, this appears to be within
12228    spec, which makes sense given that several other compilers handle
12229    this situation the same way.
12230 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
12231 expanding. STORED_VAL is the operand of the matching
12232 GIMPLE_OMP_ATOMIC_STORE.
12233
12234 We replace
12235 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
12236 loaded_val = *addr;
12237
12238 and replace
12239 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
12240 *addr = stored_val;
12241 */
12242
12243 static bool
12244 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
12245 tree addr, tree loaded_val, tree stored_val)
12246 {
12247 gimple_stmt_iterator si;
12248 gassign *stmt;
12249 tree t;
12250
12251 si = gsi_last_bb (load_bb);
12252 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
12253
12254 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
12255 t = build_call_expr (t, 0);
12256 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
12257
12258 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
12259 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
12260 gsi_remove (&si, true);
12261
12262 si = gsi_last_bb (store_bb);
12263 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
12264
12265 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
12266 stored_val);
12267 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
12268
12269 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
12270 t = build_call_expr (t, 0);
12271 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
12272 gsi_remove (&si, true);
12273
12274 if (gimple_in_ssa_p (cfun))
12275 update_ssa (TODO_update_ssa_no_phi);
12276 return true;
12277 }
12278
12279 /* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
12280    using expand_omp_atomic_fetch_op.  If that fails, we try to
12281    call expand_omp_atomic_pipeline, and if it fails too, the
12282    ultimate fallback is wrapping the operation in a mutex
12283    (expand_omp_atomic_mutex).  REGION is the atomic region built
12284    by build_omp_regions_1().  */
12285
12286 static void
12287 expand_omp_atomic (struct omp_region *region)
12288 {
12289 basic_block load_bb = region->entry, store_bb = region->exit;
12290 gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb));
12291 gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb));
12292 tree loaded_val = gimple_omp_atomic_load_lhs (load);
12293 tree addr = gimple_omp_atomic_load_rhs (load);
12294 tree stored_val = gimple_omp_atomic_store_val (store);
12295 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
12296 HOST_WIDE_INT index;
12297
12298 /* Make sure the type is one of the supported sizes. */
12299 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
12300 index = exact_log2 (index);
12301 if (index >= 0 && index <= 4)
12302 {
12303 unsigned int align = TYPE_ALIGN_UNIT (type);
12304
12305 /* __sync builtins require strict data alignment. */
12306 if (exact_log2 (align) >= index)
12307 {
12308 /* Atomic load. */
12309 if (loaded_val == stored_val
12310 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
12311 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
12312 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
12313 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
12314 return;
12315
12316 /* Atomic store. */
12317 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
12318 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
12319 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
12320 && store_bb == single_succ (load_bb)
12321 && first_stmt (store_bb) == store
12322 && expand_omp_atomic_store (load_bb, addr, loaded_val,
12323 stored_val, index))
12324 return;
12325
12326 /* When possible, use specialized atomic update functions. */
12327 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
12328 && store_bb == single_succ (load_bb)
12329 && expand_omp_atomic_fetch_op (load_bb, addr,
12330 loaded_val, stored_val, index))
12331 return;
12332
12333 	  /* If we don't have specialized __sync builtins, try to implement
12334 	     this as a compare and swap loop.  */
12335 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
12336 loaded_val, stored_val, index))
12337 return;
12338 }
12339 }
12340
12341 /* The ultimate fallback is wrapping the operation in a mutex. */
12342 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
12343 }
12344
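/* For example (illustrative): "#pragma omp atomic" incrementing a
   4-byte int normally becomes a single atomic fetch-and-add; a
   compound update with no matching fetch-op builtin falls back to
   the compare-and-swap loop; and an operand whose size is not a
   power of two between 1 and 16 bytes (the INDEX range 0..4 above),
   or that is insufficiently aligned, takes the GOMP_atomic_start/end
   mutex path.  */
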
12345
12346 /* Encode an oacc launch argument. This matches the GOMP_LAUNCH_PACK
12347    macro in gomp-constants.h.  We do not check for overflow.  */
12348
12349 static tree
12350 oacc_launch_pack (unsigned code, tree device, unsigned op)
12351 {
12352 tree res;
12353
12354 res = build_int_cst (unsigned_type_node, GOMP_LAUNCH_PACK (code, 0, op));
12355 if (device)
12356 {
12357 device = fold_build2 (LSHIFT_EXPR, unsigned_type_node,
12358 device, build_int_cst (unsigned_type_node,
12359 GOMP_LAUNCH_DEVICE_SHIFT));
12360 res = fold_build2 (BIT_IOR_EXPR, unsigned_type_node, res, device);
12361 }
12362 return res;
12363 }
12364
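/* E.g. (illustrative), oacc_launch_pack (GOMP_LAUNCH_DIM, NULL_TREE, 5)
   yields the constant GOMP_LAUNCH_PACK (GOMP_LAUNCH_DIM, 0, 5), i.e.
   the launch code in the high bits and the operand in the low bits;
   a non-NULL DEVICE is shifted to GOMP_LAUNCH_DEVICE_SHIFT and OR'd
   into the result.  */
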
12365 /* Look for compute grid dimension clauses and convert to an attribute
12366 attached to FN. This permits the target-side code to (a) massage
12367 the dimensions, (b) emit that data and (c) optimize. Non-constant
12368 dimensions are pushed onto ARGS.
12369
12370 The attribute value is a TREE_LIST. A set of dimensions is
12371 represented as a list of INTEGER_CST. Those that are runtime
12372 exprs are represented as an INTEGER_CST of zero.
12373
12374    TODO.  Normally the attribute will just contain a single such list.  If,
12375    however, it contains a list of lists, this will represent the use of
12376 device_type. Each member of the outer list is an assoc list of
12377 dimensions, keyed by the device type. The first entry will be the
12378 default. Well, that's the plan. */
12379
12380 #define OACC_FN_ATTRIB "oacc function"
12381
12382 /* Replace any existing oacc fn attribute with updated dimensions. */
12383
12384 void
12385 replace_oacc_fn_attrib (tree fn, tree dims)
12386 {
12387 tree ident = get_identifier (OACC_FN_ATTRIB);
12388 tree attribs = DECL_ATTRIBUTES (fn);
12389
12390 /* If we happen to be present as the first attrib, drop it. */
12391 if (attribs && TREE_PURPOSE (attribs) == ident)
12392 attribs = TREE_CHAIN (attribs);
12393 DECL_ATTRIBUTES (fn) = tree_cons (ident, dims, attribs);
12394 }
12395
12396 /* Scan CLAUSES for launch dimensions and attach them to the oacc
12397 function attribute. Push any that are non-constant onto the ARGS
12398 list, along with an appropriate GOMP_LAUNCH_DIM tag. */
12399
12400 static void
12401 set_oacc_fn_attrib (tree fn, tree clauses, vec<tree> *args)
12402 {
12403 /* Must match GOMP_DIM ordering. */
12404 static const omp_clause_code ids[]
12405 = { OMP_CLAUSE_NUM_GANGS, OMP_CLAUSE_NUM_WORKERS,
12406 OMP_CLAUSE_VECTOR_LENGTH };
12407 unsigned ix;
12408 tree dims[GOMP_DIM_MAX];
12409 tree attr = NULL_TREE;
12410 unsigned non_const = 0;
12411
12412 for (ix = GOMP_DIM_MAX; ix--;)
12413 {
12414 tree clause = find_omp_clause (clauses, ids[ix]);
12415 tree dim = NULL_TREE;
12416
12417 if (clause)
12418 dim = OMP_CLAUSE_EXPR (clause, ids[ix]);
12419 dims[ix] = dim;
12420 if (dim && TREE_CODE (dim) != INTEGER_CST)
12421 {
12422 dim = integer_zero_node;
12423 non_const |= GOMP_DIM_MASK (ix);
12424 }
12425 attr = tree_cons (NULL_TREE, dim, attr);
12426 }
12427
12428 replace_oacc_fn_attrib (fn, attr);
12429
12430 if (non_const)
12431 {
12432 /* Push a dynamic argument set. */
12433 args->safe_push (oacc_launch_pack (GOMP_LAUNCH_DIM,
12434 NULL_TREE, non_const));
12435 for (unsigned ix = 0; ix != GOMP_DIM_MAX; ix++)
12436 if (non_const & GOMP_DIM_MASK (ix))
12437 args->safe_push (dims[ix]);
12438 }
12439 }
12440
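/* For instance (illustrative): "#pragma acc parallel num_gangs (n)
   vector_length (32)" with a non-constant N produces the attribute
   list { 0, NULL, 32 } for gang, worker and vector respectively, and
   pushes oacc_launch_pack (GOMP_LAUNCH_DIM, NULL_TREE,
   GOMP_DIM_MASK (GOMP_DIM_GANG)) followed by the tree for N onto
   ARGS.  */
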
12441 /* Process the routine's dimension clauses to generate an attribute
12442    value.  Issue diagnostics as appropriate.  We default to SEQ
12443    (OpenACC 2.5 clarifies this).  All dimensions have a size of zero
12444    (dynamic).  TREE_PURPOSE is set to indicate whether that dimension
12445    can have a loop partitioned on it.  A non-zero value indicates
12446    yes, zero indicates no.  By construction, once a non-zero has been
12447    reached, further inner dimensions must also be non-zero.  We set
12448    TREE_VALUE to zero for the dimensions that may be partitioned and
12449    1 for the other ones -- if a loop is (erroneously) spawned at
12450    an outer level, we don't want to try to partition it.  */
12451
12452 tree
12453 build_oacc_routine_dims (tree clauses)
12454 {
12455 /* Must match GOMP_DIM ordering. */
12456 static const omp_clause_code ids[] =
12457 {OMP_CLAUSE_GANG, OMP_CLAUSE_WORKER, OMP_CLAUSE_VECTOR, OMP_CLAUSE_SEQ};
12458 int ix;
12459 int level = -1;
12460
12461 for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
12462 for (ix = GOMP_DIM_MAX + 1; ix--;)
12463 if (OMP_CLAUSE_CODE (clauses) == ids[ix])
12464 {
12465 if (level >= 0)
12466 error_at (OMP_CLAUSE_LOCATION (clauses),
12467 "multiple loop axes specified for routine");
12468 level = ix;
12469 break;
12470 }
12471
12472 /* Default to SEQ. */
12473 if (level < 0)
12474 level = GOMP_DIM_MAX;
12475
12476 tree dims = NULL_TREE;
12477
12478 for (ix = GOMP_DIM_MAX; ix--;)
12479 dims = tree_cons (build_int_cst (boolean_type_node, ix >= level),
12480 build_int_cst (integer_type_node, ix < level), dims);
12481
12482 return dims;
12483 }
12484
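/* E.g. (illustrative), "#pragma acc routine worker" gives LEVEL ==
   GOMP_DIM_WORKER, and hence the (TREE_PURPOSE, TREE_VALUE) pairs

     gang:   (0, 1)   <- no partitioned loops, and don't partition here
     worker: (1, 0)   <- loops may be partitioned here
     vector: (1, 0)   <- and here.  */
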
12485 /* Retrieve the oacc function attrib and return it.  Returns NULL
12486    if FN is not an oacc function.  */
12487
12488 tree
12489 get_oacc_fn_attrib (tree fn)
12490 {
12491 return lookup_attribute (OACC_FN_ATTRIB, DECL_ATTRIBUTES (fn));
12492 }
12493
12494 /* Extract an oacc execution dimension from FN. FN must be an
12495 offloaded function or routine that has already had its execution
12496 dimensions lowered to the target-specific values. */
12497
12498 int
12499 get_oacc_fn_dim_size (tree fn, int axis)
12500 {
12501 tree attrs = get_oacc_fn_attrib (fn);
12502
12503 gcc_assert (axis < GOMP_DIM_MAX);
12504
12505 tree dims = TREE_VALUE (attrs);
12506 while (axis--)
12507 dims = TREE_CHAIN (dims);
12508
12509 int size = TREE_INT_CST_LOW (TREE_VALUE (dims));
12510
12511 return size;
12512 }
12513
12514 /* Extract the dimension axis from an IFN_GOACC_DIM_POS or
12515 IFN_GOACC_DIM_SIZE call. */
12516
12517 int
12518 get_oacc_ifn_dim_arg (const gimple *stmt)
12519 {
12520 gcc_checking_assert (gimple_call_internal_fn (stmt) == IFN_GOACC_DIM_SIZE
12521 || gimple_call_internal_fn (stmt) == IFN_GOACC_DIM_POS);
12522 tree arg = gimple_call_arg (stmt, 0);
12523 HOST_WIDE_INT axis = TREE_INT_CST_LOW (arg);
12524
12525 gcc_checking_assert (axis >= 0 && axis < GOMP_DIM_MAX);
12526 return (int) axis;
12527 }
12528
12529 /* Mark the loops inside the kernels region starting at REGION_ENTRY and ending
12530 at REGION_EXIT. */
12531
12532 static void
12533 mark_loops_in_oacc_kernels_region (basic_block region_entry,
12534 basic_block region_exit)
12535 {
12536 struct loop *outer = region_entry->loop_father;
12537 gcc_assert (region_exit == NULL || outer == region_exit->loop_father);
12538
12539 /* Don't parallelize the kernels region if it contains more than one outer
12540 loop. */
12541 unsigned int nr_outer_loops = 0;
12542   struct loop *single_outer = NULL;
12543 for (struct loop *loop = outer->inner; loop != NULL; loop = loop->next)
12544 {
12545 gcc_assert (loop_outer (loop) == outer);
12546
12547 if (!dominated_by_p (CDI_DOMINATORS, loop->header, region_entry))
12548 continue;
12549
12550 if (region_exit != NULL
12551 && dominated_by_p (CDI_DOMINATORS, loop->header, region_exit))
12552 continue;
12553
12554 nr_outer_loops++;
12555 single_outer = loop;
12556 }
12557 if (nr_outer_loops != 1)
12558 return;
12559
12560 for (struct loop *loop = single_outer->inner; loop != NULL; loop = loop->inner)
12561 if (loop->next)
12562 return;
12563
12564 /* Mark the loops in the region. */
12565 for (struct loop *loop = single_outer; loop != NULL; loop = loop->inner)
12566 loop->in_oacc_kernels_region = true;
12567 }
12568
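/* Illustration: a kernels region whose body is the single loop nest

     for (i ...)      <- the one outer loop
       for (j ...)    <- its only inner loop

   gets both loops marked, whereas a region with two sibling loops at
   any level is left unmarked and thus not considered for
   parallelization.  */
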
12569 /* Expand the GIMPLE_OMP_TARGET starting at REGION. */
12570
12571 static void
12572 expand_omp_target (struct omp_region *region)
12573 {
12574 basic_block entry_bb, exit_bb, new_bb;
12575 struct function *child_cfun;
12576 tree child_fn, block, t;
12577 gimple_stmt_iterator gsi;
12578 gomp_target *entry_stmt;
12579 gimple *stmt;
12580 edge e;
12581 bool offloaded, data_region;
12582
12583 entry_stmt = as_a <gomp_target *> (last_stmt (region->entry));
12584 new_bb = region->entry;
12585
12586 offloaded = is_gimple_omp_offloaded (entry_stmt);
12587 switch (gimple_omp_target_kind (entry_stmt))
12588 {
12589 case GF_OMP_TARGET_KIND_REGION:
12590 case GF_OMP_TARGET_KIND_UPDATE:
12591 case GF_OMP_TARGET_KIND_ENTER_DATA:
12592 case GF_OMP_TARGET_KIND_EXIT_DATA:
12593 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
12594 case GF_OMP_TARGET_KIND_OACC_KERNELS:
12595 case GF_OMP_TARGET_KIND_OACC_UPDATE:
12596 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
12597 case GF_OMP_TARGET_KIND_OACC_DECLARE:
12598 data_region = false;
12599 break;
12600 case GF_OMP_TARGET_KIND_DATA:
12601 case GF_OMP_TARGET_KIND_OACC_DATA:
12602 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
12603 data_region = true;
12604 break;
12605 default:
12606 gcc_unreachable ();
12607 }
12608
12609 child_fn = NULL_TREE;
12610 child_cfun = NULL;
12611 if (offloaded)
12612 {
12613 child_fn = gimple_omp_target_child_fn (entry_stmt);
12614 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
12615 }
12616
12617 /* Supported by expand_omp_taskreg, but not here. */
12618 if (child_cfun != NULL)
12619 gcc_checking_assert (!child_cfun->cfg);
12620 gcc_checking_assert (!gimple_in_ssa_p (cfun));
12621
12622 entry_bb = region->entry;
12623 exit_bb = region->exit;
12624
12625 if (gimple_omp_target_kind (entry_stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS)
12626 mark_loops_in_oacc_kernels_region (region->entry, region->exit);
12627
12628 if (offloaded)
12629 {
12630 unsigned srcidx, dstidx, num;
12631
12632 /* If the offloading region needs data sent from the parent
12633 function, then the very first statement (except possible
12634 tree profile counter updates) of the offloading body
12635 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
12636 &.OMP_DATA_O is passed as an argument to the child function,
12637 we need to replace it with the argument as seen by the child
12638 function.
12639
12640 In most cases, this will end up being the identity assignment
12641 .OMP_DATA_I = .OMP_DATA_I. However, if the offloading body had
12642 a function call that has been inlined, the original PARM_DECL
12643 .OMP_DATA_I may have been converted into a different local
12644 	 variable, in which case we need to keep the assignment.  */
12645 tree data_arg = gimple_omp_target_data_arg (entry_stmt);
12646 if (data_arg)
12647 {
12648 basic_block entry_succ_bb = single_succ (entry_bb);
12649 gimple_stmt_iterator gsi;
12650 tree arg;
12651 gimple *tgtcopy_stmt = NULL;
12652 tree sender = TREE_VEC_ELT (data_arg, 0);
12653
12654 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
12655 {
12656 gcc_assert (!gsi_end_p (gsi));
12657 stmt = gsi_stmt (gsi);
12658 if (gimple_code (stmt) != GIMPLE_ASSIGN)
12659 continue;
12660
12661 if (gimple_num_ops (stmt) == 2)
12662 {
12663 tree arg = gimple_assign_rhs1 (stmt);
12664
12665 /* We're ignoring the subcode because we're
12666 effectively doing a STRIP_NOPS. */
12667
12668 if (TREE_CODE (arg) == ADDR_EXPR
12669 && TREE_OPERAND (arg, 0) == sender)
12670 {
12671 tgtcopy_stmt = stmt;
12672 break;
12673 }
12674 }
12675 }
12676
12677 gcc_assert (tgtcopy_stmt != NULL);
12678 arg = DECL_ARGUMENTS (child_fn);
12679
12680 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
12681 gsi_remove (&gsi, true);
12682 }
12683
12684 /* Declare local variables needed in CHILD_CFUN. */
12685 block = DECL_INITIAL (child_fn);
12686 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
12687 /* The gimplifier could record temporaries in the offloading block
12688 rather than in containing function's local_decls chain,
12689 which would mean cgraph missed finalizing them. Do it now. */
12690 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
12691 if (TREE_CODE (t) == VAR_DECL
12692 && TREE_STATIC (t)
12693 && !DECL_EXTERNAL (t))
12694 varpool_node::finalize_decl (t);
12695 DECL_SAVED_TREE (child_fn) = NULL;
12696 /* We'll create a CFG for child_fn, so no gimple body is needed. */
12697 gimple_set_body (child_fn, NULL);
12698 TREE_USED (block) = 1;
12699
12700 /* Reset DECL_CONTEXT on function arguments. */
12701 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
12702 DECL_CONTEXT (t) = child_fn;
12703
12704 /* Split ENTRY_BB at GIMPLE_*,
12705 so that it can be moved to the child function. */
12706 gsi = gsi_last_bb (entry_bb);
12707 stmt = gsi_stmt (gsi);
12708 gcc_assert (stmt
12709 && gimple_code (stmt) == gimple_code (entry_stmt));
12710 e = split_block (entry_bb, stmt);
12711 gsi_remove (&gsi, true);
12712 entry_bb = e->dest;
12713 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
12714
12715 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
12716 if (exit_bb)
12717 {
12718 gsi = gsi_last_bb (exit_bb);
12719 gcc_assert (!gsi_end_p (gsi)
12720 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
12721 stmt = gimple_build_return (NULL);
12722 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
12723 gsi_remove (&gsi, true);
12724 }
12725
12726 /* Move the offloading region into CHILD_CFUN. */
12727
12728 block = gimple_block (entry_stmt);
12729
12730 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
12731 if (exit_bb)
12732 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
12733       /* When the OMP expansion process cannot guarantee an up-to-date
12734 	 loop tree, arrange for the child function to fix up loops.  */
12735 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
12736 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
12737
12738 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
12739 num = vec_safe_length (child_cfun->local_decls);
12740 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
12741 {
12742 t = (*child_cfun->local_decls)[srcidx];
12743 if (DECL_CONTEXT (t) == cfun->decl)
12744 continue;
12745 if (srcidx != dstidx)
12746 (*child_cfun->local_decls)[dstidx] = t;
12747 dstidx++;
12748 }
12749 if (dstidx != num)
12750 vec_safe_truncate (child_cfun->local_decls, dstidx);
12751
12752 /* Inform the callgraph about the new function. */
12753 child_cfun->curr_properties = cfun->curr_properties;
12754 child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
12755 child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
12756 cgraph_node *node = cgraph_node::get_create (child_fn);
12757 node->parallelized_function = 1;
12758 cgraph_node::add_new_function (child_fn, true);
12759
12760 /* Add the new function to the offload table. */
12761 if (ENABLE_OFFLOADING)
12762 vec_safe_push (offload_funcs, child_fn);
12763
12764 bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
12765 && !DECL_ASSEMBLER_NAME_SET_P (child_fn);
12766
12767 /* Fix the callgraph edges for child_cfun. Those for cfun will be
12768 fixed in a following pass. */
12769 push_cfun (child_cfun);
12770 if (need_asm)
12771 assign_assembler_name_if_neeeded (child_fn);
12772 cgraph_edge::rebuild_edges ();
12773
12774 /* Prevent IPA from removing child_fn as unreachable, since there are no
12775 refs from the parent function to child_fn in offload LTO mode. */
12776 if (ENABLE_OFFLOADING)
12777 cgraph_node::get (child_fn)->mark_force_output ();
12778
12779 /* Some EH regions might become dead, see PR34608. If
12780 pass_cleanup_cfg isn't the first pass to happen with the
12781 new child, these dead EH edges might cause problems.
12782 Clean them up now. */
12783 if (flag_exceptions)
12784 {
12785 basic_block bb;
12786 bool changed = false;
12787
12788 FOR_EACH_BB_FN (bb, cfun)
12789 changed |= gimple_purge_dead_eh_edges (bb);
12790 if (changed)
12791 cleanup_tree_cfg ();
12792 }
12793 if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
12794 verify_loop_structure ();
12795 pop_cfun ();
12796
12797 if (dump_file && !gimple_in_ssa_p (cfun))
12798 {
12799 omp_any_child_fn_dumped = true;
12800 dump_function_header (dump_file, child_fn, dump_flags);
12801 dump_function_to_file (child_fn, dump_file, dump_flags);
12802 }
12803 }
12804
12805 /* Emit a library call to launch the offloading region, or do data
12806 transfers. */
12807 tree t1, t2, t3, t4, device, cond, depend, c, clauses;
12808 enum built_in_function start_ix;
12809 location_t clause_loc;
12810 unsigned int flags_i = 0;
12811
12812 switch (gimple_omp_target_kind (entry_stmt))
12813 {
12814 case GF_OMP_TARGET_KIND_REGION:
12815 start_ix = BUILT_IN_GOMP_TARGET;
12816 break;
12817 case GF_OMP_TARGET_KIND_DATA:
12818 start_ix = BUILT_IN_GOMP_TARGET_DATA;
12819 break;
12820 case GF_OMP_TARGET_KIND_UPDATE:
12821 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
12822 break;
12823 case GF_OMP_TARGET_KIND_ENTER_DATA:
12824 start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA;
12825 break;
12826 case GF_OMP_TARGET_KIND_EXIT_DATA:
12827 start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA;
12828 flags_i |= GOMP_TARGET_FLAG_EXIT_DATA;
12829 break;
12830 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
12831 case GF_OMP_TARGET_KIND_OACC_KERNELS:
12832 start_ix = BUILT_IN_GOACC_PARALLEL;
12833 break;
12834 case GF_OMP_TARGET_KIND_OACC_DATA:
12835 start_ix = BUILT_IN_GOACC_DATA_START;
12836 break;
12837 case GF_OMP_TARGET_KIND_OACC_UPDATE:
12838 start_ix = BUILT_IN_GOACC_UPDATE;
12839 break;
12840 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
12841 start_ix = BUILT_IN_GOACC_ENTER_EXIT_DATA;
12842 break;
12843 case GF_OMP_TARGET_KIND_OACC_DECLARE:
12844 start_ix = BUILT_IN_GOACC_DECLARE;
12845 break;
12846 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
12847 start_ix = BUILT_IN_GOACC_HOST_DATA;
12848 break;
12849 default:
12850 gcc_unreachable ();
12851 }
12852
12853 clauses = gimple_omp_target_clauses (entry_stmt);
12854
12855   /* By default, the value of DEVICE is GOMP_DEVICE_ICV (let the
12856      runtime library choose) and there is no conditional.  */
12857 cond = NULL_TREE;
12858 device = build_int_cst (integer_type_node, GOMP_DEVICE_ICV);
12859
12860 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
12861 if (c)
12862 cond = OMP_CLAUSE_IF_EXPR (c);
12863
12864 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
12865 if (c)
12866 {
12867 /* Even if we pass it to all library function calls, it is currently only
12868 defined/used for the OpenMP target ones. */
12869 gcc_checking_assert (start_ix == BUILT_IN_GOMP_TARGET
12870 || start_ix == BUILT_IN_GOMP_TARGET_DATA
12871 || start_ix == BUILT_IN_GOMP_TARGET_UPDATE
12872 || start_ix == BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA);
12873
12874 device = OMP_CLAUSE_DEVICE_ID (c);
12875 clause_loc = OMP_CLAUSE_LOCATION (c);
12876 }
12877 else
12878 clause_loc = gimple_location (entry_stmt);
12879
12880 c = find_omp_clause (clauses, OMP_CLAUSE_NOWAIT);
12881 if (c)
12882 flags_i |= GOMP_TARGET_FLAG_NOWAIT;
12883
12884 /* Ensure 'device' is of the correct type. */
12885 device = fold_convert_loc (clause_loc, integer_type_node, device);
12886
12887 /* If we found the clause 'if (cond)', build
12888 (cond ? device : GOMP_DEVICE_HOST_FALLBACK). */
12889 if (cond)
12890 {
12891 cond = gimple_boolify (cond);
12892
12893 basic_block cond_bb, then_bb, else_bb;
12894 edge e;
12895 tree tmp_var;
12896
12897 tmp_var = create_tmp_var (TREE_TYPE (device));
12898 if (offloaded)
12899 e = split_block_after_labels (new_bb);
12900 else
12901 {
12902 gsi = gsi_last_bb (new_bb);
12903 gsi_prev (&gsi);
12904 e = split_block (new_bb, gsi_stmt (gsi));
12905 }
12906 cond_bb = e->src;
12907 new_bb = e->dest;
12908 remove_edge (e);
12909
12910 then_bb = create_empty_bb (cond_bb);
12911 else_bb = create_empty_bb (then_bb);
12912 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
12913 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
12914
12915 stmt = gimple_build_cond_empty (cond);
12916 gsi = gsi_last_bb (cond_bb);
12917 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
12918
12919 gsi = gsi_start_bb (then_bb);
12920 stmt = gimple_build_assign (tmp_var, device);
12921 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
12922
12923 gsi = gsi_start_bb (else_bb);
12924 stmt = gimple_build_assign (tmp_var,
12925 build_int_cst (integer_type_node,
12926 GOMP_DEVICE_HOST_FALLBACK));
12927 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
12928
12929 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
12930 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
12931 add_bb_to_loop (then_bb, cond_bb->loop_father);
12932 add_bb_to_loop (else_bb, cond_bb->loop_father);
12933 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
12934 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
12935
12936 device = tmp_var;
12937 }
12938
12939 gsi = gsi_last_bb (new_bb);
12940 t = gimple_omp_target_data_arg (entry_stmt);
12941 if (t == NULL)
12942 {
12943 t1 = size_zero_node;
12944 t2 = build_zero_cst (ptr_type_node);
12945 t3 = t2;
12946 t4 = t2;
12947 }
12948 else
12949 {
12950 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
12951 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
12952 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
12953 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
12954 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
12955 }
12956
12957 gimple *g;
12958 bool tagging = false;
12959   /* The maximum number of arguments used by any start_ix, without varargs.  */
12960 auto_vec<tree, 11> args;
12961 args.quick_push (device);
12962 if (offloaded)
12963 args.quick_push (build_fold_addr_expr (child_fn));
12964 args.quick_push (t1);
12965 args.quick_push (t2);
12966 args.quick_push (t3);
12967 args.quick_push (t4);
12968 switch (start_ix)
12969 {
12970 case BUILT_IN_GOACC_DATA_START:
12971 case BUILT_IN_GOACC_DECLARE:
12972 case BUILT_IN_GOMP_TARGET_DATA:
12973 case BUILT_IN_GOACC_HOST_DATA:
12974 break;
12975 case BUILT_IN_GOMP_TARGET:
12976 case BUILT_IN_GOMP_TARGET_UPDATE:
12977 case BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA:
12978 args.quick_push (build_int_cst (unsigned_type_node, flags_i));
12979 c = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
12980 if (c)
12981 depend = OMP_CLAUSE_DECL (c);
12982 else
12983 depend = build_int_cst (ptr_type_node, 0);
12984 args.quick_push (depend);
12985 if (start_ix == BUILT_IN_GOMP_TARGET)
12986 {
12987 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_TEAMS);
12988 if (c)
12989 {
12990 t = fold_convert (integer_type_node,
12991 OMP_CLAUSE_NUM_TEAMS_EXPR (c));
12992 t = force_gimple_operand_gsi (&gsi, t, true, NULL,
12993 true, GSI_SAME_STMT);
12994 }
12995 else
12996 t = integer_minus_one_node;
12997 args.quick_push (t);
12998 c = find_omp_clause (clauses, OMP_CLAUSE_THREAD_LIMIT);
12999 if (c)
13000 {
13001 t = fold_convert (integer_type_node,
13002 OMP_CLAUSE_THREAD_LIMIT_EXPR (c));
13003 t = force_gimple_operand_gsi (&gsi, t, true, NULL,
13004 true, GSI_SAME_STMT);
13005 }
13006 else
13007 t = integer_minus_one_node;
13008 args.quick_push (t);
13009 }
13010 break;
13011 case BUILT_IN_GOACC_PARALLEL:
13012 {
13013 set_oacc_fn_attrib (child_fn, clauses, &args);
13014 tagging = true;
13015 }
13016 /* FALLTHRU */
13017 case BUILT_IN_GOACC_ENTER_EXIT_DATA:
13018 case BUILT_IN_GOACC_UPDATE:
13019 {
13020 tree t_async = NULL_TREE;
13021
13022 /* If present, use the value specified by the respective
13023 clause, making sure that is of the correct type. */
13024 c = find_omp_clause (clauses, OMP_CLAUSE_ASYNC);
13025 if (c)
13026 t_async = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
13027 integer_type_node,
13028 OMP_CLAUSE_ASYNC_EXPR (c));
13029 else if (!tagging)
13030 /* Default values for t_async. */
13031 t_async = fold_convert_loc (gimple_location (entry_stmt),
13032 integer_type_node,
13033 build_int_cst (integer_type_node,
13034 GOMP_ASYNC_SYNC));
13035 if (tagging && t_async)
13036 {
13037 unsigned HOST_WIDE_INT i_async;
13038
13039 if (TREE_CODE (t_async) == INTEGER_CST)
13040 {
13041 		    /* See if we can pack the async arg into the tag's
13042 operand. */
13043 i_async = TREE_INT_CST_LOW (t_async);
13044
13045 if (i_async < GOMP_LAUNCH_OP_MAX)
13046 t_async = NULL_TREE;
13047 }
13048 if (t_async)
13049 i_async = GOMP_LAUNCH_OP_MAX;
13050 args.safe_push (oacc_launch_pack
13051 (GOMP_LAUNCH_ASYNC, NULL_TREE, i_async));
13052 }
13053 if (t_async)
13054 args.safe_push (t_async);
13055
13056 /* Save the argument index, and ... */
13057 unsigned t_wait_idx = args.length ();
13058 unsigned num_waits = 0;
13059 c = find_omp_clause (clauses, OMP_CLAUSE_WAIT);
13060 if (!tagging || c)
13061 /* ... push a placeholder. */
13062 args.safe_push (integer_zero_node);
13063
13064 for (; c; c = OMP_CLAUSE_CHAIN (c))
13065 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WAIT)
13066 {
13067 args.safe_push (fold_convert_loc (OMP_CLAUSE_LOCATION (c),
13068 integer_type_node,
13069 OMP_CLAUSE_WAIT_EXPR (c)));
13070 num_waits++;
13071 }
13072
13073 if (!tagging || num_waits)
13074 {
13075 tree len;
13076
13077 /* Now that we know the number, update the placeholder. */
13078 if (tagging)
13079 len = oacc_launch_pack (GOMP_LAUNCH_WAIT, NULL_TREE, num_waits);
13080 else
13081 len = build_int_cst (integer_type_node, num_waits);
13082 len = fold_convert_loc (gimple_location (entry_stmt),
13083 unsigned_type_node, len);
13084 args[t_wait_idx] = len;
13085 }
13086 }
13087 break;
13088 default:
13089 gcc_unreachable ();
13090 }
13091 if (tagging)
13092 /* Push terminal marker - zero. */
13093 args.safe_push (oacc_launch_pack (0, NULL_TREE, 0));
13094
13095 g = gimple_build_call_vec (builtin_decl_explicit (start_ix), args);
13096 gimple_set_location (g, gimple_location (entry_stmt));
13097 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
13098 if (!offloaded)
13099 {
13100 g = gsi_stmt (gsi);
13101 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
13102 gsi_remove (&gsi, true);
13103 }
13104 if (data_region && region->exit)
13105 {
13106 gsi = gsi_last_bb (region->exit);
13107 g = gsi_stmt (gsi);
13108 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
13109 gsi_remove (&gsi, true);
13110 }
13111 }
13112
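/* Sketch of the launch call built above for "#pragma omp target";
   the argument order simply mirrors the pushes above, and the
   entry-point and argument names are illustrative:

     GOMP_target_ext (device, child_fn, map_count, hostaddrs, sizes,
		      kinds, flags, depend, num_teams, thread_limit);

   For the OpenACC entry points the trailing arguments are instead
   the variadic GOMP_LAUNCH_* descriptors, terminated by the zero
   marker pushed when TAGGING is set.  */
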
13113
13114 /* Expand the parallel region tree rooted at REGION. Expansion
13115 proceeds in depth-first order. Innermost regions are expanded
13116 first. This way, parallel regions that require a new function to
13117 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
13118 internal dependencies in their body. */
13119
13120 static void
13121 expand_omp (struct omp_region *region)
13122 {
13123 omp_any_child_fn_dumped = false;
13124 while (region)
13125 {
13126 location_t saved_location;
13127 gimple *inner_stmt = NULL;
13128
13129 /* First, determine whether this is a combined parallel+workshare
13130 region. */
13131 if (region->type == GIMPLE_OMP_PARALLEL)
13132 determine_parallel_type (region);
13133
13134 if (region->type == GIMPLE_OMP_FOR
13135 && gimple_omp_for_combined_p (last_stmt (region->entry)))
13136 inner_stmt = last_stmt (region->inner->entry);
13137
13138 if (region->inner)
13139 expand_omp (region->inner);
13140
13141 saved_location = input_location;
13142 if (gimple_has_location (last_stmt (region->entry)))
13143 input_location = gimple_location (last_stmt (region->entry));
13144
13145 switch (region->type)
13146 {
13147 case GIMPLE_OMP_PARALLEL:
13148 case GIMPLE_OMP_TASK:
13149 expand_omp_taskreg (region);
13150 break;
13151
13152 case GIMPLE_OMP_FOR:
13153 expand_omp_for (region, inner_stmt);
13154 break;
13155
13156 case GIMPLE_OMP_SECTIONS:
13157 expand_omp_sections (region);
13158 break;
13159
13160 case GIMPLE_OMP_SECTION:
13161 /* Individual omp sections are handled together with their
13162 parent GIMPLE_OMP_SECTIONS region. */
13163 break;
13164
13165 case GIMPLE_OMP_SINGLE:
13166 expand_omp_single (region);
13167 break;
13168
13169 case GIMPLE_OMP_ORDERED:
13170 {
13171 gomp_ordered *ord_stmt
13172 = as_a <gomp_ordered *> (last_stmt (region->entry));
13173 if (find_omp_clause (gimple_omp_ordered_clauses (ord_stmt),
13174 OMP_CLAUSE_DEPEND))
13175 {
13176 	      /* We'll expand these when expanding the corresponding
13177 		 worksharing region with an ordered(n) clause.  */
13178 gcc_assert (region->outer
13179 && region->outer->type == GIMPLE_OMP_FOR);
13180 region->ord_stmt = ord_stmt;
13181 break;
13182 }
13183 }
13184 /* FALLTHRU */
13185 case GIMPLE_OMP_MASTER:
13186 case GIMPLE_OMP_TASKGROUP:
13187 case GIMPLE_OMP_CRITICAL:
13188 case GIMPLE_OMP_TEAMS:
13189 expand_omp_synch (region);
13190 break;
13191
13192 case GIMPLE_OMP_ATOMIC_LOAD:
13193 expand_omp_atomic (region);
13194 break;
13195
13196 case GIMPLE_OMP_TARGET:
13197 expand_omp_target (region);
13198 break;
13199
13200 default:
13201 gcc_unreachable ();
13202 }
13203
13204 input_location = saved_location;
13205 region = region->next;
13206 }
13207 if (omp_any_child_fn_dumped)
13208 {
13209 if (dump_file)
13210 dump_function_header (dump_file, current_function_decl, dump_flags);
13211 omp_any_child_fn_dumped = false;
13212 }
13213 }
13214
13215
13216 /* Helper for build_omp_regions. Scan the dominator tree starting at
13217 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
13218    true, the function ends once a single tree is built (otherwise, a
13219    whole forest of OMP constructs may be built).  */
13220
13221 static void
13222 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
13223 bool single_tree)
13224 {
13225 gimple_stmt_iterator gsi;
13226 gimple *stmt;
13227 basic_block son;
13228
13229 gsi = gsi_last_bb (bb);
13230 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
13231 {
13232 struct omp_region *region;
13233 enum gimple_code code;
13234
13235 stmt = gsi_stmt (gsi);
13236 code = gimple_code (stmt);
13237 if (code == GIMPLE_OMP_RETURN)
13238 {
13239 /* STMT is the return point out of region PARENT. Mark it
13240 as the exit point and make PARENT the immediately
13241 enclosing region. */
13242 gcc_assert (parent);
13243 region = parent;
13244 region->exit = bb;
13245 parent = parent->outer;
13246 }
13247 else if (code == GIMPLE_OMP_ATOMIC_STORE)
13248 {
13249 	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
13250 GIMPLE_OMP_RETURN, but matches with
13251 GIMPLE_OMP_ATOMIC_LOAD. */
13252 gcc_assert (parent);
13253 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
13254 region = parent;
13255 region->exit = bb;
13256 parent = parent->outer;
13257 }
13258 else if (code == GIMPLE_OMP_CONTINUE)
13259 {
13260 gcc_assert (parent);
13261 parent->cont = bb;
13262 }
13263 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
13264 {
13265 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
13266 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
13267 }
13268 else
13269 {
13270 region = new_omp_region (bb, code, parent);
13271 /* Otherwise... */
13272 if (code == GIMPLE_OMP_TARGET)
13273 {
13274 switch (gimple_omp_target_kind (stmt))
13275 {
13276 case GF_OMP_TARGET_KIND_REGION:
13277 case GF_OMP_TARGET_KIND_DATA:
13278 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
13279 case GF_OMP_TARGET_KIND_OACC_KERNELS:
13280 case GF_OMP_TARGET_KIND_OACC_DATA:
13281 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
13282 break;
13283 case GF_OMP_TARGET_KIND_UPDATE:
13284 case GF_OMP_TARGET_KIND_ENTER_DATA:
13285 case GF_OMP_TARGET_KIND_EXIT_DATA:
13286 case GF_OMP_TARGET_KIND_OACC_UPDATE:
13287 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
13288 case GF_OMP_TARGET_KIND_OACC_DECLARE:
13289 /* ..., other than for those stand-alone directives... */
13290 region = NULL;
13291 break;
13292 default:
13293 gcc_unreachable ();
13294 }
13295 }
13296 else if (code == GIMPLE_OMP_ORDERED
13297 && find_omp_clause (gimple_omp_ordered_clauses
13298 (as_a <gomp_ordered *> (stmt)),
13299 OMP_CLAUSE_DEPEND))
13300 /* #pragma omp ordered depend is also just a stand-alone
13301 directive. */
13302 region = NULL;
13303 /* ..., this directive becomes the parent for a new region. */
13304 if (region)
13305 parent = region;
13306 }
13307 }
13308
13309 if (single_tree && !parent)
13310 return;
13311
13312 for (son = first_dom_son (CDI_DOMINATORS, bb);
13313 son;
13314 son = next_dom_son (CDI_DOMINATORS, son))
13315 build_omp_regions_1 (son, parent, single_tree);
13316 }
13317
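/* For example, the dominator walk above turns

     #pragma omp parallel      <- new region, parent == NULL
       #pragma omp for         <- new region, parent == parallel
	 ...
       GIMPLE_OMP_RETURN       <- sets the for region's exit
     GIMPLE_OMP_RETURN         <- sets the parallel region's exit

   into a two-level region tree rooted at the parallel region.  */
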
13318 /* Builds the tree of OMP regions rooted at ROOT, storing it to
13319 root_omp_region. */
13320
13321 static void
13322 build_omp_regions_root (basic_block root)
13323 {
13324 gcc_assert (root_omp_region == NULL);
13325 build_omp_regions_1 (root, NULL, true);
13326 gcc_assert (root_omp_region != NULL);
13327 }
13328
13329 /* Expand the OMP construct (and its subconstructs) starting at HEAD.  */
13330
13331 void
13332 omp_expand_local (basic_block head)
13333 {
13334 build_omp_regions_root (head);
13335 if (dump_file && (dump_flags & TDF_DETAILS))
13336 {
13337 fprintf (dump_file, "\nOMP region tree\n\n");
13338 dump_omp_region (dump_file, root_omp_region, 0);
13339 fprintf (dump_file, "\n");
13340 }
13341
13342 remove_exit_barriers (root_omp_region);
13343 expand_omp (root_omp_region);
13344
13345 free_omp_regions ();
13346 }
13347
13348 /* Scan the CFG and build a tree of OMP regions, storing the root
13349    in root_omp_region.  */
13350
13351 static void
13352 build_omp_regions (void)
13353 {
13354 gcc_assert (root_omp_region == NULL);
13355 calculate_dominance_info (CDI_DOMINATORS);
13356 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
13357 }
13358
13359 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
13360
13361 static unsigned int
13362 execute_expand_omp (void)
13363 {
13364 build_omp_regions ();
13365
13366 if (!root_omp_region)
13367 return 0;
13368
13369 if (dump_file)
13370 {
13371 fprintf (dump_file, "\nOMP region tree\n\n");
13372 dump_omp_region (dump_file, root_omp_region, 0);
13373 fprintf (dump_file, "\n");
13374 }
13375
13376 remove_exit_barriers (root_omp_region);
13377
13378 expand_omp (root_omp_region);
13379
13380 if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
13381 verify_loop_structure ();
13382 cleanup_tree_cfg ();
13383
13384 free_omp_regions ();
13385
13386 return 0;
13387 }
13388
13389 /* OMP expansion -- the default pass, run before creation of SSA form. */
13390
13391 namespace {
13392
13393 const pass_data pass_data_expand_omp =
13394 {
13395 GIMPLE_PASS, /* type */
13396 "ompexp", /* name */
13397 OPTGROUP_NONE, /* optinfo_flags */
13398 TV_NONE, /* tv_id */
13399 PROP_gimple_any, /* properties_required */
13400 PROP_gimple_eomp, /* properties_provided */
13401 0, /* properties_destroyed */
13402 0, /* todo_flags_start */
13403 0, /* todo_flags_finish */
13404 };
13405
13406 class pass_expand_omp : public gimple_opt_pass
13407 {
13408 public:
13409 pass_expand_omp (gcc::context *ctxt)
13410 : gimple_opt_pass (pass_data_expand_omp, ctxt)
13411 {}
13412
13413 /* opt_pass methods: */
13414 virtual unsigned int execute (function *)
13415 {
13416 bool gate = ((flag_cilkplus != 0 || flag_openacc != 0 || flag_openmp != 0
13417 || flag_openmp_simd != 0)
13418 && !seen_error ());
13419
13420 /* This pass always runs, to provide PROP_gimple_eomp.
13421 But often, there is nothing to do. */
13422 if (!gate)
13423 return 0;
13424
13425 return execute_expand_omp ();
13426 }
13427
13428 }; // class pass_expand_omp
13429
13430 } // anon namespace
13431
13432 gimple_opt_pass *
13433 make_pass_expand_omp (gcc::context *ctxt)
13434 {
13435 return new pass_expand_omp (ctxt);
13436 }
13437
13438 namespace {
13439
13440 const pass_data pass_data_expand_omp_ssa =
13441 {
13442 GIMPLE_PASS, /* type */
13443 "ompexpssa", /* name */
13444 OPTGROUP_NONE, /* optinfo_flags */
13445 TV_NONE, /* tv_id */
13446 PROP_cfg | PROP_ssa, /* properties_required */
13447 PROP_gimple_eomp, /* properties_provided */
13448 0, /* properties_destroyed */
13449 0, /* todo_flags_start */
13450 TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */
13451 };
13452
13453 class pass_expand_omp_ssa : public gimple_opt_pass
13454 {
13455 public:
13456 pass_expand_omp_ssa (gcc::context *ctxt)
13457 : gimple_opt_pass (pass_data_expand_omp_ssa, ctxt)
13458 {}
13459
13460 /* opt_pass methods: */
13461 virtual bool gate (function *fun)
13462 {
13463 return !(fun->curr_properties & PROP_gimple_eomp);
13464 }
13465 virtual unsigned int execute (function *) { return execute_expand_omp (); }
13466 opt_pass * clone () { return new pass_expand_omp_ssa (m_ctxt); }
13467
13468 }; // class pass_expand_omp_ssa
13469
13470 } // anon namespace
13471
13472 gimple_opt_pass *
13473 make_pass_expand_omp_ssa (gcc::context *ctxt)
13474 {
13475 return new pass_expand_omp_ssa (ctxt);
13476 }
13477 \f
13478 /* Routines to lower OMP directives into OMP-GIMPLE. */
13479
13480 /* If ctx is a worksharing context inside of a cancellable parallel
13481 region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
13482    and a conditional branch to the parallel's cancel_label to handle
13483 cancellation in the implicit barrier. */
13484
13485 static void
13486 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
13487 {
13488 gimple *omp_return = gimple_seq_last_stmt (*body);
13489 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
13490 if (gimple_omp_return_nowait_p (omp_return))
13491 return;
13492 if (ctx->outer
13493 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
13494 && ctx->outer->cancellable)
13495 {
13496 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
13497 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
13498 tree lhs = create_tmp_var (c_bool_type);
13499 gimple_omp_return_set_lhs (omp_return, lhs);
13500 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
13501 gimple *g = gimple_build_cond (NE_EXPR, lhs,
13502 fold_convert (c_bool_type,
13503 boolean_false_node),
13504 ctx->outer->cancel_label, fallthru_label);
13505 gimple_seq_add_stmt (body, g);
13506 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
13507 }
13508 }
13509
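/* Sketch of the emitted check; the GIMPLE_OMP_RETURN's LHS receives
   the cancelling barrier's return value once the region is expanded:

     lhs = <implicit barrier, returns nonzero when cancelled>;
     if (lhs != 0) goto cancel_label;
   fallthru_label:;
*/
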
13510 /* Lower the OpenMP sections directive in the current statement in GSI_P.
13511 CTX is the enclosing OMP context for the current statement. */
13512
13513 static void
13514 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
13515 {
13516 tree block, control;
13517 gimple_stmt_iterator tgsi;
13518 gomp_sections *stmt;
13519 gimple *t;
13520 gbind *new_stmt, *bind;
13521 gimple_seq ilist, dlist, olist, new_body;
13522
13523 stmt = as_a <gomp_sections *> (gsi_stmt (*gsi_p));
13524
13525 push_gimplify_context ();
13526
13527 dlist = NULL;
13528 ilist = NULL;
13529 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
13530 &ilist, &dlist, ctx, NULL);
13531
13532 new_body = gimple_omp_body (stmt);
13533 gimple_omp_set_body (stmt, NULL);
13534 tgsi = gsi_start (new_body);
13535 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
13536 {
13537 omp_context *sctx;
13538 gimple *sec_start;
13539
13540 sec_start = gsi_stmt (tgsi);
13541 sctx = maybe_lookup_ctx (sec_start);
13542 gcc_assert (sctx);
13543
13544 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
13545 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
13546 GSI_CONTINUE_LINKING);
13547 gimple_omp_set_body (sec_start, NULL);
13548
13549 if (gsi_one_before_end_p (tgsi))
13550 {
13551 gimple_seq l = NULL;
13552 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
13553 &l, ctx);
13554 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
13555 gimple_omp_section_set_last (sec_start);
13556 }
13557
13558 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
13559 GSI_CONTINUE_LINKING);
13560 }
13561
13562 block = make_node (BLOCK);
13563 bind = gimple_build_bind (NULL, new_body, block);
13564
13565 olist = NULL;
13566 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
13567
13568 block = make_node (BLOCK);
13569 new_stmt = gimple_build_bind (NULL, NULL, block);
13570 gsi_replace (gsi_p, new_stmt, true);
13571
13572 pop_gimplify_context (new_stmt);
13573 gimple_bind_append_vars (new_stmt, ctx->block_vars);
13574 BLOCK_VARS (block) = gimple_bind_vars (bind);
13575 if (BLOCK_VARS (block))
13576 TREE_USED (block) = 1;
13577
13578 new_body = NULL;
13579 gimple_seq_add_seq (&new_body, ilist);
13580 gimple_seq_add_stmt (&new_body, stmt);
13581 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
13582 gimple_seq_add_stmt (&new_body, bind);
13583
13584 control = create_tmp_var (unsigned_type_node, ".section");
13585 t = gimple_build_omp_continue (control, control);
13586 gimple_omp_sections_set_control (stmt, control);
13587 gimple_seq_add_stmt (&new_body, t);
13588
13589 gimple_seq_add_seq (&new_body, olist);
13590 if (ctx->cancellable)
13591 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
13592 gimple_seq_add_seq (&new_body, dlist);
13593
13594 new_body = maybe_catch_exception (new_body);
13595
13596 t = gimple_build_omp_return
13597 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
13598 OMP_CLAUSE_NOWAIT));
13599 gimple_seq_add_stmt (&new_body, t);
13600 maybe_add_implicit_barrier_cancel (ctx, &new_body);
13601
13602 gimple_bind_set_body (new_stmt, new_body);
13603 }
13604
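/* After lowering, the replacement bind contains, in order (sketch):

     <ilist: privatization setup>
     GIMPLE_OMP_SECTIONS <clauses, control variable .section>
     GIMPLE_OMP_SECTIONS_SWITCH
     bind { section 1 body ... GIMPLE_OMP_RETURN
	    ...
	    last section body + lastprivate code ... GIMPLE_OMP_RETURN }
     GIMPLE_OMP_CONTINUE (.section, .section)
     <olist: reductions>  <dlist: destructors>
     GIMPLE_OMP_RETURN <nowait if specified>  */
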
13605
13606 /* A subroutine of lower_omp_single. Expand the simple form of
13607 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
13608
13609 if (GOMP_single_start ())
13610 BODY;
13611 [ GOMP_barrier (); ] -> unless 'nowait' is present.
13612
13613 FIXME. It may be better to delay expanding the logic of this until
13614    pass_expand_omp.  The expanded logic may make the job of a
13615    synchronization analysis pass more difficult.  */
13616
13617 static void
13618 lower_omp_single_simple (gomp_single *single_stmt, gimple_seq *pre_p)
13619 {
13620 location_t loc = gimple_location (single_stmt);
13621 tree tlabel = create_artificial_label (loc);
13622 tree flabel = create_artificial_label (loc);
13623 gimple *call, *cond;
13624 tree lhs, decl;
13625
13626 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
13627 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)));
13628 call = gimple_build_call (decl, 0);
13629 gimple_call_set_lhs (call, lhs);
13630 gimple_seq_add_stmt (pre_p, call);
13631
13632 cond = gimple_build_cond (EQ_EXPR, lhs,
13633 fold_convert_loc (loc, TREE_TYPE (lhs),
13634 boolean_true_node),
13635 tlabel, flabel);
13636 gimple_seq_add_stmt (pre_p, cond);
13637 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
13638 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
13639 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
13640 }
13641
13642
13643 /* A subroutine of lower_omp_single. Expand the simple form of
13644 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
13645
13646 #pragma omp single copyprivate (a, b, c)
13647
13648 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
13649
13650 {
13651 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
13652 {
13653 BODY;
13654 copyout.a = a;
13655 copyout.b = b;
13656 copyout.c = c;
13657 GOMP_single_copy_end (&copyout);
13658 }
13659 else
13660 {
13661 a = copyout_p->a;
13662 b = copyout_p->b;
13663 c = copyout_p->c;
13664 }
13665 GOMP_barrier ();
13666 }
13667
13668 FIXME. It may be better to delay expanding the logic of this until
13669    pass_expand_omp.  The expanded logic may make the job of a
13670    synchronization analysis pass more difficult.  */
13671
13672 static void
13673 lower_omp_single_copy (gomp_single *single_stmt, gimple_seq *pre_p,
13674 omp_context *ctx)
13675 {
13676 tree ptr_type, t, l0, l1, l2, bfn_decl;
13677 gimple_seq copyin_seq;
13678 location_t loc = gimple_location (single_stmt);
13679
13680 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
13681
13682 ptr_type = build_pointer_type (ctx->record_type);
13683 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
13684
13685 l0 = create_artificial_label (loc);
13686 l1 = create_artificial_label (loc);
13687 l2 = create_artificial_label (loc);
13688
13689 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
13690 t = build_call_expr_loc (loc, bfn_decl, 0);
13691 t = fold_convert_loc (loc, ptr_type, t);
13692 gimplify_assign (ctx->receiver_decl, t, pre_p);
13693
13694 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
13695 build_int_cst (ptr_type, 0));
13696 t = build3 (COND_EXPR, void_type_node, t,
13697 build_and_jump (&l0), build_and_jump (&l1));
13698 gimplify_and_add (t, pre_p);
13699
13700 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
13701
13702 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
13703
13704 copyin_seq = NULL;
13705 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
13706 &copyin_seq, ctx);
13707
13708 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
13709 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
13710 t = build_call_expr_loc (loc, bfn_decl, 1, t);
13711 gimplify_and_add (t, pre_p);
13712
13713 t = build_and_jump (&l2);
13714 gimplify_and_add (t, pre_p);
13715
13716 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
13717
13718 gimple_seq_add_seq (pre_p, copyin_seq);
13719
13720 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
13721 }
13722
13723
13724 /* Expand code for an OpenMP single directive. */
13725
13726 static void
13727 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
13728 {
13729 tree block;
13730 gimple *t;
13731 gomp_single *single_stmt = as_a <gomp_single *> (gsi_stmt (*gsi_p));
13732 gbind *bind;
13733 gimple_seq bind_body, bind_body_tail = NULL, dlist;
13734
13735 push_gimplify_context ();
13736
13737 block = make_node (BLOCK);
13738 bind = gimple_build_bind (NULL, NULL, block);
13739 gsi_replace (gsi_p, bind, true);
13740 bind_body = NULL;
13741 dlist = NULL;
13742 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
13743 &bind_body, &dlist, ctx, NULL);
13744 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
13745
13746 gimple_seq_add_stmt (&bind_body, single_stmt);
13747
13748 if (ctx->record_type)
13749 lower_omp_single_copy (single_stmt, &bind_body, ctx);
13750 else
13751 lower_omp_single_simple (single_stmt, &bind_body);
13752
13753 gimple_omp_set_body (single_stmt, NULL);
13754
13755 gimple_seq_add_seq (&bind_body, dlist);
13756
13757 bind_body = maybe_catch_exception (bind_body);
13758
13759 t = gimple_build_omp_return
13760 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
13761 OMP_CLAUSE_NOWAIT));
13762 gimple_seq_add_stmt (&bind_body_tail, t);
13763 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
13764 if (ctx->record_type)
13765 {
13766 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
13767 tree clobber = build_constructor (ctx->record_type, NULL);
13768 TREE_THIS_VOLATILE (clobber) = 1;
13769 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
13770 clobber), GSI_SAME_STMT);
13771 }
13772 gimple_seq_add_seq (&bind_body, bind_body_tail);
13773 gimple_bind_set_body (bind, bind_body);
13774
13775 pop_gimplify_context (bind);
13776
13777 gimple_bind_append_vars (bind, ctx->block_vars);
13778 BLOCK_VARS (block) = ctx->block_vars;
13779 if (BLOCK_VARS (block))
13780 TREE_USED (block) = 1;
13781 }
13782
13783
13784 /* Expand code for an OpenMP master directive. */
13785
13786 static void
13787 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
13788 {
13789 tree block, lab = NULL, x, bfn_decl;
13790 gimple *stmt = gsi_stmt (*gsi_p);
13791 gbind *bind;
13792 location_t loc = gimple_location (stmt);
13793 gimple_seq tseq;
13794
13795 push_gimplify_context ();
13796
13797 block = make_node (BLOCK);
13798 bind = gimple_build_bind (NULL, NULL, block);
13799 gsi_replace (gsi_p, bind, true);
13800 gimple_bind_add_stmt (bind, stmt);
13801
13802 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
13803 x = build_call_expr_loc (loc, bfn_decl, 0);
13804 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
13805 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
13806 tseq = NULL;
13807 gimplify_and_add (x, &tseq);
13808 gimple_bind_add_seq (bind, tseq);
13809
13810 lower_omp (gimple_omp_body_ptr (stmt), ctx);
13811 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
13812 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
13813 gimple_omp_set_body (stmt, NULL);
13814
13815 gimple_bind_add_stmt (bind, gimple_build_label (lab));
13816
13817 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
13818
13819 pop_gimplify_context (bind);
13820
13821 gimple_bind_append_vars (bind, ctx->block_vars);
13822 BLOCK_VARS (block) = ctx->block_vars;
13823 }
13824
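/* I.e. the master construct is lowered to the equivalent of (sketch):

     if (omp_get_thread_num () != 0) goto lab;
     BODY;
   lab:;
*/
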
13825
13826 /* Expand code for an OpenMP taskgroup directive. */
13827
13828 static void
13829 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
13830 {
13831 gimple *stmt = gsi_stmt (*gsi_p);
13832 gcall *x;
13833 gbind *bind;
13834 tree block = make_node (BLOCK);
13835
13836 bind = gimple_build_bind (NULL, NULL, block);
13837 gsi_replace (gsi_p, bind, true);
13838 gimple_bind_add_stmt (bind, stmt);
13839
13840 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
13841 0);
13842 gimple_bind_add_stmt (bind, x);
13843
13844 lower_omp (gimple_omp_body_ptr (stmt), ctx);
13845 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
13846 gimple_omp_set_body (stmt, NULL);
13847
13848 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
13849
13850 gimple_bind_append_vars (bind, ctx->block_vars);
13851 BLOCK_VARS (block) = ctx->block_vars;
13852 }
13853
13854
13855 /* Fold the OMP_ORDERED_CLAUSES for the OMP_ORDERED in STMT if possible. */
13856
13857 static void
13858 lower_omp_ordered_clauses (gimple_stmt_iterator *gsi_p, gomp_ordered *ord_stmt,
13859 omp_context *ctx)
13860 {
13861 struct omp_for_data fd;
13862 if (!ctx->outer || gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR)
13863 return;
13864
13865 unsigned int len = gimple_omp_for_collapse (ctx->outer->stmt);
13866 struct omp_for_data_loop *loops = XALLOCAVEC (struct omp_for_data_loop, len);
13867 extract_omp_for_data (as_a <gomp_for *> (ctx->outer->stmt), &fd, loops);
13868 if (!fd.ordered)
13869 return;
13870
13871 tree *list_p = gimple_omp_ordered_clauses_ptr (ord_stmt);
13872 tree c = gimple_omp_ordered_clauses (ord_stmt);
13873 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
13874 && OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
13875 {
13876 /* Merge depend clauses from multiple adjacent
13877 #pragma omp ordered depend(sink:...) constructs
13878 into one #pragma omp ordered depend(sink:...), so that
13879 we can optimize them together. */
13880 gimple_stmt_iterator gsi = *gsi_p;
13881 gsi_next (&gsi);
13882 while (!gsi_end_p (gsi))
13883 {
13884 gimple *stmt = gsi_stmt (gsi);
13885 if (is_gimple_debug (stmt)
13886 || gimple_code (stmt) == GIMPLE_NOP)
13887 {
13888 gsi_next (&gsi);
13889 continue;
13890 }
13891 if (gimple_code (stmt) != GIMPLE_OMP_ORDERED)
13892 break;
13893 gomp_ordered *ord_stmt2 = as_a <gomp_ordered *> (stmt);
13894 c = gimple_omp_ordered_clauses (ord_stmt2);
13895 if (c == NULL_TREE
13896 || OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
13897 || OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_SINK)
13898 break;
13899 while (*list_p)
13900 list_p = &OMP_CLAUSE_CHAIN (*list_p);
13901 *list_p = c;
13902 gsi_remove (&gsi, true);
13903 }
13904 }
13905
13906 /* Canonicalize sink dependence clauses into one folded clause if
13907 possible.
13908
13909 The basic algorithm is to create a sink vector whose first
13910 element is the GCD of all the first elements, and whose remaining
13911 elements are the minimum of the subsequent columns.
13912
13913 We ignore dependence vectors whose first element is zero because
13914 such dependencies are known to be executed by the same thread.
13915
13916 We take into account the direction of the loop, so a minimum
13917 becomes a maximum if the loop is iterating forwards. We also
13918 ignore sink clauses where the loop direction is unknown, or where
13919 the offsets are clearly invalid because they are not a multiple
13920 of the loop increment.
13921
13922 For example:
13923
13924 #pragma omp for ordered(2)
13925 for (i=0; i < N; ++i)
13926 for (j=0; j < M; ++j)
13927 {
13928 #pragma omp ordered \
13929 depend(sink:i-8,j-2) \
13930 depend(sink:i,j-1) \ // Completely ignored because i+0.
13931 depend(sink:i-4,j-3) \
13932 depend(sink:i-6,j-4)
13933 #pragma omp ordered depend(source)
13934 }
13935
13936 Folded clause is:
13937
13938 depend(sink:-gcd(8,4,6),-min(2,3,4))
13939 -or-
13940 depend(sink:-2,-2)
13941 */
13942
13943   /* FIXME: Computing GCDs where the first element is zero is
13944      non-trivial in the presence of collapsed loops.  Do this later.  */
13945 if (fd.collapse > 1)
13946 return;
13947
13948 wide_int *folded_deps = XALLOCAVEC (wide_int, 2 * len - 1);
13949 memset (folded_deps, 0, sizeof (*folded_deps) * (2 * len - 1));
13950 tree folded_dep = NULL_TREE;
13951 /* TRUE if the first dimension's offset is negative. */
13952 bool neg_offset_p = false;
13953
13954 list_p = gimple_omp_ordered_clauses_ptr (ord_stmt);
13955 unsigned int i;
13956 while ((c = *list_p) != NULL)
13957 {
13958 bool remove = false;
13959
13960 gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND);
13961 if (OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_SINK)
13962 goto next_ordered_clause;
13963
13964 tree vec;
13965 for (vec = OMP_CLAUSE_DECL (c), i = 0;
13966 vec && TREE_CODE (vec) == TREE_LIST;
13967 vec = TREE_CHAIN (vec), ++i)
13968 {
13969 gcc_assert (i < len);
13970
13971 /* extract_omp_for_data has canonicalized the condition. */
13972 gcc_assert (fd.loops[i].cond_code == LT_EXPR
13973 || fd.loops[i].cond_code == GT_EXPR);
13974 bool forward = fd.loops[i].cond_code == LT_EXPR;
13975 bool maybe_lexically_later = true;
13976
13977 /* While the committee makes up its mind, bail if we have any
13978 non-constant steps. */
13979 if (TREE_CODE (fd.loops[i].step) != INTEGER_CST)
13980 goto lower_omp_ordered_ret;
13981
13982 tree itype = TREE_TYPE (TREE_VALUE (vec));
13983 if (POINTER_TYPE_P (itype))
13984 itype = sizetype;
13985 wide_int offset = wide_int::from (TREE_PURPOSE (vec),
13986 TYPE_PRECISION (itype),
13987 TYPE_SIGN (itype));
13988
13989 /* Ignore invalid offsets that are not multiples of the step. */
13990 if (!wi::multiple_of_p
13991 (wi::abs (offset), wi::abs ((wide_int) fd.loops[i].step),
13992 UNSIGNED))
13993 {
13994 warning_at (OMP_CLAUSE_LOCATION (c), 0,
13995 "ignoring sink clause with offset that is not "
13996 "a multiple of the loop step");
13997 remove = true;
13998 goto next_ordered_clause;
13999 }
14000
14001 /* Calculate the first dimension. The first dimension of
14002 the folded dependency vector is the GCD of the first
14003 elements, while ignoring any first elements whose offset
14004 is 0. */
14005 if (i == 0)
14006 {
14007 /* Ignore dependence vectors whose first dimension is 0. */
14008 if (offset == 0)
14009 {
14010 remove = true;
14011 goto next_ordered_clause;
14012 }
14013 else
14014 {
14015 if (!TYPE_UNSIGNED (itype) && (forward ^ wi::neg_p (offset)))
14016 {
14017 error_at (OMP_CLAUSE_LOCATION (c),
14018 "first offset must be in opposite direction "
14019 "of loop iterations");
14020 goto lower_omp_ordered_ret;
14021 }
14022 if (forward)
14023 offset = -offset;
14024 neg_offset_p = forward;
14025 /* Initialize the first time around. */
14026 if (folded_dep == NULL_TREE)
14027 {
14028 folded_dep = c;
14029 folded_deps[0] = offset;
14030 }
14031 else
14032 folded_deps[0] = wi::gcd (folded_deps[0],
14033 offset, UNSIGNED);
14034 }
14035 }
14036 /* Calculate minimum for the remaining dimensions. */
14037 else
14038 {
14039 folded_deps[len + i - 1] = offset;
14040 if (folded_dep == c)
14041 folded_deps[i] = offset;
14042 else if (maybe_lexically_later
14043 && !wi::eq_p (folded_deps[i], offset))
14044 {
14045 if (forward ^ wi::gts_p (folded_deps[i], offset))
14046 {
14047 unsigned int j;
14048 folded_dep = c;
14049 for (j = 1; j <= i; j++)
14050 folded_deps[j] = folded_deps[len + j - 1];
14051 }
14052 else
14053 maybe_lexically_later = false;
14054 }
14055 }
14056 }
14057 gcc_assert (i == len);
14058
14059 remove = true;
14060
14061 next_ordered_clause:
14062 if (remove)
14063 *list_p = OMP_CLAUSE_CHAIN (c);
14064 else
14065 list_p = &OMP_CLAUSE_CHAIN (c);
14066 }
14067
14068 if (folded_dep)
14069 {
14070 if (neg_offset_p)
14071 folded_deps[0] = -folded_deps[0];
14072
14073 tree itype = TREE_TYPE (TREE_VALUE (OMP_CLAUSE_DECL (folded_dep)));
14074 if (POINTER_TYPE_P (itype))
14075 itype = sizetype;
14076
14077 TREE_PURPOSE (OMP_CLAUSE_DECL (folded_dep))
14078 = wide_int_to_tree (itype, folded_deps[0]);
14079 OMP_CLAUSE_CHAIN (folded_dep) = gimple_omp_ordered_clauses (ord_stmt);
14080 *gimple_omp_ordered_clauses_ptr (ord_stmt) = folded_dep;
14081 }
14082
14083 lower_omp_ordered_ret:
14084
14085   /* Ordered without clauses is #pragma omp ordered threads, while we
14086      want a nop instead if we remove all clauses.  */
14087 if (gimple_omp_ordered_clauses (ord_stmt) == NULL_TREE)
14088 gsi_replace (gsi_p, gimple_build_nop (), true);
14089 }
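
/* Illustrative example of the merging step above (assumed typical input,
   not from the sources): two adjacent constructs such as

	#pragma omp ordered depend(sink: i-1, j)
	#pragma omp ordered depend(sink: i, j-1)

   are rewritten into the single construct

	#pragma omp ordered depend(sink: i-1, j) depend(sink: i, j-1)

   so that the folding code can combine both dependence vectors into one
   depend(sink: ...) clause.  */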
14090
14091
14092 /* Lower code for an OpenMP ordered directive.  */
14093
14094 static void
14095 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
14096 {
14097 tree block;
14098 gimple *stmt = gsi_stmt (*gsi_p);
14099 gomp_ordered *ord_stmt = as_a <gomp_ordered *> (stmt);
14100 gcall *x;
14101 gbind *bind;
14102 bool simd = find_omp_clause (gimple_omp_ordered_clauses (ord_stmt),
14103 OMP_CLAUSE_SIMD);
14104 bool threads = find_omp_clause (gimple_omp_ordered_clauses (ord_stmt),
14105 OMP_CLAUSE_THREADS);
14106
14107 if (find_omp_clause (gimple_omp_ordered_clauses (ord_stmt),
14108 OMP_CLAUSE_DEPEND))
14109 {
14110       /* FIXME: This needs to be moved to the expansion to verify various
14111 	 conditions only testable on a cfg with dominators computed, and also
14112 	 because all the depend clauses to be merged might still need to be
14113 	 available for the runtime checks.  */
14114 if (0)
14115 lower_omp_ordered_clauses (gsi_p, ord_stmt, ctx);
14116 return;
14117 }
14118
14119 push_gimplify_context ();
14120
14121 block = make_node (BLOCK);
14122 bind = gimple_build_bind (NULL, NULL, block);
14123 gsi_replace (gsi_p, bind, true);
14124 gimple_bind_add_stmt (bind, stmt);
14125
14126 if (simd)
14127 {
14128 x = gimple_build_call_internal (IFN_GOMP_SIMD_ORDERED_START, 1,
14129 build_int_cst (NULL_TREE, threads));
14130 cfun->has_simduid_loops = true;
14131 }
14132 else
14133 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
14134 0);
14135 gimple_bind_add_stmt (bind, x);
14136
14137 lower_omp (gimple_omp_body_ptr (stmt), ctx);
14138 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
14139 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
14140 gimple_omp_set_body (stmt, NULL);
14141
14142 if (simd)
14143 x = gimple_build_call_internal (IFN_GOMP_SIMD_ORDERED_END, 1,
14144 build_int_cst (NULL_TREE, threads));
14145 else
14146 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END),
14147 0);
14148 gimple_bind_add_stmt (bind, x);
14149
14150 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
14151
14152 pop_gimplify_context (bind);
14153
14154 gimple_bind_append_vars (bind, ctx->block_vars);
14155 BLOCK_VARS (block) = gimple_bind_vars (bind);
14156 }
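
/* Illustrative sketch of the lowering above for the non-depend case
   (not taken from the sources):

	#pragma omp ordered			{
	body;				=>	  GOMP_ordered_start ();
						  body;
						  GOMP_ordered_end ();
						  GIMPLE_OMP_RETURN
						}

   With the simd clause, the IFN_GOMP_SIMD_ORDERED_START and
   IFN_GOMP_SIMD_ORDERED_END internal functions are emitted instead,
   carrying a flag that says whether threads was also given.  */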
14157
14158
14159 /* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
14160    substitution of a couple of function calls, but the NAMED case
14161    requires that languages coordinate a symbol name.  It is therefore
14162    best put here in common code.  */
14163
14164 static GTY(()) hash_map<tree, tree> *critical_name_mutexes;
14165
14166 static void
14167 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
14168 {
14169 tree block;
14170 tree name, lock, unlock;
14171 gomp_critical *stmt = as_a <gomp_critical *> (gsi_stmt (*gsi_p));
14172 gbind *bind;
14173 location_t loc = gimple_location (stmt);
14174 gimple_seq tbody;
14175
14176 name = gimple_omp_critical_name (stmt);
14177 if (name)
14178 {
14179 tree decl;
14180
14181 if (!critical_name_mutexes)
14182 critical_name_mutexes = hash_map<tree, tree>::create_ggc (10);
14183
14184 tree *n = critical_name_mutexes->get (name);
14185 if (n == NULL)
14186 {
14187 char *new_str;
14188
14189 decl = create_tmp_var_raw (ptr_type_node);
14190
14191 new_str = ACONCAT ((".gomp_critical_user_",
14192 IDENTIFIER_POINTER (name), NULL));
14193 DECL_NAME (decl) = get_identifier (new_str);
14194 TREE_PUBLIC (decl) = 1;
14195 TREE_STATIC (decl) = 1;
14196 DECL_COMMON (decl) = 1;
14197 DECL_ARTIFICIAL (decl) = 1;
14198 DECL_IGNORED_P (decl) = 1;
14199
14200 varpool_node::finalize_decl (decl);
14201
14202 critical_name_mutexes->put (name, decl);
14203 }
14204 else
14205 decl = *n;
14206
14207       /* If '#pragma omp critical' is inside an offloaded region or
14208 	 inside a function marked as offloadable, the symbol must be
14209 	 marked as offloadable too.  */
14210 omp_context *octx;
14211 if (cgraph_node::get (current_function_decl)->offloadable)
14212 varpool_node::get_create (decl)->offloadable = 1;
14213 else
14214 for (octx = ctx->outer; octx; octx = octx->outer)
14215 if (is_gimple_omp_offloaded (octx->stmt))
14216 {
14217 varpool_node::get_create (decl)->offloadable = 1;
14218 break;
14219 }
14220
14221 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
14222 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
14223
14224 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
14225 unlock = build_call_expr_loc (loc, unlock, 1,
14226 build_fold_addr_expr_loc (loc, decl));
14227 }
14228 else
14229 {
14230 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
14231 lock = build_call_expr_loc (loc, lock, 0);
14232
14233 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
14234 unlock = build_call_expr_loc (loc, unlock, 0);
14235 }
14236
14237 push_gimplify_context ();
14238
14239 block = make_node (BLOCK);
14240 bind = gimple_build_bind (NULL, NULL, block);
14241 gsi_replace (gsi_p, bind, true);
14242 gimple_bind_add_stmt (bind, stmt);
14243
14244 tbody = gimple_bind_body (bind);
14245 gimplify_and_add (lock, &tbody);
14246 gimple_bind_set_body (bind, tbody);
14247
14248 lower_omp (gimple_omp_body_ptr (stmt), ctx);
14249 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
14250 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
14251 gimple_omp_set_body (stmt, NULL);
14252
14253 tbody = gimple_bind_body (bind);
14254 gimplify_and_add (unlock, &tbody);
14255 gimple_bind_set_body (bind, tbody);
14256
14257 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
14258
14259 pop_gimplify_context (bind);
14260 gimple_bind_append_vars (bind, ctx->block_vars);
14261 BLOCK_VARS (block) = gimple_bind_vars (bind);
14262 }
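
/* Illustrative sketch of the lowering above (not taken from the
   sources); for a named critical such as

	#pragma omp critical (foo)
	body;

   the generated sequence is roughly

	{
	  GOMP_critical_name_start (&.gomp_critical_user_foo);
	  body;
	  GOMP_critical_name_end (&.gomp_critical_user_foo);
	  GIMPLE_OMP_RETURN
	}

   while an unnamed critical uses the argument-less
   GOMP_critical_start/GOMP_critical_end pair.  */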
14263
14264
14265 /* A subroutine of lower_omp_for. Generate code to emit the predicate
14266 for a lastprivate clause. Given a loop control predicate of (V
14267 cond N2), we gate the clause on (!(V cond N2)). The lowered form
14268 is appended to *DLIST, iterator initialization is appended to
14269 *BODY_P. */
14270
14271 static void
14272 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
14273 gimple_seq *dlist, struct omp_context *ctx)
14274 {
14275 tree clauses, cond, vinit;
14276 enum tree_code cond_code;
14277 gimple_seq stmts;
14278
14279 cond_code = fd->loop.cond_code;
14280 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
14281
14282 /* When possible, use a strict equality expression. This can let VRP
14283 type optimizations deduce the value and remove a copy. */
14284 if (tree_fits_shwi_p (fd->loop.step))
14285 {
14286 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
14287 if (step == 1 || step == -1)
14288 cond_code = EQ_EXPR;
14289 }
14290
14291 tree n2 = fd->loop.n2;
14292 if (fd->collapse > 1
14293 && TREE_CODE (n2) != INTEGER_CST
14294 && gimple_omp_for_combined_into_p (fd->for_stmt))
14295 {
14296 struct omp_context *taskreg_ctx = NULL;
14297 if (gimple_code (ctx->outer->stmt) == GIMPLE_OMP_FOR)
14298 {
14299 gomp_for *gfor = as_a <gomp_for *> (ctx->outer->stmt);
14300 if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_FOR
14301 || gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_DISTRIBUTE)
14302 {
14303 if (gimple_omp_for_combined_into_p (gfor))
14304 {
14305 gcc_assert (ctx->outer->outer
14306 && is_parallel_ctx (ctx->outer->outer));
14307 taskreg_ctx = ctx->outer->outer;
14308 }
14309 else
14310 {
14311 struct omp_for_data outer_fd;
14312 extract_omp_for_data (gfor, &outer_fd, NULL);
14313 n2 = fold_convert (TREE_TYPE (n2), outer_fd.loop.n2);
14314 }
14315 }
14316 else if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_TASKLOOP)
14317 taskreg_ctx = ctx->outer->outer;
14318 }
14319 else if (is_taskreg_ctx (ctx->outer))
14320 taskreg_ctx = ctx->outer;
14321 if (taskreg_ctx)
14322 {
14323 int i;
14324 tree innerc
14325 = find_omp_clause (gimple_omp_taskreg_clauses (taskreg_ctx->stmt),
14326 OMP_CLAUSE__LOOPTEMP_);
14327 gcc_assert (innerc);
14328 for (i = 0; i < fd->collapse; i++)
14329 {
14330 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
14331 OMP_CLAUSE__LOOPTEMP_);
14332 gcc_assert (innerc);
14333 }
14334 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
14335 OMP_CLAUSE__LOOPTEMP_);
14336 if (innerc)
14337 n2 = fold_convert (TREE_TYPE (n2),
14338 lookup_decl (OMP_CLAUSE_DECL (innerc),
14339 taskreg_ctx));
14340 }
14341 }
14342 cond = build2 (cond_code, boolean_type_node, fd->loop.v, n2);
14343
14344 clauses = gimple_omp_for_clauses (fd->for_stmt);
14345 stmts = NULL;
14346 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
14347 if (!gimple_seq_empty_p (stmts))
14348 {
14349 gimple_seq_add_seq (&stmts, *dlist);
14350 *dlist = stmts;
14351
14352 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
14353 vinit = fd->loop.n1;
14354 if (cond_code == EQ_EXPR
14355 && tree_fits_shwi_p (fd->loop.n2)
14356 && ! integer_zerop (fd->loop.n2))
14357 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
14358 else
14359 vinit = unshare_expr (vinit);
14360
14361 /* Initialize the iterator variable, so that threads that don't execute
14362 any iterations don't execute the lastprivate clauses by accident. */
14363 gimplify_assign (fd->loop.v, vinit, body_p);
14364 }
14365 }
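
/* Illustrative example (not taken from the sources): for

	#pragma omp for lastprivate (x)
	for (i = 0; i < n; i++) ...

   the code above gates the lastprivate copy-out on the negated loop
   predicate, roughly

	i = vinit;	// so threads with no iterations fail the test
	...
	if (i >= n)	// or i == n when the step is +-1
	  x = x_private;

   where vinit is the loop's initial value (or 0 when the equality form
   is used and that is cheaper).  */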
14366
14367
14368 /* Lower code for an OMP loop directive. */
14369
14370 static void
14371 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
14372 {
14373 tree *rhs_p, block;
14374 struct omp_for_data fd, *fdp = NULL;
14375 gomp_for *stmt = as_a <gomp_for *> (gsi_stmt (*gsi_p));
14376 gbind *new_stmt;
14377 gimple_seq omp_for_body, body, dlist;
14378 gimple_seq oacc_head = NULL, oacc_tail = NULL;
14379 size_t i;
14380
14381 push_gimplify_context ();
14382
14383 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
14384
14385 block = make_node (BLOCK);
14386 new_stmt = gimple_build_bind (NULL, NULL, block);
14387 /* Replace at gsi right away, so that 'stmt' is no member
14388 of a sequence anymore as we're going to add to a different
14389 one below. */
14390 gsi_replace (gsi_p, new_stmt, true);
14391
14392   /* Move declarations of temporaries in the loop body before we make
14393      it go away.  */
14394 omp_for_body = gimple_omp_body (stmt);
14395 if (!gimple_seq_empty_p (omp_for_body)
14396 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
14397 {
14398 gbind *inner_bind
14399 = as_a <gbind *> (gimple_seq_first_stmt (omp_for_body));
14400 tree vars = gimple_bind_vars (inner_bind);
14401 gimple_bind_append_vars (new_stmt, vars);
14402       /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, so don't
14403 	 keep them on the inner_bind and its block.  */
14404 gimple_bind_set_vars (inner_bind, NULL_TREE);
14405 if (gimple_bind_block (inner_bind))
14406 BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
14407 }
14408
14409 if (gimple_omp_for_combined_into_p (stmt))
14410 {
14411 extract_omp_for_data (stmt, &fd, NULL);
14412 fdp = &fd;
14413
14414 /* We need two temporaries with fd.loop.v type (istart/iend)
14415 and then (fd.collapse - 1) temporaries with the same
14416 type for count2 ... countN-1 vars if not constant. */
14417 size_t count = 2;
14418 tree type = fd.iter_type;
14419 if (fd.collapse > 1
14420 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
14421 count += fd.collapse - 1;
14422 bool taskreg_for
14423 = (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR
14424 || gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP);
14425 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
14426 tree clauses = *pc;
14427 if (taskreg_for)
14428 outerc
14429 = find_omp_clause (gimple_omp_taskreg_clauses (ctx->outer->stmt),
14430 OMP_CLAUSE__LOOPTEMP_);
14431 for (i = 0; i < count; i++)
14432 {
14433 tree temp;
14434 if (taskreg_for)
14435 {
14436 gcc_assert (outerc);
14437 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
14438 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
14439 OMP_CLAUSE__LOOPTEMP_);
14440 }
14441 else
14442 {
14443 temp = create_tmp_var (type);
14444 insert_decl_map (&ctx->outer->cb, temp, temp);
14445 }
14446 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
14447 OMP_CLAUSE_DECL (*pc) = temp;
14448 pc = &OMP_CLAUSE_CHAIN (*pc);
14449 }
14450 *pc = clauses;
14451 }
14452
14453 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
14454 dlist = NULL;
14455 body = NULL;
14456 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
14457 fdp);
14458 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
14459
14460 lower_omp (gimple_omp_body_ptr (stmt), ctx);
14461
14462 /* Lower the header expressions. At this point, we can assume that
14463 the header is of the form:
14464
14465 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
14466
14467 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
14468 using the .omp_data_s mapping, if needed. */
14469 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
14470 {
14471 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
14472 if (!is_gimple_min_invariant (*rhs_p))
14473 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
14474
14475 rhs_p = gimple_omp_for_final_ptr (stmt, i);
14476 if (!is_gimple_min_invariant (*rhs_p))
14477 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
14478
14479 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
14480 if (!is_gimple_min_invariant (*rhs_p))
14481 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
14482 }
14483
14484 /* Once lowered, extract the bounds and clauses. */
14485 extract_omp_for_data (stmt, &fd, NULL);
14486
14487 if (is_gimple_omp_oacc (ctx->stmt)
14488 && !ctx_in_oacc_kernels_region (ctx))
14489 lower_oacc_head_tail (gimple_location (stmt),
14490 gimple_omp_for_clauses (stmt),
14491 &oacc_head, &oacc_tail, ctx);
14492
14493   /* Add OpenACC partitioning and reduction markers just before the loop.  */
14494 if (oacc_head)
14495 gimple_seq_add_seq (&body, oacc_head);
14496
14497 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
14498
14499 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
14500 for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
14501 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
14502 && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
14503 {
14504 OMP_CLAUSE_DECL (c) = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
14505 if (DECL_P (OMP_CLAUSE_LINEAR_STEP (c)))
14506 OMP_CLAUSE_LINEAR_STEP (c)
14507 = maybe_lookup_decl_in_outer_ctx (OMP_CLAUSE_LINEAR_STEP (c),
14508 ctx);
14509 }
14510
14511 gimple_seq_add_stmt (&body, stmt);
14512 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
14513
14514 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
14515 fd.loop.v));
14516
14517 /* After the loop, add exit clauses. */
14518 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
14519
14520 if (ctx->cancellable)
14521 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
14522
14523 gimple_seq_add_seq (&body, dlist);
14524
14525 body = maybe_catch_exception (body);
14526
14527 /* Region exit marker goes at the end of the loop body. */
14528 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
14529 maybe_add_implicit_barrier_cancel (ctx, &body);
14530
14531 /* Add OpenACC joining and reduction markers just after the loop. */
14532 if (oacc_tail)
14533 gimple_seq_add_seq (&body, oacc_tail);
14534
14535 pop_gimplify_context (new_stmt);
14536
14537 gimple_bind_append_vars (new_stmt, ctx->block_vars);
14538 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
14539 if (BLOCK_VARS (block))
14540 TREE_USED (block) = 1;
14541
14542 gimple_bind_set_body (new_stmt, body);
14543 gimple_omp_set_body (stmt, NULL);
14544 gimple_omp_for_set_pre_body (stmt, NULL);
14545 }
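
/* The sequence assembled above has roughly the following shape
   (illustrative only):

	{
	  input clauses (firstprivate, ...);
	  pre-body and lowered header temporaries;
	  GIMPLE_OMP_FOR
	  body;
	  GIMPLE_OMP_CONTINUE (V, V)
	  reduction clauses;
	  lastprivate/cleanup list (dlist);
	  GIMPLE_OMP_RETURN (nowait?)
	}

   The expansion pass later replaces the GIMPLE_OMP_FOR, OMP_CONTINUE
   and OMP_RETURN markers with an actual CFG loop.  */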
14546
14547 /* Callback for walk_stmts. Check if the current statement only contains
14548 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
14549
14550 static tree
14551 check_combined_parallel (gimple_stmt_iterator *gsi_p,
14552 bool *handled_ops_p,
14553 struct walk_stmt_info *wi)
14554 {
14555 int *info = (int *) wi->info;
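  /* *INFO is a tri-state: 0 = no workshare seen yet, 1 = exactly one
     GIMPLE_OMP_FOR/GIMPLE_OMP_SECTIONS seen, -1 = anything else, i.e.
     not a combinable parallel.  */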
14556 gimple *stmt = gsi_stmt (*gsi_p);
14557
14558 *handled_ops_p = true;
14559 switch (gimple_code (stmt))
14560 {
14561 WALK_SUBSTMTS;
14562
14563 case GIMPLE_OMP_FOR:
14564 case GIMPLE_OMP_SECTIONS:
14565 *info = *info == 0 ? 1 : -1;
14566 break;
14567 default:
14568 *info = -1;
14569 break;
14570 }
14571 return NULL;
14572 }
14573
14574 struct omp_taskcopy_context
14575 {
14576   /* This field must be at the beginning, as we do "inheritance": Some
14577      callback functions for tree-inline.c (e.g., omp_copy_decl)
14578      receive a copy_body_data pointer that is up-cast to an
14579      omp_taskcopy_context pointer.  */
14580 copy_body_data cb;
14581 omp_context *ctx;
14582 };
14583
14584 static tree
14585 task_copyfn_copy_decl (tree var, copy_body_data *cb)
14586 {
14587 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
14588
14589 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
14590 return create_tmp_var (TREE_TYPE (var));
14591
14592 return var;
14593 }
14594
14595 static tree
14596 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
14597 {
14598 tree name, new_fields = NULL, type, f;
14599
14600 type = lang_hooks.types.make_type (RECORD_TYPE);
14601 name = DECL_NAME (TYPE_NAME (orig_type));
14602 name = build_decl (gimple_location (tcctx->ctx->stmt),
14603 TYPE_DECL, name, type);
14604 TYPE_NAME (type) = name;
14605
14606 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
14607 {
14608 tree new_f = copy_node (f);
14609 DECL_CONTEXT (new_f) = type;
14610 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
14611 TREE_CHAIN (new_f) = new_fields;
14612 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
14613 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
14614 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
14615 &tcctx->cb, NULL);
14616 new_fields = new_f;
14617 tcctx->cb.decl_map->put (f, new_f);
14618 }
14619 TYPE_FIELDS (type) = nreverse (new_fields);
14620 layout_type (type);
14621 return type;
14622 }
14623
14624 /* Create task copyfn. */
14625
14626 static void
14627 create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
14628 {
14629 struct function *child_cfun;
14630 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
14631 tree record_type, srecord_type, bind, list;
14632 bool record_needs_remap = false, srecord_needs_remap = false;
14633 splay_tree_node n;
14634 struct omp_taskcopy_context tcctx;
14635 location_t loc = gimple_location (task_stmt);
14636
14637 child_fn = gimple_omp_task_copy_fn (task_stmt);
14638 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
14639 gcc_assert (child_cfun->cfg == NULL);
14640 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
14641
14642 /* Reset DECL_CONTEXT on function arguments. */
14643 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
14644 DECL_CONTEXT (t) = child_fn;
14645
14646 /* Populate the function. */
14647 push_gimplify_context ();
14648 push_cfun (child_cfun);
14649
14650 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
14651 TREE_SIDE_EFFECTS (bind) = 1;
14652 list = NULL;
14653 DECL_SAVED_TREE (child_fn) = bind;
14654 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
14655
14656 /* Remap src and dst argument types if needed. */
14657 record_type = ctx->record_type;
14658 srecord_type = ctx->srecord_type;
14659 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
14660 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
14661 {
14662 record_needs_remap = true;
14663 break;
14664 }
14665 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
14666 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
14667 {
14668 srecord_needs_remap = true;
14669 break;
14670 }
14671
14672 if (record_needs_remap || srecord_needs_remap)
14673 {
14674 memset (&tcctx, '\0', sizeof (tcctx));
14675 tcctx.cb.src_fn = ctx->cb.src_fn;
14676 tcctx.cb.dst_fn = child_fn;
14677 tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
14678 gcc_checking_assert (tcctx.cb.src_node);
14679 tcctx.cb.dst_node = tcctx.cb.src_node;
14680 tcctx.cb.src_cfun = ctx->cb.src_cfun;
14681 tcctx.cb.copy_decl = task_copyfn_copy_decl;
14682 tcctx.cb.eh_lp_nr = 0;
14683 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
14684 tcctx.cb.decl_map = new hash_map<tree, tree>;
14685 tcctx.ctx = ctx;
14686
14687 if (record_needs_remap)
14688 record_type = task_copyfn_remap_type (&tcctx, record_type);
14689 if (srecord_needs_remap)
14690 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
14691 }
14692 else
14693 tcctx.cb.decl_map = NULL;
14694
14695 arg = DECL_ARGUMENTS (child_fn);
14696 TREE_TYPE (arg) = build_pointer_type (record_type);
14697 sarg = DECL_CHAIN (arg);
14698 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
14699
14700 /* First pass: initialize temporaries used in record_type and srecord_type
14701 sizes and field offsets. */
14702 if (tcctx.cb.decl_map)
14703 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
14704 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
14705 {
14706 tree *p;
14707
14708 decl = OMP_CLAUSE_DECL (c);
14709 p = tcctx.cb.decl_map->get (decl);
14710 if (p == NULL)
14711 continue;
14712 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
14713 sf = (tree) n->value;
14714 sf = *tcctx.cb.decl_map->get (sf);
14715 src = build_simple_mem_ref_loc (loc, sarg);
14716 src = omp_build_component_ref (src, sf);
14717 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
14718 append_to_statement_list (t, &list);
14719 }
14720
14721   /* Second pass: copy shared var pointers and copy-construct non-VLA
14722      firstprivate vars.  */
14723 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
14724 switch (OMP_CLAUSE_CODE (c))
14725 {
14726 splay_tree_key key;
14727 case OMP_CLAUSE_SHARED:
14728 decl = OMP_CLAUSE_DECL (c);
14729 key = (splay_tree_key) decl;
14730 if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
14731 key = (splay_tree_key) &DECL_UID (decl);
14732 n = splay_tree_lookup (ctx->field_map, key);
14733 if (n == NULL)
14734 break;
14735 f = (tree) n->value;
14736 if (tcctx.cb.decl_map)
14737 f = *tcctx.cb.decl_map->get (f);
14738 n = splay_tree_lookup (ctx->sfield_map, key);
14739 sf = (tree) n->value;
14740 if (tcctx.cb.decl_map)
14741 sf = *tcctx.cb.decl_map->get (sf);
14742 src = build_simple_mem_ref_loc (loc, sarg);
14743 src = omp_build_component_ref (src, sf);
14744 dst = build_simple_mem_ref_loc (loc, arg);
14745 dst = omp_build_component_ref (dst, f);
14746 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
14747 append_to_statement_list (t, &list);
14748 break;
14749 case OMP_CLAUSE_FIRSTPRIVATE:
14750 decl = OMP_CLAUSE_DECL (c);
14751 if (is_variable_sized (decl))
14752 break;
14753 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
14754 if (n == NULL)
14755 break;
14756 f = (tree) n->value;
14757 if (tcctx.cb.decl_map)
14758 f = *tcctx.cb.decl_map->get (f);
14759 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
14760 if (n != NULL)
14761 {
14762 sf = (tree) n->value;
14763 if (tcctx.cb.decl_map)
14764 sf = *tcctx.cb.decl_map->get (sf);
14765 src = build_simple_mem_ref_loc (loc, sarg);
14766 src = omp_build_component_ref (src, sf);
14767 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
14768 src = build_simple_mem_ref_loc (loc, src);
14769 }
14770 else
14771 src = decl;
14772 dst = build_simple_mem_ref_loc (loc, arg);
14773 dst = omp_build_component_ref (dst, f);
14774 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
14775 append_to_statement_list (t, &list);
14776 break;
14777 case OMP_CLAUSE_PRIVATE:
14778 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
14779 break;
14780 decl = OMP_CLAUSE_DECL (c);
14781 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
14782 f = (tree) n->value;
14783 if (tcctx.cb.decl_map)
14784 f = *tcctx.cb.decl_map->get (f);
14785 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
14786 if (n != NULL)
14787 {
14788 sf = (tree) n->value;
14789 if (tcctx.cb.decl_map)
14790 sf = *tcctx.cb.decl_map->get (sf);
14791 src = build_simple_mem_ref_loc (loc, sarg);
14792 src = omp_build_component_ref (src, sf);
14793 if (use_pointer_for_field (decl, NULL))
14794 src = build_simple_mem_ref_loc (loc, src);
14795 }
14796 else
14797 src = decl;
14798 dst = build_simple_mem_ref_loc (loc, arg);
14799 dst = omp_build_component_ref (dst, f);
14800 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
14801 append_to_statement_list (t, &list);
14802 break;
14803 default:
14804 break;
14805 }
14806
14807 /* Last pass: handle VLA firstprivates. */
14808 if (tcctx.cb.decl_map)
14809 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
14810 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
14811 {
14812 tree ind, ptr, df;
14813
14814 decl = OMP_CLAUSE_DECL (c);
14815 if (!is_variable_sized (decl))
14816 continue;
14817 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
14818 if (n == NULL)
14819 continue;
14820 f = (tree) n->value;
14821 f = *tcctx.cb.decl_map->get (f);
14822 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
14823 ind = DECL_VALUE_EXPR (decl);
14824 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
14825 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
14826 n = splay_tree_lookup (ctx->sfield_map,
14827 (splay_tree_key) TREE_OPERAND (ind, 0));
14828 sf = (tree) n->value;
14829 sf = *tcctx.cb.decl_map->get (sf);
14830 src = build_simple_mem_ref_loc (loc, sarg);
14831 src = omp_build_component_ref (src, sf);
14832 src = build_simple_mem_ref_loc (loc, src);
14833 dst = build_simple_mem_ref_loc (loc, arg);
14834 dst = omp_build_component_ref (dst, f);
14835 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
14836 append_to_statement_list (t, &list);
14837 n = splay_tree_lookup (ctx->field_map,
14838 (splay_tree_key) TREE_OPERAND (ind, 0));
14839 df = (tree) n->value;
14840 df = *tcctx.cb.decl_map->get (df);
14841 ptr = build_simple_mem_ref_loc (loc, arg);
14842 ptr = omp_build_component_ref (ptr, df);
14843 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
14844 build_fold_addr_expr_loc (loc, dst));
14845 append_to_statement_list (t, &list);
14846 }
14847
14848 t = build1 (RETURN_EXPR, void_type_node, NULL);
14849 append_to_statement_list (t, &list);
14850
14851 if (tcctx.cb.decl_map)
14852 delete tcctx.cb.decl_map;
14853 pop_gimplify_context (NULL);
14854 BIND_EXPR_BODY (bind) = list;
14855 pop_cfun ();
14856 }
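
/* Illustrative sketch (not taken from the sources; the type and field
   names are made up) of the copyfn built above for
   "#pragma omp task shared (s) firstprivate (x)":

	void task_copyfn (struct omp_data_dst *dst, struct omp_data_src *src)
	{
	  dst->s = src->s;	/* Copy the shared var pointer.  */
	  dst->x = src->x;	/* Copy-construct the firstprivate var.  */
	}

   VLA firstprivates are handled in the last pass: their payload is
   copy-constructed into *dst and the corresponding pointer field in
   *dst is then pointed at that copy.  */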
14857
14858 static void
14859 lower_depend_clauses (tree *pclauses, gimple_seq *iseq, gimple_seq *oseq)
14860 {
14861 tree c, clauses;
14862 gimple *g;
14863 size_t n_in = 0, n_out = 0, idx = 2, i;
14864
14865 clauses = find_omp_clause (*pclauses, OMP_CLAUSE_DEPEND);
14866 gcc_assert (clauses);
14867 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
14868 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
14869 switch (OMP_CLAUSE_DEPEND_KIND (c))
14870 {
14871 case OMP_CLAUSE_DEPEND_IN:
14872 n_in++;
14873 break;
14874 case OMP_CLAUSE_DEPEND_OUT:
14875 case OMP_CLAUSE_DEPEND_INOUT:
14876 n_out++;
14877 break;
14878 case OMP_CLAUSE_DEPEND_SOURCE:
14879 case OMP_CLAUSE_DEPEND_SINK:
14880 /* FALLTHRU */
14881 default:
14882 gcc_unreachable ();
14883 }
14884 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
14885 tree array = create_tmp_var (type);
14886 TREE_ADDRESSABLE (array) = 1;
14887 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
14888 NULL_TREE);
14889 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
14890 gimple_seq_add_stmt (iseq, g);
14891 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
14892 NULL_TREE);
14893 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
14894 gimple_seq_add_stmt (iseq, g);
14895 for (i = 0; i < 2; i++)
14896 {
14897 if ((i ? n_in : n_out) == 0)
14898 continue;
14899 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
14900 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
14901 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
14902 {
14903 tree t = OMP_CLAUSE_DECL (c);
14904 t = fold_convert (ptr_type_node, t);
14905 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
14906 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
14907 NULL_TREE, NULL_TREE);
14908 g = gimple_build_assign (r, t);
14909 gimple_seq_add_stmt (iseq, g);
14910 }
14911 }
14912 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
14913 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
14914 OMP_CLAUSE_CHAIN (c) = *pclauses;
14915 *pclauses = c;
14916 tree clobber = build_constructor (type, NULL);
14917 TREE_THIS_VOLATILE (clobber) = 1;
14918 g = gimple_build_assign (array, clobber);
14919 gimple_seq_add_stmt (oseq, g);
14920 }
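
/* Illustrative example (not taken from the sources): for
   "#pragma omp task depend(in: a) depend(out: b) depend(inout: c)"
   the code above materializes, in clause chain order, roughly

	void *array[5] = { (void *) 3,	/* Total number of depend items.  */
			   (void *) 2,	/* Number of out/inout items.  */
			   &b, &c,	/* out/inout addresses first,  */
			   &a };	/* then the in addresses.  */

   chains a new OMP_CLAUSE_DEPEND pointing at the array onto the
   directive's clauses, and clobbers the array once the directive is
   done with it.  */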
14921
14922 /* Lower the OpenMP parallel or task directive in the current statement
14923 in GSI_P. CTX holds context information for the directive. */
14924
14925 static void
14926 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
14927 {
14928 tree clauses;
14929 tree child_fn, t;
14930 gimple *stmt = gsi_stmt (*gsi_p);
14931 gbind *par_bind, *bind, *dep_bind = NULL;
14932 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
14933 location_t loc = gimple_location (stmt);
14934
14935 clauses = gimple_omp_taskreg_clauses (stmt);
14936 par_bind
14937 = as_a <gbind *> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
14938 par_body = gimple_bind_body (par_bind);
14939 child_fn = ctx->cb.dst_fn;
14940 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
14941 && !gimple_omp_parallel_combined_p (stmt))
14942 {
14943 struct walk_stmt_info wi;
14944 int ws_num = 0;
14945
14946 memset (&wi, 0, sizeof (wi));
14947 wi.info = &ws_num;
14948 wi.val_only = true;
14949 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
14950 if (ws_num == 1)
14951 gimple_omp_parallel_set_combined_p (stmt, true);
14952 }
14953 gimple_seq dep_ilist = NULL;
14954 gimple_seq dep_olist = NULL;
14955 if (gimple_code (stmt) == GIMPLE_OMP_TASK
14956 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
14957 {
14958 push_gimplify_context ();
14959 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
14960 lower_depend_clauses (gimple_omp_task_clauses_ptr (stmt),
14961 &dep_ilist, &dep_olist);
14962 }
14963
14964 if (ctx->srecord_type)
14965 create_task_copyfn (as_a <gomp_task *> (stmt), ctx);
14966
14967 push_gimplify_context ();
14968
14969 par_olist = NULL;
14970 par_ilist = NULL;
14971 par_rlist = NULL;
14972 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
14973 lower_omp (&par_body, ctx);
14974 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
14975 lower_reduction_clauses (clauses, &par_rlist, ctx);
14976
14977 /* Declare all the variables created by mapping and the variables
14978 declared in the scope of the parallel body. */
14979 record_vars_into (ctx->block_vars, child_fn);
14980 record_vars_into (gimple_bind_vars (par_bind), child_fn);
14981
14982 if (ctx->record_type)
14983 {
14984 ctx->sender_decl
14985 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
14986 : ctx->record_type, ".omp_data_o");
14987 DECL_NAMELESS (ctx->sender_decl) = 1;
14988 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
14989 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
14990 }
14991
14992 olist = NULL;
14993 ilist = NULL;
14994 lower_send_clauses (clauses, &ilist, &olist, ctx);
14995 lower_send_shared_vars (&ilist, &olist, ctx);
14996
14997 if (ctx->record_type)
14998 {
14999 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
15000 TREE_THIS_VOLATILE (clobber) = 1;
15001 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
15002 clobber));
15003 }
15004
15005 /* Once all the expansions are done, sequence all the different
15006 fragments inside gimple_omp_body. */
15007
15008 new_body = NULL;
15009
15010 if (ctx->record_type)
15011 {
15012 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
15013 /* fixup_child_record_type might have changed receiver_decl's type. */
15014 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
15015 gimple_seq_add_stmt (&new_body,
15016 gimple_build_assign (ctx->receiver_decl, t));
15017 }
15018
15019 gimple_seq_add_seq (&new_body, par_ilist);
15020 gimple_seq_add_seq (&new_body, par_body);
15021 gimple_seq_add_seq (&new_body, par_rlist);
15022 if (ctx->cancellable)
15023 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
15024 gimple_seq_add_seq (&new_body, par_olist);
15025 new_body = maybe_catch_exception (new_body);
15026 if (gimple_code (stmt) == GIMPLE_OMP_TASK)
15027 gimple_seq_add_stmt (&new_body,
15028 gimple_build_omp_continue (integer_zero_node,
15029 integer_zero_node));
15030 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
15031 gimple_omp_set_body (stmt, new_body);
15032
15033 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
15034 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
15035 gimple_bind_add_seq (bind, ilist);
15036 gimple_bind_add_stmt (bind, stmt);
15037 gimple_bind_add_seq (bind, olist);
15038
15039 pop_gimplify_context (NULL);
15040
15041 if (dep_bind)
15042 {
15043 gimple_bind_add_seq (dep_bind, dep_ilist);
15044 gimple_bind_add_stmt (dep_bind, bind);
15045 gimple_bind_add_seq (dep_bind, dep_olist);
15046 pop_gimplify_context (dep_bind);
15047 }
15048 }
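
/* The overall result of lower_omp_taskreg looks roughly like this
   (illustrative only):

	{
	  send clauses into .omp_data_o (ilist);
	  GIMPLE_OMP_PARALLEL/GIMPLE_OMP_TASK	<-- carries the lowered
						    body, ending in
						    GIMPLE_OMP_RETURN
	  copy-back/cleanup (olist);
	}

   For a task with depend clauses, this is wrapped in an outer bind
   that first materializes the depend array and clobbers it
   afterwards.  */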
15049
15050 /* Lower the GIMPLE_OMP_TARGET in the current statement
15051 in GSI_P. CTX holds context information for the directive. */
15052
15053 static void
15054 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
15055 {
15056 tree clauses;
15057 tree child_fn, t, c;
15058 gomp_target *stmt = as_a <gomp_target *> (gsi_stmt (*gsi_p));
15059 gbind *tgt_bind, *bind, *dep_bind = NULL;
15060 gimple_seq tgt_body, olist, ilist, fplist, new_body;
15061 location_t loc = gimple_location (stmt);
15062 bool offloaded, data_region;
15063 unsigned int map_cnt = 0;
15064 bool has_depend = false;
15065
15066 offloaded = is_gimple_omp_offloaded (stmt);
15067 switch (gimple_omp_target_kind (stmt))
15068 {
15069 case GF_OMP_TARGET_KIND_REGION:
15070 case GF_OMP_TARGET_KIND_UPDATE:
15071 case GF_OMP_TARGET_KIND_ENTER_DATA:
15072 case GF_OMP_TARGET_KIND_EXIT_DATA:
15073 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
15074 case GF_OMP_TARGET_KIND_OACC_KERNELS:
15075 case GF_OMP_TARGET_KIND_OACC_UPDATE:
15076 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
15077 case GF_OMP_TARGET_KIND_OACC_DECLARE:
15078 data_region = false;
15079 break;
15080 case GF_OMP_TARGET_KIND_DATA:
15081 case GF_OMP_TARGET_KIND_OACC_DATA:
15082 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
15083 data_region = true;
15084 break;
15085 default:
15086 gcc_unreachable ();
15087 }
15088
15089 clauses = gimple_omp_target_clauses (stmt);
15090
15091 gimple_seq dep_ilist = NULL;
15092 gimple_seq dep_olist = NULL;
15093 if (find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
15094 {
15095 push_gimplify_context ();
15096 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
15097 lower_depend_clauses (gimple_omp_target_clauses_ptr (stmt),
15098 &dep_ilist, &dep_olist);
15099 has_depend = true;
15100 }
15101
15102 tgt_bind = NULL;
15103 tgt_body = NULL;
15104 if (offloaded)
15105 {
15106 tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
15107 tgt_body = gimple_bind_body (tgt_bind);
15108 }
15109 else if (data_region)
15110 tgt_body = gimple_omp_body (stmt);
15111 child_fn = ctx->cb.dst_fn;
15112
15113 push_gimplify_context ();
15114 fplist = NULL;
15115
15116 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
15117 switch (OMP_CLAUSE_CODE (c))
15118 {
15119 tree var, x;
15120
15121 default:
15122 break;
15123 case OMP_CLAUSE_MAP:
15124 #if CHECKING_P
15125 /* First check what we're prepared to handle in the following. */
15126 switch (OMP_CLAUSE_MAP_KIND (c))
15127 {
15128 case GOMP_MAP_ALLOC:
15129 case GOMP_MAP_TO:
15130 case GOMP_MAP_FROM:
15131 case GOMP_MAP_TOFROM:
15132 case GOMP_MAP_POINTER:
15133 case GOMP_MAP_TO_PSET:
15134 case GOMP_MAP_FORCE_DEALLOC:
15135 case GOMP_MAP_RELEASE:
15136 case GOMP_MAP_ALWAYS_TO:
15137 case GOMP_MAP_ALWAYS_FROM:
15138 case GOMP_MAP_ALWAYS_TOFROM:
15139 case GOMP_MAP_FIRSTPRIVATE_POINTER:
15140 case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
15141 case GOMP_MAP_STRUCT:
15142 case GOMP_MAP_ALWAYS_POINTER:
15143 break;
15144 case GOMP_MAP_FORCE_ALLOC:
15145 case GOMP_MAP_FORCE_TO:
15146 case GOMP_MAP_FORCE_FROM:
15147 case GOMP_MAP_FORCE_TOFROM:
15148 case GOMP_MAP_FORCE_PRESENT:
15149 case GOMP_MAP_FORCE_DEVICEPTR:
15150 case GOMP_MAP_DEVICE_RESIDENT:
15151 case GOMP_MAP_LINK:
15152 gcc_assert (is_gimple_omp_oacc (stmt));
15153 break;
15154 default:
15155 gcc_unreachable ();
15156 }
15157 #endif
15158 /* FALLTHRU */
15159 case OMP_CLAUSE_TO:
15160 case OMP_CLAUSE_FROM:
15161 oacc_firstprivate:
15162 var = OMP_CLAUSE_DECL (c);
15163 if (!DECL_P (var))
15164 {
15165 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
15166 || (!OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
15167 && (OMP_CLAUSE_MAP_KIND (c)
15168 != GOMP_MAP_FIRSTPRIVATE_POINTER)))
15169 map_cnt++;
15170 continue;
15171 }
15172
15173 if (DECL_SIZE (var)
15174 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
15175 {
15176 tree var2 = DECL_VALUE_EXPR (var);
15177 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
15178 var2 = TREE_OPERAND (var2, 0);
15179 gcc_assert (DECL_P (var2));
15180 var = var2;
15181 }
15182
15183 if (offloaded
15184 && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
15185 && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
15186 || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE))
15187 {
15188 if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
15189 {
15190 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx))
15191 && varpool_node::get_create (var)->offloadable)
15192 continue;
15193
15194 tree type = build_pointer_type (TREE_TYPE (var));
15195 tree new_var = lookup_decl (var, ctx);
15196 x = create_tmp_var_raw (type, get_name (new_var));
15197 gimple_add_tmp_var (x);
15198 x = build_simple_mem_ref (x);
15199 SET_DECL_VALUE_EXPR (new_var, x);
15200 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
15201 }
15202 continue;
15203 }
15204
15205 if (!maybe_lookup_field (var, ctx))
15206 continue;
15207
15208 if (offloaded)
15209 {
15210 x = build_receiver_ref (var, true, ctx);
15211 tree new_var = lookup_decl (var, ctx);
15212
15213 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
15214 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
15215 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
15216 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
15217 x = build_simple_mem_ref (x);
15218 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
15219 {
15220 gcc_assert (is_gimple_omp_oacc (ctx->stmt));
15221 if (is_reference (new_var))
15222 {
15223 /* Create a local object to hold the instance
15224 value. */
15225 tree type = TREE_TYPE (TREE_TYPE (new_var));
15226 const char *id = IDENTIFIER_POINTER (DECL_NAME (new_var));
15227 tree inst = create_tmp_var (type, id);
15228 gimplify_assign (inst, fold_indirect_ref (x), &fplist);
15229 x = build_fold_addr_expr (inst);
15230 }
15231 gimplify_assign (new_var, x, &fplist);
15232 }
15233 else if (DECL_P (new_var))
15234 {
15235 SET_DECL_VALUE_EXPR (new_var, x);
15236 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
15237 }
15238 else
15239 gcc_unreachable ();
15240 }
15241 map_cnt++;
15242 break;
15243
15244 case OMP_CLAUSE_FIRSTPRIVATE:
15245 if (is_oacc_parallel (ctx))
15246 goto oacc_firstprivate;
15247 map_cnt++;
15248 var = OMP_CLAUSE_DECL (c);
15249 if (!is_reference (var)
15250 && !is_gimple_reg_type (TREE_TYPE (var)))
15251 {
15252 tree new_var = lookup_decl (var, ctx);
15253 if (is_variable_sized (var))
15254 {
15255 tree pvar = DECL_VALUE_EXPR (var);
15256 gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
15257 pvar = TREE_OPERAND (pvar, 0);
15258 gcc_assert (DECL_P (pvar));
15259 tree new_pvar = lookup_decl (pvar, ctx);
15260 x = build_fold_indirect_ref (new_pvar);
15261 TREE_THIS_NOTRAP (x) = 1;
15262 }
15263 else
15264 x = build_receiver_ref (var, true, ctx);
15265 SET_DECL_VALUE_EXPR (new_var, x);
15266 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
15267 }
15268 break;
15269
15270 case OMP_CLAUSE_PRIVATE:
15271 if (is_gimple_omp_oacc (ctx->stmt))
15272 break;
15273 var = OMP_CLAUSE_DECL (c);
15274 if (is_variable_sized (var))
15275 {
15276 tree new_var = lookup_decl (var, ctx);
15277 tree pvar = DECL_VALUE_EXPR (var);
15278 gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
15279 pvar = TREE_OPERAND (pvar, 0);
15280 gcc_assert (DECL_P (pvar));
15281 tree new_pvar = lookup_decl (pvar, ctx);
15282 x = build_fold_indirect_ref (new_pvar);
15283 TREE_THIS_NOTRAP (x) = 1;
15284 SET_DECL_VALUE_EXPR (new_var, x);
15285 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
15286 }
15287 break;
15288
15289 case OMP_CLAUSE_USE_DEVICE_PTR:
15290 case OMP_CLAUSE_IS_DEVICE_PTR:
15291 var = OMP_CLAUSE_DECL (c);
15292 map_cnt++;
15293 if (is_variable_sized (var))
15294 {
15295 tree new_var = lookup_decl (var, ctx);
15296 tree pvar = DECL_VALUE_EXPR (var);
15297 gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
15298 pvar = TREE_OPERAND (pvar, 0);
15299 gcc_assert (DECL_P (pvar));
15300 tree new_pvar = lookup_decl (pvar, ctx);
15301 x = build_fold_indirect_ref (new_pvar);
15302 TREE_THIS_NOTRAP (x) = 1;
15303 SET_DECL_VALUE_EXPR (new_var, x);
15304 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
15305 }
15306 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
15307 {
15308 tree new_var = lookup_decl (var, ctx);
15309 tree type = build_pointer_type (TREE_TYPE (var));
15310 x = create_tmp_var_raw (type, get_name (new_var));
15311 gimple_add_tmp_var (x);
15312 x = build_simple_mem_ref (x);
15313 SET_DECL_VALUE_EXPR (new_var, x);
15314 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
15315 }
15316 break;
15317 }
15318
15319 if (offloaded)
15320 {
15321 target_nesting_level++;
15322 lower_omp (&tgt_body, ctx);
15323 target_nesting_level--;
15324 }
15325 else if (data_region)
15326 lower_omp (&tgt_body, ctx);
15327
15328 if (offloaded)
15329 {
15330 /* Declare all the variables created by mapping and the variables
15331 declared in the scope of the target body. */
15332 record_vars_into (ctx->block_vars, child_fn);
15333 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
15334 }
15335
15336 olist = NULL;
15337 ilist = NULL;
15338 if (ctx->record_type)
15339 {
15340 ctx->sender_decl
15341 = create_tmp_var (ctx->record_type, ".omp_data_arr");
15342 DECL_NAMELESS (ctx->sender_decl) = 1;
15343 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
15344 t = make_tree_vec (3);
15345 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
15346 TREE_VEC_ELT (t, 1)
15347 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
15348 ".omp_data_sizes");
15349 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
15350 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
15351 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
15352 tree tkind_type = short_unsigned_type_node;
15353 int talign_shift = 8;
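      /* Each element of the .omp_data_kinds array packs the map kind in
	 its low TALIGN_SHIFT bits and ceil_log2 of the host object's
	 alignment in the bits above, as assembled and asserted below.  */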
15354 TREE_VEC_ELT (t, 2)
15355 = create_tmp_var (build_array_type_nelts (tkind_type, map_cnt),
15356 ".omp_data_kinds");
15357 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
15358 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
15359 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
15360 gimple_omp_target_set_data_arg (stmt, t);
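      /* The three temporaries recorded here (.omp_data_arr,
	 .omp_data_sizes, .omp_data_kinds) presumably become the
	 hostaddrs/sizes/kinds array arguments of the GOMP_target* and
	 GOMP_target_data* runtime calls emitted at expansion time.  */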
15361
15362 vec<constructor_elt, va_gc> *vsize;
15363 vec<constructor_elt, va_gc> *vkind;
15364 vec_alloc (vsize, map_cnt);
15365 vec_alloc (vkind, map_cnt);
15366 unsigned int map_idx = 0;
15367
15368 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
15369 switch (OMP_CLAUSE_CODE (c))
15370 {
15371 tree ovar, nc, s, purpose, var, x, type;
15372 unsigned int talign;
15373
15374 default:
15375 break;
15376
15377 case OMP_CLAUSE_MAP:
15378 case OMP_CLAUSE_TO:
15379 case OMP_CLAUSE_FROM:
15380 oacc_firstprivate_map:
15381 nc = c;
15382 ovar = OMP_CLAUSE_DECL (c);
15383 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
15384 && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
15385 || (OMP_CLAUSE_MAP_KIND (c)
15386 == GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
15387 break;
15388 if (!DECL_P (ovar))
15389 {
15390 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
15391 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
15392 {
15393 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
15394 == get_base_address (ovar));
15395 nc = OMP_CLAUSE_CHAIN (c);
15396 ovar = OMP_CLAUSE_DECL (nc);
15397 }
15398 else
15399 {
15400 tree x = build_sender_ref (ovar, ctx);
15401 tree v
15402 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
15403 gimplify_assign (x, v, &ilist);
15404 nc = NULL_TREE;
15405 }
15406 }
15407 else
15408 {
15409 if (DECL_SIZE (ovar)
15410 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
15411 {
15412 tree ovar2 = DECL_VALUE_EXPR (ovar);
15413 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
15414 ovar2 = TREE_OPERAND (ovar2, 0);
15415 gcc_assert (DECL_P (ovar2));
15416 ovar = ovar2;
15417 }
15418 if (!maybe_lookup_field (ovar, ctx))
15419 continue;
15420 }
15421
15422 talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
15423 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
15424 talign = DECL_ALIGN_UNIT (ovar);
15425 if (nc)
15426 {
15427 var = lookup_decl_in_outer_ctx (ovar, ctx);
15428 x = build_sender_ref (ovar, ctx);
15429
15430 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
15431 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
15432 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
15433 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
15434 {
15435 gcc_assert (offloaded);
15436 tree avar
15437 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)));
15438 mark_addressable (avar);
15439 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
15440 talign = DECL_ALIGN_UNIT (avar);
15441 avar = build_fold_addr_expr (avar);
15442 gimplify_assign (x, avar, &ilist);
15443 }
15444 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
15445 {
15446 gcc_assert (is_gimple_omp_oacc (ctx->stmt));
15447 if (!is_reference (var))
15448 var = build_fold_addr_expr (var);
15449 else
15450 talign = TYPE_ALIGN_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
15451 gimplify_assign (x, var, &ilist);
15452 }
15453 else if (is_gimple_reg (var))
15454 {
15455 gcc_assert (offloaded);
15456 tree avar = create_tmp_var (TREE_TYPE (var));
15457 mark_addressable (avar);
15458 enum gomp_map_kind map_kind = OMP_CLAUSE_MAP_KIND (c);
15459 if (GOMP_MAP_COPY_TO_P (map_kind)
15460 || map_kind == GOMP_MAP_POINTER
15461 || map_kind == GOMP_MAP_TO_PSET
15462 || map_kind == GOMP_MAP_FORCE_DEVICEPTR)
15463 gimplify_assign (avar, var, &ilist);
15464 avar = build_fold_addr_expr (avar);
15465 gimplify_assign (x, avar, &ilist);
15466 if ((GOMP_MAP_COPY_FROM_P (map_kind)
15467 || map_kind == GOMP_MAP_FORCE_DEVICEPTR)
15468 && !TYPE_READONLY (TREE_TYPE (var)))
15469 {
15470 x = unshare_expr (x);
15471 x = build_simple_mem_ref (x);
15472 gimplify_assign (var, x, &olist);
15473 }
15474 }
15475 else
15476 {
15477 var = build_fold_addr_expr (var);
15478 gimplify_assign (x, var, &ilist);
15479 }
15480 }
15481 s = NULL_TREE;
15482 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
15483 {
15484 gcc_checking_assert (is_gimple_omp_oacc (ctx->stmt));
15485 s = TREE_TYPE (ovar);
15486 if (TREE_CODE (s) == REFERENCE_TYPE)
15487 s = TREE_TYPE (s);
15488 s = TYPE_SIZE_UNIT (s);
15489 }
15490 else
15491 s = OMP_CLAUSE_SIZE (c);
15492 if (s == NULL_TREE)
15493 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
15494 s = fold_convert (size_type_node, s);
15495 purpose = size_int (map_idx++);
15496 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
15497 if (TREE_CODE (s) != INTEGER_CST)
15498 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
15499
15500 unsigned HOST_WIDE_INT tkind, tkind_zero;
15501 switch (OMP_CLAUSE_CODE (c))
15502 {
15503 case OMP_CLAUSE_MAP:
15504 tkind = OMP_CLAUSE_MAP_KIND (c);
15505 tkind_zero = tkind;
15506 if (OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c))
15507 switch (tkind)
15508 {
15509 case GOMP_MAP_ALLOC:
15510 case GOMP_MAP_TO:
15511 case GOMP_MAP_FROM:
15512 case GOMP_MAP_TOFROM:
15513 case GOMP_MAP_ALWAYS_TO:
15514 case GOMP_MAP_ALWAYS_FROM:
15515 case GOMP_MAP_ALWAYS_TOFROM:
15516 case GOMP_MAP_RELEASE:
15517 tkind_zero = GOMP_MAP_ZERO_LEN_ARRAY_SECTION;
15518 break;
15519 case GOMP_MAP_DELETE:
15520 tkind_zero = GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION;
15521 default:
15522 break;
15523 }
15524 if (tkind_zero != tkind)
15525 {
15526 if (integer_zerop (s))
15527 tkind = tkind_zero;
15528 else if (integer_nonzerop (s))
15529 tkind_zero = tkind;
15530 }
15531 break;
15532 case OMP_CLAUSE_FIRSTPRIVATE:
15533 gcc_checking_assert (is_gimple_omp_oacc (ctx->stmt));
15534 tkind = GOMP_MAP_TO;
15535 tkind_zero = tkind;
15536 break;
15537 case OMP_CLAUSE_TO:
15538 tkind = GOMP_MAP_TO;
15539 tkind_zero = tkind;
15540 break;
15541 case OMP_CLAUSE_FROM:
15542 tkind = GOMP_MAP_FROM;
15543 tkind_zero = tkind;
15544 break;
15545 default:
15546 gcc_unreachable ();
15547 }
15548 gcc_checking_assert (tkind
15549 < (HOST_WIDE_INT_C (1U) << talign_shift));
15550 gcc_checking_assert (tkind_zero
15551 < (HOST_WIDE_INT_C (1U) << talign_shift));
15552 talign = ceil_log2 (talign);
15553 tkind |= talign << talign_shift;
15554 tkind_zero |= talign << talign_shift;
15555 gcc_checking_assert (tkind
15556 <= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
15557 gcc_checking_assert (tkind_zero
15558 <= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
15559 if (tkind == tkind_zero)
15560 x = build_int_cstu (tkind_type, tkind);
15561 else
15562 {
15563 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 0;
15564 x = build3 (COND_EXPR, tkind_type,
15565 fold_build2 (EQ_EXPR, boolean_type_node,
15566 unshare_expr (s), size_zero_node),
15567 build_int_cstu (tkind_type, tkind_zero),
15568 build_int_cstu (tkind_type, tkind));
15569 }
15570 CONSTRUCTOR_APPEND_ELT (vkind, purpose, x);
15571 if (nc && nc != c)
15572 c = nc;
15573 break;
15574
15575 case OMP_CLAUSE_FIRSTPRIVATE:
15576 if (is_oacc_parallel (ctx))
15577 goto oacc_firstprivate_map;
15578 ovar = OMP_CLAUSE_DECL (c);
15579 if (is_reference (ovar))
15580 talign = TYPE_ALIGN_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
15581 else
15582 talign = DECL_ALIGN_UNIT (ovar);
15583 var = lookup_decl_in_outer_ctx (ovar, ctx);
15584 x = build_sender_ref (ovar, ctx);
15585 tkind = GOMP_MAP_FIRSTPRIVATE;
15586 type = TREE_TYPE (ovar);
15587 if (is_reference (ovar))
15588 type = TREE_TYPE (type);
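	    /* Decide whether the firstprivate value can be passed by value
	       in the pointer slot (GOMP_MAP_FIRSTPRIVATE_INT) instead of by
	       address: only pointers and integral values no wider than a
	       pointer qualify, and the depend case below forces it back off
	       when the value has to live in addressable storage.  */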
15589 bool use_firstprivate_int, force_addr;
15590 use_firstprivate_int = false;
15591 force_addr = false;
15592 if ((INTEGRAL_TYPE_P (type)
15593 && TYPE_PRECISION (type) <= POINTER_SIZE)
15594 || TREE_CODE (type) == POINTER_TYPE)
15595 use_firstprivate_int = true;
15596 if (has_depend)
15597 {
15598 if (is_reference (var))
15599 use_firstprivate_int = false;
15600 else if (is_gimple_reg (var))
15601 {
15602 if (DECL_HAS_VALUE_EXPR_P (var))
15603 {
15604 tree v = get_base_address (var);
15605 if (DECL_P (v) && TREE_ADDRESSABLE (v))
15606 {
15607 use_firstprivate_int = false;
15608 force_addr = true;
15609 }
15610 else
15611 switch (TREE_CODE (v))
15612 {
15613 case INDIRECT_REF:
15614 case MEM_REF:
15615 use_firstprivate_int = false;
15616 force_addr = true;
15617 break;
15618 default:
15619 break;
15620 }
15621 }
15622 }
15623 else
15624 use_firstprivate_int = false;
15625 }
15626 if (use_firstprivate_int)
15627 {
15628 tkind = GOMP_MAP_FIRSTPRIVATE_INT;
15629 tree t = var;
15630 if (is_reference (var))
15631 t = build_simple_mem_ref (var);
15632 if (TREE_CODE (type) != POINTER_TYPE)
15633 t = fold_convert (pointer_sized_int_node, t);
15634 t = fold_convert (TREE_TYPE (x), t);
15635 gimplify_assign (x, t, &ilist);
15636 }
15637 else if (is_reference (var))
15638 gimplify_assign (x, var, &ilist);
15639 else if (!force_addr && is_gimple_reg (var))
15640 {
15641 tree avar = create_tmp_var (TREE_TYPE (var));
15642 mark_addressable (avar);
15643 gimplify_assign (avar, var, &ilist);
15644 avar = build_fold_addr_expr (avar);
15645 gimplify_assign (x, avar, &ilist);
15646 }
15647 else
15648 {
15649 var = build_fold_addr_expr (var);
15650 gimplify_assign (x, var, &ilist);
15651 }
15652 if (tkind == GOMP_MAP_FIRSTPRIVATE_INT)
15653 s = size_int (0);
15654 else if (is_reference (var))
15655 s = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
15656 else
15657 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
15658 s = fold_convert (size_type_node, s);
15659 purpose = size_int (map_idx++);
15660 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
15661 if (TREE_CODE (s) != INTEGER_CST)
15662 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
15663
15664 gcc_checking_assert (tkind
15665 < (HOST_WIDE_INT_C (1U) << talign_shift));
15666 talign = ceil_log2 (talign);
15667 tkind |= talign << talign_shift;
15668 gcc_checking_assert (tkind
15669 <= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
15670 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
15671 build_int_cstu (tkind_type, tkind));
15672 break;
15673
15674 case OMP_CLAUSE_USE_DEVICE_PTR:
15675 case OMP_CLAUSE_IS_DEVICE_PTR:
15676 ovar = OMP_CLAUSE_DECL (c);
15677 var = lookup_decl_in_outer_ctx (ovar, ctx);
15678 x = build_sender_ref (ovar, ctx);
15679 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR)
15680 tkind = GOMP_MAP_USE_DEVICE_PTR;
15681 else
15682 tkind = GOMP_MAP_FIRSTPRIVATE_INT;
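/* Note the asymmetry: use_device_ptr asks the runtime to translate a
   host pointer into its device counterpart (GOMP_MAP_USE_DEVICE_PTR),
   whereas an is_device_ptr pointer is already a device pointer and is
   simply passed through by value (GOMP_MAP_FIRSTPRIVATE_INT).  */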
15683 type = TREE_TYPE (ovar);
15684 if (TREE_CODE (type) == ARRAY_TYPE)
15685 var = build_fold_addr_expr (var);
15686 else
15687 {
15688 if (is_reference (ovar))
15689 {
15690 type = TREE_TYPE (type);
15691 if (TREE_CODE (type) != ARRAY_TYPE)
15692 var = build_simple_mem_ref (var);
15693 var = fold_convert (TREE_TYPE (x), var);
15694 }
15695 }
15696 gimplify_assign (x, var, &ilist);
15697 s = size_int (0);
15698 purpose = size_int (map_idx++);
15699 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
15700 gcc_checking_assert (tkind
15701 < (HOST_WIDE_INT_C (1U) << talign_shift));
15702 gcc_checking_assert (tkind
15703 <= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
15704 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
15705 build_int_cstu (tkind_type, tkind));
15706 break;
15707 }
15708
15709 gcc_assert (map_idx == map_cnt);
15710
15711 DECL_INITIAL (TREE_VEC_ELT (t, 1))
15712 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
15713 DECL_INITIAL (TREE_VEC_ELT (t, 2))
15714 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
15715 for (int i = 1; i <= 2; i++)
15716 if (!TREE_STATIC (TREE_VEC_ELT (t, i)))
15717 {
15718 gimple_seq initlist = NULL;
15719 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
15720 TREE_VEC_ELT (t, i)),
15721 &initlist, true, NULL_TREE);
15722 gimple_seq_add_seq (&ilist, initlist);
15723
15724 tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, i)),
15725 NULL);
15726 TREE_THIS_VOLATILE (clobber) = 1;
15727 gimple_seq_add_stmt (&olist,
15728 gimple_build_assign (TREE_VEC_ELT (t, i),
15729 clobber));
15730 }
15731
15732 tree clobber = build_constructor (ctx->record_type, NULL);
15733 TREE_THIS_VOLATILE (clobber) = 1;
15734 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
15735 clobber));
15736 }
15737
15738 /* Once all the expansions are done, sequence all the different
15739 fragments inside gimple_omp_body. */
15740
15741 new_body = NULL;
15742
15743 if (offloaded
15744 && ctx->record_type)
15745 {
15746 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
15747 /* fixup_child_record_type might have changed receiver_decl's type. */
15748 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
15749 gimple_seq_add_stmt (&new_body,
15750 gimple_build_assign (ctx->receiver_decl, t));
15751 }
15752 gimple_seq_add_seq (&new_body, fplist);
15753
15754 if (offloaded || data_region)
15755 {
15756 tree prev = NULL_TREE;
15757 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
15758 switch (OMP_CLAUSE_CODE (c))
15759 {
15760 tree var, x;
15761 default:
15762 break;
15763 case OMP_CLAUSE_FIRSTPRIVATE:
15764 if (is_gimple_omp_oacc (ctx->stmt))
15765 break;
15766 var = OMP_CLAUSE_DECL (c);
15767 if (is_reference (var)
15768 || is_gimple_reg_type (TREE_TYPE (var)))
15769 {
15770 tree new_var = lookup_decl (var, ctx);
15771 tree type;
15772 type = TREE_TYPE (var);
15773 if (is_reference (var))
15774 type = TREE_TYPE (type);
15775 bool use_firstprivate_int;
15776 use_firstprivate_int = false;
15777 if ((INTEGRAL_TYPE_P (type)
15778 && TYPE_PRECISION (type) <= POINTER_SIZE)
15779 || TREE_CODE (type) == POINTER_TYPE)
15780 use_firstprivate_int = true;
15781 if (has_depend)
15782 {
15783 tree v = lookup_decl_in_outer_ctx (var, ctx);
15784 if (is_reference (v))
15785 use_firstprivate_int = false;
15786 else if (is_gimple_reg (v))
15787 {
15788 if (DECL_HAS_VALUE_EXPR_P (v))
15789 {
15790 v = get_base_address (v);
15791 if (DECL_P (v) && TREE_ADDRESSABLE (v))
15792 use_firstprivate_int = false;
15793 else
15794 switch (TREE_CODE (v))
15795 {
15796 case INDIRECT_REF:
15797 case MEM_REF:
15798 use_firstprivate_int = false;
15799 break;
15800 default:
15801 break;
15802 }
15803 }
15804 }
15805 else
15806 use_firstprivate_int = false;
15807 }
15808 if (use_firstprivate_int)
15809 {
15810 x = build_receiver_ref (var, false, ctx);
15811 if (TREE_CODE (type) != POINTER_TYPE)
15812 x = fold_convert (pointer_sized_int_node, x);
15813 x = fold_convert (type, x);
15814 gimplify_expr (&x, &new_body, NULL, is_gimple_val,
15815 fb_rvalue);
15816 if (is_reference (var))
15817 {
15818 tree v = create_tmp_var_raw (type, get_name (var));
15819 gimple_add_tmp_var (v);
15820 TREE_ADDRESSABLE (v) = 1;
15821 gimple_seq_add_stmt (&new_body,
15822 gimple_build_assign (v, x));
15823 x = build_fold_addr_expr (v);
15824 }
15825 gimple_seq_add_stmt (&new_body,
15826 gimple_build_assign (new_var, x));
15827 }
15828 else
15829 {
15830 x = build_receiver_ref (var, !is_reference (var), ctx);
15831 gimplify_expr (&x, &new_body, NULL, is_gimple_val,
15832 fb_rvalue);
15833 gimple_seq_add_stmt (&new_body,
15834 gimple_build_assign (new_var, x));
15835 }
15836 }
15837 else if (is_variable_sized (var))
15838 {
15839 tree pvar = DECL_VALUE_EXPR (var);
15840 gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
15841 pvar = TREE_OPERAND (pvar, 0);
15842 gcc_assert (DECL_P (pvar));
15843 tree new_var = lookup_decl (pvar, ctx);
15844 x = build_receiver_ref (var, false, ctx);
15845 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
15846 gimple_seq_add_stmt (&new_body,
15847 gimple_build_assign (new_var, x));
15848 }
15849 break;
15850 case OMP_CLAUSE_PRIVATE:
15851 if (is_gimple_omp_oacc (ctx->stmt))
15852 break;
15853 var = OMP_CLAUSE_DECL (c);
15854 if (is_reference (var))
15855 {
15856 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
15857 tree new_var = lookup_decl (var, ctx);
15858 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
15859 if (TREE_CONSTANT (x))
15860 {
15861 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
15862 get_name (var));
15863 gimple_add_tmp_var (x);
15864 TREE_ADDRESSABLE (x) = 1;
15865 x = build_fold_addr_expr_loc (clause_loc, x);
15866 }
15867 else
15868 {
15869 tree atmp
15870 = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
15871 tree rtype = TREE_TYPE (TREE_TYPE (new_var));
15872 tree al = size_int (TYPE_ALIGN (rtype));
15873 x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
15874 }
15875
15876 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
15877 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
15878 gimple_seq_add_stmt (&new_body,
15879 gimple_build_assign (new_var, x));
15880 }
15881 break;
15882 case OMP_CLAUSE_USE_DEVICE_PTR:
15883 case OMP_CLAUSE_IS_DEVICE_PTR:
15884 var = OMP_CLAUSE_DECL (c);
15885 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR)
15886 x = build_sender_ref (var, ctx);
15887 else
15888 x = build_receiver_ref (var, false, ctx);
15889 if (is_variable_sized (var))
15890 {
15891 tree pvar = DECL_VALUE_EXPR (var);
15892 gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
15893 pvar = TREE_OPERAND (pvar, 0);
15894 gcc_assert (DECL_P (pvar));
15895 tree new_var = lookup_decl (pvar, ctx);
15896 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
15897 gimple_seq_add_stmt (&new_body,
15898 gimple_build_assign (new_var, x));
15899 }
15900 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
15901 {
15902 tree new_var = lookup_decl (var, ctx);
15903 new_var = DECL_VALUE_EXPR (new_var);
15904 gcc_assert (TREE_CODE (new_var) == MEM_REF);
15905 new_var = TREE_OPERAND (new_var, 0);
15906 gcc_assert (DECL_P (new_var));
15907 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
15908 gimple_seq_add_stmt (&new_body,
15909 gimple_build_assign (new_var, x));
15910 }
15911 else
15912 {
15913 tree type = TREE_TYPE (var);
15914 tree new_var = lookup_decl (var, ctx);
15915 if (is_reference (var))
15916 {
15917 type = TREE_TYPE (type);
15918 if (TREE_CODE (type) != ARRAY_TYPE)
15919 {
15920 tree v = create_tmp_var_raw (type, get_name (var));
15921 gimple_add_tmp_var (v);
15922 TREE_ADDRESSABLE (v) = 1;
15923 x = fold_convert (type, x);
15924 gimplify_expr (&x, &new_body, NULL, is_gimple_val,
15925 fb_rvalue);
15926 gimple_seq_add_stmt (&new_body,
15927 gimple_build_assign (v, x));
15928 x = build_fold_addr_expr (v);
15929 }
15930 }
15931 x = fold_convert (TREE_TYPE (new_var), x);
15932 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
15933 gimple_seq_add_stmt (&new_body,
15934 gimple_build_assign (new_var, x));
15935 }
15936 break;
15937 }
15938 /* Handle GOMP_MAP_FIRSTPRIVATE_{POINTER,REFERENCE} in a second pass,
15939 so that the firstprivate vars that hold OMP_CLAUSE_SIZE, if any are
15940 needed, are already handled. */
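/* A sketch of this second pass: for "int *p" mapped via
   "#pragma omp target map(tofrom: p[0:n])", the array-section map is
   accompanied by a GOMP_MAP_FIRSTPRIVATE_POINTER clause for p itself;
   below, the received address of the section, adjusted back by the
   bias recorded in OMP_CLAUSE_SIZE, initializes the private copy
   of p.  */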
15941 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
15942 switch (OMP_CLAUSE_CODE (c))
15943 {
15944 tree var;
15945 default:
15946 break;
15947 case OMP_CLAUSE_MAP:
15948 if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
15949 || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
15950 {
15951 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
15952 HOST_WIDE_INT offset = 0;
15953 gcc_assert (prev);
15954 var = OMP_CLAUSE_DECL (c);
15955 if (DECL_P (var)
15956 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
15957 && is_global_var (maybe_lookup_decl_in_outer_ctx (var,
15958 ctx))
15959 && varpool_node::get_create (var)->offloadable)
15960 break;
15961 if (TREE_CODE (var) == INDIRECT_REF
15962 && TREE_CODE (TREE_OPERAND (var, 0)) == COMPONENT_REF)
15963 var = TREE_OPERAND (var, 0);
15964 if (TREE_CODE (var) == COMPONENT_REF)
15965 {
15966 var = get_addr_base_and_unit_offset (var, &offset);
15967 gcc_assert (var != NULL_TREE && DECL_P (var));
15968 }
15969 else if (DECL_SIZE (var)
15970 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
15971 {
15972 tree var2 = DECL_VALUE_EXPR (var);
15973 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
15974 var2 = TREE_OPERAND (var2, 0);
15975 gcc_assert (DECL_P (var2));
15976 var = var2;
15977 }
15978 tree new_var = lookup_decl (var, ctx), x;
15979 tree type = TREE_TYPE (new_var);
15980 bool is_ref;
15981 if (TREE_CODE (OMP_CLAUSE_DECL (c)) == INDIRECT_REF
15982 && (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0))
15983 == COMPONENT_REF))
15984 {
15985 type = TREE_TYPE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0));
15986 is_ref = true;
15987 new_var = build2 (MEM_REF, type,
15988 build_fold_addr_expr (new_var),
15989 build_int_cst (build_pointer_type (type),
15990 offset));
15991 }
15992 else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF)
15993 {
15994 type = TREE_TYPE (OMP_CLAUSE_DECL (c));
15995 is_ref = TREE_CODE (type) == REFERENCE_TYPE;
15996 new_var = build2 (MEM_REF, type,
15997 build_fold_addr_expr (new_var),
15998 build_int_cst (build_pointer_type (type),
15999 offset));
16000 }
16001 else
16002 is_ref = is_reference (var);
16003 if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
16004 is_ref = false;
16005 bool ref_to_array = false;
16006 if (is_ref)
16007 {
16008 type = TREE_TYPE (type);
16009 if (TREE_CODE (type) == ARRAY_TYPE)
16010 {
16011 type = build_pointer_type (type);
16012 ref_to_array = true;
16013 }
16014 }
16015 else if (TREE_CODE (type) == ARRAY_TYPE)
16016 {
16017 tree decl2 = DECL_VALUE_EXPR (new_var);
16018 gcc_assert (TREE_CODE (decl2) == MEM_REF);
16019 decl2 = TREE_OPERAND (decl2, 0);
16020 gcc_assert (DECL_P (decl2));
16021 new_var = decl2;
16022 type = TREE_TYPE (new_var);
16023 }
16024 x = build_receiver_ref (OMP_CLAUSE_DECL (prev), false, ctx);
16025 x = fold_convert_loc (clause_loc, type, x);
16026 if (!integer_zerop (OMP_CLAUSE_SIZE (c)))
16027 {
16028 tree bias = OMP_CLAUSE_SIZE (c);
16029 if (DECL_P (bias))
16030 bias = lookup_decl (bias, ctx);
16031 bias = fold_convert_loc (clause_loc, sizetype, bias);
16032 bias = fold_build1_loc (clause_loc, NEGATE_EXPR, sizetype,
16033 bias);
16034 x = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
16035 TREE_TYPE (x), x, bias);
16036 }
16037 if (ref_to_array)
16038 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
16039 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
16040 if (is_ref && !ref_to_array)
16041 {
16042 tree t = create_tmp_var_raw (type, get_name (var));
16043 gimple_add_tmp_var (t);
16044 TREE_ADDRESSABLE (t) = 1;
16045 gimple_seq_add_stmt (&new_body,
16046 gimple_build_assign (t, x));
16047 x = build_fold_addr_expr_loc (clause_loc, t);
16048 }
16049 gimple_seq_add_stmt (&new_body,
16050 gimple_build_assign (new_var, x));
16051 prev = NULL_TREE;
16052 }
16053 else if (OMP_CLAUSE_CHAIN (c)
16054 && OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (c))
16055 == OMP_CLAUSE_MAP
16056 && (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
16057 == GOMP_MAP_FIRSTPRIVATE_POINTER
16058 || (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
16059 == GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
16060 prev = c;
16061 break;
16062 case OMP_CLAUSE_PRIVATE:
16063 var = OMP_CLAUSE_DECL (c);
16064 if (is_variable_sized (var))
16065 {
16066 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
16067 tree new_var = lookup_decl (var, ctx);
16068 tree pvar = DECL_VALUE_EXPR (var);
16069 gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
16070 pvar = TREE_OPERAND (pvar, 0);
16071 gcc_assert (DECL_P (pvar));
16072 tree new_pvar = lookup_decl (pvar, ctx);
16073 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
16074 tree al = size_int (DECL_ALIGN (var));
16075 tree x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
16076 x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
16077 x = fold_convert_loc (clause_loc, TREE_TYPE (new_pvar), x);
16078 gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
16079 gimple_seq_add_stmt (&new_body,
16080 gimple_build_assign (new_pvar, x));
16081 }
16082 break;
16083 }
16084
16085 gimple_seq fork_seq = NULL;
16086 gimple_seq join_seq = NULL;
16087
16088 if (is_oacc_parallel (ctx))
16089 {
16090 /* If there are reductions on the offloaded region itself, treat
16091 them as a dummy GANG loop. */
16092 tree level = build_int_cst (integer_type_node, GOMP_DIM_GANG);
16093
16094 lower_oacc_reductions (gimple_location (ctx->stmt), clauses, level,
16095 false, NULL, NULL, &fork_seq, &join_seq, ctx);
16096 }
16097
16098 gimple_seq_add_seq (&new_body, fork_seq);
16099 gimple_seq_add_seq (&new_body, tgt_body);
16100 gimple_seq_add_seq (&new_body, join_seq);
16101
16102 if (offloaded)
16103 new_body = maybe_catch_exception (new_body);
16104
16105 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
16106 gimple_omp_set_body (stmt, new_body);
16107 }
16108
16109 bind = gimple_build_bind (NULL, NULL,
16110 tgt_bind ? gimple_bind_block (tgt_bind)
16111 : NULL_TREE);
16112 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
16113 gimple_bind_add_seq (bind, ilist);
16114 gimple_bind_add_stmt (bind, stmt);
16115 gimple_bind_add_seq (bind, olist);
16116
16117 pop_gimplify_context (NULL);
16118
16119 if (dep_bind)
16120 {
16121 gimple_bind_add_seq (dep_bind, dep_ilist);
16122 gimple_bind_add_stmt (dep_bind, bind);
16123 gimple_bind_add_seq (dep_bind, dep_olist);
16124 pop_gimplify_context (dep_bind);
16125 }
16126 }
16127
16128 /* Lower code for an OpenMP teams directive. */
16129
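/* An illustrative sketch (the exact GIMPLE spelling varies): a
   directive such as

     #pragma omp teams num_teams(4) thread_limit(8)
       body;

   is lowered into a GIMPLE_BIND containing roughly

     <gimplified clause setup and privatization>
     #pragma omp teams ...
     GOMP_teams (4, 8);
     body;
     <reduction and destructor epilogues>
     GIMPLE_OMP_RETURN

   where an absent clause is passed to GOMP_teams as 0 so that the
   runtime picks a default.  */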
16130 static void
16131 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
16132 {
16133 gomp_teams *teams_stmt = as_a <gomp_teams *> (gsi_stmt (*gsi_p));
16134 push_gimplify_context ();
16135
16136 tree block = make_node (BLOCK);
16137 gbind *bind = gimple_build_bind (NULL, NULL, block);
16138 gsi_replace (gsi_p, bind, true);
16139 gimple_seq bind_body = NULL;
16140 gimple_seq dlist = NULL;
16141 gimple_seq olist = NULL;
16142
16143 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
16144 OMP_CLAUSE_NUM_TEAMS);
16145 if (num_teams == NULL_TREE)
16146 num_teams = build_int_cst (unsigned_type_node, 0);
16147 else
16148 {
16149 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
16150 num_teams = fold_convert (unsigned_type_node, num_teams);
16151 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
16152 }
16153 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
16154 OMP_CLAUSE_THREAD_LIMIT);
16155 if (thread_limit == NULL_TREE)
16156 thread_limit = build_int_cst (unsigned_type_node, 0);
16157 else
16158 {
16159 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
16160 thread_limit = fold_convert (unsigned_type_node, thread_limit);
16161 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
16162 fb_rvalue);
16163 }
16164
16165 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
16166 &bind_body, &dlist, ctx, NULL);
16167 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
16168 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
16169 gimple_seq_add_stmt (&bind_body, teams_stmt);
16170
16171 location_t loc = gimple_location (teams_stmt);
16172 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
16173 gimple *call = gimple_build_call (decl, 2, num_teams, thread_limit);
16174 gimple_set_location (call, loc);
16175 gimple_seq_add_stmt (&bind_body, call);
16176
16177 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
16178 gimple_omp_set_body (teams_stmt, NULL);
16179 gimple_seq_add_seq (&bind_body, olist);
16180 gimple_seq_add_seq (&bind_body, dlist);
16181 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
16182 gimple_bind_set_body (bind, bind_body);
16183
16184 pop_gimplify_context (bind);
16185
16186 gimple_bind_append_vars (bind, ctx->block_vars);
16187 BLOCK_VARS (block) = ctx->block_vars;
16188 if (BLOCK_VARS (block))
16189 TREE_USED (block) = 1;
16190 }
16191
16192
16193 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
16194 regimplified. If DATA is non-NULL, lower_omp_1 is being invoked
16195 outside of an OMP context, but with task_shared_vars set. */
16196
16197 static tree
16198 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
16199 void *data)
16200 {
16201 tree t = *tp;
16202
16203 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
16204 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
16205 return t;
16206
16207 if (task_shared_vars
16208 && DECL_P (t)
16209 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
16210 return t;
16211
16212 /* If a global variable has been privatized, TREE_CONSTANT on
16213 ADDR_EXPR might be wrong. */
16214 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
16215 recompute_tree_invariant_for_addr_expr (t);
16216
16217 *walk_subtrees = !IS_TYPE_OR_DECL_P (t);
16218 return NULL_TREE;
16219 }
16220
16221 /* Data to be communicated between lower_omp_regimplify_operands and
16222 lower_omp_regimplify_operands_p. */
16223
16224 struct lower_omp_regimplify_operands_data
16225 {
16226 omp_context *ctx;
16227 vec<tree> *decls;
16228 };
16229
16230 /* Helper function for lower_omp_regimplify_operands. Find
16231 omp_member_access_dummy_var vars and temporarily adjust their
16232 DECL_VALUE_EXPRs if needed. */
16233
16234 static tree
16235 lower_omp_regimplify_operands_p (tree *tp, int *walk_subtrees,
16236 void *data)
16237 {
16238 tree t = omp_member_access_dummy_var (*tp);
16239 if (t)
16240 {
16241 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
16242 lower_omp_regimplify_operands_data *ldata
16243 = (lower_omp_regimplify_operands_data *) wi->info;
16244 tree o = maybe_lookup_decl (t, ldata->ctx);
16245 if (o != t)
16246 {
16247 ldata->decls->safe_push (DECL_VALUE_EXPR (*tp));
16248 ldata->decls->safe_push (*tp);
16249 tree v = unshare_and_remap (DECL_VALUE_EXPR (*tp), t, o);
16250 SET_DECL_VALUE_EXPR (*tp, v);
16251 }
16252 }
16253 *walk_subtrees = !IS_TYPE_OR_DECL_P (*tp);
16254 return NULL_TREE;
16255 }
16256
16257 /* Wrapper around gimple_regimplify_operands that adjusts DECL_VALUE_EXPRs
16258 of omp_member_access_dummy_var vars during regimplification. */
16259
16260 static void
16261 lower_omp_regimplify_operands (omp_context *ctx, gimple *stmt,
16262 gimple_stmt_iterator *gsi_p)
16263 {
16264 auto_vec<tree, 10> decls;
16265 if (ctx)
16266 {
16267 struct walk_stmt_info wi;
16268 memset (&wi, '\0', sizeof (wi));
16269 struct lower_omp_regimplify_operands_data data;
16270 data.ctx = ctx;
16271 data.decls = &decls;
16272 wi.info = &data;
16273 walk_gimple_op (stmt, lower_omp_regimplify_operands_p, &wi);
16274 }
16275 gimple_regimplify_operands (stmt, gsi_p);
16276 while (!decls.is_empty ())
16277 {
16278 tree t = decls.pop ();
16279 tree v = decls.pop ();
16280 SET_DECL_VALUE_EXPR (t, v);
16281 }
16282 }
16283
16284 static void
16285 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
16286 {
16287 gimple *stmt = gsi_stmt (*gsi_p);
16288 struct walk_stmt_info wi;
16289 gcall *call_stmt;
16290
16291 if (gimple_has_location (stmt))
16292 input_location = gimple_location (stmt);
16293
16294 if (task_shared_vars)
16295 memset (&wi, '\0', sizeof (wi));
16296
16297 /* If we have issued syntax errors, avoid doing any heavy lifting.
16298 Just replace the OMP directives with a NOP to avoid
16299 confusing RTL expansion. */
16300 if (seen_error () && is_gimple_omp (stmt))
16301 {
16302 gsi_replace (gsi_p, gimple_build_nop (), true);
16303 return;
16304 }
16305
16306 switch (gimple_code (stmt))
16307 {
16308 case GIMPLE_COND:
16309 {
16310 gcond *cond_stmt = as_a <gcond *> (stmt);
16311 if ((ctx || task_shared_vars)
16312 && (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
16313 lower_omp_regimplify_p,
16314 ctx ? NULL : &wi, NULL)
16315 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
16316 lower_omp_regimplify_p,
16317 ctx ? NULL : &wi, NULL)))
16318 lower_omp_regimplify_operands (ctx, cond_stmt, gsi_p);
16319 }
16320 break;
16321 case GIMPLE_CATCH:
16322 lower_omp (gimple_catch_handler_ptr (as_a <gcatch *> (stmt)), ctx);
16323 break;
16324 case GIMPLE_EH_FILTER:
16325 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
16326 break;
16327 case GIMPLE_TRY:
16328 lower_omp (gimple_try_eval_ptr (stmt), ctx);
16329 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
16330 break;
16331 case GIMPLE_TRANSACTION:
16332 lower_omp (gimple_transaction_body_ptr (
16333 as_a <gtransaction *> (stmt)),
16334 ctx);
16335 break;
16336 case GIMPLE_BIND:
16337 lower_omp (gimple_bind_body_ptr (as_a <gbind *> (stmt)), ctx);
16338 break;
16339 case GIMPLE_OMP_PARALLEL:
16340 case GIMPLE_OMP_TASK:
16341 ctx = maybe_lookup_ctx (stmt);
16342 gcc_assert (ctx);
16343 if (ctx->cancellable)
16344 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
16345 lower_omp_taskreg (gsi_p, ctx);
16346 break;
16347 case GIMPLE_OMP_FOR:
16348 ctx = maybe_lookup_ctx (stmt);
16349 gcc_assert (ctx);
16350 if (ctx->cancellable)
16351 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
16352 lower_omp_for (gsi_p, ctx);
16353 break;
16354 case GIMPLE_OMP_SECTIONS:
16355 ctx = maybe_lookup_ctx (stmt);
16356 gcc_assert (ctx);
16357 if (ctx->cancellable)
16358 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
16359 lower_omp_sections (gsi_p, ctx);
16360 break;
16361 case GIMPLE_OMP_SINGLE:
16362 ctx = maybe_lookup_ctx (stmt);
16363 gcc_assert (ctx);
16364 lower_omp_single (gsi_p, ctx);
16365 break;
16366 case GIMPLE_OMP_MASTER:
16367 ctx = maybe_lookup_ctx (stmt);
16368 gcc_assert (ctx);
16369 lower_omp_master (gsi_p, ctx);
16370 break;
16371 case GIMPLE_OMP_TASKGROUP:
16372 ctx = maybe_lookup_ctx (stmt);
16373 gcc_assert (ctx);
16374 lower_omp_taskgroup (gsi_p, ctx);
16375 break;
16376 case GIMPLE_OMP_ORDERED:
16377 ctx = maybe_lookup_ctx (stmt);
16378 gcc_assert (ctx);
16379 lower_omp_ordered (gsi_p, ctx);
16380 break;
16381 case GIMPLE_OMP_CRITICAL:
16382 ctx = maybe_lookup_ctx (stmt);
16383 gcc_assert (ctx);
16384 lower_omp_critical (gsi_p, ctx);
16385 break;
16386 case GIMPLE_OMP_ATOMIC_LOAD:
16387 if ((ctx || task_shared_vars)
16388 && walk_tree (gimple_omp_atomic_load_rhs_ptr (
16389 as_a <gomp_atomic_load *> (stmt)),
16390 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
16391 lower_omp_regimplify_operands (ctx, stmt, gsi_p);
16392 break;
16393 case GIMPLE_OMP_TARGET:
16394 ctx = maybe_lookup_ctx (stmt);
16395 gcc_assert (ctx);
16396 lower_omp_target (gsi_p, ctx);
16397 break;
16398 case GIMPLE_OMP_TEAMS:
16399 ctx = maybe_lookup_ctx (stmt);
16400 gcc_assert (ctx);
16401 lower_omp_teams (gsi_p, ctx);
16402 break;
16403 case GIMPLE_CALL:
16404 tree fndecl;
16405 call_stmt = as_a <gcall *> (stmt);
16406 fndecl = gimple_call_fndecl (call_stmt);
16407 if (fndecl
16408 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
16409 switch (DECL_FUNCTION_CODE (fndecl))
16410 {
16411 case BUILT_IN_GOMP_BARRIER:
16412 if (ctx == NULL)
16413 break;
16414 /* FALLTHRU */
16415 case BUILT_IN_GOMP_CANCEL:
16416 case BUILT_IN_GOMP_CANCELLATION_POINT:
16417 omp_context *cctx;
16418 cctx = ctx;
16419 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
16420 cctx = cctx->outer;
16421 gcc_assert (gimple_call_lhs (call_stmt) == NULL_TREE);
16422 if (!cctx->cancellable)
16423 {
16424 if (DECL_FUNCTION_CODE (fndecl)
16425 == BUILT_IN_GOMP_CANCELLATION_POINT)
16426 {
16427 stmt = gimple_build_nop ();
16428 gsi_replace (gsi_p, stmt, false);
16429 }
16430 break;
16431 }
16432 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
16433 {
16434 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
16435 gimple_call_set_fndecl (call_stmt, fndecl);
16436 gimple_call_set_fntype (call_stmt, TREE_TYPE (fndecl));
16437 }
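/* Sketch of the rewrite inside a cancellable region:

     GOMP_barrier ();

   becomes

     lhs = GOMP_barrier_cancel ();
     if (lhs != 0) goto <cancel_label>; else goto <fallthru_label>;
     <fallthru_label>:

   so a cancellation observed at the barrier branches to the region's
   cancellation label.  */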
16438 tree lhs;
16439 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)));
16440 gimple_call_set_lhs (call_stmt, lhs);
16441 tree fallthru_label;
16442 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
16443 gimple *g;
16444 g = gimple_build_label (fallthru_label);
16445 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
16446 g = gimple_build_cond (NE_EXPR, lhs,
16447 fold_convert (TREE_TYPE (lhs),
16448 boolean_false_node),
16449 cctx->cancel_label, fallthru_label);
16450 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
16451 break;
16452 default:
16453 break;
16454 }
16455 /* FALLTHRU */
16456 default:
16457 if ((ctx || task_shared_vars)
16458 && walk_gimple_op (stmt, lower_omp_regimplify_p,
16459 ctx ? NULL : &wi))
16460 {
16461 /* Just remove clobbers, this should happen only if we have
16462 "privatized" local addressable variables in SIMD regions,
16463 the clobber isn't needed in that case and gimplifying address
16464 of the ARRAY_REF into a pointer and creating MEM_REF based
16465 clobber would create worse code than we get with the clobber
16466 dropped. */
16467 if (gimple_clobber_p (stmt))
16468 {
16469 gsi_replace (gsi_p, gimple_build_nop (), true);
16470 break;
16471 }
16472 lower_omp_regimplify_operands (ctx, stmt, gsi_p);
16473 }
16474 break;
16475 }
16476 }
16477
16478 static void
16479 lower_omp (gimple_seq *body, omp_context *ctx)
16480 {
16481 location_t saved_location = input_location;
16482 gimple_stmt_iterator gsi;
16483 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
16484 lower_omp_1 (&gsi, ctx);
16485 /* During gimplification, we haven't folded statements inside offloading
16486 or taskreg regions (gimplify.c:maybe_fold_stmt); do that now. */
16487 if (target_nesting_level || taskreg_nesting_level)
16488 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
16489 fold_stmt (&gsi);
16490 input_location = saved_location;
16491 }
16492 \f
16493 /* Main entry point. */
16494
16495 static unsigned int
16496 execute_lower_omp (void)
16497 {
16498 gimple_seq body;
16499 int i;
16500 omp_context *ctx;
16501
16502 /* This pass always runs, to provide PROP_gimple_lomp.
16503 But often, there is nothing to do. */
16504 if (flag_cilkplus == 0 && flag_openacc == 0 && flag_openmp == 0
16505 && flag_openmp_simd == 0)
16506 return 0;
16507
16508 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
16509 delete_omp_context);
16510
16511 body = gimple_body (current_function_decl);
16512 scan_omp (&body, NULL);
16513 gcc_assert (taskreg_nesting_level == 0);
16514 FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
16515 finish_taskreg_scan (ctx);
16516 taskreg_contexts.release ();
16517
16518 if (all_contexts->root)
16519 {
16520 if (task_shared_vars)
16521 push_gimplify_context ();
16522 lower_omp (&body, NULL);
16523 if (task_shared_vars)
16524 pop_gimplify_context (NULL);
16525 }
16526
16527 if (all_contexts)
16528 {
16529 splay_tree_delete (all_contexts);
16530 all_contexts = NULL;
16531 }
16532 BITMAP_FREE (task_shared_vars);
16533 return 0;
16534 }
16535
16536 namespace {
16537
16538 const pass_data pass_data_lower_omp =
16539 {
16540 GIMPLE_PASS, /* type */
16541 "omplower", /* name */
16542 OPTGROUP_NONE, /* optinfo_flags */
16543 TV_NONE, /* tv_id */
16544 PROP_gimple_any, /* properties_required */
16545 PROP_gimple_lomp, /* properties_provided */
16546 0, /* properties_destroyed */
16547 0, /* todo_flags_start */
16548 0, /* todo_flags_finish */
16549 };
16550
16551 class pass_lower_omp : public gimple_opt_pass
16552 {
16553 public:
16554 pass_lower_omp (gcc::context *ctxt)
16555 : gimple_opt_pass (pass_data_lower_omp, ctxt)
16556 {}
16557
16558 /* opt_pass methods: */
16559 virtual unsigned int execute (function *) { return execute_lower_omp (); }
16560
16561 }; // class pass_lower_omp
16562
16563 } // anon namespace
16564
16565 gimple_opt_pass *
16566 make_pass_lower_omp (gcc::context *ctxt)
16567 {
16568 return new pass_lower_omp (ctxt);
16569 }
16570 \f
16571 /* The following is a utility to diagnose structured block violations.
16572 It is not part of the "omplower" pass, as that's invoked too late. It
16573 should be invoked by the respective front ends after gimplification. */
16574
16575 static splay_tree all_labels;
16576
16577 /* Check for mismatched contexts and generate an error if needed. Return
16578 true if an error is detected. */
16579
16580 static bool
16581 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
16582 gimple *branch_ctx, gimple *label_ctx)
16583 {
16584 gcc_checking_assert (!branch_ctx || is_gimple_omp (branch_ctx));
16585 gcc_checking_assert (!label_ctx || is_gimple_omp (label_ctx));
16586
16587 if (label_ctx == branch_ctx)
16588 return false;
16589
16590 const char* kind = NULL;
16591
16592 if (flag_cilkplus)
16593 {
16594 if ((branch_ctx
16595 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
16596 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
16597 || (label_ctx
16598 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
16599 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
16600 kind = "Cilk Plus";
16601 }
16602 if (flag_openacc)
16603 {
16604 if ((branch_ctx && is_gimple_omp_oacc (branch_ctx))
16605 || (label_ctx && is_gimple_omp_oacc (label_ctx)))
16606 {
16607 gcc_checking_assert (kind == NULL);
16608 kind = "OpenACC";
16609 }
16610 }
16611 if (kind == NULL)
16612 {
16613 gcc_checking_assert (flag_openmp);
16614 kind = "OpenMP";
16615 }
16616
16617 /*
16618 Previously we kept track of the label's entire context in diagnose_sb_[12]
16619 so we could traverse it and issue a correct "exit" or "enter" error
16620 message upon a structured block violation.
16621
16622 We built the context by building a list with tree_cons'ing, but there is
16623 no easy counterpart in gimple tuples. It seems like far too much work
16624 for issuing exit/enter error messages. If someone really misses the
16625 distinct error message... patches welcome.
16626 */
16627
16628 #if 0
16629 /* Try to avoid confusing the user by producing an error message
16630 with correct "exit" or "enter" verbiage. We prefer "exit"
16631 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
16632 if (branch_ctx == NULL)
16633 exit_p = false;
16634 else
16635 {
16636 while (label_ctx)
16637 {
16638 if (TREE_VALUE (label_ctx) == branch_ctx)
16639 {
16640 exit_p = false;
16641 break;
16642 }
16643 label_ctx = TREE_CHAIN (label_ctx);
16644 }
16645 }
16646
16647 if (exit_p)
16648 error ("invalid exit from %s structured block", kind);
16649 else
16650 error ("invalid entry to %s structured block", kind);
16651 #endif
16652
16653 /* If it's obvious we have an invalid entry, be specific about the error. */
16654 if (branch_ctx == NULL)
16655 error ("invalid entry to %s structured block", kind);
16656 else
16657 {
16658 /* Otherwise, be vague and lazy, but efficient. */
16659 error ("invalid branch to/from %s structured block", kind);
16660 }
16661
16662 gsi_replace (gsi_p, gimple_build_nop (), false);
16663 return true;
16664 }
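/* An illustrative example of what diagnose_sb_0 rejects:

     #pragma omp parallel
     {
       goto lab;
     }
   lab:;

   The goto's context is the GIMPLE_OMP_PARALLEL while "lab" was
   recorded with a NULL context, so we emit "invalid branch to/from
   OpenMP structured block" and replace the branch with a nop.  */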
16665
16666 /* Pass 1: Create a minimal tree of structured blocks, and record
16667 where each label is found. */
16668
16669 static tree
16670 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
16671 struct walk_stmt_info *wi)
16672 {
16673 gimple *context = (gimple *) wi->info;
16674 gimple *inner_context;
16675 gimple *stmt = gsi_stmt (*gsi_p);
16676
16677 *handled_ops_p = true;
16678
16679 switch (gimple_code (stmt))
16680 {
16681 WALK_SUBSTMTS;
16682
16683 case GIMPLE_OMP_PARALLEL:
16684 case GIMPLE_OMP_TASK:
16685 case GIMPLE_OMP_SECTIONS:
16686 case GIMPLE_OMP_SINGLE:
16687 case GIMPLE_OMP_SECTION:
16688 case GIMPLE_OMP_MASTER:
16689 case GIMPLE_OMP_ORDERED:
16690 case GIMPLE_OMP_CRITICAL:
16691 case GIMPLE_OMP_TARGET:
16692 case GIMPLE_OMP_TEAMS:
16693 case GIMPLE_OMP_TASKGROUP:
16694 /* The minimal context here is just the current OMP construct. */
16695 inner_context = stmt;
16696 wi->info = inner_context;
16697 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
16698 wi->info = context;
16699 break;
16700
16701 case GIMPLE_OMP_FOR:
16702 inner_context = stmt;
16703 wi->info = inner_context;
16704 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
16705 walk them. */
16706 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
16707 diagnose_sb_1, NULL, wi);
16708 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
16709 wi->info = context;
16710 break;
16711
16712 case GIMPLE_LABEL:
16713 splay_tree_insert (all_labels,
16714 (splay_tree_key) gimple_label_label (
16715 as_a <glabel *> (stmt)),
16716 (splay_tree_value) context);
16717 break;
16718
16719 default:
16720 break;
16721 }
16722
16723 return NULL_TREE;
16724 }
16725
16726 /* Pass 2: Check each branch and see if its context differs from
16727 the destination label's context. */
16728
16729 static tree
16730 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
16731 struct walk_stmt_info *wi)
16732 {
16733 gimple *context = (gimple *) wi->info;
16734 splay_tree_node n;
16735 gimple *stmt = gsi_stmt (*gsi_p);
16736
16737 *handled_ops_p = true;
16738
16739 switch (gimple_code (stmt))
16740 {
16741 WALK_SUBSTMTS;
16742
16743 case GIMPLE_OMP_PARALLEL:
16744 case GIMPLE_OMP_TASK:
16745 case GIMPLE_OMP_SECTIONS:
16746 case GIMPLE_OMP_SINGLE:
16747 case GIMPLE_OMP_SECTION:
16748 case GIMPLE_OMP_MASTER:
16749 case GIMPLE_OMP_ORDERED:
16750 case GIMPLE_OMP_CRITICAL:
16751 case GIMPLE_OMP_TARGET:
16752 case GIMPLE_OMP_TEAMS:
16753 case GIMPLE_OMP_TASKGROUP:
16754 wi->info = stmt;
16755 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
16756 wi->info = context;
16757 break;
16758
16759 case GIMPLE_OMP_FOR:
16760 wi->info = stmt;
16761 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
16762 walk them. */
16763 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
16764 diagnose_sb_2, NULL, wi);
16765 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
16766 wi->info = context;
16767 break;
16768
16769 case GIMPLE_COND:
16770 {
16771 gcond *cond_stmt = as_a <gcond *> (stmt);
16772 tree lab = gimple_cond_true_label (cond_stmt);
16773 if (lab)
16774 {
16775 n = splay_tree_lookup (all_labels,
16776 (splay_tree_key) lab);
16777 diagnose_sb_0 (gsi_p, context,
16778 n ? (gimple *) n->value : NULL);
16779 }
16780 lab = gimple_cond_false_label (cond_stmt);
16781 if (lab)
16782 {
16783 n = splay_tree_lookup (all_labels,
16784 (splay_tree_key) lab);
16785 diagnose_sb_0 (gsi_p, context,
16786 n ? (gimple *) n->value : NULL);
16787 }
16788 }
16789 break;
16790
16791 case GIMPLE_GOTO:
16792 {
16793 tree lab = gimple_goto_dest (stmt);
16794 if (TREE_CODE (lab) != LABEL_DECL)
16795 break;
16796
16797 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
16798 diagnose_sb_0 (gsi_p, context, n ? (gimple *) n->value : NULL);
16799 }
16800 break;
16801
16802 case GIMPLE_SWITCH:
16803 {
16804 gswitch *switch_stmt = as_a <gswitch *> (stmt);
16805 unsigned int i;
16806 for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
16807 {
16808 tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
16809 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
16810 if (n && diagnose_sb_0 (gsi_p, context, (gimple *) n->value))
16811 break;
16812 }
16813 }
16814 break;
16815
16816 case GIMPLE_RETURN:
16817 diagnose_sb_0 (gsi_p, context, NULL);
16818 break;
16819
16820 default:
16821 break;
16822 }
16823
16824 return NULL_TREE;
16825 }
16826
16827 /* Called from tree-cfg.c::make_edges to create cfg edges for all relevant
16828 GIMPLE_* codes. */
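/* For instance (a sketch), for a GIMPLE_OMP_FOR region this creates an
   abnormal loopback edge from the GIMPLE_OMP_CONTINUE block back to
   the block following the region entry, plus an abnormal edge from the
   entry to the block past the continue, modeling a loop whose body is
   never executed.  */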
16829 bool
16830 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
16831 int *region_idx)
16832 {
16833 gimple *last = last_stmt (bb);
16834 enum gimple_code code = gimple_code (last);
16835 struct omp_region *cur_region = *region;
16836 bool fallthru = false;
16837
16838 switch (code)
16839 {
16840 case GIMPLE_OMP_PARALLEL:
16841 case GIMPLE_OMP_TASK:
16842 case GIMPLE_OMP_FOR:
16843 case GIMPLE_OMP_SINGLE:
16844 case GIMPLE_OMP_TEAMS:
16845 case GIMPLE_OMP_MASTER:
16846 case GIMPLE_OMP_TASKGROUP:
16847 case GIMPLE_OMP_CRITICAL:
16848 case GIMPLE_OMP_SECTION:
16849 cur_region = new_omp_region (bb, code, cur_region);
16850 fallthru = true;
16851 break;
16852
16853 case GIMPLE_OMP_ORDERED:
16854 cur_region = new_omp_region (bb, code, cur_region);
16855 fallthru = true;
16856 if (find_omp_clause (gimple_omp_ordered_clauses
16857 (as_a <gomp_ordered *> (last)),
16858 OMP_CLAUSE_DEPEND))
16859 cur_region = cur_region->outer;
16860 break;
16861
16862 case GIMPLE_OMP_TARGET:
16863 cur_region = new_omp_region (bb, code, cur_region);
16864 fallthru = true;
16865 switch (gimple_omp_target_kind (last))
16866 {
16867 case GF_OMP_TARGET_KIND_REGION:
16868 case GF_OMP_TARGET_KIND_DATA:
16869 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
16870 case GF_OMP_TARGET_KIND_OACC_KERNELS:
16871 case GF_OMP_TARGET_KIND_OACC_DATA:
16872 case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
16873 break;
16874 case GF_OMP_TARGET_KIND_UPDATE:
16875 case GF_OMP_TARGET_KIND_ENTER_DATA:
16876 case GF_OMP_TARGET_KIND_EXIT_DATA:
16877 case GF_OMP_TARGET_KIND_OACC_UPDATE:
16878 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
16879 case GF_OMP_TARGET_KIND_OACC_DECLARE:
16880 cur_region = cur_region->outer;
16881 break;
16882 default:
16883 gcc_unreachable ();
16884 }
16885 break;
16886
16887 case GIMPLE_OMP_SECTIONS:
16888 cur_region = new_omp_region (bb, code, cur_region);
16889 fallthru = true;
16890 break;
16891
16892 case GIMPLE_OMP_SECTIONS_SWITCH:
16893 fallthru = false;
16894 break;
16895
16896 case GIMPLE_OMP_ATOMIC_LOAD:
16897 case GIMPLE_OMP_ATOMIC_STORE:
16898 fallthru = true;
16899 break;
16900
16901 case GIMPLE_OMP_RETURN:
16902 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
16903 somewhere other than the next block. This will be
16904 created later. */
16905 cur_region->exit = bb;
16906 if (cur_region->type == GIMPLE_OMP_TASK)
16907 /* Add an edge corresponding to not scheduling the task
16908 immediately. */
16909 make_edge (cur_region->entry, bb, EDGE_ABNORMAL);
16910 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
16911 cur_region = cur_region->outer;
16912 break;
16913
16914 case GIMPLE_OMP_CONTINUE:
16915 cur_region->cont = bb;
16916 switch (cur_region->type)
16917 {
16918 case GIMPLE_OMP_FOR:
16919 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
16920 successor edges as abnormal to prevent them
16921 from being split. */
16922 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
16923 /* Make the loopback edge. */
16924 make_edge (bb, single_succ (cur_region->entry),
16925 EDGE_ABNORMAL);
16926
16927 /* Create an edge from GIMPLE_OMP_FOR to exit, which
16928 corresponds to the case that the body of the loop
16929 is not executed at all. */
16930 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
16931 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
16932 fallthru = false;
16933 break;
16934
16935 case GIMPLE_OMP_SECTIONS:
16936 /* Wire up the edges into and out of the nested sections. */
16937 {
16938 basic_block switch_bb = single_succ (cur_region->entry);
16939
16940 struct omp_region *i;
16941 for (i = cur_region->inner; i ; i = i->next)
16942 {
16943 gcc_assert (i->type == GIMPLE_OMP_SECTION);
16944 make_edge (switch_bb, i->entry, 0);
16945 make_edge (i->exit, bb, EDGE_FALLTHRU);
16946 }
16947
16948 /* Make the loopback edge to the block with
16949 GIMPLE_OMP_SECTIONS_SWITCH. */
16950 make_edge (bb, switch_bb, 0);
16951
16952 /* Make the edge from the switch to exit. */
16953 make_edge (switch_bb, bb->next_bb, 0);
16954 fallthru = false;
16955 }
16956 break;
16957
16958 case GIMPLE_OMP_TASK:
16959 fallthru = true;
16960 break;
16961
16962 default:
16963 gcc_unreachable ();
16964 }
16965 break;
16966
16967 default:
16968 gcc_unreachable ();
16969 }
16970
16971 if (*region != cur_region)
16972 {
16973 *region = cur_region;
16974 if (cur_region)
16975 *region_idx = cur_region->entry->index;
16976 else
16977 *region_idx = 0;
16978 }
16979
16980 return fallthru;
16981 }
16982
16983 static unsigned int
16984 diagnose_omp_structured_block_errors (void)
16985 {
16986 struct walk_stmt_info wi;
16987 gimple_seq body = gimple_body (current_function_decl);
16988
16989 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
16990
16991 memset (&wi, 0, sizeof (wi));
16992 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
16993
16994 memset (&wi, 0, sizeof (wi));
16995 wi.want_locations = true;
16996 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
16997
16998 gimple_set_body (current_function_decl, body);
16999
17000 splay_tree_delete (all_labels);
17001 all_labels = NULL;
17002
17003 return 0;
17004 }
17005
17006 namespace {
17007
17008 const pass_data pass_data_diagnose_omp_blocks =
17009 {
17010 GIMPLE_PASS, /* type */
17011 "*diagnose_omp_blocks", /* name */
17012 OPTGROUP_NONE, /* optinfo_flags */
17013 TV_NONE, /* tv_id */
17014 PROP_gimple_any, /* properties_required */
17015 0, /* properties_provided */
17016 0, /* properties_destroyed */
17017 0, /* todo_flags_start */
17018 0, /* todo_flags_finish */
17019 };
17020
17021 class pass_diagnose_omp_blocks : public gimple_opt_pass
17022 {
17023 public:
17024 pass_diagnose_omp_blocks (gcc::context *ctxt)
17025 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
17026 {}
17027
17028 /* opt_pass methods: */
17029 virtual bool gate (function *)
17030 {
17031 return flag_cilkplus || flag_openacc || flag_openmp;
17032 }
17033 virtual unsigned int execute (function *)
17034 {
17035 return diagnose_omp_structured_block_errors ();
17036 }
17037
17038 }; // class pass_diagnose_omp_blocks
17039
17040 } // anon namespace
17041
17042 gimple_opt_pass *
17043 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
17044 {
17045 return new pass_diagnose_omp_blocks (ctxt);
17046 }
17047 \f
17048 /* SIMD clone supporting code. */
17049
17050 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
17051 of arguments to reserve space for. */
17052
17053 static struct cgraph_simd_clone *
17054 simd_clone_struct_alloc (int nargs)
17055 {
17056 struct cgraph_simd_clone *clone_info;
17057 size_t len = (sizeof (struct cgraph_simd_clone)
17058 + nargs * sizeof (struct cgraph_simd_clone_arg));
17059 clone_info = (struct cgraph_simd_clone *)
17060 ggc_internal_cleared_alloc (len);
17061 return clone_info;
17062 }
17063
17064 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
17065
17066 static inline void
17067 simd_clone_struct_copy (struct cgraph_simd_clone *to,
17068 struct cgraph_simd_clone *from)
17069 {
17070 memcpy (to, from, (sizeof (struct cgraph_simd_clone)
17071 + ((from->nargs - from->inbranch)
17072 * sizeof (struct cgraph_simd_clone_arg))));
17073 }
17074
17075 /* Return a vector of the parameter types of function FNDECL. This uses
17076 TYPE_ARG_TYPES if available, otherwise falls back to the types of
17077 the DECL_ARGUMENTS entries. */
17078
17079 vec<tree>
17080 simd_clone_vector_of_formal_parm_types (tree fndecl)
17081 {
17082 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
17083 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
17084 vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
17085 unsigned int i;
17086 tree arg;
17087 FOR_EACH_VEC_ELT (args, i, arg)
17088 args[i] = TREE_TYPE (args[i]);
17089 return args;
17090 }
17091
17092 /* Given a simd function in NODE, extract the simd specific
17093 information from the OMP clauses passed in CLAUSES, and return
17094 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
17095 is set to TRUE if the `inbranch' or `notinbranch' clause is specified,
17096 otherwise set to FALSE. */
17097
17098 static struct cgraph_simd_clone *
17099 simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
17100 bool *inbranch_specified)
17101 {
17102 vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
17103 tree t;
17104 int n;
17105 *inbranch_specified = false;
17106
17107 n = args.length ();
17108 if (n > 0 && args.last () == void_type_node)
17109 n--;
17110
17111 /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
17112 be cloned carry a distinctive artificial "cilk simd function"
17113 attribute in addition to "omp declare simd". */
17114 bool cilk_clone
17115 = (flag_cilkplus
17116 && lookup_attribute ("cilk simd function",
17117 DECL_ATTRIBUTES (node->decl)));
17118
17119 /* Allocate one more than needed just in case this is an in-branch
17120 clone which will require a mask argument. */
17121 struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
17122 clone_info->nargs = n;
17123 clone_info->cilk_elemental = cilk_clone;
17124
17125 if (!clauses)
17126 {
17127 args.release ();
17128 return clone_info;
17129 }
17130 clauses = TREE_VALUE (clauses);
17131 if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
17132 return clone_info;
17133
17134 for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
17135 {
17136 switch (OMP_CLAUSE_CODE (t))
17137 {
17138 case OMP_CLAUSE_INBRANCH:
17139 clone_info->inbranch = 1;
17140 *inbranch_specified = true;
17141 break;
17142 case OMP_CLAUSE_NOTINBRANCH:
17143 clone_info->inbranch = 0;
17144 *inbranch_specified = true;
17145 break;
17146 case OMP_CLAUSE_SIMDLEN:
17147 clone_info->simdlen
17148 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
17149 break;
17150 case OMP_CLAUSE_LINEAR:
17151 {
17152 tree decl = OMP_CLAUSE_DECL (t);
17153 tree step = OMP_CLAUSE_LINEAR_STEP (t);
17154 int argno = TREE_INT_CST_LOW (decl);
17155 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
17156 {
17157 enum cgraph_simd_clone_arg_type arg_type;
17158 if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
17159 switch (OMP_CLAUSE_LINEAR_KIND (t))
17160 {
17161 case OMP_CLAUSE_LINEAR_REF:
17162 arg_type
17163 = SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP;
17164 break;
17165 case OMP_CLAUSE_LINEAR_UVAL:
17166 arg_type
17167 = SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP;
17168 break;
17169 case OMP_CLAUSE_LINEAR_VAL:
17170 case OMP_CLAUSE_LINEAR_DEFAULT:
17171 arg_type
17172 = SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP;
17173 break;
17174 default:
17175 gcc_unreachable ();
17176 }
17177 else
17178 arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
17179 clone_info->args[argno].arg_type = arg_type;
17180 clone_info->args[argno].linear_step = tree_to_shwi (step);
17181 gcc_assert (clone_info->args[argno].linear_step >= 0
17182 && clone_info->args[argno].linear_step < n);
17183 }
17184 else
17185 {
17186 if (POINTER_TYPE_P (args[argno]))
17187 step = fold_convert (ssizetype, step);
17188 if (!tree_fits_shwi_p (step))
17189 {
17190 warning_at (OMP_CLAUSE_LOCATION (t), 0,
17191 "ignoring large linear step");
17192 args.release ();
17193 return NULL;
17194 }
17195 else if (integer_zerop (step))
17196 {
17197 warning_at (OMP_CLAUSE_LOCATION (t), 0,
17198 "ignoring zero linear step");
17199 args.release ();
17200 return NULL;
17201 }
17202 else
17203 {
17204 enum cgraph_simd_clone_arg_type arg_type;
17205 if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
17206 switch (OMP_CLAUSE_LINEAR_KIND (t))
17207 {
17208 case OMP_CLAUSE_LINEAR_REF:
17209 arg_type
17210 = SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP;
17211 break;
17212 case OMP_CLAUSE_LINEAR_UVAL:
17213 arg_type
17214 = SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP;
17215 break;
17216 case OMP_CLAUSE_LINEAR_VAL:
17217 case OMP_CLAUSE_LINEAR_DEFAULT:
17218 arg_type
17219 = SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP;
17220 break;
17221 default:
17222 gcc_unreachable ();
17223 }
17224 else
17225 arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
17226 clone_info->args[argno].arg_type = arg_type;
17227 clone_info->args[argno].linear_step = tree_to_shwi (step);
17228 }
17229 }
17230 break;
17231 }
17232 case OMP_CLAUSE_UNIFORM:
17233 {
17234 tree decl = OMP_CLAUSE_DECL (t);
17235 int argno = tree_to_uhwi (decl);
17236 clone_info->args[argno].arg_type
17237 = SIMD_CLONE_ARG_TYPE_UNIFORM;
17238 break;
17239 }
17240 case OMP_CLAUSE_ALIGNED:
17241 {
17242 tree decl = OMP_CLAUSE_DECL (t);
17243 int argno = tree_to_uhwi (decl);
17244 clone_info->args[argno].alignment
17245 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
17246 break;
17247 }
17248 default:
17249 break;
17250 }
17251 }
17252 args.release ();
17253 return clone_info;
17254 }
17255
17256 /* Given a SIMD clone in NODE, calculate the characteristic data
17257 type and return the corresponding type. The characteristic data
17258 type is computed as described in the Intel Vector ABI. */
17259
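/* For example (a sketch of rules a and b below): for "double f (int x)"
   the characteristic type is double, the return type; for
   "void f (int *p, float x)" with p declared uniform, it is float, the
   type of the first non-uniform, non-linear parameter.  */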
17260 static tree
17261 simd_clone_compute_base_data_type (struct cgraph_node *node,
17262 struct cgraph_simd_clone *clone_info)
17263 {
17264 tree type = integer_type_node;
17265 tree fndecl = node->decl;
17266
17267 /* a) For non-void function, the characteristic data type is the
17268 return type. */
17269 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
17270 type = TREE_TYPE (TREE_TYPE (fndecl));
17271
17272 /* b) If the function has any non-uniform, non-linear parameters,
17273 then the characteristic data type is the type of the first
17274 such parameter. */
17275 else
17276 {
17277 vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
17278 for (unsigned int i = 0; i < clone_info->nargs; ++i)
17279 if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
17280 {
17281 type = map[i];
17282 break;
17283 }
17284 map.release ();
17285 }
17286
17287 /* c) If the characteristic data type determined by a) or b) above
17288 is struct, union, or class type which is pass-by-value (except
17289 for the type that maps to the built-in complex data type), the
17290 characteristic data type is int. */
17291 if (RECORD_OR_UNION_TYPE_P (type)
17292 && !aggregate_value_p (type, NULL)
17293 && TREE_CODE (type) != COMPLEX_TYPE)
17294 return integer_type_node;
17295
17296 /* d) If none of the above three classes is applicable, the
17297 characteristic data type is int. */
17298
17299 return type;
17300
17301 /* e) For Intel Xeon Phi native and offload compilation, if the
17302 resulting characteristic data type is an 8-bit or 16-bit integer
17303 type, the characteristic data type is int. */
17304 /* Well, we don't handle Xeon Phi yet. */
17305 }
17306
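/* Produce the Intel Vector ABI mangled name for a SIMD clone of NODE,
   of the form "_ZGV<isa><mask><simdlen><parm-codes>_<name>"; e.g.
   (illustrative only, the ISA letter is target-specific)
   "_ZGVbN4vv_foo" for a notinbranch, simdlen-4 clone of
   "int foo (int, int)" with two plain vector arguments.  Returns
   NULL_TREE if an identically mangled clone already exists.  */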
17307 static tree
17308 simd_clone_mangle (struct cgraph_node *node,
17309 struct cgraph_simd_clone *clone_info)
17310 {
17311 char vecsize_mangle = clone_info->vecsize_mangle;
17312 char mask = clone_info->inbranch ? 'M' : 'N';
17313 unsigned int simdlen = clone_info->simdlen;
17314 unsigned int n;
17315 pretty_printer pp;
17316
17317 gcc_assert (vecsize_mangle && simdlen);
17318
17319 pp_string (&pp, "_ZGV");
17320 pp_character (&pp, vecsize_mangle);
17321 pp_character (&pp, mask);
17322 pp_decimal_int (&pp, simdlen);
17323
17324 for (n = 0; n < clone_info->nargs; ++n)
17325 {
17326 struct cgraph_simd_clone_arg arg = clone_info->args[n];
17327
17328 switch (arg.arg_type)
17329 {
17330 case SIMD_CLONE_ARG_TYPE_UNIFORM:
17331 pp_character (&pp, 'u');
17332 break;
17333 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
17334 pp_character (&pp, 'l');
17335 goto mangle_linear;
17336 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
17337 pp_character (&pp, 'R');
17338 goto mangle_linear;
17339 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
17340 pp_character (&pp, 'L');
17341 goto mangle_linear;
17342 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
17343 pp_character (&pp, 'U');
17344 goto mangle_linear;
17345 mangle_linear:
17346 gcc_assert (arg.linear_step != 0);
17347 if (arg.linear_step > 1)
17348 pp_unsigned_wide_integer (&pp, arg.linear_step);
17349 else if (arg.linear_step < 0)
17350 {
17351 pp_character (&pp, 'n');
17352 pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
17353 arg.linear_step));
17354 }
17355 break;
17356 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
17357 pp_string (&pp, "ls");
17358 pp_unsigned_wide_integer (&pp, arg.linear_step);
17359 break;
17360 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
17361 pp_string (&pp, "Rs");
17362 pp_unsigned_wide_integer (&pp, arg.linear_step);
17363 break;
17364 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
17365 pp_string (&pp, "Ls");
17366 pp_unsigned_wide_integer (&pp, arg.linear_step);
17367 break;
17368 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
17369 pp_string (&pp, "Us");
17370 pp_unsigned_wide_integer (&pp, arg.linear_step);
17371 break;
17372 default:
17373 pp_character (&pp, 'v');
17374 }
17375 if (arg.alignment)
17376 {
17377 pp_character (&pp, 'a');
17378 pp_decimal_int (&pp, arg.alignment);
17379 }
17380 }
17381
17382 pp_underscore (&pp);
17383 const char *str = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl));
17384 if (*str == '*')
17385 ++str;
17386 pp_string (&pp, str);
17387 str = pp_formatted_text (&pp);
17388
17389 /* If there already is a SIMD clone with the same mangled name, don't
17390 add another one. This can happen e.g. for
17391 #pragma omp declare simd
17392 #pragma omp declare simd simdlen(8)
17393 int foo (int, int);
17394 if the simdlen is assumed to be 8 for the first one, etc. */
17395 for (struct cgraph_node *clone = node->simd_clones; clone;
17396 clone = clone->simdclone->next_clone)
17397 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
17398 str) == 0)
17399 return NULL_TREE;
17400
17401 return get_identifier (str);
17402 }
17403
17404 /* Create a simd clone of OLD_NODE and return it. */
17405
17406 static struct cgraph_node *
17407 simd_clone_create (struct cgraph_node *old_node)
17408 {
17409 struct cgraph_node *new_node;
17410 if (old_node->definition)
17411 {
17412 if (!old_node->has_gimple_body_p ())
17413 return NULL;
17414 old_node->get_body ();
17415 new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
17416 false, NULL, NULL,
17417 "simdclone");
17418 }
17419 else
17420 {
17421 tree old_decl = old_node->decl;
17422 tree new_decl = copy_node (old_node->decl);
17423 DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
17424 SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
17425 SET_DECL_RTL (new_decl, NULL);
17426 DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
17427 DECL_STATIC_DESTRUCTOR (new_decl) = 0;
17428 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
17429 if (old_node->in_other_partition)
17430 new_node->in_other_partition = 1;
17431 symtab->call_cgraph_insertion_hooks (new_node);
17432 }
17433 if (new_node == NULL)
17434 return new_node;
17435
17436 TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
17437
17438 /* The function cgraph_function_versioning () will force the new
17439 symbol local. Undo this, and inherit external visibility from
17440 the old node. */
17441 new_node->local.local = old_node->local.local;
17442 new_node->externally_visible = old_node->externally_visible;
17443
17444 return new_node;
17445 }
17446
17447 /* Adjust the return type of the given function to its appropriate
17448 vector counterpart. Returns a simd array to be used throughout the
17449 function as a return value. */
17450
17451 static tree
17452 simd_clone_adjust_return_type (struct cgraph_node *node)
17453 {
17454 tree fndecl = node->decl;
17455 tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
17456 unsigned int veclen;
17457 tree t;
17458
17459 /* Adjust the function return type. */
17460 if (orig_rettype == void_type_node)
17461 return NULL_TREE;
17462 TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
17463 t = TREE_TYPE (TREE_TYPE (fndecl));
17464 if (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t))
17465 veclen = node->simdclone->vecsize_int;
17466 else
17467 veclen = node->simdclone->vecsize_float;
17468 veclen /= GET_MODE_BITSIZE (TYPE_MODE (t));
17469 if (veclen > node->simdclone->simdlen)
17470 veclen = node->simdclone->simdlen;
17471 if (POINTER_TYPE_P (t))
17472 t = pointer_sized_int_node;
17473 if (veclen == node->simdclone->simdlen)
17474 t = build_vector_type (t, node->simdclone->simdlen);
17475 else
17476 {
17477 t = build_vector_type (t, veclen);
17478 t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
17479 }
17480 TREE_TYPE (TREE_TYPE (fndecl)) = t;
17481 if (!node->definition)
17482 return NULL_TREE;
17483
17484 t = DECL_RESULT (fndecl);
17485 /* Adjust the DECL_RESULT. */
17486 gcc_assert (TREE_TYPE (t) != void_type_node);
17487 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
17488 relayout_decl (t);
17489
17490 tree atype = build_array_type_nelts (orig_rettype,
17491 node->simdclone->simdlen);
17492 if (veclen != node->simdclone->simdlen)
17493 return build1 (VIEW_CONVERT_EXPR, atype, t);
17494
17495 /* Set up a SIMD array to use as the return value. */
17496 tree retval = create_tmp_var_raw (atype, "retval");
17497 gimple_add_tmp_var (retval);
17498 return retval;
17499 }
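
/* Worked example (illustrative): for "int foo (...)" with simdlen 8
   and a 128-bit vecsize_int, veclen = 128 / 32 = 4 < simdlen, so the
   return type above becomes an array of two 4-element integer vectors
   and the simd return array is DECL_RESULT viewed as "int[8]" through
   the VIEW_CONVERT_EXPR.  With a 256-bit vecsize_int, veclen equals
   simdlen and a fresh "retval" array of eight ints is created
   instead.  */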
17500
17501 /* Each vector argument has a corresponding array to be used locally
17502 as part of the eventual loop. Create such temporary array and
17503 return it.
17504
17505 PREFIX is the prefix to be used for the temporary.
17506
17507 TYPE is the inner element type.
17508
17509 SIMDLEN is the number of elements. */
17510
17511 static tree
17512 create_tmp_simd_array (const char *prefix, tree type, int simdlen)
17513 {
17514 tree atype = build_array_type_nelts (type, simdlen);
17515 tree avar = create_tmp_var_raw (atype, prefix);
17516 gimple_add_tmp_var (avar);
17517 return avar;
17518 }
17519
17520 /* Modify the function argument types to their corresponding vector
17521 counterparts if appropriate. Also, create one array for each simd
17522 argument to be used locally when using the function arguments as
17523 part of the loop.
17524
17525 NODE is the function whose arguments are to be adjusted.
17526
17527 Returns an adjustment vector that will be filled describing how the
17528 argument types will be adjusted. */
17529
17530 static ipa_parm_adjustment_vec
17531 simd_clone_adjust_argument_types (struct cgraph_node *node)
17532 {
17533 vec<tree> args;
17534 ipa_parm_adjustment_vec adjustments;
17535
17536 if (node->definition)
17537 args = ipa_get_vector_of_formal_parms (node->decl);
17538 else
17539 args = simd_clone_vector_of_formal_parm_types (node->decl);
17540 adjustments.create (args.length ());
17541 unsigned i, j, veclen;
17542 struct ipa_parm_adjustment adj;
17543 for (i = 0; i < node->simdclone->nargs; ++i)
17544 {
17545 memset (&adj, 0, sizeof (adj));
17546 tree parm = args[i];
17547 tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
17548 adj.base_index = i;
17549 adj.base = parm;
17550
17551 node->simdclone->args[i].orig_arg = node->definition ? parm : NULL_TREE;
17552 node->simdclone->args[i].orig_type = parm_type;
17553
17554 switch (node->simdclone->args[i].arg_type)
17555 {
17556 default:
17557 /* No adjustment necessary for scalar arguments. */
17558 adj.op = IPA_PARM_OP_COPY;
17559 break;
17560 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
17561 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
17562 if (node->definition)
17563 node->simdclone->args[i].simd_array
17564 = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
17565 TREE_TYPE (parm_type),
17566 node->simdclone->simdlen);
17567 adj.op = IPA_PARM_OP_COPY;
17568 break;
17569 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
17570 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
17571 case SIMD_CLONE_ARG_TYPE_VECTOR:
17572 if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
17573 veclen = node->simdclone->vecsize_int;
17574 else
17575 veclen = node->simdclone->vecsize_float;
17576 veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
17577 if (veclen > node->simdclone->simdlen)
17578 veclen = node->simdclone->simdlen;
17579 adj.arg_prefix = "simd";
17580 if (POINTER_TYPE_P (parm_type))
17581 adj.type = build_vector_type (pointer_sized_int_node, veclen);
17582 else
17583 adj.type = build_vector_type (parm_type, veclen);
17584 node->simdclone->args[i].vector_type = adj.type;
17585 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
17586 {
17587 adjustments.safe_push (adj);
17588 if (j == veclen)
17589 {
17590 memset (&adj, 0, sizeof (adj));
17591 adj.op = IPA_PARM_OP_NEW;
17592 adj.arg_prefix = "simd";
17593 adj.base_index = i;
17594 adj.type = node->simdclone->args[i].vector_type;
17595 }
17596 }
17597
17598 if (node->definition)
17599 node->simdclone->args[i].simd_array
17600 = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
17601 parm_type, node->simdclone->simdlen);
17602 }
17603 adjustments.safe_push (adj);
17604 }
17605
17606 if (node->simdclone->inbranch)
17607 {
17608 tree base_type
17609 = simd_clone_compute_base_data_type (node->simdclone->origin,
17610 node->simdclone);
17611
17612 memset (&adj, 0, sizeof (adj));
17613 adj.op = IPA_PARM_OP_NEW;
17614 adj.arg_prefix = "mask";
17615
17616 adj.base_index = i;
17617 if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
17618 veclen = node->simdclone->vecsize_int;
17619 else
17620 veclen = node->simdclone->vecsize_float;
17621 veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
17622 if (veclen > node->simdclone->simdlen)
17623 veclen = node->simdclone->simdlen;
17624 if (POINTER_TYPE_P (base_type))
17625 adj.type = build_vector_type (pointer_sized_int_node, veclen);
17626 else
17627 adj.type = build_vector_type (base_type, veclen);
17628 adjustments.safe_push (adj);
17629
17630 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
17631 adjustments.safe_push (adj);
17632
17633 /* We have previously allocated one extra entry for the mask. Use
17634 it and fill it. */
17635 struct cgraph_simd_clone *sc = node->simdclone;
17636 sc->nargs++;
17637 if (node->definition)
17638 {
17639 sc->args[i].orig_arg
17640 = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
17641 sc->args[i].simd_array
17642 = create_tmp_simd_array ("mask", base_type, sc->simdlen);
17643 }
17644 sc->args[i].orig_type = base_type;
17645 sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
17646 }
17647
17648 if (node->definition)
17649 ipa_modify_formal_parameters (node->decl, adjustments);
17650 else
17651 {
17652 tree new_arg_types = NULL_TREE, new_reversed;
17653 bool last_parm_void = false;
17654 if (args.length () > 0 && args.last () == void_type_node)
17655 last_parm_void = true;
17656
17657 gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
17658 j = adjustments.length ();
17659 for (i = 0; i < j; i++)
17660 {
17661 struct ipa_parm_adjustment *adj = &adjustments[i];
17662 tree ptype;
17663 if (adj->op == IPA_PARM_OP_COPY)
17664 ptype = args[adj->base_index];
17665 else
17666 ptype = adj->type;
17667 new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
17668 }
17669 new_reversed = nreverse (new_arg_types);
17670 if (last_parm_void)
17671 {
17672 if (new_reversed)
17673 TREE_CHAIN (new_arg_types) = void_list_node;
17674 else
17675 new_reversed = void_list_node;
17676 }
17677
17678 tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
17679 TYPE_ARG_TYPES (new_type) = new_reversed;
17680 TREE_TYPE (node->decl) = new_type;
17681
17682 adjustments.release ();
17683 }
17684 args.release ();
17685 return adjustments;
17686 }
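
/* Illustrative example: a "double" vector argument with simdlen 8 and
   a 256-bit vecsize_float gives veclen = 256 / 64 = 4, so the loop
   above pushes one extra IPA_PARM_OP_NEW adjustment besides the
   original entry and the parameter is rewritten into two V4DF parms,
   with a local "double[8]" simd array created alongside.  An inbranch
   clone appends mask parameters the same way.  */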
17687
17688 /* Initialize and copy the function arguments in NODE to their
17689 corresponding local simd arrays. Returns a fresh gimple_seq with
17690 the instruction sequence generated. */
17691
17692 static gimple_seq
17693 simd_clone_init_simd_arrays (struct cgraph_node *node,
17694 ipa_parm_adjustment_vec adjustments)
17695 {
17696 gimple_seq seq = NULL;
17697 unsigned i = 0, j = 0, k;
17698
17699 for (tree arg = DECL_ARGUMENTS (node->decl);
17700 arg;
17701 arg = DECL_CHAIN (arg), i++, j++)
17702 {
17703 if (adjustments[j].op == IPA_PARM_OP_COPY
17704 || POINTER_TYPE_P (TREE_TYPE (arg)))
17705 continue;
17706
17707 node->simdclone->args[i].vector_arg = arg;
17708
17709 tree array = node->simdclone->args[i].simd_array;
17710 if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
17711 {
17712 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
17713 tree ptr = build_fold_addr_expr (array);
17714 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
17715 build_int_cst (ptype, 0));
17716 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
17717 gimplify_and_add (t, &seq);
17718 }
17719 else
17720 {
17721 unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
17722 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
17723 for (k = 0; k < node->simdclone->simdlen; k += simdlen)
17724 {
17725 tree ptr = build_fold_addr_expr (array);
17726 int elemsize;
17727 if (k)
17728 {
17729 arg = DECL_CHAIN (arg);
17730 j++;
17731 }
17732 elemsize
17733 = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
17734 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
17735 build_int_cst (ptype, k * elemsize));
17736 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
17737 gimplify_and_add (t, &seq);
17738 }
17739 }
17740 }
17741 return seq;
17742 }
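
/* Sketch of the generated sequence (illustrative): with two V4SI
   parms vec1/vec2 backing an "int[8]" simd array A, the loop above
   emits roughly

     *(V4SI *) &A = vec1;
     MEM[&A + 16] = vec2;

   i.e. each vector part is stored at offset k * elemsize into the
   array, so the scalar body can later index A[iter].  */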
17743
17744 /* Callback info for ipa_simd_modify_stmt_ops below. */
17745
17746 struct modify_stmt_info {
17747 ipa_parm_adjustment_vec adjustments;
17748 gimple *stmt;
17749 /* True if the parent statement was modified by
17750 ipa_simd_modify_stmt_ops. */
17751 bool modified;
17752 };
17753
17754 /* Callback for walk_gimple_op.
17755
17756 Adjust operands from a given statement as specified in the
17757 adjustments vector in the callback data. */
17758
17759 static tree
17760 ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
17761 {
17762 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
17763 struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
17764 tree *orig_tp = tp;
17765 if (TREE_CODE (*tp) == ADDR_EXPR)
17766 tp = &TREE_OPERAND (*tp, 0);
17767 struct ipa_parm_adjustment *cand = NULL;
17768 if (TREE_CODE (*tp) == PARM_DECL)
17769 cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
17770 else
17771 {
17772 if (TYPE_P (*tp))
17773 *walk_subtrees = 0;
17774 }
17775
17776 tree repl = NULL_TREE;
17777 if (cand)
17778 repl = unshare_expr (cand->new_decl);
17779 else
17780 {
17781 if (tp != orig_tp)
17782 {
17783 *walk_subtrees = 0;
17784 bool modified = info->modified;
17785 info->modified = false;
17786 walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
17787 if (!info->modified)
17788 {
17789 info->modified = modified;
17790 return NULL_TREE;
17791 }
17792 info->modified = modified;
17793 repl = *tp;
17794 }
17795 else
17796 return NULL_TREE;
17797 }
17798
17799 if (tp != orig_tp)
17800 {
17801 repl = build_fold_addr_expr (repl);
17802 gimple *stmt;
17803 if (is_gimple_debug (info->stmt))
17804 {
17805 tree vexpr = make_node (DEBUG_EXPR_DECL);
17806 stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
17807 DECL_ARTIFICIAL (vexpr) = 1;
17808 TREE_TYPE (vexpr) = TREE_TYPE (repl);
17809 DECL_MODE (vexpr) = TYPE_MODE (TREE_TYPE (repl));
17810 repl = vexpr;
17811 }
17812 else
17813 {
17814 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl)), repl);
17815 repl = gimple_assign_lhs (stmt);
17816 }
17817 gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
17818 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
17819 *orig_tp = repl;
17820 }
17821 else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
17822 {
17823 tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
17824 *tp = vce;
17825 }
17826 else
17827 *tp = repl;
17828
17829 info->modified = true;
17830 return NULL_TREE;
17831 }
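
/* Illustrative behavior of the callback above: a direct use of a
   PARM_DECL is replaced by the corresponding simd array element
   recorded in the adjustment's new_decl; for an ADDR_EXPR over the
   parm, the address of that element is computed into a fresh SSA name
   (or a DEBUG_EXPR_DECL for debug stmts) inserted before the
   statement, and any residual type mismatch is bridged with a
   VIEW_CONVERT_EXPR.  */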
17832
17833 /* Traverse the function body and perform all modifications as
17834 described in ADJUSTMENTS. At function return, ADJUSTMENTS will be
17835 modified such that the replacement/reduction value will now be an
17836 offset into the corresponding simd_array.
17837
17838 This function will replace all function argument uses with their
17839 corresponding simd array elements, and adjust the return values
17840 accordingly. */
17841
17842 static void
17843 ipa_simd_modify_function_body (struct cgraph_node *node,
17844 ipa_parm_adjustment_vec adjustments,
17845 tree retval_array, tree iter)
17846 {
17847 basic_block bb;
17848 unsigned int i, j, l;
17849
17850 /* Re-use the adjustments array, but this time use it to replace
17851 every function argument use to an offset into the corresponding
17852 simd_array. */
17853 for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
17854 {
17855 if (!node->simdclone->args[i].vector_arg)
17856 continue;
17857
17858 tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
17859 tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
17860 adjustments[j].new_decl
17861 = build4 (ARRAY_REF,
17862 basetype,
17863 node->simdclone->args[i].simd_array,
17864 iter,
17865 NULL_TREE, NULL_TREE);
17866 if (adjustments[j].op == IPA_PARM_OP_NONE
17867 && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
17868 j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
17869 }
17870
17871 l = adjustments.length ();
17872 for (i = 1; i < num_ssa_names; i++)
17873 {
17874 tree name = ssa_name (i);
17875 if (name
17876 && SSA_NAME_VAR (name)
17877 && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
17878 {
17879 for (j = 0; j < l; j++)
17880 if (SSA_NAME_VAR (name) == adjustments[j].base
17881 && adjustments[j].new_decl)
17882 {
17883 tree base_var;
17884 if (adjustments[j].new_ssa_base == NULL_TREE)
17885 {
17886 base_var
17887 = copy_var_decl (adjustments[j].base,
17888 DECL_NAME (adjustments[j].base),
17889 TREE_TYPE (adjustments[j].base));
17890 adjustments[j].new_ssa_base = base_var;
17891 }
17892 else
17893 base_var = adjustments[j].new_ssa_base;
17894 if (SSA_NAME_IS_DEFAULT_DEF (name))
17895 {
17896 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
17897 gimple_stmt_iterator gsi = gsi_after_labels (bb);
17898 tree new_decl = unshare_expr (adjustments[j].new_decl);
17899 set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
17900 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
17901 SSA_NAME_IS_DEFAULT_DEF (name) = 0;
17902 gimple *stmt = gimple_build_assign (name, new_decl);
17903 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
17904 }
17905 else
17906 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
17907 }
17908 }
17909 }
17910
17911 struct modify_stmt_info info;
17912 info.adjustments = adjustments;
17913
17914 FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
17915 {
17916 gimple_stmt_iterator gsi;
17917
17918 gsi = gsi_start_bb (bb);
17919 while (!gsi_end_p (gsi))
17920 {
17921 gimple *stmt = gsi_stmt (gsi);
17922 info.stmt = stmt;
17923 struct walk_stmt_info wi;
17924
17925 memset (&wi, 0, sizeof (wi));
17926 info.modified = false;
17927 wi.info = &info;
17928 walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);
17929
17930 if (greturn *return_stmt = dyn_cast <greturn *> (stmt))
17931 {
17932 tree retval = gimple_return_retval (return_stmt);
17933 if (!retval)
17934 {
17935 gsi_remove (&gsi, true);
17936 continue;
17937 }
17938
17939 /* Replace `return foo' with `retval_array[iter] = foo'. */
17940 tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
17941 retval_array, iter, NULL, NULL);
17942 stmt = gimple_build_assign (ref, retval);
17943 gsi_replace (&gsi, stmt, true);
17944 info.modified = true;
17945 }
17946
17947 if (info.modified)
17948 {
17949 update_stmt (stmt);
17950 if (maybe_clean_eh_stmt (stmt))
17951 gimple_purge_dead_eh_edges (gimple_bb (stmt));
17952 }
17953 gsi_next (&gsi);
17954 }
17955 }
17956 }
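
/* Summary of the rewrite performed above (illustrative): a default
   definition such as "x_1(D)" becomes an explicit load of the simd
   array element at the start of the first body block, other SSA names
   over the parm are rebased onto a copied variable, and "return foo;"
   becomes "retval_array[iter] = foo;", with the single real return
   emitted later by simd_clone_adjust.  */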
17957
17958 /* Helper function of simd_clone_adjust; return the linear step addend
17959 of the Ith argument. */
17960
17961 static tree
17962 simd_clone_linear_addend (struct cgraph_node *node, unsigned int i,
17963 tree addtype, basic_block entry_bb)
17964 {
17965 tree ptype = NULL_TREE;
17966 switch (node->simdclone->args[i].arg_type)
17967 {
17968 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
17969 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
17970 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
17971 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
17972 return build_int_cst (addtype, node->simdclone->args[i].linear_step);
17973 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
17974 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
17975 ptype = TREE_TYPE (node->simdclone->args[i].orig_arg);
17976 break;
17977 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
17978 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
17979 ptype = TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg));
17980 break;
17981 default:
17982 gcc_unreachable ();
17983 }
17984
17985 unsigned int idx = node->simdclone->args[i].linear_step;
17986 tree arg = node->simdclone->args[idx].orig_arg;
17987 gcc_assert (is_gimple_reg_type (TREE_TYPE (arg)));
17988 gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
17989 gimple *g;
17990 tree ret;
17991 if (is_gimple_reg (arg))
17992 ret = get_or_create_ssa_default_def (cfun, arg);
17993 else
17994 {
17995 g = gimple_build_assign (make_ssa_name (TREE_TYPE (arg)), arg);
17996 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
17997 ret = gimple_assign_lhs (g);
17998 }
17999 if (TREE_CODE (TREE_TYPE (arg)) == REFERENCE_TYPE)
18000 {
18001 g = gimple_build_assign (make_ssa_name (TREE_TYPE (TREE_TYPE (arg))),
18002 build_simple_mem_ref (ret));
18003 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
18004 ret = gimple_assign_lhs (g);
18005 }
18006 if (!useless_type_conversion_p (addtype, TREE_TYPE (ret)))
18007 {
18008 g = gimple_build_assign (make_ssa_name (addtype), NOP_EXPR, ret);
18009 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
18010 ret = gimple_assign_lhs (g);
18011 }
18012 if (POINTER_TYPE_P (ptype))
18013 {
18014 tree size = TYPE_SIZE_UNIT (TREE_TYPE (ptype));
18015 if (size && TREE_CODE (size) == INTEGER_CST)
18016 {
18017 g = gimple_build_assign (make_ssa_name (addtype), MULT_EXPR,
18018 ret, fold_convert (addtype, size));
18019 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
18020 ret = gimple_assign_lhs (g);
18021 }
18022 }
18023 return ret;
18024 }
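
/* Worked example (illustrative): for "linear(x:4)" the constant-step
   cases above simply return 4 in ADDTYPE.  For a variable step such
   as "linear(x:s)", linear_step instead holds the index of the step
   argument S, whose (possibly dereferenced) value is loaded at
   function entry and, when X is a pointer, multiplied by the size of
   the pointed-to type.  */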
18025
18026 /* Adjust the argument types in NODE to their appropriate vector
18027 counterparts. */
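
/* An illustrative sketch (not lifted verbatim from the sources) of
   the control flow this function builds around the original body, for
   a clone with simdlen N:

     entry:    store the incoming vector parms into their simd arrays
     body:     iter1 = PHI <0 (entry), iter2 (latch)>
               ... original body, with parm and return-value accesses
               rewritten to array elements indexed by iter1; for
               inbranch clones, guarded by a test of the mask
               element ...
     incr:     iter2 = iter1 + 1;
               if (iter2 < N) goto latch; else goto new_exit;
     latch:    goto body;
     new_exit: return the retval array, view-converted to the vector
               return type;

   the loop is then annotated with safelen = N and force_vectorize so
   the vectorizer can turn it back into straight-line vector code.  */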
18028
18029 static void
18030 simd_clone_adjust (struct cgraph_node *node)
18031 {
18032 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
18033
18034 targetm.simd_clone.adjust (node);
18035
18036 tree retval = simd_clone_adjust_return_type (node);
18037 ipa_parm_adjustment_vec adjustments
18038 = simd_clone_adjust_argument_types (node);
18039
18040 push_gimplify_context ();
18041
18042 gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);
18043
18044 /* Adjust all uses of vector arguments accordingly. Adjust all
18045 return values accordingly. */
18046 tree iter = create_tmp_var (unsigned_type_node, "iter");
18047 tree iter1 = make_ssa_name (iter);
18048 tree iter2 = make_ssa_name (iter);
18049 ipa_simd_modify_function_body (node, adjustments, retval, iter1);
18050
18051 /* Initialize the iteration variable. */
18052 basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
18053 basic_block body_bb = split_block_after_labels (entry_bb)->dest;
18054 gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
18055 /* Insert the SIMD array and iv initialization at function
18056 entry. */
18057 gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);
18058
18059 pop_gimplify_context (NULL);
18060
18061 /* Create a new BB right before the original exit BB, to hold the
18062 iteration increment and the condition/branch. */
18063 basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
18064 basic_block incr_bb = create_empty_bb (orig_exit);
18065 add_bb_to_loop (incr_bb, body_bb->loop_father);
18066 /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with empty
18067 flags. Set it now to be a FALLTHRU_EDGE. */
18068 gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
18069 EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
18070 for (unsigned i = 0;
18071 i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
18072 {
18073 edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
18074 redirect_edge_succ (e, incr_bb);
18075 }
18076 edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
18077 e->probability = REG_BR_PROB_BASE;
18078 gsi = gsi_last_bb (incr_bb);
18079 gimple *g = gimple_build_assign (iter2, PLUS_EXPR, iter1,
18080 build_int_cst (unsigned_type_node, 1));
18081 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
18082
18083 /* Mostly annotate the loop for the vectorizer (the rest is done below). */
18084 struct loop *loop = alloc_loop ();
18085 cfun->has_force_vectorize_loops = true;
18086 loop->safelen = node->simdclone->simdlen;
18087 loop->force_vectorize = true;
18088 loop->header = body_bb;
18089
18090 /* Branch around the body if the mask applies. */
18091 if (node->simdclone->inbranch)
18092 {
18093 gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
18094 tree mask_array
18095 = node->simdclone->args[node->simdclone->nargs - 1].simd_array;
18096 tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
18097 tree aref = build4 (ARRAY_REF,
18098 TREE_TYPE (TREE_TYPE (mask_array)),
18099 mask_array, iter1,
18100 NULL, NULL);
18101 g = gimple_build_assign (mask, aref);
18102 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
18103 int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
18104 if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
18105 {
18106 aref = build1 (VIEW_CONVERT_EXPR,
18107 build_nonstandard_integer_type (bitsize, 0), mask);
18108 mask = make_ssa_name (TREE_TYPE (aref));
18109 g = gimple_build_assign (mask, aref);
18110 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
18111 }
18112
18113 g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
18114 NULL, NULL);
18115 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
18116 make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
18117 FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
18118 }
18119
18120 /* Generate the condition. */
18121 g = gimple_build_cond (LT_EXPR,
18122 iter2,
18123 build_int_cst (unsigned_type_node,
18124 node->simdclone->simdlen),
18125 NULL, NULL);
18126 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
18127 e = split_block (incr_bb, gsi_stmt (gsi));
18128 basic_block latch_bb = e->dest;
18129 basic_block new_exit_bb;
18130 new_exit_bb = split_block_after_labels (latch_bb)->dest;
18131 loop->latch = latch_bb;
18132
18133 redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);
18134
18135 make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
18136 /* The successor of incr_bb is already pointing to latch_bb; just
18137 change the flags.
18138 make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */
18139 FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;
18140
18141 gphi *phi = create_phi_node (iter1, body_bb);
18142 edge preheader_edge = find_edge (entry_bb, body_bb);
18143 edge latch_edge = single_succ_edge (latch_bb);
18144 add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
18145 UNKNOWN_LOCATION);
18146 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
18147
18148 /* Generate the new return. */
18149 gsi = gsi_last_bb (new_exit_bb);
18150 if (retval
18151 && TREE_CODE (retval) == VIEW_CONVERT_EXPR
18152 && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
18153 retval = TREE_OPERAND (retval, 0);
18154 else if (retval)
18155 {
18156 retval = build1 (VIEW_CONVERT_EXPR,
18157 TREE_TYPE (TREE_TYPE (node->decl)),
18158 retval);
18159 retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
18160 false, GSI_CONTINUE_LINKING);
18161 }
18162 g = gimple_build_return (retval);
18163 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
18164
18165 /* Handle aligned clauses by replacing default defs of the aligned
18166 uniform args with the lhs of a __builtin_assume_aligned (arg_N(D),
18167 alignment) call. Handle linear by adding PHIs. */
18168 for (unsigned i = 0; i < node->simdclone->nargs; i++)
18169 if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
18170 && (TREE_ADDRESSABLE (node->simdclone->args[i].orig_arg)
18171 || !is_gimple_reg_type
18172 (TREE_TYPE (node->simdclone->args[i].orig_arg))))
18173 {
18174 tree orig_arg = node->simdclone->args[i].orig_arg;
18175 if (is_gimple_reg_type (TREE_TYPE (orig_arg)))
18176 iter1 = make_ssa_name (TREE_TYPE (orig_arg));
18177 else
18178 {
18179 iter1 = create_tmp_var_raw (TREE_TYPE (orig_arg));
18180 gimple_add_tmp_var (iter1);
18181 }
18182 gsi = gsi_after_labels (entry_bb);
18183 g = gimple_build_assign (iter1, orig_arg);
18184 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
18185 gsi = gsi_after_labels (body_bb);
18186 g = gimple_build_assign (orig_arg, iter1);
18187 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
18188 }
18189 else if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
18190 && DECL_BY_REFERENCE (node->simdclone->args[i].orig_arg)
18191 && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
18192 == REFERENCE_TYPE
18193 && TREE_ADDRESSABLE
18194 (TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg))))
18195 {
18196 tree orig_arg = node->simdclone->args[i].orig_arg;
18197 tree def = ssa_default_def (cfun, orig_arg);
18198 if (def && !has_zero_uses (def))
18199 {
18200 iter1 = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (orig_arg)));
18201 gimple_add_tmp_var (iter1);
18202 gsi = gsi_after_labels (entry_bb);
18203 g = gimple_build_assign (iter1, build_simple_mem_ref (def));
18204 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
18205 gsi = gsi_after_labels (body_bb);
18206 g = gimple_build_assign (build_simple_mem_ref (def), iter1);
18207 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
18208 }
18209 }
18210 else if (node->simdclone->args[i].alignment
18211 && node->simdclone->args[i].arg_type
18212 == SIMD_CLONE_ARG_TYPE_UNIFORM
18213 && (node->simdclone->args[i].alignment
18214 & (node->simdclone->args[i].alignment - 1)) == 0
18215 && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
18216 == POINTER_TYPE)
18217 {
18218 unsigned int alignment = node->simdclone->args[i].alignment;
18219 tree orig_arg = node->simdclone->args[i].orig_arg;
18220 tree def = ssa_default_def (cfun, orig_arg);
18221 if (def && !has_zero_uses (def))
18222 {
18223 tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
18224 gimple_seq seq = NULL;
18225 bool need_cvt = false;
18226 gcall *call
18227 = gimple_build_call (fn, 2, def, size_int (alignment));
18228 g = call;
18229 if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
18230 ptr_type_node))
18231 need_cvt = true;
18232 tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg);
18233 gimple_call_set_lhs (g, t);
18234 gimple_seq_add_stmt_without_update (&seq, g);
18235 if (need_cvt)
18236 {
18237 t = make_ssa_name (orig_arg);
18238 g = gimple_build_assign (t, NOP_EXPR, gimple_call_lhs (g));
18239 gimple_seq_add_stmt_without_update (&seq, g);
18240 }
18241 gsi_insert_seq_on_edge_immediate
18242 (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
18243
18244 entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
18245 int freq = compute_call_stmt_bb_frequency (current_function_decl,
18246 entry_bb);
18247 node->create_edge (cgraph_node::get_create (fn),
18248 call, entry_bb->count, freq);
18249
18250 imm_use_iterator iter;
18251 use_operand_p use_p;
18252 gimple *use_stmt;
18253 tree repl = gimple_get_lhs (g);
18254 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
18255 if (is_gimple_debug (use_stmt) || use_stmt == call)
18256 continue;
18257 else
18258 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
18259 SET_USE (use_p, repl);
18260 }
18261 }
18262 else if ((node->simdclone->args[i].arg_type
18263 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
18264 || (node->simdclone->args[i].arg_type
18265 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP)
18266 || (node->simdclone->args[i].arg_type
18267 == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
18268 || (node->simdclone->args[i].arg_type
18269 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP))
18270 {
18271 tree orig_arg = node->simdclone->args[i].orig_arg;
18272 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
18273 || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
18274 tree def = NULL_TREE;
18275 if (TREE_ADDRESSABLE (orig_arg))
18276 {
18277 def = make_ssa_name (TREE_TYPE (orig_arg));
18278 iter1 = make_ssa_name (TREE_TYPE (orig_arg));
18279 iter2 = make_ssa_name (TREE_TYPE (orig_arg));
18280 gsi = gsi_after_labels (entry_bb);
18281 g = gimple_build_assign (def, orig_arg);
18282 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
18283 }
18284 else
18285 {
18286 def = ssa_default_def (cfun, orig_arg);
18287 if (!def || has_zero_uses (def))
18288 def = NULL_TREE;
18289 else
18290 {
18291 iter1 = make_ssa_name (orig_arg);
18292 iter2 = make_ssa_name (orig_arg);
18293 }
18294 }
18295 if (def)
18296 {
18297 phi = create_phi_node (iter1, body_bb);
18298 add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
18299 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
18300 enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
18301 ? PLUS_EXPR : POINTER_PLUS_EXPR;
18302 tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
18303 ? TREE_TYPE (orig_arg) : sizetype;
18304 tree addcst = simd_clone_linear_addend (node, i, addtype,
18305 entry_bb);
18306 gsi = gsi_last_bb (incr_bb);
18307 g = gimple_build_assign (iter2, code, iter1, addcst);
18308 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
18309
18310 imm_use_iterator iter;
18311 use_operand_p use_p;
18312 gimple *use_stmt;
18313 if (TREE_ADDRESSABLE (orig_arg))
18314 {
18315 gsi = gsi_after_labels (body_bb);
18316 g = gimple_build_assign (orig_arg, iter1);
18317 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
18318 }
18319 else
18320 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
18321 if (use_stmt == phi)
18322 continue;
18323 else
18324 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
18325 SET_USE (use_p, iter1);
18326 }
18327 }
18328 else if (node->simdclone->args[i].arg_type
18329 == SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
18330 || (node->simdclone->args[i].arg_type
18331 == SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP))
18332 {
18333 tree orig_arg = node->simdclone->args[i].orig_arg;
18334 tree def = ssa_default_def (cfun, orig_arg);
18335 gcc_assert (!TREE_ADDRESSABLE (orig_arg)
18336 && TREE_CODE (TREE_TYPE (orig_arg)) == REFERENCE_TYPE);
18337 if (def && !has_zero_uses (def))
18338 {
18339 tree rtype = TREE_TYPE (TREE_TYPE (orig_arg));
18340 iter1 = make_ssa_name (orig_arg);
18341 iter2 = make_ssa_name (orig_arg);
18342 tree iter3 = make_ssa_name (rtype);
18343 tree iter4 = make_ssa_name (rtype);
18344 tree iter5 = make_ssa_name (rtype);
18345 gsi = gsi_after_labels (entry_bb);
18346 gimple *load
18347 = gimple_build_assign (iter3, build_simple_mem_ref (def));
18348 gsi_insert_before (&gsi, load, GSI_NEW_STMT);
18349
18350 tree array = node->simdclone->args[i].simd_array;
18351 TREE_ADDRESSABLE (array) = 1;
18352 tree ptr = build_fold_addr_expr (array);
18353 phi = create_phi_node (iter1, body_bb);
18354 add_phi_arg (phi, ptr, preheader_edge, UNKNOWN_LOCATION);
18355 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
18356 g = gimple_build_assign (iter2, POINTER_PLUS_EXPR, iter1,
18357 TYPE_SIZE_UNIT (TREE_TYPE (iter3)));
18358 gsi = gsi_last_bb (incr_bb);
18359 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
18360
18361 phi = create_phi_node (iter4, body_bb);
18362 add_phi_arg (phi, iter3, preheader_edge, UNKNOWN_LOCATION);
18363 add_phi_arg (phi, iter5, latch_edge, UNKNOWN_LOCATION);
18364 enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (iter3))
18365 ? PLUS_EXPR : POINTER_PLUS_EXPR;
18366 tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (iter3))
18367 ? TREE_TYPE (iter3) : sizetype;
18368 tree addcst = simd_clone_linear_addend (node, i, addtype,
18369 entry_bb);
18370 g = gimple_build_assign (iter5, code, iter4, addcst);
18371 gsi = gsi_last_bb (incr_bb);
18372 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
18373
18374 g = gimple_build_assign (build_simple_mem_ref (iter1), iter4);
18375 gsi = gsi_after_labels (body_bb);
18376 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
18377
18378 imm_use_iterator iter;
18379 use_operand_p use_p;
18380 gimple *use_stmt;
18381 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
18382 if (use_stmt == load)
18383 continue;
18384 else
18385 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
18386 SET_USE (use_p, iter1);
18387
18388 if (!TYPE_READONLY (rtype))
18389 {
18390 tree v = make_ssa_name (rtype);
18391 tree aref = build4 (ARRAY_REF, rtype, array,
18392 size_zero_node, NULL_TREE,
18393 NULL_TREE);
18394 gsi = gsi_after_labels (new_exit_bb);
18395 g = gimple_build_assign (v, aref);
18396 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
18397 g = gimple_build_assign (build_simple_mem_ref (def), v);
18398 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
18399 }
18400 }
18401 }
18402
18403 calculate_dominance_info (CDI_DOMINATORS);
18404 add_loop (loop, loop->header->loop_father);
18405 update_ssa (TODO_update_ssa);
18406
18407 pop_cfun ();
18408 }
18409
18410 /* If the function in NODE is tagged as an elemental SIMD function,
18411 create the appropriate SIMD clones. */
18412
18413 static void
18414 expand_simd_clones (struct cgraph_node *node)
18415 {
18416 tree attr = lookup_attribute ("omp declare simd",
18417 DECL_ATTRIBUTES (node->decl));
18418 if (attr == NULL_TREE
18419 || node->global.inlined_to
18420 || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
18421 return;
18422
18423 /* Ignore
18424 #pragma omp declare simd
18425 extern int foo ();
18426 in C, where we don't know the argument types at all. */
18427 if (!node->definition
18428 && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
18429 return;
18430
18431 /* Call this before creating clone_info, as it might ggc_collect. */
18432 if (node->definition && node->has_gimple_body_p ())
18433 node->get_body ();
18434
18435 do
18436 {
18437 /* Start with parsing the "omp declare simd" attribute(s). */
18438 bool inbranch_clause_specified;
18439 struct cgraph_simd_clone *clone_info
18440 = simd_clone_clauses_extract (node, TREE_VALUE (attr),
18441 &inbranch_clause_specified);
18442 if (clone_info == NULL)
18443 continue;
18444
18445 int orig_simdlen = clone_info->simdlen;
18446 tree base_type = simd_clone_compute_base_data_type (node, clone_info);
18447 /* The target can return 0 (no simd clones should be created),
18448 1 (just one ISA of simd clones should be created) or a higher
18449 count of ISA variants. In the latter case, clone_info is initialized
18450 for the first ISA variant. */
18451 int count
18452 = targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
18453 base_type, 0);
18454 if (count == 0)
18455 continue;
18456
18457 /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
18458 also create one inbranch and one !inbranch clone of it. */
18459 for (int i = 0; i < count * 2; i++)
18460 {
18461 struct cgraph_simd_clone *clone = clone_info;
18462 if (inbranch_clause_specified && (i & 1) != 0)
18463 continue;
18464
18465 if (i != 0)
18466 {
18467 clone = simd_clone_struct_alloc (clone_info->nargs
18468 + ((i & 1) != 0));
18469 simd_clone_struct_copy (clone, clone_info);
18470 /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
18471 and simd_clone_adjust_argument_types did to the first
18472 clone's info. */
18473 clone->nargs -= clone_info->inbranch;
18474 clone->simdlen = orig_simdlen;
18475 /* And call the target hook again to get the right ISA. */
18476 targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
18477 base_type,
18478 i / 2);
18479 if ((i & 1) != 0)
18480 clone->inbranch = 1;
18481 }
18482
18483 /* simd_clone_mangle might fail if such a clone has been created
18484 already. */
18485 tree id = simd_clone_mangle (node, clone);
18486 if (id == NULL_TREE)
18487 continue;
18488
18489 /* Only when we are sure we want to create the clone do we actually
18490 clone the function (for definitions) or create another
18491 extern FUNCTION_DECL (for prototypes without definitions). */
18492 struct cgraph_node *n = simd_clone_create (node);
18493 if (n == NULL)
18494 continue;
18495
18496 n->simdclone = clone;
18497 clone->origin = node;
18498 clone->next_clone = NULL;
18499 if (node->simd_clones == NULL)
18500 {
18501 clone->prev_clone = n;
18502 node->simd_clones = n;
18503 }
18504 else
18505 {
18506 clone->prev_clone = node->simd_clones->simdclone->prev_clone;
18507 clone->prev_clone->simdclone->next_clone = n;
18508 node->simd_clones->simdclone->prev_clone = n;
18509 }
18510 symtab->change_decl_assembler_name (n->decl, id);
18511 /* And finally adjust the return type, parameters and for
18512 definitions also function body. */
18513 if (node->definition)
18514 simd_clone_adjust (n);
18515 else
18516 {
18517 simd_clone_adjust_return_type (n);
18518 simd_clone_adjust_argument_types (n);
18519 }
18520 }
18521 }
18522 while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
18523 }
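
/* Illustrative count: with no inbranch/notinbranch clause and a
   target hook returning count == 2, the loop above creates up to four
   clones (a masked and an unmasked variant for each of the two ISAs),
   fewer if simd_clone_mangle detects duplicates.  */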
18524
18525 /* Entry point for IPA simd clone creation pass. */
18526
18527 static unsigned int
18528 ipa_omp_simd_clone (void)
18529 {
18530 struct cgraph_node *node;
18531 FOR_EACH_FUNCTION (node)
18532 expand_simd_clones (node);
18533 return 0;
18534 }
18535
18536 namespace {
18537
18538 const pass_data pass_data_omp_simd_clone =
18539 {
18540 SIMPLE_IPA_PASS, /* type */
18541 "simdclone", /* name */
18542 OPTGROUP_NONE, /* optinfo_flags */
18543 TV_NONE, /* tv_id */
18544 ( PROP_ssa | PROP_cfg ), /* properties_required */
18545 0, /* properties_provided */
18546 0, /* properties_destroyed */
18547 0, /* todo_flags_start */
18548 0, /* todo_flags_finish */
18549 };
18550
18551 class pass_omp_simd_clone : public simple_ipa_opt_pass
18552 {
18553 public:
18554 pass_omp_simd_clone (gcc::context *ctxt)
18555 : simple_ipa_opt_pass (pass_data_omp_simd_clone, ctxt)
18556 {}
18557
18558 /* opt_pass methods: */
18559 virtual bool gate (function *);
18560 virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
18561 };
18562
18563 bool
18564 pass_omp_simd_clone::gate (function *)
18565 {
18566 return targetm.simd_clone.compute_vecsize_and_simdlen != NULL;
18567 }
18568
18569 } // anon namespace
18570
18571 simple_ipa_opt_pass *
18572 make_pass_omp_simd_clone (gcc::context *ctxt)
18573 {
18574 return new pass_omp_simd_clone (ctxt);
18575 }
18576
18577 /* Helper function for omp_finish_file routine. Takes decls from V_DECLS and
18578 adds their addresses and sizes to constructor-vector V_CTOR. */
18579 static void
18580 add_decls_addresses_to_decl_constructor (vec<tree, va_gc> *v_decls,
18581 vec<constructor_elt, va_gc> *v_ctor)
18582 {
18583 unsigned len = vec_safe_length (v_decls);
18584 for (unsigned i = 0; i < len; i++)
18585 {
18586 tree it = (*v_decls)[i];
18587 bool is_var = TREE_CODE (it) == VAR_DECL;
18588 bool is_link_var
18589 = is_var
18590 #ifdef ACCEL_COMPILER
18591 && DECL_HAS_VALUE_EXPR_P (it)
18592 #endif
18593 && lookup_attribute ("omp declare target link", DECL_ATTRIBUTES (it));
18594
18595 tree size = NULL_TREE;
18596 if (is_var)
18597 size = fold_convert (const_ptr_type_node, DECL_SIZE_UNIT (it));
18598
18599 tree addr;
18600 if (!is_link_var)
18601 addr = build_fold_addr_expr (it);
18602 else
18603 {
18604 #ifdef ACCEL_COMPILER
18605 /* For "omp declare target link" vars add address of the pointer to
18606 the target table, instead of address of the var. */
18607 tree value_expr = DECL_VALUE_EXPR (it);
18608 tree link_ptr_decl = TREE_OPERAND (value_expr, 0);
18609 varpool_node::finalize_decl (link_ptr_decl);
18610 addr = build_fold_addr_expr (link_ptr_decl);
18611 #else
18612 addr = build_fold_addr_expr (it);
18613 #endif
18614
18615 /* Most significant bit of the size marks "omp declare target link"
18616 vars in host and target tables. */
18617 unsigned HOST_WIDE_INT isize = tree_to_uhwi (size);
18618 isize |= 1ULL << (int_size_in_bytes (const_ptr_type_node)
18619 * BITS_PER_UNIT - 1);
18620 size = wide_int_to_tree (const_ptr_type_node, isize);
18621 }
18622
18623 CONSTRUCTOR_APPEND_ELT (v_ctor, NULL_TREE, addr);
18624 if (is_var)
18625 CONSTRUCTOR_APPEND_ELT (v_ctor, NULL_TREE, size);
18626 }
18627 }
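
/* Illustrative layout of the resulting constructor: each function
   contributes one address entry, each variable an (address, size)
   pair, and "omp declare target link" variables have the most
   significant bit of their size set so the runtime can tell them
   apart.  */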
18628
18629 /* Create new symbols containing (address, size) pairs for global variables,
18630 marked with the "omp declare target" attribute, as well as addresses for
18631 the functions that are outlined offloading regions. */
18632 void
18633 omp_finish_file (void)
18634 {
18635 unsigned num_funcs = vec_safe_length (offload_funcs);
18636 unsigned num_vars = vec_safe_length (offload_vars);
18637
18638 if (num_funcs == 0 && num_vars == 0)
18639 return;
18640
18641 if (targetm_common.have_named_sections)
18642 {
18643 vec<constructor_elt, va_gc> *v_f, *v_v;
18644 vec_alloc (v_f, num_funcs);
18645 vec_alloc (v_v, num_vars * 2);
18646
18647 add_decls_addresses_to_decl_constructor (offload_funcs, v_f);
18648 add_decls_addresses_to_decl_constructor (offload_vars, v_v);
18649
18650 tree vars_decl_type = build_array_type_nelts (pointer_sized_int_node,
18651 num_vars * 2);
18652 tree funcs_decl_type = build_array_type_nelts (pointer_sized_int_node,
18653 num_funcs);
18654 TYPE_ALIGN (vars_decl_type) = TYPE_ALIGN (pointer_sized_int_node);
18655 TYPE_ALIGN (funcs_decl_type) = TYPE_ALIGN (pointer_sized_int_node);
18656 tree ctor_v = build_constructor (vars_decl_type, v_v);
18657 tree ctor_f = build_constructor (funcs_decl_type, v_f);
18658 TREE_CONSTANT (ctor_v) = TREE_CONSTANT (ctor_f) = 1;
18659 TREE_STATIC (ctor_v) = TREE_STATIC (ctor_f) = 1;
18660 tree funcs_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
18661 get_identifier (".offload_func_table"),
18662 funcs_decl_type);
18663 tree vars_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
18664 get_identifier (".offload_var_table"),
18665 vars_decl_type);
18666 TREE_STATIC (funcs_decl) = TREE_STATIC (vars_decl) = 1;
18667 /* Do not align tables more than TYPE_ALIGN (pointer_sized_int_node),
18668 otherwise a joint table in a binary will contain padding between
18669 tables from multiple object files. */
18670 DECL_USER_ALIGN (funcs_decl) = DECL_USER_ALIGN (vars_decl) = 1;
18671 DECL_ALIGN (funcs_decl) = TYPE_ALIGN (funcs_decl_type);
18672 DECL_ALIGN (vars_decl) = TYPE_ALIGN (vars_decl_type);
18673 DECL_INITIAL (funcs_decl) = ctor_f;
18674 DECL_INITIAL (vars_decl) = ctor_v;
18675 set_decl_section_name (funcs_decl, OFFLOAD_FUNC_TABLE_SECTION_NAME);
18676 set_decl_section_name (vars_decl, OFFLOAD_VAR_TABLE_SECTION_NAME);
18677
18678 varpool_node::finalize_decl (vars_decl);
18679 varpool_node::finalize_decl (funcs_decl);
18680 }
18681 else
18682 {
18683 for (unsigned i = 0; i < num_funcs; i++)
18684 {
18685 tree it = (*offload_funcs)[i];
18686 targetm.record_offload_symbol (it);
18687 }
18688 for (unsigned i = 0; i < num_vars; i++)
18689 {
18690 tree it = (*offload_vars)[i];
18691 targetm.record_offload_symbol (it);
18692 }
18693 }
18694 }
18695
18696 /* Find the number of threads (POS = false), or thread number (POS =
18697 true) for an OpenACC region partitioned as MASK. Setup code
18698 required for the calculation is added to SEQ. */
18699
18700 static tree
18701 oacc_thread_numbers (bool pos, int mask, gimple_seq *seq)
18702 {
18703 tree res = pos ? NULL_TREE : build_int_cst (unsigned_type_node, 1);
18704 unsigned ix;
18705
18706 /* Start at gang level, and examine relevant dimension indices. */
18707 for (ix = GOMP_DIM_GANG; ix != GOMP_DIM_MAX; ix++)
18708 if (GOMP_DIM_MASK (ix) & mask)
18709 {
18710 tree arg = build_int_cst (unsigned_type_node, ix);
18711
18712 if (res)
18713 {
18714 /* We had an outer index, so scale that by the size of
18715 this dimension. */
18716 tree n = create_tmp_var (integer_type_node);
18717 gimple *call
18718 = gimple_build_call_internal (IFN_GOACC_DIM_SIZE, 1, arg);
18719
18720 gimple_call_set_lhs (call, n);
18721 gimple_seq_add_stmt (seq, call);
18722 res = fold_build2 (MULT_EXPR, integer_type_node, res, n);
18723 }
18724 if (pos)
18725 {
18726 /* Determine index in this dimension. */
18727 tree id = create_tmp_var (integer_type_node);
18728 gimple *call = gimple_build_call_internal
18729 (IFN_GOACC_DIM_POS, 1, arg);
18730
18731 gimple_call_set_lhs (call, id);
18732 gimple_seq_add_stmt (seq, call);
18733 if (res)
18734 res = fold_build2 (PLUS_EXPR, integer_type_node, res, id);
18735 else
18736 res = id;
18737 }
18738 }
18739
18740 if (res == NULL_TREE)
18741 res = integer_zero_node;
18742
18743 return res;
18744 }
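
/* Worked example (illustrative): for MASK covering the worker and
   vector dimensions, the code above computes

     pos == false:  GOACC_DIM_SIZE (worker) * GOACC_DIM_SIZE (vector)
     pos == true:   GOACC_DIM_POS (worker) * GOACC_DIM_SIZE (vector)
                    + GOACC_DIM_POS (vector)

   i.e. the total thread count, or a linearized thread number.  */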
18745
18746 /* Transform IFN_GOACC_LOOP calls to actual code. See
18747 expand_oacc_for for where these are generated. At the vector
18748 level, we stride loops, such that each member of a warp will
18749 operate on adjacent iterations. At the worker and gang level,
18750 each gang/warp executes a set of contiguous iterations. Chunking
18751 can override this such that each iteration engine executes a
18752 contiguous chunk and then strides on to its next chunk. */
18753
18754 static void
18755 oacc_xform_loop (gcall *call)
18756 {
18757 gimple_stmt_iterator gsi = gsi_for_stmt (call);
18758 enum ifn_goacc_loop_kind code
18759 = (enum ifn_goacc_loop_kind) TREE_INT_CST_LOW (gimple_call_arg (call, 0));
18760 tree dir = gimple_call_arg (call, 1);
18761 tree range = gimple_call_arg (call, 2);
18762 tree step = gimple_call_arg (call, 3);
18763 tree chunk_size = NULL_TREE;
18764 unsigned mask = (unsigned) TREE_INT_CST_LOW (gimple_call_arg (call, 5));
18765 tree lhs = gimple_call_lhs (call);
18766 tree type = TREE_TYPE (lhs);
18767 tree diff_type = TREE_TYPE (range);
18768 tree r = NULL_TREE;
18769 gimple_seq seq = NULL;
18770 bool chunking = false, striding = true;
18771 unsigned outer_mask = mask & (~mask + 1); // Outermost partitioning
18772 unsigned inner_mask = mask & ~outer_mask; // Inner partitioning (if any)
18773
18774 #ifdef ACCEL_COMPILER
18775 chunk_size = gimple_call_arg (call, 4);
18776 if (integer_minus_onep (chunk_size) /* Force static allocation. */
18777 || integer_zerop (chunk_size)) /* Default (also static). */
18778 {
18779 /* If we're at the gang level, we want each to execute a
18780 contiguous run of iterations. Otherwise we want each element
18781 to stride. */
18782 striding = !(outer_mask & GOMP_DIM_MASK (GOMP_DIM_GANG));
18783 chunking = false;
18784 }
18785 else
18786 {
18787 /* Chunk of size 1 is striding. */
18788 striding = integer_onep (chunk_size);
18789 chunking = !striding;
18790 }
18791 #endif
18792
18793 /* striding=true, chunking=true
18794 -> invalid.
18795 striding=true, chunking=false
18796 -> chunks=1
18797 striding=false,chunking=true
18798 -> chunks=ceil (range/(chunksize*threads*step))
18799 striding=false,chunking=false
18800 -> chunk_size=ceil(range/(threads*step)),chunks=1 */
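/* Worked example (illustrative): a striding vector loop over 100
   iterations with step 1 and vector_length 32 gives each lane
   OFFSET = GOACC_DIM_POS (vector), STEP = 32 and BOUND = 100, so
   lane 0 executes iterations 0, 32, 64 and 96.  */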
18801 push_gimplify_context (true);
18802
18803 switch (code)
18804 {
18805 default: gcc_unreachable ();
18806
18807 case IFN_GOACC_LOOP_CHUNKS:
18808 if (!chunking)
18809 r = build_int_cst (type, 1);
18810 else
18811 {
18812 /* chunk_max
18813 = (range - dir) / (chunks * step * num_threads) + dir */
18814 tree per = oacc_thread_numbers (false, mask, &seq);
18815 per = fold_convert (type, per);
18816 chunk_size = fold_convert (type, chunk_size);
18817 per = fold_build2 (MULT_EXPR, type, per, chunk_size);
18818 per = fold_build2 (MULT_EXPR, type, per, step);
18819 r = build2 (MINUS_EXPR, type, range, dir);
18820 r = build2 (PLUS_EXPR, type, r, per);
18821 r = build2 (TRUNC_DIV_EXPR, type, r, per);
18822 }
18823 break;
18824
18825 case IFN_GOACC_LOOP_STEP:
18826 {
18827 /* If striding, step by the entire compute volume, otherwise
18828 step by the inner volume. */
18829 unsigned volume = striding ? mask : inner_mask;
18830
18831 r = oacc_thread_numbers (false, volume, &seq);
18832 r = build2 (MULT_EXPR, type, fold_convert (type, r), step);
18833 }
18834 break;
18835
18836 case IFN_GOACC_LOOP_OFFSET:
18837 if (striding)
18838 {
18839 r = oacc_thread_numbers (true, mask, &seq);
18840 r = fold_convert (diff_type, r);
18841 }
18842 else
18843 {
18844 tree inner_size = oacc_thread_numbers (false, inner_mask, &seq);
18845 tree outer_size = oacc_thread_numbers (false, outer_mask, &seq);
18846 tree volume = fold_build2 (MULT_EXPR, TREE_TYPE (inner_size),
18847 inner_size, outer_size);
18848
18849 volume = fold_convert (diff_type, volume);
18850 if (chunking)
18851 chunk_size = fold_convert (diff_type, chunk_size);
18852 else
18853 {
18854 tree per = fold_build2 (MULT_EXPR, diff_type, volume, step);
18855
18856 chunk_size = build2 (MINUS_EXPR, diff_type, range, dir);
18857 chunk_size = build2 (PLUS_EXPR, diff_type, chunk_size, per);
18858 chunk_size = build2 (TRUNC_DIV_EXPR, diff_type, chunk_size, per);
18859 }
18860
18861 tree span = build2 (MULT_EXPR, diff_type, chunk_size,
18862 fold_convert (diff_type, inner_size));
18863 r = oacc_thread_numbers (true, outer_mask, &seq);
18864 r = fold_convert (diff_type, r);
18865 r = build2 (MULT_EXPR, diff_type, r, span);
18866
18867 tree inner = oacc_thread_numbers (true, inner_mask, &seq);
18868 inner = fold_convert (diff_type, inner);
18869 r = fold_build2 (PLUS_EXPR, diff_type, r, inner);
18870
18871 if (chunking)
18872 {
18873 tree chunk = fold_convert (diff_type, gimple_call_arg (call, 6));
18874 tree per
18875 = fold_build2 (MULT_EXPR, diff_type, volume, chunk_size);
18876 per = build2 (MULT_EXPR, diff_type, per, chunk);
18877
18878 r = build2 (PLUS_EXPR, diff_type, r, per);
18879 }
18880 }
18881 r = fold_build2 (MULT_EXPR, diff_type, r, step);
18882 if (type != diff_type)
18883 r = fold_convert (type, r);
18884 break;
18885
18886 case IFN_GOACC_LOOP_BOUND:
18887 if (striding)
18888 r = range;
18889 else
18890 {
18891 tree inner_size = oacc_thread_numbers (false, inner_mask, &seq);
18892 tree outer_size = oacc_thread_numbers (false, outer_mask, &seq);
18893 tree volume = fold_build2 (MULT_EXPR, TREE_TYPE (inner_size),
18894 inner_size, outer_size);
18895
18896 volume = fold_convert (diff_type, volume);
18897 if (chunking)
18898 chunk_size = fold_convert (diff_type, chunk_size);
18899 else
18900 {
18901 tree per = fold_build2 (MULT_EXPR, diff_type, volume, step);
18902
18903 chunk_size = build2 (MINUS_EXPR, diff_type, range, dir);
18904 chunk_size = build2 (PLUS_EXPR, diff_type, chunk_size, per);
18905 chunk_size = build2 (TRUNC_DIV_EXPR, diff_type, chunk_size, per);
18906 }
18907
18908 tree span = build2 (MULT_EXPR, diff_type, chunk_size,
18909 fold_convert (diff_type, inner_size));
18910
18911 r = fold_build2 (MULT_EXPR, diff_type, span, step);
18912
18913 tree offset = gimple_call_arg (call, 6);
18914 r = build2 (PLUS_EXPR, diff_type, r,
18915 fold_convert (diff_type, offset));
18916 r = build2 (integer_onep (dir) ? MIN_EXPR : MAX_EXPR,
18917 diff_type, r, range);
18918 }
18919 if (diff_type != type)
18920 r = fold_convert (type, r);
18921 break;
18922 }
18923
18924 gimplify_assign (lhs, r, &seq);
18925
18926 pop_gimplify_context (NULL);
18927
18928 gsi_replace_with_seq (&gsi, seq, true);
18929 }
18930
18931 /* Validate and update the dimensions for offloaded FN. ATTRS is the
18932 raw attribute. DIMS is an array of dimensions, which is returned.
18933 Returns the function level dimensionality -- the level at which an
18934 offload routine wishes to partition a loop. */
18935
18936 static int
18937 oacc_validate_dims (tree fn, tree attrs, int *dims)
18938 {
18939 tree purpose[GOMP_DIM_MAX];
18940 unsigned ix;
18941 tree pos = TREE_VALUE (attrs);
18942 int fn_level = -1;
18943
18944 /* Make sure the attribute creator attached the dimension
18945 information. */
18946 gcc_assert (pos);
18947
18948 for (ix = 0; ix != GOMP_DIM_MAX; ix++)
18949 {
18950 purpose[ix] = TREE_PURPOSE (pos);
18951
18952 if (purpose[ix])
18953 {
18954 if (integer_zerop (purpose[ix]))
18955 fn_level = ix + 1;
18956 else if (fn_level < 0)
18957 fn_level = ix;
18958 }
18959
18960 tree val = TREE_VALUE (pos);
18961 dims[ix] = val ? TREE_INT_CST_LOW (val) : -1;
18962 pos = TREE_CHAIN (pos);
18963 }
18964
18965 bool changed = targetm.goacc.validate_dims (fn, dims, fn_level);
18966
18967 /* Default anything left to 1. */
18968 for (ix = 0; ix != GOMP_DIM_MAX; ix++)
18969 if (dims[ix] < 0)
18970 {
18971 dims[ix] = 1;
18972 changed = true;
18973 }
18974
18975 if (changed)
18976 {
18977 /* Replace the attribute with new values. */
18978 pos = NULL_TREE;
18979 for (ix = GOMP_DIM_MAX; ix--;)
18980 pos = tree_cons (purpose[ix],
18981 build_int_cst (integer_type_node, dims[ix]),
18982 pos);
18983 replace_oacc_fn_attrib (fn, pos);
18984 }
18985
18986 return fn_level;
18987 }
18988
18989 /* Create an empty OpenACC loop structure at LOC. */
18990
18991 static oacc_loop *
18992 new_oacc_loop_raw (oacc_loop *parent, location_t loc)
18993 {
18994 oacc_loop *loop = XCNEW (oacc_loop);
18995
18996 loop->parent = parent;
18997 loop->child = loop->sibling = NULL;
18998
18999 if (parent)
19000 {
19001 loop->sibling = parent->child;
19002 parent->child = loop;
19003 }
19004
19005 loop->loc = loc;
19006 loop->marker = NULL;
19007 memset (loop->heads, 0, sizeof (loop->heads));
19008 memset (loop->tails, 0, sizeof (loop->tails));
19009 loop->routine = NULL_TREE;
19010
19011 loop->mask = loop->flags = 0;
19012 loop->chunk_size = 0;
19013 loop->head_end = NULL;
19014
19015 return loop;
19016 }
19017
19018 /* Create an outermost, dummy OpenACC loop for offloaded function
19019 DECL. */
19020
19021 static oacc_loop *
19022 new_oacc_loop_outer (tree decl)
19023 {
19024 return new_oacc_loop_raw (NULL, DECL_SOURCE_LOCATION (decl));
19025 }
19026
19027 /* Start a new OpenACC loop structure beginning at head marker HEAD.
19028 Link into PARENT loop. Return the new loop. */
19029
19030 static oacc_loop *
19031 new_oacc_loop (oacc_loop *parent, gcall *marker)
19032 {
19033 oacc_loop *loop = new_oacc_loop_raw (parent, gimple_location (marker));
19034
19035 loop->marker = marker;
19036
19037 /* TODO: This is where device_type flattening would occur for the loop
19038 flags. */
19039
19040 loop->flags = TREE_INT_CST_LOW (gimple_call_arg (marker, 3));
19041
19042 tree chunk_size = integer_zero_node;
19043 if (loop->flags & OLF_GANG_STATIC)
19044 chunk_size = gimple_call_arg (marker, 4);
19045 loop->chunk_size = chunk_size;
19046
19047 return loop;
19048 }
19049
19050 /* Create a dummy loop encompassing a call to an OpenACC routine.
19051 Extract the routine's partitioning requirements. */
19052
19053 static void
19054 new_oacc_loop_routine (oacc_loop *parent, gcall *call, tree decl, tree attrs)
19055 {
19056 oacc_loop *loop = new_oacc_loop_raw (parent, gimple_location (call));
19057 int dims[GOMP_DIM_MAX];
19058 int level = oacc_validate_dims (decl, attrs, dims);
19059
19060 gcc_assert (level >= 0);
19061
19062 loop->marker = call;
19063 loop->routine = decl;
19064 loop->mask = ((GOMP_DIM_MASK (GOMP_DIM_MAX) - 1)
19065 ^ (GOMP_DIM_MASK (level) - 1));
19066 }
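
/* For example (illustrative), a routine partitioned at the worker
   level gets mask bits for both the worker and vector dimensions: the
   XOR above selects every dimension at LEVEL and inner.  */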
19067
19068 /* Finish off the current OpenACC loop ending at tail marker TAIL.
19069 Return the parent loop. */
19070
19071 static oacc_loop *
19072 finish_oacc_loop (oacc_loop *loop)
19073 {
19074 return loop->parent;
19075 }
19076
19077 /* Free all OpenACC loop structures within LOOP (inclusive). */
19078
19079 static void
19080 free_oacc_loop (oacc_loop *loop)
19081 {
19082 if (loop->sibling)
19083 free_oacc_loop (loop->sibling);
19084 if (loop->child)
19085 free_oacc_loop (loop->child);
19086
19087 free (loop);
19088 }
19089
19090 /* Dump out the OpenACC loop head or tail beginning at FROM. */
19091
19092 static void
19093 dump_oacc_loop_part (FILE *file, gcall *from, int depth,
19094 const char *title, int level)
19095 {
19096 enum ifn_unique_kind kind
19097 = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (from, 0));
19098
19099 fprintf (file, "%*s%s-%d:\n", depth * 2, "", title, level);
19100 for (gimple_stmt_iterator gsi = gsi_for_stmt (from);;)
19101 {
19102 gimple *stmt = gsi_stmt (gsi);
19103
19104 if (is_gimple_call (stmt)
19105 && gimple_call_internal_p (stmt)
19106 && gimple_call_internal_fn (stmt) == IFN_UNIQUE)
19107 {
19108 enum ifn_unique_kind k
19109 = ((enum ifn_unique_kind) TREE_INT_CST_LOW
19110 (gimple_call_arg (stmt, 0)));
19111
19112 if (k == kind && stmt != from)
19113 break;
19114 }
19115 print_gimple_stmt (file, stmt, depth * 2 + 2, 0);
19116
19117 gsi_next (&gsi);
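/* A head/tail sequence may span basic blocks; step into the
single successor whenever we fall off the end of one. */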
19118 while (gsi_end_p (gsi))
19119 gsi = gsi_start_bb (single_succ (gsi_bb (gsi)));
19120 }
19121 }
19122
19123 /* Dump OpenACC loops LOOP, its siblings and its children. */
19124
19125 static void
19126 dump_oacc_loop (FILE *file, oacc_loop *loop, int depth)
19127 {
19128 int ix;
19129
19130 fprintf (file, "%*sLoop %x(%x) %s:%u\n", depth * 2, "",
19131 loop->flags, loop->mask,
19132 LOCATION_FILE (loop->loc), LOCATION_LINE (loop->loc));
19133
19134 if (loop->marker)
19135 print_gimple_stmt (file, loop->marker, depth * 2, 0);
19136
19137 if (loop->routine)
19138 fprintf (file, "%*sRoutine %s:%u:%s\n",
19139 depth * 2, "", DECL_SOURCE_FILE (loop->routine),
19140 DECL_SOURCE_LINE (loop->routine),
19141 IDENTIFIER_POINTER (DECL_NAME (loop->routine)));
19142
19143 for (ix = GOMP_DIM_GANG; ix != GOMP_DIM_MAX; ix++)
19144 if (loop->heads[ix])
19145 dump_oacc_loop_part (file, loop->heads[ix], depth, "Head", ix);
19146 for (ix = GOMP_DIM_MAX; ix--;)
19147 if (loop->tails[ix])
19148 dump_oacc_loop_part (file, loop->tails[ix], depth, "Tail", ix);
19149
19150 if (loop->child)
19151 dump_oacc_loop (file, loop->child, depth + 1);
19152 if (loop->sibling)
19153 dump_oacc_loop (file, loop->sibling, depth);
19154 }
19155
19156 void debug_oacc_loop (oacc_loop *);
19157
19158 /* Dump loops to stderr. */
19159
19160 DEBUG_FUNCTION void
19161 debug_oacc_loop (oacc_loop *loop)
19162 {
19163 dump_oacc_loop (stderr, loop, 0);
19164 }
19165
19166 /* DFS walk of basic blocks starting at BB, creating OpenACC loop
19167 structures as we go. By construction these loops are properly
19168 nested. */
19169
19170 static void
19171 oacc_loop_discover_walk (oacc_loop *loop, basic_block bb)
19172 {
19173 int marker = 0;
19174 int remaining = 0;
19175
19176 if (bb->flags & BB_VISITED)
19177 return;
19178
19179 follow:
19180 bb->flags |= BB_VISITED;
19181
19182 /* Scan for loop markers. */
19183 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
19184 gsi_next (&gsi))
19185 {
19186 gimple *stmt = gsi_stmt (gsi);
19187
19188 if (!is_gimple_call (stmt))
19189 continue;
19190
19191 gcall *call = as_a <gcall *> (stmt);
19192
19193 /* If this is a routine, make a dummy loop for it. */
19194 if (tree decl = gimple_call_fndecl (call))
19195 if (tree attrs = get_oacc_fn_attrib (decl))
19196 {
19197 gcc_assert (!marker);
19198 new_oacc_loop_routine (loop, call, decl, attrs);
19199 }
19200
19201 if (!gimple_call_internal_p (call))
19202 continue;
19203
19204 if (gimple_call_internal_fn (call) != IFN_UNIQUE)
19205 continue;
19206
19207 enum ifn_unique_kind kind
19208 = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (call, 0));
19209 if (kind == IFN_UNIQUE_OACC_HEAD_MARK
19210 || kind == IFN_UNIQUE_OACC_TAIL_MARK)
19211 {
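/* A mark without the count argument terminates the head or
tail sequence. */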
19212 if (gimple_call_num_args (call) == 2)
19213 {
19214 gcc_assert (marker && !remaining);
19215 marker = 0;
19216 if (kind == IFN_UNIQUE_OACC_TAIL_MARK)
19217 loop = finish_oacc_loop (loop);
19218 else
19219 loop->head_end = call;
19220 }
19221 else
19222 {
19223 int count = TREE_INT_CST_LOW (gimple_call_arg (call, 2));
19224
19225 if (!marker)
19226 {
19227 if (kind == IFN_UNIQUE_OACC_HEAD_MARK)
19228 loop = new_oacc_loop (loop, call);
19229 remaining = count;
19230 }
19231 gcc_assert (count == remaining);
19232 if (remaining)
19233 {
19234 remaining--;
19235 if (kind == IFN_UNIQUE_OACC_HEAD_MARK)
19236 loop->heads[marker] = call;
19237 else
19238 loop->tails[remaining] = call;
19239 }
19240 marker++;
19241 }
19242 }
19243 }
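/* We are midway through a head or tail sequence, which must
continue into the fall-through block. */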
19244 if (remaining || marker)
19245 {
19246 bb = single_succ (bb);
19247 gcc_assert (single_pred_p (bb) && !(bb->flags & BB_VISITED));
19248 goto follow;
19249 }
19250
19251 /* Walk successor blocks. */
19252 edge e;
19253 edge_iterator ei;
19254
19255 FOR_EACH_EDGE (e, ei, bb->succs)
19256 oacc_loop_discover_walk (loop, e->dest);
19257 }
19258
19259 /* LOOP is the first sibling. Reverse the order in place and return
19260 the new first sibling. Recurse to child loops. */
19261
19262 static oacc_loop *
19263 oacc_loop_sibling_nreverse (oacc_loop *loop)
19264 {
19265 oacc_loop *last = NULL;
19266 do
19267 {
19268 if (loop->child)
19269 loop->child = oacc_loop_sibling_nreverse (loop->child);
19270
19271 oacc_loop *next = loop->sibling;
19272 loop->sibling = last;
19273 last = loop;
19274 loop = next;
19275 }
19276 while (loop);
19277
19278 return last;
19279 }
19280
19281 /* Discover the OpenACC loops marked up by HEAD and TAIL markers for
19282 the current function. */
19283
19284 static oacc_loop *
19285 oacc_loop_discovery ()
19286 {
19287 basic_block bb;
19288
19289 oacc_loop *top = new_oacc_loop_outer (current_function_decl);
19290 oacc_loop_discover_walk (top, ENTRY_BLOCK_PTR_FOR_FN (cfun));
19291
19292 /* The siblings were constructed in reverse order; reverse them so
19293 that diagnostics come out in an unsurprising order. */
19294 top = oacc_loop_sibling_nreverse (top);
19295
19296 /* Reset the visited flags. */
19297 FOR_ALL_BB_FN (bb, cfun)
19298 bb->flags &= ~BB_VISITED;
19299
19300 return top;
19301 }
19302
19303 /* Transform the abstract internal function markers starting at FROM
19304 to be for partitioning level LEVEL. Stop when we meet another HEAD
19305 or TAIL marker. */
19306
19307 static void
19308 oacc_loop_xform_head_tail (gcall *from, int level)
19309 {
19310 enum ifn_unique_kind kind
19311 = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (from, 0));
19312 tree replacement = build_int_cst (unsigned_type_node, level);
19313
19314 for (gimple_stmt_iterator gsi = gsi_for_stmt (from);;)
19315 {
19316 gimple *stmt = gsi_stmt (gsi);
19317
19318 if (is_gimple_call (stmt)
19319 && gimple_call_internal_p (stmt)
19320 && gimple_call_internal_fn (stmt) == IFN_UNIQUE)
19321 {
19322 enum ifn_unique_kind k
19323 = ((enum ifn_unique_kind)
19324 TREE_INT_CST_LOW (gimple_call_arg (stmt, 0)));
19325
19326 if (k == IFN_UNIQUE_OACC_FORK || k == IFN_UNIQUE_OACC_JOIN)
19327 *gimple_call_arg_ptr (stmt, 2) = replacement;
19328 else if (k == kind && stmt != from)
19329 break;
19330 }
19331 else if (is_gimple_call (stmt)
19332 && gimple_call_internal_p (stmt)
19333 && gimple_call_internal_fn (stmt) == IFN_GOACC_REDUCTION)
19334 *gimple_call_arg_ptr (stmt, 3) = replacement;
19335
19336 gsi_next (&gsi);
19337 while (gsi_end_p (gsi))
19338 gsi = gsi_start_bb (single_succ (gsi_bb (gsi)));
19339 }
19340 }
19341
19342 /* Transform the IFN_GOACC_LOOP internal functions by providing the
19343 determined partitioning mask and chunking argument. */
19344
19345 static void
19346 oacc_loop_xform_loop (gcall *end_marker, tree mask_arg, tree chunk_arg)
19347 {
19348 gimple_stmt_iterator gsi = gsi_for_stmt (end_marker);
19349
19350 for (;;)
19351 {
19352 for (; !gsi_end_p (gsi); gsi_next (&gsi))
19353 {
19354 gimple *stmt = gsi_stmt (gsi);
19355
19356 if (!is_gimple_call (stmt))
19357 continue;
19358
19359 gcall *call = as_a <gcall *> (stmt);
19360
19361 if (!gimple_call_internal_p (call))
19362 continue;
19363
19364 if (gimple_call_internal_fn (call) != IFN_GOACC_LOOP)
19365 continue;
19366
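/* The placeholder mask is argument 5 and the chunking argument 4
of each IFN_GOACC_LOOP call. */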
19367 *gimple_call_arg_ptr (call, 5) = mask_arg;
19368 *gimple_call_arg_ptr (call, 4) = chunk_arg;
19369 if (TREE_INT_CST_LOW (gimple_call_arg (call, 0))
19370 == IFN_GOACC_LOOP_BOUND)
19371 return;
19372 }
19373
19374 /* If we didn't see LOOP_BOUND, it should be in the single
19375 successor block. */
19376 basic_block bb = single_succ (gsi_bb (gsi));
19377 gsi = gsi_start_bb (bb);
19378 }
19379 }
19380
19381 /* Process the discovered OpenACC loops, setting the correct
19382 partitioning level etc. */
19383
19384 static void
19385 oacc_loop_process (oacc_loop *loop)
19386 {
19387 if (loop->child)
19388 oacc_loop_process (loop->child);
19389
19390 if (loop->mask && !loop->routine)
19391 {
19392 int ix;
19393 unsigned mask = loop->mask;
19394 unsigned dim = GOMP_DIM_GANG;
19395 tree mask_arg = build_int_cst (unsigned_type_node, mask);
19396 tree chunk_arg = loop->chunk_size;
19397
19398 oacc_loop_xform_loop (loop->head_end, mask_arg, chunk_arg);
19399
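/* Assign to each head/tail marker pair the next partitioning
axis this loop uses, outermost axis first. */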
19400 for (ix = 0; ix != GOMP_DIM_MAX && loop->heads[ix]; ix++)
19401 {
19402 gcc_assert (mask);
19403
19404 while (!(GOMP_DIM_MASK (dim) & mask))
19405 dim++;
19406
19407 oacc_loop_xform_head_tail (loop->heads[ix], dim);
19408 oacc_loop_xform_head_tail (loop->tails[ix], dim);
19409
19410 mask ^= GOMP_DIM_MASK (dim);
19411 }
19412 }
19413
19414 if (loop->sibling)
19415 oacc_loop_process (loop->sibling);
19416 }
19417
19418 /* Walk the OpenACC loop hierarchy checking and assigning the
19419 programmer-specified partitionings. OUTER_MASK is the partitioning
19420 this loop is contained within. Return true if we contain an
19421 auto-partitionable loop. */
19422
19423 static bool
19424 oacc_loop_fixed_partitions (oacc_loop *loop, unsigned outer_mask)
19425 {
19426 unsigned this_mask = loop->mask;
19427 bool has_auto = false;
19428 bool noisy = true;
19429
19430 #ifdef ACCEL_COMPILER
19431 /* Once device_type is supported, the device compiler should be
19432 noisy only if the loop parameters are device_type-specific. */
19433 noisy = false;
19434 #endif
19435
19436 if (!loop->routine)
19437 {
19438 bool auto_par = (loop->flags & OLF_AUTO) != 0;
19439 bool seq_par = (loop->flags & OLF_SEQ) != 0;
19440
19441 this_mask = ((loop->flags >> OLF_DIM_BASE)
19442 & (GOMP_DIM_MASK (GOMP_DIM_MAX) - 1));
19443
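/* At most one of an explicit partitioning, auto or seq may be
specified on a loop. */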
19444 if ((this_mask != 0) + auto_par + seq_par > 1)
19445 {
19446 if (noisy)
19447 error_at (loop->loc,
19448 seq_par
19449 ? "%<seq%> overrides other OpenACC loop specifiers"
19450 : "%<auto%> conflicts with other OpenACC loop specifiers");
19451 auto_par = false;
19452 loop->flags &= ~OLF_AUTO;
19453 if (seq_par)
19454 {
19455 loop->flags
19456 &= ~((GOMP_DIM_MASK (GOMP_DIM_MAX) - 1) << OLF_DIM_BASE);
19457 this_mask = 0;
19458 }
19459 }
19460 if (auto_par && (loop->flags & OLF_INDEPENDENT))
19461 has_auto = true;
19462 }
19463
19464 if (this_mask & outer_mask)
19465 {
19466 const oacc_loop *outer;
19467 for (outer = loop->parent; outer; outer = outer->parent)
19468 if (outer->mask & this_mask)
19469 break;
19470
19471 if (noisy)
19472 {
19473 if (outer)
19474 {
19475 error_at (loop->loc,
19476 "%s uses same OpenACC parallelism as containing loop",
19477 loop->routine ? "routine call" : "inner loop");
19478 inform (outer->loc, "containing loop here");
19479 }
19480 else
19481 error_at (loop->loc,
19482 "%s uses OpenACC parallelism disallowed by containing routine",
19483 loop->routine ? "routine call" : "loop");
19484
19485 if (loop->routine)
19486 inform (DECL_SOURCE_LOCATION (loop->routine),
19487 "routine %qD declared here", loop->routine);
19488 }
19489 this_mask &= ~outer_mask;
19490 }
19491 else
19492 {
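/* The outermost level this loop requests is its least
significant set bit. */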
19493 unsigned outermost = this_mask & -this_mask;
19494
19495 if (outermost && outermost <= outer_mask)
19496 {
19497 if (noisy)
19498 {
19499 error_at (loop->loc,
19500 "incorrectly nested OpenACC loop parallelism");
19501
19502 const oacc_loop *outer;
19503 for (outer = loop->parent;
19504 outer->flags && outer->flags < outermost;
19505 outer = outer->parent)
19506 continue;
19507 inform (outer->loc, "containing loop here");
19508 }
19509
19510 this_mask &= ~outermost;
19511 }
19512 }
19513
19514 loop->mask = this_mask;
19515
19516 if (loop->child
19517 && oacc_loop_fixed_partitions (loop->child, outer_mask | this_mask))
19518 has_auto = true;
19519
19520 if (loop->sibling
19521 && oacc_loop_fixed_partitions (loop->sibling, outer_mask))
19522 has_auto = true;
19523
19524 return has_auto;
19525 }
19526
19527 /* Walk the OpenACC loop hierarchy to assign auto-partitioned loops.
19528 OUTER_MASK is the partitioning this loop is contained within.
19529 Return the cumulative partitioning used by this loop, siblings and
19530 children. */
19531
19532 static unsigned
19533 oacc_loop_auto_partitions (oacc_loop *loop, unsigned outer_mask)
19534 {
19535 unsigned inner_mask = 0;
19536 bool noisy = true;
19537
19538 #ifdef ACCEL_COMPILER
19539 /* Once device_type is supported, the device compiler should be
19540 noisy only if the loop parameters are device_type-specific. */
19541 noisy = false;
19542 #endif
19543
19544 if (loop->child)
19545 inner_mask |= oacc_loop_auto_partitions (loop->child,
19546 outer_mask | loop->mask);
19547
19548 if ((loop->flags & OLF_AUTO) && (loop->flags & OLF_INDEPENDENT))
19549 {
19550 unsigned this_mask = 0;
19551
19552 /* Determine the outermost partitioning used within this loop; the GOMP_DIM_MAX bit is a sentinel so a set bit is always found. */
19553 this_mask = inner_mask | GOMP_DIM_MASK (GOMP_DIM_MAX);
19554 this_mask = (this_mask & -this_mask);
19555
19556 /* Pick the partitioning just inside that one. */
19557 this_mask >>= 1;
19558
19559 /* And avoid picking one used by an outer loop. */
19560 this_mask &= ~outer_mask;
19561
19562 if (!this_mask && noisy)
19563 warning_at (loop->loc, 0,
19564 "insufficient partitioning available to parallelize loop");
19565
19566 loop->mask = this_mask;
19567 }
19568 inner_mask |= loop->mask;
19569
19570 if (loop->sibling)
19571 inner_mask |= oacc_loop_auto_partitions (loop->sibling, outer_mask);
19572
19573 return inner_mask;
19574 }
19575
19576 /* Walk the OpenACC loop hierarchy to check and assign partitioning
19577 axes. */
19578
19579 static void
19580 oacc_loop_partition (oacc_loop *loop, unsigned outer_mask)
19581 {
19582 if (oacc_loop_fixed_partitions (loop, outer_mask))
19583 oacc_loop_auto_partitions (loop, outer_mask);
19584 }
19585
19586 /* Default fork/join early expander. The call is deleted (by the
19587 caller) if the target has no RTL expander for it. */
19588
19589 bool
19590 default_goacc_fork_join (gcall *ARG_UNUSED (call),
19591 const int *ARG_UNUSED (dims), bool is_fork)
19592 {
19593 if (is_fork)
19594 return targetm.have_oacc_fork ();
19595 else
19596 return targetm.have_oacc_join ();
19597 }
19598
19599 /* Default goacc.reduction early expander.
19600
19601 LHS-opt = IFN_GOACC_REDUCTION (KIND, RES_PTR, VAR, LEVEL, OP, OFFSET)
19602 If RES_PTR is not integer-zerop:
19603 SETUP - emit 'LHS = *RES_PTR', LHS = NULL
19604 TEARDOWN - emit '*RES_PTR = VAR'
19605 If LHS is not NULL:
19606 emit 'LHS = VAR' */
19607
19608 void
19609 default_goacc_reduction (gcall *call)
19610 {
19611 unsigned code = (unsigned) TREE_INT_CST_LOW (gimple_call_arg (call, 0));
19612 gimple_stmt_iterator gsi = gsi_for_stmt (call);
19613 tree lhs = gimple_call_lhs (call);
19614 tree var = gimple_call_arg (call, 2);
19615 gimple_seq seq = NULL;
19616
19617 if (code == IFN_GOACC_REDUCTION_SETUP
19618 || code == IFN_GOACC_REDUCTION_TEARDOWN)
19619 {
19620 /* Setup and Teardown need to copy from/to the receiver object,
19621 if there is one. */
19622 tree ref_to_res = gimple_call_arg (call, 1);
19623
19624 if (!integer_zerop (ref_to_res))
19625 {
19626 tree dst = build_simple_mem_ref (ref_to_res);
19627 tree src = var;
19628
19629 if (code == IFN_GOACC_REDUCTION_SETUP)
19630 {
19631 src = dst;
19632 dst = lhs;
19633 lhs = NULL;
19634 }
19635 gimple_seq_add_stmt (&seq, gimple_build_assign (dst, src));
19636 }
19637 }
19638
19639 /* Copy VAR to LHS, if there is an LHS. */
19640 if (lhs)
19641 gimple_seq_add_stmt (&seq, gimple_build_assign (lhs, var));
19642
19643 gsi_replace_with_seq (&gsi, seq, true);
19644 }
19645
19646 /* Main entry point for OpenACC transformations, which run in the
19647 device compiler after LTO, so we know what the target device is
19648 at this point (including the host fallback). */
19649
19650 static unsigned int
19651 execute_oacc_device_lower ()
19652 {
19653 tree attrs = get_oacc_fn_attrib (current_function_decl);
19654 int dims[GOMP_DIM_MAX];
19655
19656 if (!attrs)
19657 /* Not an offloaded function. */
19658 return 0;
19659
19660 int fn_level = oacc_validate_dims (current_function_decl, attrs, dims);
19661
19662 /* Discover, partition and process the loops. */
19663 oacc_loop *loops = oacc_loop_discovery ();
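/* Levels coarser than FN_LEVEL (if any) are already in use by
our caller and must not be used here. */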
19664 unsigned outer_mask = fn_level >= 0 ? GOMP_DIM_MASK (fn_level) - 1 : 0;
19665 oacc_loop_partition (loops, outer_mask);
19666 oacc_loop_process (loops);
19667 if (dump_file)
19668 {
19669 fprintf (dump_file, "OpenACC loops\n");
19670 dump_oacc_loop (dump_file, loops, 0);
19671 fprintf (dump_file, "\n");
19672 }
19673
19674 /* Offloaded targets may introduce new basic blocks, which require
19675 dominance information to update SSA. */
19676 calculate_dominance_info (CDI_DOMINATORS);
19677
19678 /* Now lower internal loop functions to target-specific code
19679 sequences. */
19680 basic_block bb;
19681 FOR_ALL_BB_FN (bb, cfun)
19682 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
19683 {
19684 gimple *stmt = gsi_stmt (gsi);
19685 if (!is_gimple_call (stmt))
19686 {
19687 gsi_next (&gsi);
19688 continue;
19689 }
19690
19691 gcall *call = as_a <gcall *> (stmt);
19692 if (!gimple_call_internal_p (call))
19693 {
19694 gsi_next (&gsi);
19695 continue;
19696 }
19697
19698 /* Rewind to allow rescan. */
19699 gsi_prev (&gsi);
19700 bool rescan = false, remove = false;
19701 enum internal_fn ifn_code = gimple_call_internal_fn (call);
19702
19703 switch (ifn_code)
19704 {
19705 default: break;
19706
19707 case IFN_GOACC_LOOP:
19708 oacc_xform_loop (call);
19709 rescan = true;
19710 break;
19711
19712 case IFN_GOACC_REDUCTION:
19713 /* Mark the function for SSA renaming. */
19714 mark_virtual_operands_for_renaming (cfun);
19715
19716 /* If the level is -1, this ended up being an unused
19717 axis. Handle as a default. */
19718 if (integer_minus_onep (gimple_call_arg (call, 3)))
19719 default_goacc_reduction (call);
19720 else
19721 targetm.goacc.reduction (call);
19722 rescan = true;
19723 break;
19724
19725 case IFN_UNIQUE:
19726 {
19727 enum ifn_unique_kind kind
19728 = ((enum ifn_unique_kind)
19729 TREE_INT_CST_LOW (gimple_call_arg (call, 0)));
19730
19731 switch (kind)
19732 {
19733 default:
19734 gcc_unreachable ();
19735
19736 case IFN_UNIQUE_OACC_FORK:
19737 case IFN_UNIQUE_OACC_JOIN:
19738 if (integer_minus_onep (gimple_call_arg (call, 2)))
19739 remove = true;
19740 else if (!targetm.goacc.fork_join
19741 (call, dims, kind == IFN_UNIQUE_OACC_FORK))
19742 remove = true;
19743 break;
19744
19745 case IFN_UNIQUE_OACC_HEAD_MARK:
19746 case IFN_UNIQUE_OACC_TAIL_MARK:
19747 remove = true;
19748 break;
19749 }
19750 break;
19751 }
19752 }
19753
19754 if (gsi_end_p (gsi))
19755 /* We rewound past the beginning of the BB. */
19756 gsi = gsi_start_bb (bb);
19757 else
19758 /* Undo the rewind. */
19759 gsi_next (&gsi);
19760
19761 if (remove)
19762 {
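/* Unlink the call from the virtual operand chain before it
goes away. */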
19763 if (gimple_vdef (call))
19764 replace_uses_by (gimple_vdef (call), gimple_vuse (call));
19765 if (gimple_call_lhs (call))
19766 {
19767 /* Propagate the data dependency var. */
19768 gimple *ass = gimple_build_assign (gimple_call_lhs (call),
19769 gimple_call_arg (call, 1));
19770 gsi_replace (&gsi, ass, false);
19771 }
19772 else
19773 gsi_remove (&gsi, true);
19774 }
19775 else if (!rescan)
19776 /* If not rescanning, advance over the call. */
19777 gsi_next (&gsi);
19778 }
19779
19780 free_oacc_loop (loops);
19781
19782 return 0;
19783 }
19784
19785 /* Default launch dimension validator. Force everything to 1. A
19786 backend that wants to provide larger dimensions must override this
19787 hook. */
19788
19789 bool
19790 default_goacc_validate_dims (tree ARG_UNUSED (decl), int *dims,
19791 int ARG_UNUSED (fn_level))
19792 {
19793 bool changed = false;
19794
19795 for (unsigned ix = 0; ix != GOMP_DIM_MAX; ix++)
19796 {
19797 if (dims[ix] != 1)
19798 {
19799 dims[ix] = 1;
19800 changed = true;
19801 }
19802 }
19803
19804 return changed;
19805 }
19806
19807 /* Default dimension bound is unknown on accelerator and 1 on host. */
19808
19809 int
19810 default_goacc_dim_limit (int ARG_UNUSED (axis))
19811 {
19812 #ifdef ACCEL_COMPILER
19813 return 0;
19814 #else
19815 return 1;
19816 #endif
19817 }
19818
19819 namespace {
19820
19821 const pass_data pass_data_oacc_device_lower =
19822 {
19823 GIMPLE_PASS, /* type */
19824 "oaccdevlow", /* name */
19825 OPTGROUP_NONE, /* optinfo_flags */
19826 TV_NONE, /* tv_id */
19827 PROP_cfg, /* properties_required */
19828 0 /* Possibly PROP_gimple_eomp. */, /* properties_provided */
19829 0, /* properties_destroyed */
19830 0, /* todo_flags_start */
19831 TODO_update_ssa | TODO_cleanup_cfg, /* todo_flags_finish */
19832 };
19833
19834 class pass_oacc_device_lower : public gimple_opt_pass
19835 {
19836 public:
19837 pass_oacc_device_lower (gcc::context *ctxt)
19838 : gimple_opt_pass (pass_data_oacc_device_lower, ctxt)
19839 {}
19840
19841 /* opt_pass methods: */
19842 virtual unsigned int execute (function *)
19843 {
19844 bool gate = flag_openacc != 0;
19845
19846 if (!gate)
19847 return 0;
19848
19849 return execute_oacc_device_lower ();
19850 }
19851
19852 }; // class pass_oacc_device_lower
19853
19854 } // anon namespace
19855
19856 gimple_opt_pass *
19857 make_pass_oacc_device_lower (gcc::context *ctxt)
19858 {
19859 return new pass_oacc_device_lower (ctxt);
19860 }
19861
19862 /* "omp declare target link" handling pass. */
19863
19864 namespace {
19865
19866 const pass_data pass_data_omp_target_link =
19867 {
19868 GIMPLE_PASS, /* type */
19869 "omptargetlink", /* name */
19870 OPTGROUP_NONE, /* optinfo_flags */
19871 TV_NONE, /* tv_id */
19872 PROP_ssa, /* properties_required */
19873 0, /* properties_provided */
19874 0, /* properties_destroyed */
19875 0, /* todo_flags_start */
19876 TODO_update_ssa, /* todo_flags_finish */
19877 };
19878
19879 class pass_omp_target_link : public gimple_opt_pass
19880 {
19881 public:
19882 pass_omp_target_link (gcc::context *ctxt)
19883 : gimple_opt_pass (pass_data_omp_target_link, ctxt)
19884 {}
19885
19886 /* opt_pass methods: */
19887 virtual bool gate (function *fun)
19888 {
19889 #ifdef ACCEL_COMPILER
19890 tree attrs = DECL_ATTRIBUTES (fun->decl);
19891 return lookup_attribute ("omp declare target", attrs)
19892 || lookup_attribute ("omp target entrypoint", attrs);
19893 #else
19894 (void) fun;
19895 return false;
19896 #endif
19897 }
19898
19899 virtual unsigned execute (function *);
19900 };
19901
19902 /* Callback for walk_gimple_stmt used to scan for link var operands. */
19903
19904 static tree
19905 find_link_var_op (tree *tp, int *walk_subtrees, void *)
19906 {
19907 tree t = *tp;
19908
19909 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t)
19910 && lookup_attribute ("omp declare target link", DECL_ATTRIBUTES (t)))
19911 {
19912 *walk_subtrees = 0;
19913 return t;
19914 }
19915
19916 return NULL_TREE;
19917 }
19918
19919 unsigned
19920 pass_omp_target_link::execute (function *fun)
19921 {
19922 basic_block bb;
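/* Regimplify each statement that mentions a link variable, so
that its DECL_VALUE_EXPR is substituted. */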
19923 FOR_EACH_BB_FN (bb, fun)
19924 {
19925 gimple_stmt_iterator gsi;
19926 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
19927 if (walk_gimple_stmt (&gsi, NULL, find_link_var_op, NULL))
19928 gimple_regimplify_operands (gsi_stmt (gsi), &gsi);
19929 }
19930
19931 return 0;
19932 }
19933
19934 } // anon namespace
19935
19936 gimple_opt_pass *
19937 make_pass_omp_target_link (gcc::context *ctxt)
19938 {
19939 return new pass_omp_target_link (ctxt);
19940 }
19941
19942 #include "gt-omp-low.h"