1 /* Lowering pass for OMP directives. Converts OMP directives into explicit
2 calls to the runtime library (libgomp), data marshalling to implement data
3 sharing and copying clauses, offloading to accelerators, and more.
4
5 Contributed by Diego Novillo <dnovillo@redhat.com>
6
7 Copyright (C) 2005-2015 Free Software Foundation, Inc.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "backend.h"
29 #include "cfghooks.h"
30 #include "tree.h"
31 #include "gimple.h"
32 #include "rtl.h"
33 #include "ssa.h"
34 #include "alias.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "internal-fn.h"
39 #include "gimple-fold.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "gimple-walk.h"
44 #include "tree-iterator.h"
45 #include "tree-inline.h"
46 #include "langhooks.h"
47 #include "diagnostic-core.h"
48 #include "cgraph.h"
49 #include "tree-cfg.h"
50 #include "tree-into-ssa.h"
51 #include "flags.h"
52 #include "insn-config.h"
53 #include "expmed.h"
54 #include "dojump.h"
55 #include "explow.h"
56 #include "calls.h"
57 #include "emit-rtl.h"
58 #include "varasm.h"
59 #include "stmt.h"
60 #include "expr.h"
61 #include "tree-dfa.h"
62 #include "tree-ssa.h"
63 #include "tree-pass.h"
64 #include "except.h"
65 #include "splay-tree.h"
66 #include "insn-codes.h"
67 #include "optabs.h"
68 #include "cfgloop.h"
69 #include "target.h"
70 #include "common/common-target.h"
71 #include "omp-low.h"
72 #include "gimple-low.h"
73 #include "tree-cfgcleanup.h"
74 #include "pretty-print.h"
75 #include "alloc-pool.h"
76 #include "symbol-summary.h"
77 #include "ipa-prop.h"
78 #include "tree-nested.h"
79 #include "tree-eh.h"
80 #include "cilk.h"
81 #include "context.h"
82 #include "lto-section-names.h"
83 #include "gomp-constants.h"
84
85
86 /* Lowering of OMP parallel and workshare constructs proceeds in two
87 phases. The first phase scans the function looking for OMP statements
88 and then for variables that must be replaced to satisfy data sharing
89 clauses. The second phase expands code for the constructs, as well as
90 re-gimplifying things when variables have been replaced with complex
91 expressions.
92
93 Final code generation is done by pass_expand_omp. The flowgraph is
94 scanned for regions which are then moved to a new
95 function, to be invoked by the thread library, or offloaded. */
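/* As a minimal illustrative sketch (the child function name, the decl
   names and the exact libgomp entry point are assumptions, not taken
   from a real dump), a directive such as

	#pragma omp parallel shared (i)
	  use (i);

   is outlined into a child function, and the directive is replaced by
   code that marshals the shared data and calls into the runtime,
   roughly:

	main.omp_fn.0 (struct .omp_data_s *.omp_data_i)
	  {
	    use (.omp_data_i->i);
	  }

	.omp_data_o.i = i;
	GOMP_parallel (main.omp_fn.0, &.omp_data_o, 0, 0);  */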
96
97 /* OMP region information. Every parallel and workshare
98 directive is enclosed between two markers, the OMP_* directive
99 and a corresponding OMP_RETURN statement. */
100
101 struct omp_region
102 {
103 /* The enclosing region. */
104 struct omp_region *outer;
105
106 /* First child region. */
107 struct omp_region *inner;
108
109 /* Next peer region. */
110 struct omp_region *next;
111
112 /* Block containing the omp directive as its last stmt. */
113 basic_block entry;
114
115 /* Block containing the OMP_RETURN as its last stmt. */
116 basic_block exit;
117
118 /* Block containing the OMP_CONTINUE as its last stmt. */
119 basic_block cont;
120
121 /* If this is a combined parallel+workshare region, this is a list
122 of additional arguments needed by the combined parallel+workshare
123 library call. */
124 vec<tree, va_gc> *ws_args;
125
126 /* The code for the omp directive of this region. */
127 enum gimple_code type;
128
129 /* Schedule kind, only used for OMP_FOR type regions. */
130 enum omp_clause_schedule_kind sched_kind;
131
132 /* True if this is a combined parallel+workshare region. */
133 bool is_combined_parallel;
134 };
135
136 /* Levels of parallelism as defined by OpenACC. Increasing numbers
137 correspond to deeper loop nesting levels. */
138 #define MASK_GANG 1
139 #define MASK_WORKER 2
140 #define MASK_VECTOR 4
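/* As an illustrative example: an OpenACC loop nest partitioned over
   gangs at its outermost level and over vector lanes at its innermost
   level is described by the mask (MASK_GANG | MASK_VECTOR), i.e. 5.
   The gwv_below and gwv_this fields of struct omp_context below use
   this encoding.  */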
141
142 /* Context structure. Used to store information about each parallel
143 directive in the code. */
144
145 typedef struct omp_context
146 {
147 /* This field must be at the beginning, as we do "inheritance": Some
148 callback functions for tree-inline.c (e.g., omp_copy_decl)
149 receive a copy_body_data pointer that is up-casted to an
150 omp_context pointer. */
151 copy_body_data cb;
152
153 /* The tree of contexts corresponding to the encountered constructs. */
154 struct omp_context *outer;
155 gimple stmt;
156
157 /* Map variables to fields in a structure that allows communication
158 between sending and receiving threads. */
159 splay_tree field_map;
160 tree record_type;
161 tree sender_decl;
162 tree receiver_decl;
163
164 /* These are used just by task contexts, if task firstprivate fn is
165 needed. srecord_type is used to communicate from the thread
166 that encountered the task construct to the task firstprivate fn;
167 record_type is allocated by GOMP_task, initialized by the task firstprivate
168 fn and passed to the task body fn. */
169 splay_tree sfield_map;
170 tree srecord_type;
171
172 /* A chain of variables to add to the top-level block surrounding the
173 construct. In the case of a parallel, this is in the child function. */
174 tree block_vars;
175
176 /* A map of reduction pointer variables. For accelerators, each
177 reduction variable is replaced with an array. Each thread, in turn,
178 is assigned to a slot on that array. */
179 splay_tree reduction_map;
180
181 /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
182 barriers should jump during the omplower pass. */
183 tree cancel_label;
184
185 /* What to do with variables with implicitly determined sharing
186 attributes. */
187 enum omp_clause_default_kind default_kind;
188
189 /* Nesting depth of this context. Used to beautify error messages re
190 invalid gotos. The outermost ctx is depth 1, with depth 0 being
191 reserved for the main body of the function. */
192 int depth;
193
194 /* True if this parallel directive is nested within another. */
195 bool is_nested;
196
197 /* True if this construct can be cancelled. */
198 bool cancellable;
199
200 /* For OpenACC loops, a mask of gang, worker and vector used at
201 levels below this one. */
202 int gwv_below;
203 /* For OpenACC loops, a mask of gang, worker and vector used at
204 this level and above. For parallel and kernels clauses, a mask
205 indicating which of num_gangs/num_workers/num_vectors was used. */
206 int gwv_this;
207 } omp_context;
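/* To make the record_type / sender_decl / receiver_decl machinery
   above concrete, here is a minimal sketch (decl names follow the
   usual .omp_data_* convention and are illustrative only):

	int i;
	#pragma omp parallel shared (i)

   record_type describes a structure with one field per communicated
   variable (the value itself, or a pointer to it, as decided by
   use_pointer_for_field below); sender_decl is the instance filled in
   by the encountering thread and receiver_decl is the pointer through
   which the child function reads it:

	struct .omp_data_s { int i; };
	.omp_data_o.i = i;
	.omp_data_i = &.omp_data_o;

   where .omp_data_o is the sender_decl and .omp_data_i the
   receiver_decl.  */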
208
209 /* A structure holding the elements of:
210 for (V = N1; V cond N2; V += STEP) [...] */
211
212 struct omp_for_data_loop
213 {
214 tree v, n1, n2, step;
215 enum tree_code cond_code;
216 };
217
218 /* A structure describing the main elements of a parallel loop. */
219
220 struct omp_for_data
221 {
222 struct omp_for_data_loop loop;
223 tree chunk_size;
224 gomp_for *for_stmt;
225 tree pre, iter_type;
226 int collapse;
227 bool have_nowait, have_ordered;
228 enum omp_clause_schedule_kind sched_kind;
229 struct omp_for_data_loop *loops;
230 };
231
232
233 static splay_tree all_contexts;
234 static int taskreg_nesting_level;
235 static int target_nesting_level;
236 static struct omp_region *root_omp_region;
237 static bitmap task_shared_vars;
238 static vec<omp_context *> taskreg_contexts;
239
240 static void scan_omp (gimple_seq *, omp_context *);
241 static tree scan_omp_1_op (tree *, int *, void *);
242
243 #define WALK_SUBSTMTS \
244 case GIMPLE_BIND: \
245 case GIMPLE_TRY: \
246 case GIMPLE_CATCH: \
247 case GIMPLE_EH_FILTER: \
248 case GIMPLE_TRANSACTION: \
249 /* The sub-statements for these should be walked. */ \
250 *handled_ops_p = false; \
251 break;
252
253 /* Helper function to get the name of the array containing the partial
254 reductions for OpenACC reductions. */
255 static const char *
256 oacc_get_reduction_array_id (tree node)
257 {
258 const char *id = IDENTIFIER_POINTER (DECL_NAME (node));
259 int len = strlen ("OACC") + strlen (id);
260 char *temp_name = XALLOCAVEC (char, len + 1);
261 snprintf (temp_name, len + 1, "OACC%s", id);
262 return IDENTIFIER_POINTER (get_identifier (temp_name));
263 }
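/* For example (purely illustrative): for a reduction variable named
   "sum", oacc_get_reduction_array_id above returns the identifier
   "OACCsum", which names the array of partial reduction results.  */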
264
265 /* Determine the number of OpenACC threads, used to size the array of
266 partial reductions. Currently, this is num_gangs * vector_length.
267 This value may differ from GOACC_GET_NUM_THREADS, because it is
268 independent of the device used. */
269
270 static tree
271 oacc_max_threads (omp_context *ctx)
272 {
273 tree nthreads, vector_length, gangs, clauses;
274
275 gangs = fold_convert (sizetype, integer_one_node);
276 vector_length = gangs;
277
278 /* The reduction clause may be nested inside a loop directive.
279 Scan for the innermost vector_length clause. */
280 for (omp_context *oc = ctx; oc; oc = oc->outer)
281 {
282 if (gimple_code (oc->stmt) != GIMPLE_OMP_TARGET
283 || (gimple_omp_target_kind (oc->stmt)
284 != GF_OMP_TARGET_KIND_OACC_PARALLEL))
285 continue;
286
287 clauses = gimple_omp_target_clauses (oc->stmt);
288
289 vector_length = find_omp_clause (clauses, OMP_CLAUSE_VECTOR_LENGTH);
290 if (vector_length)
291 vector_length = fold_convert_loc (OMP_CLAUSE_LOCATION (vector_length),
292 sizetype,
293 OMP_CLAUSE_VECTOR_LENGTH_EXPR
294 (vector_length));
295 else
296 vector_length = fold_convert (sizetype, integer_one_node);
297
298 gangs = find_omp_clause (clauses, OMP_CLAUSE_NUM_GANGS);
299 if (gangs)
300 gangs = fold_convert_loc (OMP_CLAUSE_LOCATION (gangs), sizetype,
301 OMP_CLAUSE_NUM_GANGS_EXPR (gangs));
302 else
303 gangs = fold_convert (sizetype, integer_one_node);
304
305 break;
306 }
307
308 nthreads = fold_build2 (MULT_EXPR, sizetype, gangs, vector_length);
309
310 return nthreads;
311 }
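/* A worked example for oacc_max_threads above (the clause values are
   made up): given

	#pragma acc parallel num_gangs (32) vector_length (128)

   the partial-reduction array gets 32 * 128 == 4096 slots, one per
   gang/vector-lane combination; a missing clause defaults to 1 in the
   computation above.  */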
312
313 /* Holds offload tables with decls. */
314 vec<tree, va_gc> *offload_funcs, *offload_vars;
315
316 /* Convenience function for calling scan_omp_1_op on tree operands. */
317
318 static inline tree
319 scan_omp_op (tree *tp, omp_context *ctx)
320 {
321 struct walk_stmt_info wi;
322
323 memset (&wi, 0, sizeof (wi));
324 wi.info = ctx;
325 wi.want_locations = true;
326
327 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
328 }
329
330 static void lower_omp (gimple_seq *, omp_context *);
331 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
332 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
333
334 /* Find an OMP clause of type KIND within CLAUSES. */
335
336 tree
337 find_omp_clause (tree clauses, enum omp_clause_code kind)
338 {
339 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
340 if (OMP_CLAUSE_CODE (clauses) == kind)
341 return clauses;
342
343 return NULL_TREE;
344 }
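/* A typical use of find_omp_clause, as also seen later in this file
   (sketch only): given the clause list of a GIMPLE_OMP_FOR,

	tree c = find_omp_clause (gimple_omp_for_clauses (ws_stmt),
				  OMP_CLAUSE_SCHEDULE);

   returns the schedule clause if one was given, or NULL_TREE.  */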
345
346 /* Return true if CTX is for an omp parallel. */
347
348 static inline bool
349 is_parallel_ctx (omp_context *ctx)
350 {
351 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
352 }
353
354
355 /* Return true if CTX is for an omp task. */
356
357 static inline bool
358 is_task_ctx (omp_context *ctx)
359 {
360 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
361 }
362
363
364 /* Return true if CTX is for an omp parallel or omp task. */
365
366 static inline bool
367 is_taskreg_ctx (omp_context *ctx)
368 {
369 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
370 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
371 }
372
373
374 /* Return true if REGION is a combined parallel+workshare region. */
375
376 static inline bool
377 is_combined_parallel (struct omp_region *region)
378 {
379 return region->is_combined_parallel;
380 }
381
382
383 /* Extract the header elements of parallel loop FOR_STMT and store
384 them into *FD. */
385
386 static void
387 extract_omp_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
388 struct omp_for_data_loop *loops)
389 {
390 tree t, var, *collapse_iter, *collapse_count;
391 tree count = NULL_TREE, iter_type = long_integer_type_node;
392 struct omp_for_data_loop *loop;
393 int i;
394 struct omp_for_data_loop dummy_loop;
395 location_t loc = gimple_location (for_stmt);
396 bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
397 bool distribute = gimple_omp_for_kind (for_stmt)
398 == GF_OMP_FOR_KIND_DISTRIBUTE;
399
400 fd->for_stmt = for_stmt;
401 fd->pre = NULL;
402 fd->collapse = gimple_omp_for_collapse (for_stmt);
403 if (fd->collapse > 1)
404 fd->loops = loops;
405 else
406 fd->loops = &fd->loop;
407
408 fd->have_nowait = distribute || simd;
409 fd->have_ordered = false;
410 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
411 fd->chunk_size = NULL_TREE;
412 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
413 fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
414 collapse_iter = NULL;
415 collapse_count = NULL;
416
417 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
418 switch (OMP_CLAUSE_CODE (t))
419 {
420 case OMP_CLAUSE_NOWAIT:
421 fd->have_nowait = true;
422 break;
423 case OMP_CLAUSE_ORDERED:
424 fd->have_ordered = true;
425 break;
426 case OMP_CLAUSE_SCHEDULE:
427 gcc_assert (!distribute);
428 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
429 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
430 break;
431 case OMP_CLAUSE_DIST_SCHEDULE:
432 gcc_assert (distribute);
433 fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
434 break;
435 case OMP_CLAUSE_COLLAPSE:
436 if (fd->collapse > 1)
437 {
438 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
439 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
440 }
441 break;
442 default:
443 break;
444 }
445
446 /* FIXME: for now map schedule(auto) to schedule(static).
447 There should be analysis to determine whether all iterations
448 are approximately the same amount of work (then schedule(static)
449 is best) or if it varies (then schedule(dynamic,N) is better). */
450 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
451 {
452 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
453 gcc_assert (fd->chunk_size == NULL);
454 }
455 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
456 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
457 gcc_assert (fd->chunk_size == NULL);
458 else if (fd->chunk_size == NULL)
459 {
460 /* We only need to compute a default chunk size for ordered
461 static loops and dynamic loops. */
462 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
463 || fd->have_ordered)
464 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
465 ? integer_zero_node : integer_one_node;
466 }
467
468 for (i = 0; i < fd->collapse; i++)
469 {
470 if (fd->collapse == 1)
471 loop = &fd->loop;
472 else if (loops != NULL)
473 loop = loops + i;
474 else
475 loop = &dummy_loop;
476
477 loop->v = gimple_omp_for_index (for_stmt, i);
478 gcc_assert (SSA_VAR_P (loop->v));
479 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
480 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
481 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
482 loop->n1 = gimple_omp_for_initial (for_stmt, i);
483
484 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
485 loop->n2 = gimple_omp_for_final (for_stmt, i);
486 switch (loop->cond_code)
487 {
488 case LT_EXPR:
489 case GT_EXPR:
490 break;
491 case NE_EXPR:
492 gcc_assert (gimple_omp_for_kind (for_stmt)
493 == GF_OMP_FOR_KIND_CILKSIMD
494 || (gimple_omp_for_kind (for_stmt)
495 == GF_OMP_FOR_KIND_CILKFOR));
496 break;
497 case LE_EXPR:
498 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
499 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
500 else
501 loop->n2 = fold_build2_loc (loc,
502 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
503 build_int_cst (TREE_TYPE (loop->n2), 1));
504 loop->cond_code = LT_EXPR;
505 break;
506 case GE_EXPR:
507 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
508 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
509 else
510 loop->n2 = fold_build2_loc (loc,
511 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
512 build_int_cst (TREE_TYPE (loop->n2), 1));
513 loop->cond_code = GT_EXPR;
514 break;
515 default:
516 gcc_unreachable ();
517 }
518
519 t = gimple_omp_for_incr (for_stmt, i);
520 gcc_assert (TREE_OPERAND (t, 0) == var);
521 switch (TREE_CODE (t))
522 {
523 case PLUS_EXPR:
524 loop->step = TREE_OPERAND (t, 1);
525 break;
526 case POINTER_PLUS_EXPR:
527 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
528 break;
529 case MINUS_EXPR:
530 loop->step = TREE_OPERAND (t, 1);
531 loop->step = fold_build1_loc (loc,
532 NEGATE_EXPR, TREE_TYPE (loop->step),
533 loop->step);
534 break;
535 default:
536 gcc_unreachable ();
537 }
538
539 if (simd
540 || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
541 && !fd->have_ordered))
542 {
543 if (fd->collapse == 1)
544 iter_type = TREE_TYPE (loop->v);
545 else if (i == 0
546 || TYPE_PRECISION (iter_type)
547 < TYPE_PRECISION (TREE_TYPE (loop->v)))
548 iter_type
549 = build_nonstandard_integer_type
550 (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
551 }
552 else if (iter_type != long_long_unsigned_type_node)
553 {
554 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
555 iter_type = long_long_unsigned_type_node;
556 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
557 && TYPE_PRECISION (TREE_TYPE (loop->v))
558 >= TYPE_PRECISION (iter_type))
559 {
560 tree n;
561
562 if (loop->cond_code == LT_EXPR)
563 n = fold_build2_loc (loc,
564 PLUS_EXPR, TREE_TYPE (loop->v),
565 loop->n2, loop->step);
566 else
567 n = loop->n1;
568 if (TREE_CODE (n) != INTEGER_CST
569 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
570 iter_type = long_long_unsigned_type_node;
571 }
572 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
573 > TYPE_PRECISION (iter_type))
574 {
575 tree n1, n2;
576
577 if (loop->cond_code == LT_EXPR)
578 {
579 n1 = loop->n1;
580 n2 = fold_build2_loc (loc,
581 PLUS_EXPR, TREE_TYPE (loop->v),
582 loop->n2, loop->step);
583 }
584 else
585 {
586 n1 = fold_build2_loc (loc,
587 MINUS_EXPR, TREE_TYPE (loop->v),
588 loop->n2, loop->step);
589 n2 = loop->n1;
590 }
591 if (TREE_CODE (n1) != INTEGER_CST
592 || TREE_CODE (n2) != INTEGER_CST
593 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
594 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
595 iter_type = long_long_unsigned_type_node;
596 }
597 }
598
599 if (collapse_count && *collapse_count == NULL)
600 {
601 t = fold_binary (loop->cond_code, boolean_type_node,
602 fold_convert (TREE_TYPE (loop->v), loop->n1),
603 fold_convert (TREE_TYPE (loop->v), loop->n2));
604 if (t && integer_zerop (t))
605 count = build_zero_cst (long_long_unsigned_type_node);
606 else if ((i == 0 || count != NULL_TREE)
607 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
608 && TREE_CONSTANT (loop->n1)
609 && TREE_CONSTANT (loop->n2)
610 && TREE_CODE (loop->step) == INTEGER_CST)
611 {
612 tree itype = TREE_TYPE (loop->v);
613
614 if (POINTER_TYPE_P (itype))
615 itype = signed_type_for (itype);
616 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
617 t = fold_build2_loc (loc,
618 PLUS_EXPR, itype,
619 fold_convert_loc (loc, itype, loop->step), t);
620 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
621 fold_convert_loc (loc, itype, loop->n2));
622 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
623 fold_convert_loc (loc, itype, loop->n1));
624 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
625 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
626 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
627 fold_build1_loc (loc, NEGATE_EXPR, itype,
628 fold_convert_loc (loc, itype,
629 loop->step)));
630 else
631 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
632 fold_convert_loc (loc, itype, loop->step));
633 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
634 if (count != NULL_TREE)
635 count = fold_build2_loc (loc,
636 MULT_EXPR, long_long_unsigned_type_node,
637 count, t);
638 else
639 count = t;
640 if (TREE_CODE (count) != INTEGER_CST)
641 count = NULL_TREE;
642 }
643 else if (count && !integer_zerop (count))
644 count = NULL_TREE;
645 }
646 }
647
648 if (count
649 && !simd
650 && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
651 || fd->have_ordered))
652 {
653 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
654 iter_type = long_long_unsigned_type_node;
655 else
656 iter_type = long_integer_type_node;
657 }
658 else if (collapse_iter && *collapse_iter != NULL)
659 iter_type = TREE_TYPE (*collapse_iter);
660 fd->iter_type = iter_type;
661 if (collapse_iter && *collapse_iter == NULL)
662 *collapse_iter = create_tmp_var (iter_type, ".iter");
663 if (collapse_count && *collapse_count == NULL)
664 {
665 if (count)
666 *collapse_count = fold_convert_loc (loc, iter_type, count);
667 else
668 *collapse_count = create_tmp_var (iter_type, ".count");
669 }
670
671 if (fd->collapse > 1)
672 {
673 fd->loop.v = *collapse_iter;
674 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
675 fd->loop.n2 = *collapse_count;
676 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
677 fd->loop.cond_code = LT_EXPR;
678 }
679
680 /* For OpenACC loops, force a chunk size of one, as this avoids the default
681 scheduling where several subsequent iterations are being executed by the
682 same thread. */
683 if (gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
684 {
685 gcc_assert (fd->chunk_size == NULL_TREE);
686 fd->chunk_size = build_int_cst (TREE_TYPE (fd->loop.v), 1);
687 }
688 }
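/* A worked example for the loop normalization in extract_omp_for_data
   above (numbers chosen for illustration): given

	for (i = 0; i <= 9; i += 2)

   the LE_EXPR condition is rewritten as i < 10 with cond_code LT_EXPR.
   When such a loop is part of a collapse nest with constant bounds,
   its trip count is computed as (step - 1 + n2 - n1) / step
   = (2 - 1 + 10 - 0) / 2 == 5, matching the iterations
   i = 0, 2, 4, 6, 8.  */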
689
690
691 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that PAR_ENTRY_BB
692 is the immediate dominator of WS_ENTRY_BB, return true if there
693 are no data dependencies that would prevent expanding the parallel
694 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
695
696 When expanding a combined parallel+workshare region, the call to
697 the child function may need additional arguments in the case of
698 GIMPLE_OMP_FOR regions. In some cases, these arguments are
699 computed out of variables passed in from the parent to the child
700 via 'struct .omp_data_s'. For instance:
701
702 #pragma omp parallel for schedule (guided, i * 4)
703 for (j ...)
704
705 Is lowered into:
706
707 # BLOCK 2 (PAR_ENTRY_BB)
708 .omp_data_o.i = i;
709 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
710
711 # BLOCK 3 (WS_ENTRY_BB)
712 .omp_data_i = &.omp_data_o;
713 D.1667 = .omp_data_i->i;
714 D.1598 = D.1667 * 4;
715 #pragma omp for schedule (guided, D.1598)
716
717 When we outline the parallel region, the call to the child function
718 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
719 that value is computed *after* the call site. So, in principle we
720 cannot do the transformation.
721
722 To see whether the code in WS_ENTRY_BB blocks the combined
723 parallel+workshare call, we collect all the variables used in the
724 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
725 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
726 call.
727
728 FIXME. If we had the SSA form built at this point, we could merely
729 hoist the code in block 3 into block 2 and be done with it. But at
730 this point we don't have dataflow information and though we could
731 hack something up here, it is really not worth the aggravation. */
732
733 static bool
734 workshare_safe_to_combine_p (basic_block ws_entry_bb)
735 {
736 struct omp_for_data fd;
737 gimple ws_stmt = last_stmt (ws_entry_bb);
738
739 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
740 return true;
741
742 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
743
744 extract_omp_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);
745
746 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
747 return false;
748 if (fd.iter_type != long_integer_type_node)
749 return false;
750
751 /* FIXME. We give up too easily here. If any of these arguments
752 are not constants, they will likely involve variables that have
753 been mapped into fields of .omp_data_s for sharing with the child
754 function. With appropriate data flow, it would be possible to
755 see through this. */
756 if (!is_gimple_min_invariant (fd.loop.n1)
757 || !is_gimple_min_invariant (fd.loop.n2)
758 || !is_gimple_min_invariant (fd.loop.step)
759 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
760 return false;
761
762 return true;
763 }
764
765
766 /* Collect additional arguments needed to emit a combined
767 parallel+workshare call. WS_STMT is the workshare directive being
768 expanded. */
769
770 static vec<tree, va_gc> *
771 get_ws_args_for (gimple par_stmt, gimple ws_stmt)
772 {
773 tree t;
774 location_t loc = gimple_location (ws_stmt);
775 vec<tree, va_gc> *ws_args;
776
777 if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
778 {
779 struct omp_for_data fd;
780 tree n1, n2;
781
782 extract_omp_for_data (for_stmt, &fd, NULL);
783 n1 = fd.loop.n1;
784 n2 = fd.loop.n2;
785
786 if (gimple_omp_for_combined_into_p (for_stmt))
787 {
788 tree innerc
789 = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
790 OMP_CLAUSE__LOOPTEMP_);
791 gcc_assert (innerc);
792 n1 = OMP_CLAUSE_DECL (innerc);
793 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
794 OMP_CLAUSE__LOOPTEMP_);
795 gcc_assert (innerc);
796 n2 = OMP_CLAUSE_DECL (innerc);
797 }
798
799 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
800
801 t = fold_convert_loc (loc, long_integer_type_node, n1);
802 ws_args->quick_push (t);
803
804 t = fold_convert_loc (loc, long_integer_type_node, n2);
805 ws_args->quick_push (t);
806
807 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
808 ws_args->quick_push (t);
809
810 if (fd.chunk_size)
811 {
812 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
813 ws_args->quick_push (t);
814 }
815
816 return ws_args;
817 }
818 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
819 {
820 /* Number of sections is equal to the number of edges from the
821 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
822 the exit of the sections region. */
823 basic_block bb = single_succ (gimple_bb (ws_stmt));
824 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
825 vec_alloc (ws_args, 1);
826 ws_args->quick_push (t);
827 return ws_args;
828 }
829
830 gcc_unreachable ();
831 }
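/* For illustration (the clauses and bounds are made up): for a combined

	#pragma omp parallel for schedule (dynamic, 4)
	for (i = 0; i < n; i++)

   get_ws_args_for above collects { (long) 0, (long) n, (long) 1,
   (long) 4 }, i.e. the loop bounds, step and chunk size that the
   combined parallel+loop runtime entry point expects in addition to
   the usual outlined-function arguments.  */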
832
833
834 /* Discover whether REGION is a combined parallel+workshare region. */
835
836 static void
837 determine_parallel_type (struct omp_region *region)
838 {
839 basic_block par_entry_bb, par_exit_bb;
840 basic_block ws_entry_bb, ws_exit_bb;
841
842 if (region == NULL || region->inner == NULL
843 || region->exit == NULL || region->inner->exit == NULL
844 || region->inner->cont == NULL)
845 return;
846
847 /* We only support parallel+for and parallel+sections. */
848 if (region->type != GIMPLE_OMP_PARALLEL
849 || (region->inner->type != GIMPLE_OMP_FOR
850 && region->inner->type != GIMPLE_OMP_SECTIONS))
851 return;
852
853 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
854 WS_EXIT_BB -> PAR_EXIT_BB. */
855 par_entry_bb = region->entry;
856 par_exit_bb = region->exit;
857 ws_entry_bb = region->inner->entry;
858 ws_exit_bb = region->inner->exit;
859
860 if (single_succ (par_entry_bb) == ws_entry_bb
861 && single_succ (ws_exit_bb) == par_exit_bb
862 && workshare_safe_to_combine_p (ws_entry_bb)
863 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
864 || (last_and_only_stmt (ws_entry_bb)
865 && last_and_only_stmt (par_exit_bb))))
866 {
867 gimple par_stmt = last_stmt (par_entry_bb);
868 gimple ws_stmt = last_stmt (ws_entry_bb);
869
870 if (region->inner->type == GIMPLE_OMP_FOR)
871 {
872 /* If this is a combined parallel loop, we need to determine
873 whether or not to use the combined library calls. There
874 are two cases where we do not apply the transformation:
875 static loops and any kind of ordered loop. In the first
876 case, we already open code the loop so there is no need
877 to do anything else. In the latter case, the combined
878 parallel loop call would still need extra synchronization
879 to implement ordered semantics, so there would not be any
880 gain in using the combined call. */
881 tree clauses = gimple_omp_for_clauses (ws_stmt);
882 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
883 if (c == NULL
884 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
885 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
886 {
887 region->is_combined_parallel = false;
888 region->inner->is_combined_parallel = false;
889 return;
890 }
891 }
892
893 region->is_combined_parallel = true;
894 region->inner->is_combined_parallel = true;
895 region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
896 }
897 }
898
899
900 /* Return true if EXPR is variable sized. */
901
902 static inline bool
903 is_variable_sized (const_tree expr)
904 {
905 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
906 }
907
908 /* Return true if DECL is of reference type. */
909
910 static inline bool
911 is_reference (tree decl)
912 {
913 return lang_hooks.decls.omp_privatize_by_reference (decl);
914 }
915
916 /* Return the type of a decl. If the decl is reference type,
917 return its base type. */
918 static inline tree
919 get_base_type (tree decl)
920 {
921 tree type = TREE_TYPE (decl);
922 if (is_reference (decl))
923 type = TREE_TYPE (type);
924 return type;
925 }
926
927 /* Look up variables. The "maybe" forms
928 allow the variable not to have been entered; otherwise we
929 assert that the variable has been entered. */
930
931 static inline tree
932 lookup_decl (tree var, omp_context *ctx)
933 {
934 tree *n = ctx->cb.decl_map->get (var);
935 return *n;
936 }
937
938 static inline tree
939 maybe_lookup_decl (const_tree var, omp_context *ctx)
940 {
941 tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
942 return n ? *n : NULL_TREE;
943 }
944
945 static inline tree
946 lookup_field (tree var, omp_context *ctx)
947 {
948 splay_tree_node n;
949 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
950 return (tree) n->value;
951 }
952
953 static inline tree
954 lookup_sfield (tree var, omp_context *ctx)
955 {
956 splay_tree_node n;
957 n = splay_tree_lookup (ctx->sfield_map
958 ? ctx->sfield_map : ctx->field_map,
959 (splay_tree_key) var);
960 return (tree) n->value;
961 }
962
963 static inline tree
964 maybe_lookup_field (tree var, omp_context *ctx)
965 {
966 splay_tree_node n;
967 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
968 return n ? (tree) n->value : NULL_TREE;
969 }
970
971 static inline tree
972 lookup_oacc_reduction (const char *id, omp_context *ctx)
973 {
974 splay_tree_node n;
975 n = splay_tree_lookup (ctx->reduction_map, (splay_tree_key) id);
976 return (tree) n->value;
977 }
978
979 static inline tree
980 maybe_lookup_oacc_reduction (tree var, omp_context *ctx)
981 {
982 splay_tree_node n = NULL;
983 if (ctx->reduction_map)
984 n = splay_tree_lookup (ctx->reduction_map, (splay_tree_key) var);
985 return n ? (tree) n->value : NULL_TREE;
986 }
987
988 /* Return true if DECL should be copied by pointer. SHARED_CTX is
989 the parallel context if DECL is to be shared. */
990
991 static bool
992 use_pointer_for_field (tree decl, omp_context *shared_ctx)
993 {
994 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
995 return true;
996
997 /* We can only use copy-in/copy-out semantics for shared variables
998 when we know the value is not accessible from an outer scope. */
999 if (shared_ctx)
1000 {
1001 gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt));
1002
1003 /* ??? Trivially accessible from anywhere. But why would we even
1004 be passing an address in this case? Should we simply assert
1005 this to be false, or should we have a cleanup pass that removes
1006 these from the list of mappings? */
1007 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
1008 return true;
1009
1010 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
1011 without analyzing the expression whether or not its location
1012 is accessible to anyone else. In the case of nested parallel
1013 regions it certainly may be. */
1014 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
1015 return true;
1016
1017 /* Do not use copy-in/copy-out for variables that have their
1018 address taken. */
1019 if (TREE_ADDRESSABLE (decl))
1020 return true;
1021
1022 /* lower_send_shared_vars only uses copy-in, but not copy-out
1023 for these. */
1024 if (TREE_READONLY (decl)
1025 || ((TREE_CODE (decl) == RESULT_DECL
1026 || TREE_CODE (decl) == PARM_DECL)
1027 && DECL_BY_REFERENCE (decl)))
1028 return false;
1029
1030 /* Disallow copy-in/out in nested parallel if
1031 decl is shared in outer parallel, otherwise
1032 each thread could store the shared variable
1033 in its own copy-in location, making the
1034 variable no longer really shared. */
1035 if (shared_ctx->is_nested)
1036 {
1037 omp_context *up;
1038
1039 for (up = shared_ctx->outer; up; up = up->outer)
1040 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
1041 break;
1042
1043 if (up)
1044 {
1045 tree c;
1046
1047 for (c = gimple_omp_taskreg_clauses (up->stmt);
1048 c; c = OMP_CLAUSE_CHAIN (c))
1049 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
1050 && OMP_CLAUSE_DECL (c) == decl)
1051 break;
1052
1053 if (c)
1054 goto maybe_mark_addressable_and_ret;
1055 }
1056 }
1057
1058 /* For tasks avoid using copy-in/out. As tasks can be
1059 deferred or executed in a different thread, when GOMP_task
1060 returns, the task hasn't necessarily terminated. */
1061 if (is_task_ctx (shared_ctx))
1062 {
1063 tree outer;
1064 maybe_mark_addressable_and_ret:
1065 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
1066 if (is_gimple_reg (outer))
1067 {
1068 /* Taking address of OUTER in lower_send_shared_vars
1069 might need regimplification of everything that uses the
1070 variable. */
1071 if (!task_shared_vars)
1072 task_shared_vars = BITMAP_ALLOC (NULL);
1073 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
1074 TREE_ADDRESSABLE (outer) = 1;
1075 }
1076 return true;
1077 }
1078 }
1079
1080 return false;
1081 }
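/* Two illustrative cases for use_pointer_for_field above: a plain
   shared scalar such as "int x" whose address is never taken can use
   copy-in/copy-out, so the function returns false and the field holds
   the value itself; an aggregate such as "int a[100]", or any
   TREE_ADDRESSABLE variable, is communicated through a pointer field
   instead, and the function returns true.  */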
1082
1083 /* Construct a new automatic decl similar to VAR. */
1084
1085 static tree
1086 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
1087 {
1088 tree copy = copy_var_decl (var, name, type);
1089
1090 DECL_CONTEXT (copy) = current_function_decl;
1091 DECL_CHAIN (copy) = ctx->block_vars;
1092 ctx->block_vars = copy;
1093
1094 return copy;
1095 }
1096
1097 static tree
1098 omp_copy_decl_1 (tree var, omp_context *ctx)
1099 {
1100 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
1101 }
1102
1103 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
1104 as appropriate. */
1105 static tree
1106 omp_build_component_ref (tree obj, tree field)
1107 {
1108 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
1109 if (TREE_THIS_VOLATILE (field))
1110 TREE_THIS_VOLATILE (ret) |= 1;
1111 if (TREE_READONLY (field))
1112 TREE_READONLY (ret) |= 1;
1113 return ret;
1114 }
1115
1116 /* Build tree nodes to access the field for VAR on the receiver side. */
1117
1118 static tree
1119 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
1120 {
1121 tree x, field = lookup_field (var, ctx);
1122
1123 /* If the receiver record type was remapped in the child function,
1124 remap the field into the new record type. */
1125 x = maybe_lookup_field (field, ctx);
1126 if (x != NULL)
1127 field = x;
1128
1129 x = build_simple_mem_ref (ctx->receiver_decl);
1130 TREE_THIS_NOTRAP (x) = 1;
1131 x = omp_build_component_ref (x, field);
1132 if (by_ref)
1133 x = build_simple_mem_ref (x);
1134
1135 return x;
1136 }
1137
1138 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
1139 of a parallel, this is a component reference; for workshare constructs
1140 this is some variable. */
1141
1142 static tree
1143 build_outer_var_ref (tree var, omp_context *ctx)
1144 {
1145 tree x;
1146
1147 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
1148 x = var;
1149 else if (is_variable_sized (var))
1150 {
1151 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
1152 x = build_outer_var_ref (x, ctx);
1153 x = build_simple_mem_ref (x);
1154 }
1155 else if (is_taskreg_ctx (ctx))
1156 {
1157 bool by_ref = use_pointer_for_field (var, NULL);
1158 x = build_receiver_ref (var, by_ref, ctx);
1159 }
1160 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
1161 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
1162 {
1163 /* #pragma omp simd isn't a worksharing construct, and can reference even
1164 private vars in its linear etc. clauses. */
1165 x = NULL_TREE;
1166 if (ctx->outer && is_taskreg_ctx (ctx))
1167 x = lookup_decl (var, ctx->outer);
1168 else if (ctx->outer)
1169 x = maybe_lookup_decl_in_outer_ctx (var, ctx);
1170 if (x == NULL_TREE)
1171 x = var;
1172 }
1173 else if (ctx->outer)
1174 x = lookup_decl (var, ctx->outer);
1175 else if (is_reference (var))
1176 /* This can happen with orphaned constructs. If var is a reference, it is
1177 possible it is shared and as such valid. */
1178 x = var;
1179 else
1180 gcc_unreachable ();
1181
1182 if (is_reference (var))
1183 x = build_simple_mem_ref (x);
1184
1185 return x;
1186 }
1187
1188 /* Build tree nodes to access the field for VAR on the sender side. */
1189
1190 static tree
1191 build_sender_ref (tree var, omp_context *ctx)
1192 {
1193 tree field = lookup_sfield (var, ctx);
1194 return omp_build_component_ref (ctx->sender_decl, field);
1195 }
1196
1197 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
1198
1199 static void
1200 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
1201 {
1202 tree field, type, sfield = NULL_TREE;
1203
1204 gcc_assert ((mask & 1) == 0
1205 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
1206 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
1207 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
1208 gcc_assert ((mask & 3) == 3
1209 || !is_gimple_omp_oacc (ctx->stmt));
1210
1211 type = TREE_TYPE (var);
1212 if (mask & 4)
1213 {
1214 gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
1215 type = build_pointer_type (build_pointer_type (type));
1216 }
1217 else if (by_ref)
1218 type = build_pointer_type (type);
1219 else if ((mask & 3) == 1 && is_reference (var))
1220 type = TREE_TYPE (type);
1221
1222 field = build_decl (DECL_SOURCE_LOCATION (var),
1223 FIELD_DECL, DECL_NAME (var), type);
1224
1225 /* Remember what variable this field was created for. This does have a
1226 side effect of making dwarf2out ignore this member, so for helpful
1227 debugging we clear it later in delete_omp_context. */
1228 DECL_ABSTRACT_ORIGIN (field) = var;
1229 if (type == TREE_TYPE (var))
1230 {
1231 DECL_ALIGN (field) = DECL_ALIGN (var);
1232 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
1233 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
1234 }
1235 else
1236 DECL_ALIGN (field) = TYPE_ALIGN (type);
1237
1238 if ((mask & 3) == 3)
1239 {
1240 insert_field_into_struct (ctx->record_type, field);
1241 if (ctx->srecord_type)
1242 {
1243 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1244 FIELD_DECL, DECL_NAME (var), type);
1245 DECL_ABSTRACT_ORIGIN (sfield) = var;
1246 DECL_ALIGN (sfield) = DECL_ALIGN (field);
1247 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
1248 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
1249 insert_field_into_struct (ctx->srecord_type, sfield);
1250 }
1251 }
1252 else
1253 {
1254 if (ctx->srecord_type == NULL_TREE)
1255 {
1256 tree t;
1257
1258 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
1259 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1260 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1261 {
1262 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1263 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
1264 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
1265 insert_field_into_struct (ctx->srecord_type, sfield);
1266 splay_tree_insert (ctx->sfield_map,
1267 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
1268 (splay_tree_value) sfield);
1269 }
1270 }
1271 sfield = field;
1272 insert_field_into_struct ((mask & 1) ? ctx->record_type
1273 : ctx->srecord_type, field);
1274 }
1275
1276 if (mask & 1)
1277 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1278 (splay_tree_value) field);
1279 if ((mask & 2) && ctx->sfield_map)
1280 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1281 (splay_tree_value) sfield);
1282 }
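/* An illustrative summary of the MASK argument of install_var_field
   above (see the callers for the authoritative usage): bit 0 requests
   a field in record_type, bit 1 a field in srecord_type (only used by
   task contexts), and bit 2 adds an extra level of pointer
   indirection, as used for GOMP_MAP_POINTER array maps.  Most callers
   in this file pass 3 (both records); the map-clause handling below
   passes 7 for pointer-based array sections.  */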
1283
1284 static tree
1285 install_var_local (tree var, omp_context *ctx)
1286 {
1287 tree new_var = omp_copy_decl_1 (var, ctx);
1288 insert_decl_map (&ctx->cb, var, new_var);
1289 return new_var;
1290 }
1291
1292 /* Adjust the replacement for DECL in CTX for the new context. This means
1293 copying the DECL_VALUE_EXPR, and fixing up the type. */
1294
1295 static void
1296 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1297 {
1298 tree new_decl, size;
1299
1300 new_decl = lookup_decl (decl, ctx);
1301
1302 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1303
1304 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1305 && DECL_HAS_VALUE_EXPR_P (decl))
1306 {
1307 tree ve = DECL_VALUE_EXPR (decl);
1308 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1309 SET_DECL_VALUE_EXPR (new_decl, ve);
1310 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1311 }
1312
1313 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1314 {
1315 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1316 if (size == error_mark_node)
1317 size = TYPE_SIZE (TREE_TYPE (new_decl));
1318 DECL_SIZE (new_decl) = size;
1319
1320 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1321 if (size == error_mark_node)
1322 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1323 DECL_SIZE_UNIT (new_decl) = size;
1324 }
1325 }
1326
1327 /* The callback for remap_decl. Search all containing contexts for a
1328 mapping of the variable; this avoids having to duplicate the splay
1329 tree ahead of time. We know a mapping doesn't already exist in the
1330 given context. Create new mappings to implement default semantics. */
1331
1332 static tree
1333 omp_copy_decl (tree var, copy_body_data *cb)
1334 {
1335 omp_context *ctx = (omp_context *) cb;
1336 tree new_var;
1337
1338 if (TREE_CODE (var) == LABEL_DECL)
1339 {
1340 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1341 DECL_CONTEXT (new_var) = current_function_decl;
1342 insert_decl_map (&ctx->cb, var, new_var);
1343 return new_var;
1344 }
1345
1346 while (!is_taskreg_ctx (ctx))
1347 {
1348 ctx = ctx->outer;
1349 if (ctx == NULL)
1350 return var;
1351 new_var = maybe_lookup_decl (var, ctx);
1352 if (new_var)
1353 return new_var;
1354 }
1355
1356 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1357 return var;
1358
1359 return error_mark_node;
1360 }
1361
1362
1363 /* Debugging dumps for parallel regions. */
1364 void dump_omp_region (FILE *, struct omp_region *, int);
1365 void debug_omp_region (struct omp_region *);
1366 void debug_all_omp_regions (void);
1367
1368 /* Dump the parallel region tree rooted at REGION. */
1369
1370 void
1371 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1372 {
1373 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1374 gimple_code_name[region->type]);
1375
1376 if (region->inner)
1377 dump_omp_region (file, region->inner, indent + 4);
1378
1379 if (region->cont)
1380 {
1381 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1382 region->cont->index);
1383 }
1384
1385 if (region->exit)
1386 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1387 region->exit->index);
1388 else
1389 fprintf (file, "%*s[no exit marker]\n", indent, "");
1390
1391 if (region->next)
1392 dump_omp_region (file, region->next, indent);
1393 }
1394
1395 DEBUG_FUNCTION void
1396 debug_omp_region (struct omp_region *region)
1397 {
1398 dump_omp_region (stderr, region, 0);
1399 }
1400
1401 DEBUG_FUNCTION void
1402 debug_all_omp_regions (void)
1403 {
1404 dump_omp_region (stderr, root_omp_region, 0);
1405 }
1406
1407
1408 /* Create a new parallel region starting at STMT inside region PARENT. */
1409
1410 static struct omp_region *
1411 new_omp_region (basic_block bb, enum gimple_code type,
1412 struct omp_region *parent)
1413 {
1414 struct omp_region *region = XCNEW (struct omp_region);
1415
1416 region->outer = parent;
1417 region->entry = bb;
1418 region->type = type;
1419
1420 if (parent)
1421 {
1422 /* This is a nested region. Add it to the list of inner
1423 regions in PARENT. */
1424 region->next = parent->inner;
1425 parent->inner = region;
1426 }
1427 else
1428 {
1429 /* This is a toplevel region. Add it to the list of toplevel
1430 regions in ROOT_OMP_REGION. */
1431 region->next = root_omp_region;
1432 root_omp_region = region;
1433 }
1434
1435 return region;
1436 }
1437
1438 /* Release the memory associated with the region tree rooted at REGION. */
1439
1440 static void
1441 free_omp_region_1 (struct omp_region *region)
1442 {
1443 struct omp_region *i, *n;
1444
1445 for (i = region->inner; i ; i = n)
1446 {
1447 n = i->next;
1448 free_omp_region_1 (i);
1449 }
1450
1451 free (region);
1452 }
1453
1454 /* Release the memory for the entire omp region tree. */
1455
1456 void
1457 free_omp_regions (void)
1458 {
1459 struct omp_region *r, *n;
1460 for (r = root_omp_region; r ; r = n)
1461 {
1462 n = r->next;
1463 free_omp_region_1 (r);
1464 }
1465 root_omp_region = NULL;
1466 }
1467
1468
1469 /* Create a new context, with OUTER_CTX being the surrounding context. */
1470
1471 static omp_context *
1472 new_omp_context (gimple stmt, omp_context *outer_ctx)
1473 {
1474 omp_context *ctx = XCNEW (omp_context);
1475
1476 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1477 (splay_tree_value) ctx);
1478 ctx->stmt = stmt;
1479
1480 if (outer_ctx)
1481 {
1482 ctx->outer = outer_ctx;
1483 ctx->cb = outer_ctx->cb;
1484 ctx->cb.block = NULL;
1485 ctx->depth = outer_ctx->depth + 1;
1486 ctx->reduction_map = outer_ctx->reduction_map;
1487 }
1488 else
1489 {
1490 ctx->cb.src_fn = current_function_decl;
1491 ctx->cb.dst_fn = current_function_decl;
1492 ctx->cb.src_node = cgraph_node::get (current_function_decl);
1493 gcc_checking_assert (ctx->cb.src_node);
1494 ctx->cb.dst_node = ctx->cb.src_node;
1495 ctx->cb.src_cfun = cfun;
1496 ctx->cb.copy_decl = omp_copy_decl;
1497 ctx->cb.eh_lp_nr = 0;
1498 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1499 ctx->depth = 1;
1500 }
1501
1502 ctx->cb.decl_map = new hash_map<tree, tree>;
1503
1504 return ctx;
1505 }
1506
1507 static gimple_seq maybe_catch_exception (gimple_seq);
1508
1509 /* Finalize task copyfn. */
1510
1511 static void
1512 finalize_task_copyfn (gomp_task *task_stmt)
1513 {
1514 struct function *child_cfun;
1515 tree child_fn;
1516 gimple_seq seq = NULL, new_seq;
1517 gbind *bind;
1518
1519 child_fn = gimple_omp_task_copy_fn (task_stmt);
1520 if (child_fn == NULL_TREE)
1521 return;
1522
1523 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1524 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
1525
1526 push_cfun (child_cfun);
1527 bind = gimplify_body (child_fn, false);
1528 gimple_seq_add_stmt (&seq, bind);
1529 new_seq = maybe_catch_exception (seq);
1530 if (new_seq != seq)
1531 {
1532 bind = gimple_build_bind (NULL, new_seq, NULL);
1533 seq = NULL;
1534 gimple_seq_add_stmt (&seq, bind);
1535 }
1536 gimple_set_body (child_fn, seq);
1537 pop_cfun ();
1538
1539 /* Inform the callgraph about the new function. */
1540 cgraph_node *node = cgraph_node::get_create (child_fn);
1541 node->parallelized_function = 1;
1542 cgraph_node::add_new_function (child_fn, false);
1543 }
1544
1545 /* Destroy an omp_context data structure. Called through the splay tree
1546 value delete callback. */
1547
1548 static void
1549 delete_omp_context (splay_tree_value value)
1550 {
1551 omp_context *ctx = (omp_context *) value;
1552
1553 delete ctx->cb.decl_map;
1554
1555 if (ctx->field_map)
1556 splay_tree_delete (ctx->field_map);
1557 if (ctx->sfield_map)
1558 splay_tree_delete (ctx->sfield_map);
1559 /* Reduction map is copied to nested contexts, so only delete it in the
1560 owner. */
1561 if (ctx->reduction_map
1562 && gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET
1563 && is_gimple_omp_offloaded (ctx->stmt)
1564 && is_gimple_omp_oacc (ctx->stmt))
1565 splay_tree_delete (ctx->reduction_map);
1566
1567 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1568 it produces corrupt debug information. */
1569 if (ctx->record_type)
1570 {
1571 tree t;
1572 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1573 DECL_ABSTRACT_ORIGIN (t) = NULL;
1574 }
1575 if (ctx->srecord_type)
1576 {
1577 tree t;
1578 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1579 DECL_ABSTRACT_ORIGIN (t) = NULL;
1580 }
1581
1582 if (is_task_ctx (ctx))
1583 finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));
1584
1585 XDELETE (ctx);
1586 }
1587
1588 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1589 context. */
1590
1591 static void
1592 fixup_child_record_type (omp_context *ctx)
1593 {
1594 tree f, type = ctx->record_type;
1595
1596 /* ??? It isn't sufficient to just call remap_type here, because
1597 variably_modified_type_p doesn't work the way we expect for
1598 record types. Testing each field for whether it needs remapping
1599 and creating a new record by hand works, however. */
1600 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1601 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1602 break;
1603 if (f)
1604 {
1605 tree name, new_fields = NULL;
1606
1607 type = lang_hooks.types.make_type (RECORD_TYPE);
1608 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1609 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1610 TYPE_DECL, name, type);
1611 TYPE_NAME (type) = name;
1612
1613 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1614 {
1615 tree new_f = copy_node (f);
1616 DECL_CONTEXT (new_f) = type;
1617 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1618 DECL_CHAIN (new_f) = new_fields;
1619 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1620 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1621 &ctx->cb, NULL);
1622 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1623 &ctx->cb, NULL);
1624 new_fields = new_f;
1625
1626 /* Arrange to be able to look up the receiver field
1627 given the sender field. */
1628 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1629 (splay_tree_value) new_f);
1630 }
1631 TYPE_FIELDS (type) = nreverse (new_fields);
1632 layout_type (type);
1633 }
1634
1635 TREE_TYPE (ctx->receiver_decl)
1636 = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
1637 }
1638
1639 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1640 specified by CLAUSES. */
1641
1642 static void
1643 scan_sharing_clauses (tree clauses, omp_context *ctx)
1644 {
1645 tree c, decl;
1646 bool scan_array_reductions = false;
1647
1648 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1649 {
1650 bool by_ref;
1651
1652 switch (OMP_CLAUSE_CODE (c))
1653 {
1654 case OMP_CLAUSE_PRIVATE:
1655 decl = OMP_CLAUSE_DECL (c);
1656 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1657 goto do_private;
1658 else if (!is_variable_sized (decl))
1659 install_var_local (decl, ctx);
1660 break;
1661
1662 case OMP_CLAUSE_SHARED:
1663 decl = OMP_CLAUSE_DECL (c);
1664 /* Ignore shared directives in teams construct. */
1665 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
1666 {
1667 /* Global variables don't need to be copied,
1668 the receiver side will use them directly. */
1669 tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
1670 if (is_global_var (odecl))
1671 break;
1672 insert_decl_map (&ctx->cb, decl, odecl);
1673 break;
1674 }
1675 gcc_assert (is_taskreg_ctx (ctx));
1676 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1677 || !is_variable_sized (decl));
1678 /* Global variables don't need to be copied,
1679 the receiver side will use them directly. */
1680 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1681 break;
1682 by_ref = use_pointer_for_field (decl, ctx);
1683 if (! TREE_READONLY (decl)
1684 || TREE_ADDRESSABLE (decl)
1685 || by_ref
1686 || is_reference (decl))
1687 {
1688 install_var_field (decl, by_ref, 3, ctx);
1689 install_var_local (decl, ctx);
1690 break;
1691 }
1692 /* We don't need to copy const scalar vars back. */
1693 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1694 goto do_private;
1695
1696 case OMP_CLAUSE_LASTPRIVATE:
1697 /* Let the corresponding firstprivate clause create
1698 the variable. */
1699 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1700 break;
1701 /* FALLTHRU */
1702
1703 case OMP_CLAUSE_FIRSTPRIVATE:
1704 if (is_gimple_omp_oacc (ctx->stmt))
1705 {
1706 sorry ("clause not supported yet");
1707 break;
1708 }
1709 /* FALLTHRU */
1710 case OMP_CLAUSE_REDUCTION:
1711 case OMP_CLAUSE_LINEAR:
1712 decl = OMP_CLAUSE_DECL (c);
1713 do_private:
1714 if (is_variable_sized (decl))
1715 {
1716 if (is_task_ctx (ctx))
1717 install_var_field (decl, false, 1, ctx);
1718 break;
1719 }
1720 else if (is_taskreg_ctx (ctx))
1721 {
1722 bool global
1723 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1724 by_ref = use_pointer_for_field (decl, NULL);
1725
1726 if (is_task_ctx (ctx)
1727 && (global || by_ref || is_reference (decl)))
1728 {
1729 install_var_field (decl, false, 1, ctx);
1730 if (!global)
1731 install_var_field (decl, by_ref, 2, ctx);
1732 }
1733 else if (!global)
1734 install_var_field (decl, by_ref, 3, ctx);
1735 }
1736 install_var_local (decl, ctx);
1737 if (is_gimple_omp_oacc (ctx->stmt)
1738 && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1739 {
1740 /* Create a decl for the reduction array. */
1741 tree var = OMP_CLAUSE_DECL (c);
1742 tree type = get_base_type (var);
1743 tree ptype = build_pointer_type (type);
1744 tree array = create_tmp_var (ptype,
1745 oacc_get_reduction_array_id (var));
1746 omp_context *c = (ctx->field_map ? ctx : ctx->outer);
1747 install_var_field (array, true, 3, c);
1748 install_var_local (array, c);
1749
1750 /* Insert it into the current context. */
1751 splay_tree_insert (ctx->reduction_map, (splay_tree_key)
1752 oacc_get_reduction_array_id (var),
1753 (splay_tree_value) array);
1754 splay_tree_insert (ctx->reduction_map,
1755 (splay_tree_key) array,
1756 (splay_tree_value) array);
1757 }
1758 break;
1759
1760 case OMP_CLAUSE__LOOPTEMP_:
1761 gcc_assert (is_parallel_ctx (ctx));
1762 decl = OMP_CLAUSE_DECL (c);
1763 install_var_field (decl, false, 3, ctx);
1764 install_var_local (decl, ctx);
1765 break;
1766
1767 case OMP_CLAUSE_COPYPRIVATE:
1768 case OMP_CLAUSE_COPYIN:
1769 decl = OMP_CLAUSE_DECL (c);
1770 by_ref = use_pointer_for_field (decl, NULL);
1771 install_var_field (decl, by_ref, 3, ctx);
1772 break;
1773
1774 case OMP_CLAUSE_DEFAULT:
1775 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1776 break;
1777
1778 case OMP_CLAUSE_FINAL:
1779 case OMP_CLAUSE_IF:
1780 case OMP_CLAUSE_NUM_THREADS:
1781 case OMP_CLAUSE_NUM_TEAMS:
1782 case OMP_CLAUSE_THREAD_LIMIT:
1783 case OMP_CLAUSE_DEVICE:
1784 case OMP_CLAUSE_SCHEDULE:
1785 case OMP_CLAUSE_DIST_SCHEDULE:
1786 case OMP_CLAUSE_DEPEND:
1787 case OMP_CLAUSE__CILK_FOR_COUNT_:
1788 case OMP_CLAUSE_NUM_GANGS:
1789 case OMP_CLAUSE_NUM_WORKERS:
1790 case OMP_CLAUSE_VECTOR_LENGTH:
1791 if (ctx->outer)
1792 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1793 break;
1794
1795 case OMP_CLAUSE_TO:
1796 case OMP_CLAUSE_FROM:
1797 case OMP_CLAUSE_MAP:
1798 if (ctx->outer)
1799 scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
1800 decl = OMP_CLAUSE_DECL (c);
1801 /* Global variables with "omp declare target" attribute
1802 don't need to be copied, the receiver side will use them
1803 directly. */
1804 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1805 && DECL_P (decl)
1806 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
1807 && varpool_node::get_create (decl)->offloadable)
1808 break;
1809 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1810 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER)
1811 {
1812 /* Ignore GOMP_MAP_POINTER kind for arrays in regions that are
1813 not offloaded; there is nothing to map for those. */
1814 if (!is_gimple_omp_offloaded (ctx->stmt)
1815 && !POINTER_TYPE_P (TREE_TYPE (decl))
1816 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
1817 break;
1818 }
1819 if (DECL_P (decl))
1820 {
1821 if (DECL_SIZE (decl)
1822 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
1823 {
1824 tree decl2 = DECL_VALUE_EXPR (decl);
1825 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
1826 decl2 = TREE_OPERAND (decl2, 0);
1827 gcc_assert (DECL_P (decl2));
1828 install_var_field (decl2, true, 3, ctx);
1829 install_var_local (decl2, ctx);
1830 install_var_local (decl, ctx);
1831 }
1832 else
1833 {
1834 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1835 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
1836 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
1837 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
1838 install_var_field (decl, true, 7, ctx);
1839 else
1840 install_var_field (decl, true, 3, ctx);
1841 if (is_gimple_omp_offloaded (ctx->stmt))
1842 install_var_local (decl, ctx);
1843 }
1844 }
1845 else
1846 {
1847 tree base = get_base_address (decl);
1848 tree nc = OMP_CLAUSE_CHAIN (c);
1849 if (DECL_P (base)
1850 && nc != NULL_TREE
1851 && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
1852 && OMP_CLAUSE_DECL (nc) == base
1853 && OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
1854 && integer_zerop (OMP_CLAUSE_SIZE (nc)))
1855 {
1856 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
1857 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
1858 }
1859 else
1860 {
1861 if (ctx->outer)
1862 {
1863 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1864 decl = OMP_CLAUSE_DECL (c);
1865 }
1866 gcc_assert (!splay_tree_lookup (ctx->field_map,
1867 (splay_tree_key) decl));
1868 tree field
1869 = build_decl (OMP_CLAUSE_LOCATION (c),
1870 FIELD_DECL, NULL_TREE, ptr_type_node);
1871 DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
1872 insert_field_into_struct (ctx->record_type, field);
1873 splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
1874 (splay_tree_value) field);
1875 }
1876 }
1877 break;
1878
1879 case OMP_CLAUSE_NOWAIT:
1880 case OMP_CLAUSE_ORDERED:
1881 case OMP_CLAUSE_COLLAPSE:
1882 case OMP_CLAUSE_UNTIED:
1883 case OMP_CLAUSE_MERGEABLE:
1884 case OMP_CLAUSE_PROC_BIND:
1885 case OMP_CLAUSE_SAFELEN:
1886 case OMP_CLAUSE_ASYNC:
1887 case OMP_CLAUSE_WAIT:
1888 case OMP_CLAUSE_GANG:
1889 case OMP_CLAUSE_WORKER:
1890 case OMP_CLAUSE_VECTOR:
1891 break;
1892
1893 case OMP_CLAUSE_ALIGNED:
1894 decl = OMP_CLAUSE_DECL (c);
1895 if (is_global_var (decl)
1896 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
1897 install_var_local (decl, ctx);
1898 break;
1899
1900 case OMP_CLAUSE_DEVICE_RESIDENT:
1901 case OMP_CLAUSE_USE_DEVICE:
1902 case OMP_CLAUSE__CACHE_:
1903 case OMP_CLAUSE_INDEPENDENT:
1904 case OMP_CLAUSE_AUTO:
1905 case OMP_CLAUSE_SEQ:
1906 sorry ("clause not supported yet");
1907 break;
1908
1909 default:
1910 gcc_unreachable ();
1911 }
1912 }
1913
1914 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1915 {
1916 switch (OMP_CLAUSE_CODE (c))
1917 {
1918 case OMP_CLAUSE_LASTPRIVATE:
1919 /* Let the corresponding firstprivate clause create
1920 the variable. */
1921 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1922 scan_array_reductions = true;
1923 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1924 break;
1925 /* FALLTHRU */
1926
1927 case OMP_CLAUSE_FIRSTPRIVATE:
1928 if (is_gimple_omp_oacc (ctx->stmt))
1929 {
1930 sorry ("clause not supported yet");
1931 break;
1932 }
1933 /* FALLTHRU */
1934 case OMP_CLAUSE_PRIVATE:
1935 case OMP_CLAUSE_REDUCTION:
1936 case OMP_CLAUSE_LINEAR:
1937 decl = OMP_CLAUSE_DECL (c);
1938 if (is_variable_sized (decl))
1939 install_var_local (decl, ctx);
1940 fixup_remapped_decl (decl, ctx,
1941 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1942 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1943 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1944 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1945 scan_array_reductions = true;
1946 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1947 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
1948 scan_array_reductions = true;
1949 break;
1950
1951 case OMP_CLAUSE_SHARED:
1952 /* Ignore shared directives in teams construct. */
1953 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
1954 break;
1955 decl = OMP_CLAUSE_DECL (c);
1956 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1957 fixup_remapped_decl (decl, ctx, false);
1958 break;
1959
1960 case OMP_CLAUSE_MAP:
1961 if (!is_gimple_omp_offloaded (ctx->stmt))
1962 break;
1963 decl = OMP_CLAUSE_DECL (c);
1964 if (DECL_P (decl)
1965 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
1966 && varpool_node::get_create (decl)->offloadable)
1967 break;
1968 if (DECL_P (decl))
1969 {
1970 if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
1971 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
1972 && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
1973 {
1974 tree new_decl = lookup_decl (decl, ctx);
1975 TREE_TYPE (new_decl)
1976 = remap_type (TREE_TYPE (decl), &ctx->cb);
1977 }
1978 else if (DECL_SIZE (decl)
1979 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
1980 {
1981 tree decl2 = DECL_VALUE_EXPR (decl);
1982 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
1983 decl2 = TREE_OPERAND (decl2, 0);
1984 gcc_assert (DECL_P (decl2));
1985 fixup_remapped_decl (decl2, ctx, false);
1986 fixup_remapped_decl (decl, ctx, true);
1987 }
1988 else
1989 fixup_remapped_decl (decl, ctx, false);
1990 }
1991 break;
1992
1993 case OMP_CLAUSE_COPYPRIVATE:
1994 case OMP_CLAUSE_COPYIN:
1995 case OMP_CLAUSE_DEFAULT:
1996 case OMP_CLAUSE_IF:
1997 case OMP_CLAUSE_NUM_THREADS:
1998 case OMP_CLAUSE_NUM_TEAMS:
1999 case OMP_CLAUSE_THREAD_LIMIT:
2000 case OMP_CLAUSE_DEVICE:
2001 case OMP_CLAUSE_SCHEDULE:
2002 case OMP_CLAUSE_DIST_SCHEDULE:
2003 case OMP_CLAUSE_NOWAIT:
2004 case OMP_CLAUSE_ORDERED:
2005 case OMP_CLAUSE_COLLAPSE:
2006 case OMP_CLAUSE_UNTIED:
2007 case OMP_CLAUSE_FINAL:
2008 case OMP_CLAUSE_MERGEABLE:
2009 case OMP_CLAUSE_PROC_BIND:
2010 case OMP_CLAUSE_SAFELEN:
2011 case OMP_CLAUSE_ALIGNED:
2012 case OMP_CLAUSE_DEPEND:
2013 case OMP_CLAUSE__LOOPTEMP_:
2014 case OMP_CLAUSE_TO:
2015 case OMP_CLAUSE_FROM:
2016 case OMP_CLAUSE__CILK_FOR_COUNT_:
2017 case OMP_CLAUSE_ASYNC:
2018 case OMP_CLAUSE_WAIT:
2019 case OMP_CLAUSE_NUM_GANGS:
2020 case OMP_CLAUSE_NUM_WORKERS:
2021 case OMP_CLAUSE_VECTOR_LENGTH:
2022 case OMP_CLAUSE_GANG:
2023 case OMP_CLAUSE_WORKER:
2024 case OMP_CLAUSE_VECTOR:
2025 break;
2026
2027 case OMP_CLAUSE_DEVICE_RESIDENT:
2028 case OMP_CLAUSE_USE_DEVICE:
2029 case OMP_CLAUSE__CACHE_:
2030 case OMP_CLAUSE_INDEPENDENT:
2031 case OMP_CLAUSE_AUTO:
2032 case OMP_CLAUSE_SEQ:
2033 sorry ("clause not supported yet");
2034 break;
2035
2036 default:
2037 gcc_unreachable ();
2038 }
2039 }
2040
2041 gcc_checking_assert (!scan_array_reductions
2042 || !is_gimple_omp_oacc (ctx->stmt));
2043 if (scan_array_reductions)
2044 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2045 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
2046 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2047 {
2048 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2049 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2050 }
2051 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2052 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2053 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2054 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2055 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
2056 scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
2057 }
2058
2059 /* Create a new name for the omp child function. Returns an identifier. If
2060 IS_CILK_FOR is true then the suffix for the child function is
2061 "_cilk_for_fn". */
2062
2063 static tree
2064 create_omp_child_function_name (bool task_copy, bool is_cilk_for)
2065 {
2066 if (is_cilk_for)
2067 return clone_function_name (current_function_decl, "_cilk_for_fn");
2068 return clone_function_name (current_function_decl,
2069 task_copy ? "_omp_cpyfn" : "_omp_fn");
2070 }
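
/* For illustration only (the exact string is produced by
   clone_function_name, not here): for a host function foo this typically
   yields identifiers such as "foo._omp_fn.0" for an outlined region body
   and "foo._omp_cpyfn.1" for a task copy function; the trailing number is
   a per-translation-unit counter, so the digits are not guaranteed. */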
2071
2072 /* Returns the type of the induction variable for the child function for
2073 _Cilk_for and the types for the __high and __low variables based on TYPE. */
2074
2075 static tree
2076 cilk_for_check_loop_diff_type (tree type)
2077 {
2078 if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
2079 {
2080 if (TYPE_UNSIGNED (type))
2081 return uint32_type_node;
2082 else
2083 return integer_type_node;
2084 }
2085 else
2086 {
2087 if (TYPE_UNSIGNED (type))
2088 return uint64_type_node;
2089 else
2090 return long_long_integer_type_node;
2091 }
2092 }
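
/* Illustrative mapping implied by the checks above, assuming the usual
   type precisions: a signed TYPE of at most 32 bits yields
   integer_type_node, an unsigned TYPE of at most 32 bits yields
   uint32_type_node, and wider TYPEs yield long_long_integer_type_node or
   uint64_type_node depending on signedness. */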
2093
2094 /* Build a decl for the omp child function. It'll not contain a body
2095 yet, just the bare decl. */
2096
2097 static void
2098 create_omp_child_function (omp_context *ctx, bool task_copy)
2099 {
2100 tree decl, type, name, t;
2101
2102 tree cilk_for_count
2103 = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2104 ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2105 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
2106 tree cilk_var_type = NULL_TREE;
2107
2108 name = create_omp_child_function_name (task_copy,
2109 cilk_for_count != NULL_TREE);
2110 if (task_copy)
2111 type = build_function_type_list (void_type_node, ptr_type_node,
2112 ptr_type_node, NULL_TREE);
2113 else if (cilk_for_count)
2114 {
2115 type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
2116 cilk_var_type = cilk_for_check_loop_diff_type (type);
2117 type = build_function_type_list (void_type_node, ptr_type_node,
2118 cilk_var_type, cilk_var_type, NULL_TREE);
2119 }
2120 else
2121 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
2122
2123 decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
2124
2125 gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt)
2126 || !task_copy);
2127 if (!task_copy)
2128 ctx->cb.dst_fn = decl;
2129 else
2130 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
2131
2132 TREE_STATIC (decl) = 1;
2133 TREE_USED (decl) = 1;
2134 DECL_ARTIFICIAL (decl) = 1;
2135 DECL_IGNORED_P (decl) = 0;
2136 TREE_PUBLIC (decl) = 0;
2137 DECL_UNINLINABLE (decl) = 1;
2138 DECL_EXTERNAL (decl) = 0;
2139 DECL_CONTEXT (decl) = NULL_TREE;
2140 DECL_INITIAL (decl) = make_node (BLOCK);
2141 if (cgraph_node::get (current_function_decl)->offloadable)
2142 cgraph_node::get_create (decl)->offloadable = 1;
2143 else
2144 {
2145 omp_context *octx;
2146 for (octx = ctx; octx; octx = octx->outer)
2147 if (is_gimple_omp_offloaded (octx->stmt))
2148 {
2149 cgraph_node::get_create (decl)->offloadable = 1;
2150 #ifdef ENABLE_OFFLOADING
2151 g->have_offload = true;
2152 #endif
2153 break;
2154 }
2155 }
2156
2157 if (cgraph_node::get_create (decl)->offloadable
2158 && !lookup_attribute ("omp declare target",
2159 DECL_ATTRIBUTES (current_function_decl)))
2160 DECL_ATTRIBUTES (decl)
2161 = tree_cons (get_identifier ("omp target entrypoint"),
2162 NULL_TREE, DECL_ATTRIBUTES (decl));
2163
2164 t = build_decl (DECL_SOURCE_LOCATION (decl),
2165 RESULT_DECL, NULL_TREE, void_type_node);
2166 DECL_ARTIFICIAL (t) = 1;
2167 DECL_IGNORED_P (t) = 1;
2168 DECL_CONTEXT (t) = decl;
2169 DECL_RESULT (decl) = t;
2170
2171 /* _Cilk_for's child function requires two extra parameters called
2172 __low and __high that are set by the Cilk runtime when it calls this
2173 function. */
2174 if (cilk_for_count)
2175 {
2176 t = build_decl (DECL_SOURCE_LOCATION (decl),
2177 PARM_DECL, get_identifier ("__high"), cilk_var_type);
2178 DECL_ARTIFICIAL (t) = 1;
2179 DECL_NAMELESS (t) = 1;
2180 DECL_ARG_TYPE (t) = ptr_type_node;
2181 DECL_CONTEXT (t) = current_function_decl;
2182 TREE_USED (t) = 1;
2183 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2184 DECL_ARGUMENTS (decl) = t;
2185
2186 t = build_decl (DECL_SOURCE_LOCATION (decl),
2187 PARM_DECL, get_identifier ("__low"), cilk_var_type);
2188 DECL_ARTIFICIAL (t) = 1;
2189 DECL_NAMELESS (t) = 1;
2190 DECL_ARG_TYPE (t) = ptr_type_node;
2191 DECL_CONTEXT (t) = current_function_decl;
2192 TREE_USED (t) = 1;
2193 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2194 DECL_ARGUMENTS (decl) = t;
2195 }
2196
2197 tree data_name = get_identifier (".omp_data_i");
2198 t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
2199 ptr_type_node);
2200 DECL_ARTIFICIAL (t) = 1;
2201 DECL_NAMELESS (t) = 1;
2202 DECL_ARG_TYPE (t) = ptr_type_node;
2203 DECL_CONTEXT (t) = current_function_decl;
2204 TREE_USED (t) = 1;
2205 if (cilk_for_count)
2206 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2207 DECL_ARGUMENTS (decl) = t;
2208 if (!task_copy)
2209 ctx->receiver_decl = t;
2210 else
2211 {
2212 t = build_decl (DECL_SOURCE_LOCATION (decl),
2213 PARM_DECL, get_identifier (".omp_data_o"),
2214 ptr_type_node);
2215 DECL_ARTIFICIAL (t) = 1;
2216 DECL_NAMELESS (t) = 1;
2217 DECL_ARG_TYPE (t) = ptr_type_node;
2218 DECL_CONTEXT (t) = current_function_decl;
2219 TREE_USED (t) = 1;
2220 TREE_ADDRESSABLE (t) = 1;
2221 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2222 DECL_ARGUMENTS (decl) = t;
2223 }
2224
2225 /* Allocate memory for the function structure. The call to
2226 allocate_struct_function clobbers CFUN, so we need to restore
2227 it afterward. */
2228 push_struct_function (decl);
2229 cfun->function_end_locus = gimple_location (ctx->stmt);
2230 pop_cfun ();
2231 }
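
/* Hedged sketch of the resulting decl for a plain parallel region in a
   host function foo (names are illustrative only):

     static void foo._omp_fn.0 (void *.omp_data_i);

   For a _Cilk_for child function the __low and __high parameters follow
   .omp_data_i, and a task copy function instead takes two pointer
   arguments. */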
2232
2233 /* Callback for walk_gimple_seq. Check if a combined parallel
2234 contains an OMP_FOR for which gimple_omp_for_combined_into_p is set. */
2235
2236 static tree
2237 find_combined_for (gimple_stmt_iterator *gsi_p,
2238 bool *handled_ops_p,
2239 struct walk_stmt_info *wi)
2240 {
2241 gimple stmt = gsi_stmt (*gsi_p);
2242
2243 *handled_ops_p = true;
2244 switch (gimple_code (stmt))
2245 {
2246 WALK_SUBSTMTS;
2247
2248 case GIMPLE_OMP_FOR:
2249 if (gimple_omp_for_combined_into_p (stmt)
2250 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
2251 {
2252 wi->info = stmt;
2253 return integer_zero_node;
2254 }
2255 break;
2256 default:
2257 break;
2258 }
2259 return NULL;
2260 }
2261
2262 /* Scan an OpenMP parallel directive. */
2263
2264 static void
2265 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2266 {
2267 omp_context *ctx;
2268 tree name;
2269 gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));
2270
2271 /* Ignore parallel directives with empty bodies, unless there
2272 are copyin clauses. */
2273 if (optimize > 0
2274 && empty_body_p (gimple_omp_body (stmt))
2275 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2276 OMP_CLAUSE_COPYIN) == NULL)
2277 {
2278 gsi_replace (gsi, gimple_build_nop (), false);
2279 return;
2280 }
2281
2282 if (gimple_omp_parallel_combined_p (stmt))
2283 {
2284 struct walk_stmt_info wi;
2285
2286 memset (&wi, 0, sizeof (wi));
2287 wi.val_only = true;
2288 walk_gimple_seq (gimple_omp_body (stmt),
2289 find_combined_for, NULL, &wi);
2290 if (wi.info)
2291 {
2292 gomp_for *for_stmt = as_a <gomp_for *> ((gimple) wi.info);
2293 struct omp_for_data fd;
2294 extract_omp_for_data (for_stmt, &fd, NULL);
2295 /* We need two temporaries with fd.loop.v type (istart/iend)
2296 and then (fd.collapse - 1) temporaries with the same
2297 type for count2 ... countN-1 vars if not constant. */
2298 size_t count = 2, i;
2299 tree type = fd.iter_type;
2300 if (fd.collapse > 1
2301 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2302 count += fd.collapse - 1;
2303 for (i = 0; i < count; i++)
2304 {
2305 tree temp = create_tmp_var (type);
2306 tree c = build_omp_clause (UNKNOWN_LOCATION,
2307 OMP_CLAUSE__LOOPTEMP_);
2308 insert_decl_map (&outer_ctx->cb, temp, temp);
2309 OMP_CLAUSE_DECL (c) = temp;
2310 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2311 gimple_omp_parallel_set_clauses (stmt, c);
2312 }
2313 }
2314 }
2315
2316 ctx = new_omp_context (stmt, outer_ctx);
2317 taskreg_contexts.safe_push (ctx);
2318 if (taskreg_nesting_level > 1)
2319 ctx->is_nested = true;
2320 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2321 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2322 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2323 name = create_tmp_var_name (".omp_data_s");
2324 name = build_decl (gimple_location (stmt),
2325 TYPE_DECL, name, ctx->record_type);
2326 DECL_ARTIFICIAL (name) = 1;
2327 DECL_NAMELESS (name) = 1;
2328 TYPE_NAME (ctx->record_type) = name;
2329 TYPE_ARTIFICIAL (ctx->record_type) = 1;
2330 create_omp_child_function (ctx, false);
2331 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2332
2333 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2334 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2335
2336 if (TYPE_FIELDS (ctx->record_type) == NULL)
2337 ctx->record_type = ctx->receiver_decl = NULL;
2338 }
2339
2340 /* Scan an OpenMP task directive. */
2341
2342 static void
2343 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2344 {
2345 omp_context *ctx;
2346 tree name, t;
2347 gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));
2348
2349 /* Ignore task directives with empty bodies. */
2350 if (optimize > 0
2351 && empty_body_p (gimple_omp_body (stmt)))
2352 {
2353 gsi_replace (gsi, gimple_build_nop (), false);
2354 return;
2355 }
2356
2357 ctx = new_omp_context (stmt, outer_ctx);
2358 taskreg_contexts.safe_push (ctx);
2359 if (taskreg_nesting_level > 1)
2360 ctx->is_nested = true;
2361 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2362 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2363 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2364 name = create_tmp_var_name (".omp_data_s");
2365 name = build_decl (gimple_location (stmt),
2366 TYPE_DECL, name, ctx->record_type);
2367 DECL_ARTIFICIAL (name) = 1;
2368 DECL_NAMELESS (name) = 1;
2369 TYPE_NAME (ctx->record_type) = name;
2370 TYPE_ARTIFICIAL (ctx->record_type) = 1;
2371 create_omp_child_function (ctx, false);
2372 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2373
2374 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2375
2376 if (ctx->srecord_type)
2377 {
2378 name = create_tmp_var_name (".omp_data_a");
2379 name = build_decl (gimple_location (stmt),
2380 TYPE_DECL, name, ctx->srecord_type);
2381 DECL_ARTIFICIAL (name) = 1;
2382 DECL_NAMELESS (name) = 1;
2383 TYPE_NAME (ctx->srecord_type) = name;
2384 TYPE_ARTIFICIAL (ctx->srecord_type) = 1;
2385 create_omp_child_function (ctx, true);
2386 }
2387
2388 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2389
2390 if (TYPE_FIELDS (ctx->record_type) == NULL)
2391 {
2392 ctx->record_type = ctx->receiver_decl = NULL;
2393 t = build_int_cst (long_integer_type_node, 0);
2394 gimple_omp_task_set_arg_size (stmt, t);
2395 t = build_int_cst (long_integer_type_node, 1);
2396 gimple_omp_task_set_arg_align (stmt, t);
2397 }
2398 }
2399
2400
2401 /* If any decls have been made addressable during scan_omp,
2402 adjust their fields if needed, and lay out record types
2403 of parallel/task constructs. */
2404
2405 static void
2406 finish_taskreg_scan (omp_context *ctx)
2407 {
2408 if (ctx->record_type == NULL_TREE)
2409 return;
2410
2411 /* If any task_shared_vars were needed, verify for all
2412 OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
2413 statements that use_pointer_for_field hasn't changed
2414 because of that. If it did, update the field types now. */
2415 if (task_shared_vars)
2416 {
2417 tree c;
2418
2419 for (c = gimple_omp_taskreg_clauses (ctx->stmt);
2420 c; c = OMP_CLAUSE_CHAIN (c))
2421 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
2422 {
2423 tree decl = OMP_CLAUSE_DECL (c);
2424
2425 /* Global variables don't need to be copied;
2426 the receiver side will use them directly. */
2427 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2428 continue;
2429 if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
2430 || !use_pointer_for_field (decl, ctx))
2431 continue;
2432 tree field = lookup_field (decl, ctx);
2433 if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
2434 && TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
2435 continue;
2436 TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
2437 TREE_THIS_VOLATILE (field) = 0;
2438 DECL_USER_ALIGN (field) = 0;
2439 DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field));
2440 if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
2441 TYPE_ALIGN (ctx->record_type) = DECL_ALIGN (field);
2442 if (ctx->srecord_type)
2443 {
2444 tree sfield = lookup_sfield (decl, ctx);
2445 TREE_TYPE (sfield) = TREE_TYPE (field);
2446 TREE_THIS_VOLATILE (sfield) = 0;
2447 DECL_USER_ALIGN (sfield) = 0;
2448 DECL_ALIGN (sfield) = DECL_ALIGN (field);
2449 if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
2450 TYPE_ALIGN (ctx->srecord_type) = DECL_ALIGN (sfield);
2451 }
2452 }
2453 }
2454
2455 if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2456 {
2457 layout_type (ctx->record_type);
2458 fixup_child_record_type (ctx);
2459 }
2460 else
2461 {
2462 location_t loc = gimple_location (ctx->stmt);
2463 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2464 /* Move VLA fields to the end. */
2465 p = &TYPE_FIELDS (ctx->record_type);
2466 while (*p)
2467 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2468 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2469 {
2470 *q = *p;
2471 *p = TREE_CHAIN (*p);
2472 TREE_CHAIN (*q) = NULL_TREE;
2473 q = &TREE_CHAIN (*q);
2474 }
2475 else
2476 p = &DECL_CHAIN (*p);
2477 *p = vla_fields;
2478 layout_type (ctx->record_type);
2479 fixup_child_record_type (ctx);
2480 if (ctx->srecord_type)
2481 layout_type (ctx->srecord_type);
2482 tree t = fold_convert_loc (loc, long_integer_type_node,
2483 TYPE_SIZE_UNIT (ctx->record_type));
2484 gimple_omp_task_set_arg_size (ctx->stmt, t);
2485 t = build_int_cst (long_integer_type_node,
2486 TYPE_ALIGN_UNIT (ctx->record_type));
2487 gimple_omp_task_set_arg_align (ctx->stmt, t);
2488 }
2489 }
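
/* Illustrative effect of the VLA handling above: if the task record was
   scanned with fields in the order { fixed A, variable-sized B, fixed C },
   the rechaining yields { A, C, B } so that all constant-size fields come
   first, after which the record is laid out and its size and alignment are
   stored in the GIMPLE_OMP_TASK arg_size/arg_align operands. */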
2490
2491
2492 static omp_context *
2493 enclosing_target_ctx (omp_context *ctx)
2494 {
2495 while (ctx != NULL
2496 && gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
2497 ctx = ctx->outer;
2498 gcc_assert (ctx != NULL);
2499 return ctx;
2500 }
2501
2502 static bool
2503 oacc_loop_or_target_p (gimple stmt)
2504 {
2505 enum gimple_code outer_type = gimple_code (stmt);
2506 return ((outer_type == GIMPLE_OMP_TARGET
2507 && ((gimple_omp_target_kind (stmt)
2508 == GF_OMP_TARGET_KIND_OACC_PARALLEL)
2509 || (gimple_omp_target_kind (stmt)
2510 == GF_OMP_TARGET_KIND_OACC_KERNELS)))
2511 || (outer_type == GIMPLE_OMP_FOR
2512 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP));
2513 }
2514
2515 /* Scan a GIMPLE_OMP_FOR. */
2516
2517 static void
2518 scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
2519 {
2520 enum gimple_code outer_type = GIMPLE_ERROR_MARK;
2521 omp_context *ctx;
2522 size_t i;
2523 tree clauses = gimple_omp_for_clauses (stmt);
2524
2525 if (outer_ctx)
2526 outer_type = gimple_code (outer_ctx->stmt);
2527
2528 ctx = new_omp_context (stmt, outer_ctx);
2529
2530 if (is_gimple_omp_oacc (stmt))
2531 {
2532 if (outer_ctx && outer_type == GIMPLE_OMP_FOR)
2533 ctx->gwv_this = outer_ctx->gwv_this;
2534 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2535 {
2536 int val;
2537 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_GANG)
2538 val = MASK_GANG;
2539 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WORKER)
2540 val = MASK_WORKER;
2541 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_VECTOR)
2542 val = MASK_VECTOR;
2543 else
2544 continue;
2545 ctx->gwv_this |= val;
2546 if (!outer_ctx)
2547 {
2548 /* Skip; not nested inside a region. */
2549 continue;
2550 }
2551 if (!oacc_loop_or_target_p (outer_ctx->stmt))
2552 {
2553 /* Skip; not nested inside an OpenACC region. */
2554 continue;
2555 }
2556 if (outer_type == GIMPLE_OMP_FOR)
2557 outer_ctx->gwv_below |= val;
2558 if (OMP_CLAUSE_OPERAND (c, 0) != NULL_TREE)
2559 {
2560 omp_context *enclosing = enclosing_target_ctx (outer_ctx);
2561 if (gimple_omp_target_kind (enclosing->stmt)
2562 == GF_OMP_TARGET_KIND_OACC_PARALLEL)
2563 error_at (gimple_location (stmt),
2564 "no arguments allowed to gang, worker and vector clauses inside parallel");
2565 }
2566 }
2567 }
2568
2569 scan_sharing_clauses (clauses, ctx);
2570
2571 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2572 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2573 {
2574 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2575 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2576 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2577 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2578 }
2579 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2580
2581 if (is_gimple_omp_oacc (stmt))
2582 {
2583 if (ctx->gwv_this & ctx->gwv_below)
2584 error_at (gimple_location (stmt),
2585 "gang, worker and vector may occur only once in a loop nest");
2586 else if (ctx->gwv_below != 0
2587 && ctx->gwv_this > ctx->gwv_below)
2588 error_at (gimple_location (stmt),
2589 "gang, worker and vector must occur in this order in a loop nest");
2590 if (outer_ctx && outer_type == GIMPLE_OMP_FOR)
2591 outer_ctx->gwv_below |= ctx->gwv_below;
2592 }
2593 }
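
/* Hedged illustration of the OpenACC checks above: a loop nest such as

     #pragma acc loop gang
     for (i = 0; i < n; i++)
       #pragma acc loop worker
       for (j = 0; j < n; j++)
         #pragma acc loop vector
         for (k = 0; k < n; k++)
           body (i, j, k);

   is accepted, whereas repeating one of the clauses in the nest, or using
   them out of gang/worker/vector order, triggers the two errors emitted
   above. */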
2594
2595 /* Scan an OpenMP sections directive. */
2596
2597 static void
2598 scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
2599 {
2600 omp_context *ctx;
2601
2602 ctx = new_omp_context (stmt, outer_ctx);
2603 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2604 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2605 }
2606
2607 /* Scan an OpenMP single directive. */
2608
2609 static void
2610 scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
2611 {
2612 omp_context *ctx;
2613 tree name;
2614
2615 ctx = new_omp_context (stmt, outer_ctx);
2616 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2617 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2618 name = create_tmp_var_name (".omp_copy_s");
2619 name = build_decl (gimple_location (stmt),
2620 TYPE_DECL, name, ctx->record_type);
2621 TYPE_NAME (ctx->record_type) = name;
2622
2623 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2624 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2625
2626 if (TYPE_FIELDS (ctx->record_type) == NULL)
2627 ctx->record_type = NULL;
2628 else
2629 layout_type (ctx->record_type);
2630 }
2631
2632 /* Scan a GIMPLE_OMP_TARGET. */
2633
2634 static void
2635 scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
2636 {
2637 omp_context *ctx;
2638 tree name;
2639 bool offloaded = is_gimple_omp_offloaded (stmt);
2640 tree clauses = gimple_omp_target_clauses (stmt);
2641
2642 ctx = new_omp_context (stmt, outer_ctx);
2643 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2644 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2645 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2646 name = create_tmp_var_name (".omp_data_t");
2647 name = build_decl (gimple_location (stmt),
2648 TYPE_DECL, name, ctx->record_type);
2649 DECL_ARTIFICIAL (name) = 1;
2650 DECL_NAMELESS (name) = 1;
2651 TYPE_NAME (ctx->record_type) = name;
2652 TYPE_ARTIFICIAL (ctx->record_type) = 1;
2653 if (offloaded)
2654 {
2655 if (is_gimple_omp_oacc (stmt))
2656 ctx->reduction_map = splay_tree_new (splay_tree_compare_pointers,
2657 0, 0);
2658
2659 create_omp_child_function (ctx, false);
2660 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2661 }
2662
2663 if (is_gimple_omp_oacc (stmt))
2664 {
2665 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2666 {
2667 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_GANGS)
2668 ctx->gwv_this |= MASK_GANG;
2669 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_WORKERS)
2670 ctx->gwv_this |= MASK_WORKER;
2671 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_VECTOR_LENGTH)
2672 ctx->gwv_this |= MASK_VECTOR;
2673 }
2674 }
2675
2676 scan_sharing_clauses (clauses, ctx);
2677 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2678
2679 if (TYPE_FIELDS (ctx->record_type) == NULL)
2680 ctx->record_type = ctx->receiver_decl = NULL;
2681 else
2682 {
2683 TYPE_FIELDS (ctx->record_type)
2684 = nreverse (TYPE_FIELDS (ctx->record_type));
2685 #ifdef ENABLE_CHECKING
2686 tree field;
2687 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2688 for (field = TYPE_FIELDS (ctx->record_type);
2689 field;
2690 field = DECL_CHAIN (field))
2691 gcc_assert (DECL_ALIGN (field) == align);
2692 #endif
2693 layout_type (ctx->record_type);
2694 if (offloaded)
2695 fixup_child_record_type (ctx);
2696 }
2697 }
2698
2699 /* Scan an OpenMP teams directive. */
2700
2701 static void
2702 scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
2703 {
2704 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2705 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2706 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2707 }
2708
2709 /* Check nesting restrictions. */
2710 static bool
2711 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2712 {
2713 /* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP builtin)
2714 inside an OpenACC CTX. */
2715 if (!(is_gimple_omp (stmt)
2716 && is_gimple_omp_oacc (stmt)))
2717 {
2718 for (omp_context *ctx_ = ctx; ctx_ != NULL; ctx_ = ctx_->outer)
2719 if (is_gimple_omp (ctx_->stmt)
2720 && is_gimple_omp_oacc (ctx_->stmt))
2721 {
2722 error_at (gimple_location (stmt),
2723 "non-OpenACC construct inside of OpenACC region");
2724 return false;
2725 }
2726 }
2727
2728 if (ctx != NULL)
2729 {
2730 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2731 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2732 {
2733 error_at (gimple_location (stmt),
2734 "OpenMP constructs may not be nested inside simd region");
2735 return false;
2736 }
2737 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2738 {
2739 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2740 || (gimple_omp_for_kind (stmt)
2741 != GF_OMP_FOR_KIND_DISTRIBUTE))
2742 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2743 {
2744 error_at (gimple_location (stmt),
2745 "only distribute or parallel constructs are allowed to "
2746 "be closely nested inside teams construct");
2747 return false;
2748 }
2749 }
2750 }
2751 switch (gimple_code (stmt))
2752 {
2753 case GIMPLE_OMP_FOR:
2754 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
2755 return true;
2756 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2757 {
2758 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2759 {
2760 error_at (gimple_location (stmt),
2761 "distribute construct must be closely nested inside "
2762 "teams construct");
2763 return false;
2764 }
2765 return true;
2766 }
2767 /* FALLTHRU */
2768 case GIMPLE_CALL:
2769 if (is_gimple_call (stmt)
2770 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2771 == BUILT_IN_GOMP_CANCEL
2772 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2773 == BUILT_IN_GOMP_CANCELLATION_POINT))
2774 {
2775 const char *bad = NULL;
2776 const char *kind = NULL;
2777 if (ctx == NULL)
2778 {
2779 error_at (gimple_location (stmt), "orphaned %qs construct",
2780 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2781 == BUILT_IN_GOMP_CANCEL
2782 ? "#pragma omp cancel"
2783 : "#pragma omp cancellation point");
2784 return false;
2785 }
2786 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2787 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2788 : 0)
2789 {
2790 case 1:
2791 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2792 bad = "#pragma omp parallel";
2793 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2794 == BUILT_IN_GOMP_CANCEL
2795 && !integer_zerop (gimple_call_arg (stmt, 1)))
2796 ctx->cancellable = true;
2797 kind = "parallel";
2798 break;
2799 case 2:
2800 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2801 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2802 bad = "#pragma omp for";
2803 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2804 == BUILT_IN_GOMP_CANCEL
2805 && !integer_zerop (gimple_call_arg (stmt, 1)))
2806 {
2807 ctx->cancellable = true;
2808 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2809 OMP_CLAUSE_NOWAIT))
2810 warning_at (gimple_location (stmt), 0,
2811 "%<#pragma omp cancel for%> inside "
2812 "%<nowait%> for construct");
2813 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2814 OMP_CLAUSE_ORDERED))
2815 warning_at (gimple_location (stmt), 0,
2816 "%<#pragma omp cancel for%> inside "
2817 "%<ordered%> for construct");
2818 }
2819 kind = "for";
2820 break;
2821 case 4:
2822 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2823 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2824 bad = "#pragma omp sections";
2825 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2826 == BUILT_IN_GOMP_CANCEL
2827 && !integer_zerop (gimple_call_arg (stmt, 1)))
2828 {
2829 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2830 {
2831 ctx->cancellable = true;
2832 if (find_omp_clause (gimple_omp_sections_clauses
2833 (ctx->stmt),
2834 OMP_CLAUSE_NOWAIT))
2835 warning_at (gimple_location (stmt), 0,
2836 "%<#pragma omp cancel sections%> inside "
2837 "%<nowait%> sections construct");
2838 }
2839 else
2840 {
2841 gcc_assert (ctx->outer
2842 && gimple_code (ctx->outer->stmt)
2843 == GIMPLE_OMP_SECTIONS);
2844 ctx->outer->cancellable = true;
2845 if (find_omp_clause (gimple_omp_sections_clauses
2846 (ctx->outer->stmt),
2847 OMP_CLAUSE_NOWAIT))
2848 warning_at (gimple_location (stmt), 0,
2849 "%<#pragma omp cancel sections%> inside "
2850 "%<nowait%> sections construct");
2851 }
2852 }
2853 kind = "sections";
2854 break;
2855 case 8:
2856 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2857 bad = "#pragma omp task";
2858 else
2859 ctx->cancellable = true;
2860 kind = "taskgroup";
2861 break;
2862 default:
2863 error_at (gimple_location (stmt), "invalid arguments");
2864 return false;
2865 }
2866 if (bad)
2867 {
2868 error_at (gimple_location (stmt),
2869 "%<%s %s%> construct not closely nested inside of %qs",
2870 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2871 == BUILT_IN_GOMP_CANCEL
2872 ? "#pragma omp cancel"
2873 : "#pragma omp cancellation point", kind, bad);
2874 return false;
2875 }
2876 }
2877 /* FALLTHRU */
2878 case GIMPLE_OMP_SECTIONS:
2879 case GIMPLE_OMP_SINGLE:
2880 for (; ctx != NULL; ctx = ctx->outer)
2881 switch (gimple_code (ctx->stmt))
2882 {
2883 case GIMPLE_OMP_FOR:
2884 case GIMPLE_OMP_SECTIONS:
2885 case GIMPLE_OMP_SINGLE:
2886 case GIMPLE_OMP_ORDERED:
2887 case GIMPLE_OMP_MASTER:
2888 case GIMPLE_OMP_TASK:
2889 case GIMPLE_OMP_CRITICAL:
2890 if (is_gimple_call (stmt))
2891 {
2892 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2893 != BUILT_IN_GOMP_BARRIER)
2894 return true;
2895 error_at (gimple_location (stmt),
2896 "barrier region may not be closely nested inside "
2897 "of work-sharing, critical, ordered, master or "
2898 "explicit task region");
2899 return false;
2900 }
2901 error_at (gimple_location (stmt),
2902 "work-sharing region may not be closely nested inside "
2903 "of work-sharing, critical, ordered, master or explicit "
2904 "task region");
2905 return false;
2906 case GIMPLE_OMP_PARALLEL:
2907 return true;
2908 default:
2909 break;
2910 }
2911 break;
2912 case GIMPLE_OMP_MASTER:
2913 for (; ctx != NULL; ctx = ctx->outer)
2914 switch (gimple_code (ctx->stmt))
2915 {
2916 case GIMPLE_OMP_FOR:
2917 case GIMPLE_OMP_SECTIONS:
2918 case GIMPLE_OMP_SINGLE:
2919 case GIMPLE_OMP_TASK:
2920 error_at (gimple_location (stmt),
2921 "master region may not be closely nested inside "
2922 "of work-sharing or explicit task region");
2923 return false;
2924 case GIMPLE_OMP_PARALLEL:
2925 return true;
2926 default:
2927 break;
2928 }
2929 break;
2930 case GIMPLE_OMP_ORDERED:
2931 for (; ctx != NULL; ctx = ctx->outer)
2932 switch (gimple_code (ctx->stmt))
2933 {
2934 case GIMPLE_OMP_CRITICAL:
2935 case GIMPLE_OMP_TASK:
2936 error_at (gimple_location (stmt),
2937 "ordered region may not be closely nested inside "
2938 "of critical or explicit task region");
2939 return false;
2940 case GIMPLE_OMP_FOR:
2941 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2942 OMP_CLAUSE_ORDERED) == NULL)
2943 {
2944 error_at (gimple_location (stmt),
2945 "ordered region must be closely nested inside "
2946 "a loop region with an ordered clause");
2947 return false;
2948 }
2949 return true;
2950 case GIMPLE_OMP_PARALLEL:
2951 error_at (gimple_location (stmt),
2952 "ordered region must be closely nested inside "
2953 "a loop region with an ordered clause");
2954 return false;
2955 default:
2956 break;
2957 }
2958 break;
2959 case GIMPLE_OMP_CRITICAL:
2960 {
2961 tree this_stmt_name
2962 = gimple_omp_critical_name (as_a <gomp_critical *> (stmt));
2963 for (; ctx != NULL; ctx = ctx->outer)
2964 if (gomp_critical *other_crit
2965 = dyn_cast <gomp_critical *> (ctx->stmt))
2966 if (this_stmt_name == gimple_omp_critical_name (other_crit))
2967 {
2968 error_at (gimple_location (stmt),
2969 "critical region may not be nested inside a critical "
2970 "region with the same name");
2971 return false;
2972 }
2973 }
2974 break;
2975 case GIMPLE_OMP_TEAMS:
2976 if (ctx == NULL
2977 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2978 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2979 {
2980 error_at (gimple_location (stmt),
2981 "teams construct not closely nested inside of target "
2982 "region");
2983 return false;
2984 }
2985 break;
2986 case GIMPLE_OMP_TARGET:
2987 for (; ctx != NULL; ctx = ctx->outer)
2988 {
2989 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
2990 {
2991 if (is_gimple_omp (stmt)
2992 && is_gimple_omp_oacc (stmt)
2993 && is_gimple_omp (ctx->stmt))
2994 {
2995 error_at (gimple_location (stmt),
2996 "OpenACC construct inside of non-OpenACC region");
2997 return false;
2998 }
2999 continue;
3000 }
3001
3002 const char *stmt_name, *ctx_stmt_name;
3003 switch (gimple_omp_target_kind (stmt))
3004 {
3005 case GF_OMP_TARGET_KIND_REGION: stmt_name = "target"; break;
3006 case GF_OMP_TARGET_KIND_DATA: stmt_name = "target data"; break;
3007 case GF_OMP_TARGET_KIND_UPDATE: stmt_name = "target update"; break;
3008 case GF_OMP_TARGET_KIND_OACC_PARALLEL: stmt_name = "parallel"; break;
3009 case GF_OMP_TARGET_KIND_OACC_KERNELS: stmt_name = "kernels"; break;
3010 case GF_OMP_TARGET_KIND_OACC_DATA: stmt_name = "data"; break;
3011 case GF_OMP_TARGET_KIND_OACC_UPDATE: stmt_name = "update"; break;
3012 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: stmt_name = "enter/exit data"; break;
3013 default: gcc_unreachable ();
3014 }
3015 switch (gimple_omp_target_kind (ctx->stmt))
3016 {
3017 case GF_OMP_TARGET_KIND_REGION: ctx_stmt_name = "target"; break;
3018 case GF_OMP_TARGET_KIND_DATA: ctx_stmt_name = "target data"; break;
3019 case GF_OMP_TARGET_KIND_OACC_PARALLEL: ctx_stmt_name = "parallel"; break;
3020 case GF_OMP_TARGET_KIND_OACC_KERNELS: ctx_stmt_name = "kernels"; break;
3021 case GF_OMP_TARGET_KIND_OACC_DATA: ctx_stmt_name = "data"; break;
3022 default: gcc_unreachable ();
3023 }
3024
3025 /* OpenACC/OpenMP mismatch? */
3026 if (is_gimple_omp_oacc (stmt)
3027 != is_gimple_omp_oacc (ctx->stmt))
3028 {
3029 error_at (gimple_location (stmt),
3030 "%s %s construct inside of %s %s region",
3031 (is_gimple_omp_oacc (stmt)
3032 ? "OpenACC" : "OpenMP"), stmt_name,
3033 (is_gimple_omp_oacc (ctx->stmt)
3034 ? "OpenACC" : "OpenMP"), ctx_stmt_name);
3035 return false;
3036 }
3037 if (is_gimple_omp_offloaded (ctx->stmt))
3038 {
3039 /* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX. */
3040 if (is_gimple_omp_oacc (ctx->stmt))
3041 {
3042 error_at (gimple_location (stmt),
3043 "%s construct inside of %s region",
3044 stmt_name, ctx_stmt_name);
3045 return false;
3046 }
3047 else
3048 {
3049 gcc_checking_assert (!is_gimple_omp_oacc (stmt));
3050 warning_at (gimple_location (stmt), 0,
3051 "%s construct inside of %s region",
3052 stmt_name, ctx_stmt_name);
3053 }
3054 }
3055 }
3056 break;
3057 default:
3058 break;
3059 }
3060 return true;
3061 }
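
/* Hedged example of a nesting the checks above reject (code is
   illustrative only):

     #pragma omp for
     for (i = 0; i < n; i++)
       {
       #pragma omp single
	 f (i);
       }

   The single is a work-sharing region closely nested inside the
   work-sharing for region without an intervening parallel, so the
   "work-sharing region may not be closely nested ..." error is emitted. */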
3062
3063
3064 /* Helper function for scan_omp.
3065
3066 Callback for walk_tree or operators in walk_gimple_stmt used to
3067 scan for OMP directives in TP. */
3068
3069 static tree
3070 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
3071 {
3072 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
3073 omp_context *ctx = (omp_context *) wi->info;
3074 tree t = *tp;
3075
3076 switch (TREE_CODE (t))
3077 {
3078 case VAR_DECL:
3079 case PARM_DECL:
3080 case LABEL_DECL:
3081 case RESULT_DECL:
3082 if (ctx)
3083 *tp = remap_decl (t, &ctx->cb);
3084 break;
3085
3086 default:
3087 if (ctx && TYPE_P (t))
3088 *tp = remap_type (t, &ctx->cb);
3089 else if (!DECL_P (t))
3090 {
3091 *walk_subtrees = 1;
3092 if (ctx)
3093 {
3094 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
3095 if (tem != TREE_TYPE (t))
3096 {
3097 if (TREE_CODE (t) == INTEGER_CST)
3098 *tp = wide_int_to_tree (tem, t);
3099 else
3100 TREE_TYPE (t) = tem;
3101 }
3102 }
3103 }
3104 break;
3105 }
3106
3107 return NULL_TREE;
3108 }
3109
3110 /* Return true if FNDECL is a setjmp or a longjmp. */
3111
3112 static bool
3113 setjmp_or_longjmp_p (const_tree fndecl)
3114 {
3115 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
3116 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
3117 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
3118 return true;
3119
3120 tree declname = DECL_NAME (fndecl);
3121 if (!declname)
3122 return false;
3123 const char *name = IDENTIFIER_POINTER (declname);
3124 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
3125 }
3126
3127
3128 /* Helper function for scan_omp.
3129
3130 Callback for walk_gimple_stmt used to scan for OMP directives in
3131 the current statement in GSI. */
3132
3133 static tree
3134 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
3135 struct walk_stmt_info *wi)
3136 {
3137 gimple stmt = gsi_stmt (*gsi);
3138 omp_context *ctx = (omp_context *) wi->info;
3139
3140 if (gimple_has_location (stmt))
3141 input_location = gimple_location (stmt);
3142
3143 /* Check the nesting restrictions. */
3144 bool remove = false;
3145 if (is_gimple_omp (stmt))
3146 remove = !check_omp_nesting_restrictions (stmt, ctx);
3147 else if (is_gimple_call (stmt))
3148 {
3149 tree fndecl = gimple_call_fndecl (stmt);
3150 if (fndecl)
3151 {
3152 if (setjmp_or_longjmp_p (fndecl)
3153 && ctx
3154 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3155 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3156 {
3157 remove = true;
3158 error_at (gimple_location (stmt),
3159 "setjmp/longjmp inside simd construct");
3160 }
3161 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3162 switch (DECL_FUNCTION_CODE (fndecl))
3163 {
3164 case BUILT_IN_GOMP_BARRIER:
3165 case BUILT_IN_GOMP_CANCEL:
3166 case BUILT_IN_GOMP_CANCELLATION_POINT:
3167 case BUILT_IN_GOMP_TASKYIELD:
3168 case BUILT_IN_GOMP_TASKWAIT:
3169 case BUILT_IN_GOMP_TASKGROUP_START:
3170 case BUILT_IN_GOMP_TASKGROUP_END:
3171 remove = !check_omp_nesting_restrictions (stmt, ctx);
3172 break;
3173 default:
3174 break;
3175 }
3176 }
3177 }
3178 if (remove)
3179 {
3180 stmt = gimple_build_nop ();
3181 gsi_replace (gsi, stmt, false);
3182 }
3183
3184 *handled_ops_p = true;
3185
3186 switch (gimple_code (stmt))
3187 {
3188 case GIMPLE_OMP_PARALLEL:
3189 taskreg_nesting_level++;
3190 scan_omp_parallel (gsi, ctx);
3191 taskreg_nesting_level--;
3192 break;
3193
3194 case GIMPLE_OMP_TASK:
3195 taskreg_nesting_level++;
3196 scan_omp_task (gsi, ctx);
3197 taskreg_nesting_level--;
3198 break;
3199
3200 case GIMPLE_OMP_FOR:
3201 scan_omp_for (as_a <gomp_for *> (stmt), ctx);
3202 break;
3203
3204 case GIMPLE_OMP_SECTIONS:
3205 scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
3206 break;
3207
3208 case GIMPLE_OMP_SINGLE:
3209 scan_omp_single (as_a <gomp_single *> (stmt), ctx);
3210 break;
3211
3212 case GIMPLE_OMP_SECTION:
3213 case GIMPLE_OMP_MASTER:
3214 case GIMPLE_OMP_TASKGROUP:
3215 case GIMPLE_OMP_ORDERED:
3216 case GIMPLE_OMP_CRITICAL:
3217 ctx = new_omp_context (stmt, ctx);
3218 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3219 break;
3220
3221 case GIMPLE_OMP_TARGET:
3222 scan_omp_target (as_a <gomp_target *> (stmt), ctx);
3223 break;
3224
3225 case GIMPLE_OMP_TEAMS:
3226 scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
3227 break;
3228
3229 case GIMPLE_BIND:
3230 {
3231 tree var;
3232
3233 *handled_ops_p = false;
3234 if (ctx)
3235 for (var = gimple_bind_vars (as_a <gbind *> (stmt));
3236 var ;
3237 var = DECL_CHAIN (var))
3238 insert_decl_map (&ctx->cb, var, var);
3239 }
3240 break;
3241 default:
3242 *handled_ops_p = false;
3243 break;
3244 }
3245
3246 return NULL_TREE;
3247 }
3248
3249
3250 /* Scan all the statements starting at the current statement. CTX
3251 contains context information about the OMP directives and
3252 clauses found during the scan. */
3253
3254 static void
3255 scan_omp (gimple_seq *body_p, omp_context *ctx)
3256 {
3257 location_t saved_location;
3258 struct walk_stmt_info wi;
3259
3260 memset (&wi, 0, sizeof (wi));
3261 wi.info = ctx;
3262 wi.want_locations = true;
3263
3264 saved_location = input_location;
3265 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
3266 input_location = saved_location;
3267 }
3268 \f
3269 /* Re-gimplification and code generation routines. */
3270
3271 /* Build a call to GOMP_barrier. */
3272
3273 static gimple
3274 build_omp_barrier (tree lhs)
3275 {
3276 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
3277 : BUILT_IN_GOMP_BARRIER);
3278 gcall *g = gimple_build_call (fndecl, 0);
3279 if (lhs)
3280 gimple_call_set_lhs (g, lhs);
3281 return g;
3282 }
3283
3284 /* If a context was created for STMT when it was scanned, return it. */
3285
3286 static omp_context *
3287 maybe_lookup_ctx (gimple stmt)
3288 {
3289 splay_tree_node n;
3290 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
3291 return n ? (omp_context *) n->value : NULL;
3292 }
3293
3294
3295 /* Find the mapping for DECL in CTX or the immediately enclosing
3296 context that has a mapping for DECL.
3297
3298 If CTX is a nested parallel directive, we may have to use the decl
3299 mappings created in CTX's parent context. Suppose that we have the
3300 following parallel nesting (variable UIDs shown for clarity):
3301
3302 iD.1562 = 0;
3303 #omp parallel shared(iD.1562) -> outer parallel
3304 iD.1562 = iD.1562 + 1;
3305
3306 #omp parallel shared (iD.1562) -> inner parallel
3307 iD.1562 = iD.1562 - 1;
3308
3309 Each parallel structure will create a distinct .omp_data_s structure
3310 for copying iD.1562 in/out of the directive:
3311
3312 outer parallel .omp_data_s.1.i -> iD.1562
3313 inner parallel .omp_data_s.2.i -> iD.1562
3314
3315 A shared variable mapping will produce a copy-out operation before
3316 the parallel directive and a copy-in operation after it. So, in
3317 this case we would have:
3318
3319 iD.1562 = 0;
3320 .omp_data_o.1.i = iD.1562;
3321 #omp parallel shared(iD.1562) -> outer parallel
3322 .omp_data_i.1 = &.omp_data_o.1
3323 .omp_data_i.1->i = .omp_data_i.1->i + 1;
3324
3325 .omp_data_o.2.i = iD.1562; -> **
3326 #omp parallel shared(iD.1562) -> inner parallel
3327 .omp_data_i.2 = &.omp_data_o.2
3328 .omp_data_i.2->i = .omp_data_i.2->i - 1;
3329
3330
3331 ** This is a problem. The symbol iD.1562 cannot be referenced
3332 inside the body of the outer parallel region. But since we are
3333 emitting this copy operation while expanding the inner parallel
3334 directive, we need to access the CTX structure of the outer
3335 parallel directive to get the correct mapping:
3336
3337 .omp_data_o.2.i = .omp_data_i.1->i
3338
3339 Since there may be other workshare or parallel directives enclosing
3340 the parallel directive, it may be necessary to walk up the context
3341 parent chain. This is not a problem in general because nested
3342 parallelism happens only rarely. */
3343
3344 static tree
3345 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
3346 {
3347 tree t;
3348 omp_context *up;
3349
3350 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
3351 t = maybe_lookup_decl (decl, up);
3352
3353 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
3354
3355 return t ? t : decl;
3356 }
3357
3358
3359 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
3360 in outer contexts. */
3361
3362 static tree
3363 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
3364 {
3365 tree t = NULL;
3366 omp_context *up;
3367
3368 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
3369 t = maybe_lookup_decl (decl, up);
3370
3371 return t ? t : decl;
3372 }
3373
3374
3375 /* Construct the initialization value for reduction CLAUSE. */
3376
3377 tree
3378 omp_reduction_init (tree clause, tree type)
3379 {
3380 location_t loc = OMP_CLAUSE_LOCATION (clause);
3381 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
3382 {
3383 case PLUS_EXPR:
3384 case MINUS_EXPR:
3385 case BIT_IOR_EXPR:
3386 case BIT_XOR_EXPR:
3387 case TRUTH_OR_EXPR:
3388 case TRUTH_ORIF_EXPR:
3389 case TRUTH_XOR_EXPR:
3390 case NE_EXPR:
3391 return build_zero_cst (type);
3392
3393 case MULT_EXPR:
3394 case TRUTH_AND_EXPR:
3395 case TRUTH_ANDIF_EXPR:
3396 case EQ_EXPR:
3397 return fold_convert_loc (loc, type, integer_one_node);
3398
3399 case BIT_AND_EXPR:
3400 return fold_convert_loc (loc, type, integer_minus_one_node);
3401
3402 case MAX_EXPR:
3403 if (SCALAR_FLOAT_TYPE_P (type))
3404 {
3405 REAL_VALUE_TYPE max, min;
3406 if (HONOR_INFINITIES (type))
3407 {
3408 real_inf (&max);
3409 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
3410 }
3411 else
3412 real_maxval (&min, 1, TYPE_MODE (type));
3413 return build_real (type, min);
3414 }
3415 else
3416 {
3417 gcc_assert (INTEGRAL_TYPE_P (type));
3418 return TYPE_MIN_VALUE (type);
3419 }
3420
3421 case MIN_EXPR:
3422 if (SCALAR_FLOAT_TYPE_P (type))
3423 {
3424 REAL_VALUE_TYPE max;
3425 if (HONOR_INFINITIES (type))
3426 real_inf (&max);
3427 else
3428 real_maxval (&max, 0, TYPE_MODE (type));
3429 return build_real (type, max);
3430 }
3431 else
3432 {
3433 gcc_assert (INTEGRAL_TYPE_P (type));
3434 return TYPE_MAX_VALUE (type);
3435 }
3436
3437 default:
3438 gcc_unreachable ();
3439 }
3440 }
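
/* Illustrative identity values produced above for a signed integer TYPE:
   0 for +, -, |, ^, || and !=; 1 for *, && and ==; ~0 for &;
   TYPE_MIN_VALUE for max and TYPE_MAX_VALUE for min. Floating-point
   max/min use -inf/+inf when infinities are honored, otherwise the most
   negative/largest finite value. */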
3441
3442 /* Return alignment to be assumed for var in CLAUSE, which should be
3443 OMP_CLAUSE_ALIGNED. */
3444
3445 static tree
3446 omp_clause_aligned_alignment (tree clause)
3447 {
3448 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
3449 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
3450
3451 /* Otherwise return implementation defined alignment. */
3452 unsigned int al = 1;
3453 machine_mode mode, vmode;
3454 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3455 if (vs)
3456 vs = 1 << floor_log2 (vs);
3457 static enum mode_class classes[]
3458 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
3459 for (int i = 0; i < 4; i += 2)
3460 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
3461 mode != VOIDmode;
3462 mode = GET_MODE_WIDER_MODE (mode))
3463 {
3464 vmode = targetm.vectorize.preferred_simd_mode (mode);
3465 if (GET_MODE_CLASS (vmode) != classes[i + 1])
3466 continue;
3467 while (vs
3468 && GET_MODE_SIZE (vmode) < vs
3469 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
3470 vmode = GET_MODE_2XWIDER_MODE (vmode);
3471
3472 tree type = lang_hooks.types.type_for_mode (mode, 1);
3473 if (type == NULL_TREE || TYPE_MODE (type) != mode)
3474 continue;
3475 type = build_vector_type (type, GET_MODE_SIZE (vmode)
3476 / GET_MODE_SIZE (mode));
3477 if (TYPE_MODE (type) != vmode)
3478 continue;
3479 if (TYPE_ALIGN_UNIT (type) > al)
3480 al = TYPE_ALIGN_UNIT (type);
3481 }
3482 return build_int_cst (integer_type_node, al);
3483 }
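
/* Hedged example: an explicit "aligned (p : 64)" clause makes this return
   64 directly; otherwise the loop above returns the largest unit alignment
   among the vector types built for the target's preferred SIMD modes,
   e.g. 16 on a target whose widest preferred SIMD mode is 128 bits. */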
3484
3485 /* Return maximum possible vectorization factor for the target. */
3486
3487 static int
3488 omp_max_vf (void)
3489 {
3490 if (!optimize
3491 || optimize_debug
3492 || !flag_tree_loop_optimize
3493 || (!flag_tree_loop_vectorize
3494 && (global_options_set.x_flag_tree_loop_vectorize
3495 || global_options_set.x_flag_tree_vectorize)))
3496 return 1;
3497
3498 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3499 if (vs)
3500 {
3501 vs = 1 << floor_log2 (vs);
3502 return vs;
3503 }
3504 machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
3505 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
3506 return GET_MODE_NUNITS (vqimode);
3507 return 1;
3508 }
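
/* Hedged example: if the target's autovectorize_vector_sizes hook reports
   16- and 32-byte vector sizes, the result is 32 (the largest reported
   power of two); targets reporting nothing fall back to the number of
   QImode elements in the preferred SIMD mode, and 1 is returned when the
   loop vectorizer is disabled or not enabled by the optimization level. */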
3509
3510 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
3511 privatization. */
3512
3513 static bool
3514 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
3515 tree &idx, tree &lane, tree &ivar, tree &lvar)
3516 {
3517 if (max_vf == 0)
3518 {
3519 max_vf = omp_max_vf ();
3520 if (max_vf > 1)
3521 {
3522 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3523 OMP_CLAUSE_SAFELEN);
3524 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
3525 max_vf = 1;
3526 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3527 max_vf) == -1)
3528 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
3529 }
3530 if (max_vf > 1)
3531 {
3532 idx = create_tmp_var (unsigned_type_node);
3533 lane = create_tmp_var (unsigned_type_node);
3534 }
3535 }
3536 if (max_vf == 1)
3537 return false;
3538
3539 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
3540 tree avar = create_tmp_var_raw (atype);
3541 if (TREE_ADDRESSABLE (new_var))
3542 TREE_ADDRESSABLE (avar) = 1;
3543 DECL_ATTRIBUTES (avar)
3544 = tree_cons (get_identifier ("omp simd array"), NULL,
3545 DECL_ATTRIBUTES (avar));
3546 gimple_add_tmp_var (avar);
3547 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
3548 NULL_TREE, NULL_TREE);
3549 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
3550 NULL_TREE, NULL_TREE);
3551 if (DECL_P (new_var))
3552 {
3553 SET_DECL_VALUE_EXPR (new_var, lvar);
3554 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3555 }
3556 return true;
3557 }
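
/* Hedged sketch of the transformation set up above, assuming a privatized
   scalar x in a simd loop with max_vf == 8: a temporary array of 8
   elements of x's type is created and marked with the internal
   "omp simd array" attribute; IVAR becomes the per-lane element
   array[idx], LVAR becomes array[lane], and x's DECL_VALUE_EXPR is set to
   LVAR so later references to x resolve to that array element. */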
3558
3559 /* Helper function of lower_rec_input_clauses. For a reference
3560 in simd reduction, add an underlying variable it will reference. */
3561
3562 static void
3563 handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
3564 {
3565 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3566 if (TREE_CONSTANT (z))
3567 {
3568 const char *name = NULL;
3569 if (DECL_NAME (new_vard))
3570 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3571
3572 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
3573 gimple_add_tmp_var (z);
3574 TREE_ADDRESSABLE (z) = 1;
3575 z = build_fold_addr_expr_loc (loc, z);
3576 gimplify_assign (new_vard, z, ilist);
3577 }
3578 }
3579
3580 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3581 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3582 private variables. Initialization statements go in ILIST, while calls
3583 to destructors go in DLIST. */
3584
3585 static void
3586 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3587 omp_context *ctx, struct omp_for_data *fd)
3588 {
3589 tree c, dtor, copyin_seq, x, ptr;
3590 bool copyin_by_ref = false;
3591 bool lastprivate_firstprivate = false;
3592 bool reduction_omp_orig_ref = false;
3593 int pass;
3594 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3595 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
3596 int max_vf = 0;
3597 tree lane = NULL_TREE, idx = NULL_TREE;
3598 tree ivar = NULL_TREE, lvar = NULL_TREE;
3599 gimple_seq llist[2] = { NULL, NULL };
3600
3601 copyin_seq = NULL;
3602
3603 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3604 with data sharing clauses referencing variable sized vars. That
3605 is unnecessarily hard to support and very unlikely to result in
3606 vectorized code anyway. */
3607 if (is_simd)
3608 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3609 switch (OMP_CLAUSE_CODE (c))
3610 {
3611 case OMP_CLAUSE_LINEAR:
3612 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3613 max_vf = 1;
3614 /* FALLTHRU */
3615 case OMP_CLAUSE_REDUCTION:
3616 case OMP_CLAUSE_PRIVATE:
3617 case OMP_CLAUSE_FIRSTPRIVATE:
3618 case OMP_CLAUSE_LASTPRIVATE:
3619 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3620 max_vf = 1;
3621 break;
3622 default:
3623 continue;
3624 }
3625
3626 /* Do all the fixed sized types in the first pass, and the variable sized
3627 types in the second pass. This makes sure that the scalar arguments to
3628 the variable sized types are processed before we use them in the
3629 variable sized operations. */
3630 for (pass = 0; pass < 2; ++pass)
3631 {
3632 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3633 {
3634 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3635 tree var, new_var;
3636 bool by_ref;
3637 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3638
3639 switch (c_kind)
3640 {
3641 case OMP_CLAUSE_PRIVATE:
3642 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3643 continue;
3644 break;
3645 case OMP_CLAUSE_SHARED:
3646 /* Ignore shared directives in teams construct. */
3647 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3648 continue;
3649 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3650 {
3651 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3652 continue;
3653 }
3654 case OMP_CLAUSE_FIRSTPRIVATE:
3655 case OMP_CLAUSE_COPYIN:
3656 case OMP_CLAUSE_LINEAR:
3657 break;
3658 case OMP_CLAUSE_REDUCTION:
3659 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3660 reduction_omp_orig_ref = true;
3661 break;
3662 case OMP_CLAUSE__LOOPTEMP_:
3663 /* Handle _looptemp_ clauses only on parallel. */
3664 if (fd)
3665 continue;
3666 break;
3667 case OMP_CLAUSE_LASTPRIVATE:
3668 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3669 {
3670 lastprivate_firstprivate = true;
3671 if (pass != 0)
3672 continue;
3673 }
3674 /* Even without corresponding firstprivate, if
3675 decl is Fortran allocatable, it needs outer var
3676 reference. */
3677 else if (pass == 0
3678 && lang_hooks.decls.omp_private_outer_ref
3679 (OMP_CLAUSE_DECL (c)))
3680 lastprivate_firstprivate = true;
3681 break;
3682 case OMP_CLAUSE_ALIGNED:
3683 if (pass == 0)
3684 continue;
3685 var = OMP_CLAUSE_DECL (c);
3686 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3687 && !is_global_var (var))
3688 {
3689 new_var = maybe_lookup_decl (var, ctx);
3690 if (new_var == NULL_TREE)
3691 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3692 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3693 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3694 omp_clause_aligned_alignment (c));
3695 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3696 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3697 gimplify_and_add (x, ilist);
3698 }
3699 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3700 && is_global_var (var))
3701 {
3702 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3703 new_var = lookup_decl (var, ctx);
3704 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3705 t = build_fold_addr_expr_loc (clause_loc, t);
3706 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3707 t = build_call_expr_loc (clause_loc, t2, 2, t,
3708 omp_clause_aligned_alignment (c));
3709 t = fold_convert_loc (clause_loc, ptype, t);
3710 x = create_tmp_var (ptype);
3711 t = build2 (MODIFY_EXPR, ptype, x, t);
3712 gimplify_and_add (t, ilist);
3713 t = build_simple_mem_ref_loc (clause_loc, x);
3714 SET_DECL_VALUE_EXPR (new_var, t);
3715 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3716 }
3717 continue;
3718 default:
3719 continue;
3720 }
3721
3722 new_var = var = OMP_CLAUSE_DECL (c);
3723 if (c_kind != OMP_CLAUSE_COPYIN)
3724 new_var = lookup_decl (var, ctx);
3725
3726 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3727 {
3728 if (pass != 0)
3729 continue;
3730 }
3731 else if (is_variable_sized (var))
3732 {
3733 /* For variable sized types, we need to allocate the
3734 actual storage here. Call alloca and store the
3735 result in the pointer decl that we created elsewhere. */
3736 if (pass == 0)
3737 continue;
3738
3739 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3740 {
3741 gcall *stmt;
3742 tree tmp, atmp;
3743
3744 ptr = DECL_VALUE_EXPR (new_var);
3745 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3746 ptr = TREE_OPERAND (ptr, 0);
3747 gcc_assert (DECL_P (ptr));
3748 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
3749
3750 /* void *tmp = __builtin_alloca (x), i.e. allocate the variable sized storage on the stack. */
3751 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3752 stmt = gimple_build_call (atmp, 1, x);
3753 tmp = create_tmp_var_raw (ptr_type_node);
3754 gimple_add_tmp_var (tmp);
3755 gimple_call_set_lhs (stmt, tmp);
3756
3757 gimple_seq_add_stmt (ilist, stmt);
3758
3759 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
3760 gimplify_assign (ptr, x, ilist);
3761 }
3762 }
3763 else if (is_reference (var))
3764 {
3765 /* For references that are being privatized for Fortran,
3766 allocate new backing storage for the new pointer
3767 variable. This allows us to avoid changing all the
3768 code that expects a pointer to something that expects
3769 a direct variable. */
3770 if (pass == 0)
3771 continue;
3772
3773 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
3774 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3775 {
3776 x = build_receiver_ref (var, false, ctx);
3777 x = build_fold_addr_expr_loc (clause_loc, x);
3778 }
3779 else if (TREE_CONSTANT (x))
3780 {
3781 /* For a reduction in a SIMD loop, defer adding the
3782 initialization of the reference, because if we decide
3783 to use a SIMD array for it, the initialization could cause
3784 an expansion ICE. */
3785 if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
3786 x = NULL_TREE;
3787 else
3788 {
3789 const char *name = NULL;
3790 if (DECL_NAME (var))
3791 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
3792
3793 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3794 name);
3795 gimple_add_tmp_var (x);
3796 TREE_ADDRESSABLE (x) = 1;
3797 x = build_fold_addr_expr_loc (clause_loc, x);
3798 }
3799 }
3800 else
3801 {
3802 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3803 x = build_call_expr_loc (clause_loc, atmp, 1, x);
3804 }
3805
3806 if (x)
3807 {
3808 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3809 gimplify_assign (new_var, x, ilist);
3810 }
3811
3812 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3813 }
3814 else if (c_kind == OMP_CLAUSE_REDUCTION
3815 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3816 {
3817 if (pass == 0)
3818 continue;
3819 }
3820 else if (pass != 0)
3821 continue;
3822
3823 switch (OMP_CLAUSE_CODE (c))
3824 {
3825 case OMP_CLAUSE_SHARED:
3826 /* Ignore shared directives in teams construct. */
3827 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3828 continue;
3829 /* Shared global vars are just accessed directly. */
3830 if (is_global_var (new_var))
3831 break;
3832 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3833 needs to be delayed until after fixup_child_record_type so
3834 that we get the correct type during the dereference. */
3835 by_ref = use_pointer_for_field (var, ctx);
3836 x = build_receiver_ref (var, by_ref, ctx);
3837 SET_DECL_VALUE_EXPR (new_var, x);
3838 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3839
3840 /* ??? If VAR is not passed by reference, and the variable
3841 hasn't been initialized yet, then we'll get a warning for
3842 the store into the omp_data_s structure. Ideally, we'd be
3843 able to notice this and not store anything at all, but
3844 we're generating code too early. Suppress the warning. */
3845 if (!by_ref)
3846 TREE_NO_WARNING (var) = 1;
3847 break;
3848
3849 case OMP_CLAUSE_LASTPRIVATE:
3850 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3851 break;
3852 /* FALLTHRU */
3853
3854 case OMP_CLAUSE_PRIVATE:
3855 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3856 x = build_outer_var_ref (var, ctx);
3857 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3858 {
3859 if (is_task_ctx (ctx))
3860 x = build_receiver_ref (var, false, ctx);
3861 else
3862 x = build_outer_var_ref (var, ctx);
3863 }
3864 else
3865 x = NULL;
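/* do_private builds the default constructor for the privatized copy
   (and, via do_dtor, its destructor).  Under SIMD, if the variable can be
   replaced by a per-lane "omp simd array" element, the constructor and
   destructor are instead emitted into llist[0] and llist[1], so that they
   run once per lane.  */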
3866 do_private:
3867 tree nx;
3868 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3869 if (is_simd)
3870 {
3871 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
3872 if ((TREE_ADDRESSABLE (new_var) || nx || y
3873 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3874 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3875 idx, lane, ivar, lvar))
3876 {
3877 if (nx)
3878 x = lang_hooks.decls.omp_clause_default_ctor
3879 (c, unshare_expr (ivar), x);
3880 if (nx && x)
3881 gimplify_and_add (x, &llist[0]);
3882 if (y)
3883 {
3884 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3885 if (y)
3886 {
3887 gimple_seq tseq = NULL;
3888
3889 dtor = y;
3890 gimplify_stmt (&dtor, &tseq);
3891 gimple_seq_add_seq (&llist[1], tseq);
3892 }
3893 }
3894 break;
3895 }
3896 }
3897 if (nx)
3898 gimplify_and_add (nx, ilist);
3899 /* FALLTHRU */
3900
3901 do_dtor:
3902 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3903 if (x)
3904 {
3905 gimple_seq tseq = NULL;
3906
3907 dtor = x;
3908 gimplify_stmt (&dtor, &tseq);
3909 gimple_seq_add_seq (dlist, tseq);
3910 }
3911 break;
3912
3913 case OMP_CLAUSE_LINEAR:
3914 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3915 goto do_firstprivate;
3916 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3917 x = NULL;
3918 else
3919 x = build_outer_var_ref (var, ctx);
3920 goto do_private;
3921
3922 case OMP_CLAUSE_FIRSTPRIVATE:
3923 if (is_task_ctx (ctx))
3924 {
3925 if (is_reference (var) || is_variable_sized (var))
3926 goto do_dtor;
3927 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3928 ctx))
3929 || use_pointer_for_field (var, NULL))
3930 {
3931 x = build_receiver_ref (var, false, ctx);
3932 SET_DECL_VALUE_EXPR (new_var, x);
3933 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3934 goto do_dtor;
3935 }
3936 }
3937 do_firstprivate:
3938 x = build_outer_var_ref (var, ctx);
3939 if (is_simd)
3940 {
3941 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3942 && gimple_omp_for_combined_into_p (ctx->stmt))
3943 {
3944 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3945 tree stept = TREE_TYPE (t);
3946 tree ct = find_omp_clause (clauses,
3947 OMP_CLAUSE__LOOPTEMP_);
3948 gcc_assert (ct);
3949 tree l = OMP_CLAUSE_DECL (ct);
3950 tree n1 = fd->loop.n1;
3951 tree step = fd->loop.step;
3952 tree itype = TREE_TYPE (l);
3953 if (POINTER_TYPE_P (itype))
3954 itype = signed_type_for (itype);
3955 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3956 if (TYPE_UNSIGNED (itype)
3957 && fd->loop.cond_code == GT_EXPR)
3958 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3959 fold_build1 (NEGATE_EXPR, itype, l),
3960 fold_build1 (NEGATE_EXPR,
3961 itype, step));
3962 else
3963 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3964 t = fold_build2 (MULT_EXPR, stept,
3965 fold_convert (stept, l), t);
3966
3967 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3968 {
3969 x = lang_hooks.decls.omp_clause_linear_ctor
3970 (c, new_var, x, t);
3971 gimplify_and_add (x, ilist);
3972 goto do_dtor;
3973 }
3974
3975 if (POINTER_TYPE_P (TREE_TYPE (x)))
3976 x = fold_build2 (POINTER_PLUS_EXPR,
3977 TREE_TYPE (x), x, t);
3978 else
3979 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
3980 }
3981
3982 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
3983 || TREE_ADDRESSABLE (new_var))
3984 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3985 idx, lane, ivar, lvar))
3986 {
3987 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
3988 {
3989 tree iv = create_tmp_var (TREE_TYPE (new_var));
3990 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
3991 gimplify_and_add (x, ilist);
3992 gimple_stmt_iterator gsi
3993 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3994 gassign *g
3995 = gimple_build_assign (unshare_expr (lvar), iv);
3996 gsi_insert_before_without_update (&gsi, g,
3997 GSI_SAME_STMT);
3998 tree t = OMP_CLAUSE_LINEAR_STEP (c);
3999 enum tree_code code = PLUS_EXPR;
4000 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
4001 code = POINTER_PLUS_EXPR;
4002 g = gimple_build_assign (iv, code, iv, t);
4003 gsi_insert_before_without_update (&gsi, g,
4004 GSI_SAME_STMT);
4005 break;
4006 }
4007 x = lang_hooks.decls.omp_clause_copy_ctor
4008 (c, unshare_expr (ivar), x);
4009 gimplify_and_add (x, &llist[0]);
4010 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
4011 if (x)
4012 {
4013 gimple_seq tseq = NULL;
4014
4015 dtor = x;
4016 gimplify_stmt (&dtor, &tseq);
4017 gimple_seq_add_seq (&llist[1], tseq);
4018 }
4019 break;
4020 }
4021 }
4022 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
4023 gimplify_and_add (x, ilist);
4024 goto do_dtor;
4025
4026 case OMP_CLAUSE__LOOPTEMP_:
4027 gcc_assert (is_parallel_ctx (ctx));
4028 x = build_outer_var_ref (var, ctx);
4029 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
4030 gimplify_and_add (x, ilist);
4031 break;
4032
4033 case OMP_CLAUSE_COPYIN:
4034 by_ref = use_pointer_for_field (var, NULL);
4035 x = build_receiver_ref (var, by_ref, ctx);
4036 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
4037 append_to_statement_list (x, &copyin_seq);
4038 copyin_by_ref |= by_ref;
4039 break;
4040
4041 case OMP_CLAUSE_REDUCTION:
4042 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4043 {
4044 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
4045 gimple tseq;
4046 x = build_outer_var_ref (var, ctx);
4047
4048 if (is_reference (var)
4049 && !useless_type_conversion_p (TREE_TYPE (placeholder),
4050 TREE_TYPE (x)))
4051 x = build_fold_addr_expr_loc (clause_loc, x);
4052 SET_DECL_VALUE_EXPR (placeholder, x);
4053 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
4054 tree new_vard = new_var;
4055 if (is_reference (var))
4056 {
4057 gcc_assert (TREE_CODE (new_var) == MEM_REF);
4058 new_vard = TREE_OPERAND (new_var, 0);
4059 gcc_assert (DECL_P (new_vard));
4060 }
4061 if (is_simd
4062 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
4063 idx, lane, ivar, lvar))
4064 {
4065 if (new_vard == new_var)
4066 {
4067 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
4068 SET_DECL_VALUE_EXPR (new_var, ivar);
4069 }
4070 else
4071 {
4072 SET_DECL_VALUE_EXPR (new_vard,
4073 build_fold_addr_expr (ivar));
4074 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
4075 }
4076 x = lang_hooks.decls.omp_clause_default_ctor
4077 (c, unshare_expr (ivar),
4078 build_outer_var_ref (var, ctx));
4079 if (x)
4080 gimplify_and_add (x, &llist[0]);
4081 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
4082 {
4083 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
4084 lower_omp (&tseq, ctx);
4085 gimple_seq_add_seq (&llist[0], tseq);
4086 }
4087 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
4088 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
4089 lower_omp (&tseq, ctx);
4090 gimple_seq_add_seq (&llist[1], tseq);
4091 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4092 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
4093 if (new_vard == new_var)
4094 SET_DECL_VALUE_EXPR (new_var, lvar);
4095 else
4096 SET_DECL_VALUE_EXPR (new_vard,
4097 build_fold_addr_expr (lvar));
4098 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
4099 if (x)
4100 {
4101 tseq = NULL;
4102 dtor = x;
4103 gimplify_stmt (&dtor, &tseq);
4104 gimple_seq_add_seq (&llist[1], tseq);
4105 }
4106 break;
4107 }
4108 /* If this is a reference to a constant size reduction var
4109 with a placeholder, we haven't emitted the initializer
4110 for it because that is undesirable if SIMD arrays are used.
4111 But if they aren't used, we need to emit the deferred
4112 initialization now. */
4113 else if (is_reference (var) && is_simd)
4114 handle_simd_reference (clause_loc, new_vard, ilist);
4115 x = lang_hooks.decls.omp_clause_default_ctor
4116 (c, unshare_expr (new_var),
4117 build_outer_var_ref (var, ctx));
4118 if (x)
4119 gimplify_and_add (x, ilist);
4120 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
4121 {
4122 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
4123 lower_omp (&tseq, ctx);
4124 gimple_seq_add_seq (ilist, tseq);
4125 }
4126 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
4127 if (is_simd)
4128 {
4129 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
4130 lower_omp (&tseq, ctx);
4131 gimple_seq_add_seq (dlist, tseq);
4132 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4133 }
4134 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
4135 goto do_dtor;
4136 }
4137 else
4138 {
4139 x = omp_reduction_init (c, TREE_TYPE (new_var));
4140 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
4141 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
4142
4143 /* reduction(-:var) sums up the partial results, so it
4144 acts identically to reduction(+:var). */
4145 if (code == MINUS_EXPR)
4146 code = PLUS_EXPR;
4147
4148 tree new_vard = new_var;
4149 if (is_simd && is_reference (var))
4150 {
4151 gcc_assert (TREE_CODE (new_var) == MEM_REF);
4152 new_vard = TREE_OPERAND (new_var, 0);
4153 gcc_assert (DECL_P (new_vard));
4154 }
4155 if (is_simd
4156 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
4157 idx, lane, ivar, lvar))
4158 {
4159 tree ref = build_outer_var_ref (var, ctx);
4160
4161 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
4162
4163 x = build2 (code, TREE_TYPE (ref), ref, ivar);
4164 ref = build_outer_var_ref (var, ctx);
4165 gimplify_assign (ref, x, &llist[1]);
4166
4167 if (new_vard != new_var)
4168 {
4169 SET_DECL_VALUE_EXPR (new_vard,
4170 build_fold_addr_expr (lvar));
4171 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
4172 }
4173 }
4174 else
4175 {
4176 if (is_reference (var) && is_simd)
4177 handle_simd_reference (clause_loc, new_vard, ilist);
4178 gimplify_assign (new_var, x, ilist);
4179 if (is_simd)
4180 {
4181 tree ref = build_outer_var_ref (var, ctx);
4182
4183 x = build2 (code, TREE_TYPE (ref), ref, new_var);
4184 ref = build_outer_var_ref (var, ctx);
4185 gimplify_assign (ref, x, dlist);
4186 }
4187 }
4188 }
4189 break;
4190
4191 default:
4192 gcc_unreachable ();
4193 }
4194 }
4195 }
4196
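/* If any privatized variable was replaced by an "omp simd array" element
   above, emit the GOMP_SIMD_LANE call that provides the current lane and,
   for each collected sequence llist[i] (per-lane constructors in llist[0],
   destructors in llist[1]), a loop of roughly the form

	idx = 0;
	goto header;
     body:
	<llist[i] statements for lane idx>;
	idx = idx + 1;
     header:
	if (idx < GOMP_SIMD_VF (simduid)) goto body; else goto end;
     end:

   appended to ILIST resp. DLIST.  */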
4197 if (lane)
4198 {
4199 tree uid = create_tmp_var (ptr_type_node, "simduid");
4200 /* Don't want uninit warnings on simduid; it is always uninitialized,
4201 since we use it only for its DECL_UID, never for its value. */
4202 TREE_NO_WARNING (uid) = 1;
4203 gimple g
4204 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
4205 gimple_call_set_lhs (g, lane);
4206 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
4207 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
4208 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
4209 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
4210 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
4211 gimple_omp_for_set_clauses (ctx->stmt, c);
4212 g = gimple_build_assign (lane, INTEGER_CST,
4213 build_int_cst (unsigned_type_node, 0));
4214 gimple_seq_add_stmt (ilist, g);
4215 for (int i = 0; i < 2; i++)
4216 if (llist[i])
4217 {
4218 tree vf = create_tmp_var (unsigned_type_node);
4219 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
4220 gimple_call_set_lhs (g, vf);
4221 gimple_seq *seq = i == 0 ? ilist : dlist;
4222 gimple_seq_add_stmt (seq, g);
4223 tree t = build_int_cst (unsigned_type_node, 0);
4224 g = gimple_build_assign (idx, INTEGER_CST, t);
4225 gimple_seq_add_stmt (seq, g);
4226 tree body = create_artificial_label (UNKNOWN_LOCATION);
4227 tree header = create_artificial_label (UNKNOWN_LOCATION);
4228 tree end = create_artificial_label (UNKNOWN_LOCATION);
4229 gimple_seq_add_stmt (seq, gimple_build_goto (header));
4230 gimple_seq_add_stmt (seq, gimple_build_label (body));
4231 gimple_seq_add_seq (seq, llist[i]);
4232 t = build_int_cst (unsigned_type_node, 1);
4233 g = gimple_build_assign (idx, PLUS_EXPR, idx, t);
4234 gimple_seq_add_stmt (seq, g);
4235 gimple_seq_add_stmt (seq, gimple_build_label (header));
4236 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
4237 gimple_seq_add_stmt (seq, g);
4238 gimple_seq_add_stmt (seq, gimple_build_label (end));
4239 }
4240 }
4241
4242 /* The copyin sequence is not to be executed by the main thread, since
4243 that would result in self-copies. For scalars such a self-copy may be
4244 harmless, but for C++ operator= it certainly is not. */
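/* The emitted guard is simply:
	if (__builtin_omp_get_thread_num () != 0)
	  <copyin_seq>;  */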
4245 if (copyin_seq)
4246 {
4247 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
4248 0);
4249 x = build2 (NE_EXPR, boolean_type_node, x,
4250 build_int_cst (TREE_TYPE (x), 0));
4251 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
4252 gimplify_and_add (x, ilist);
4253 }
4254
4255 /* If any copyin variable is passed by reference, we must ensure the
4256 master thread doesn't modify it before it is copied over in all
4257 threads. Similarly for variables in both firstprivate and
4258 lastprivate clauses we need to ensure the lastprivate copying
4259 happens after firstprivate copying in all threads. And similarly
4260 for UDRs if initializer expression refers to omp_orig. */
4261 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
4262 {
4263 /* Don't add any barrier for #pragma omp simd or
4264 #pragma omp distribute. */
4265 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
4266 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
4267 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
4268 }
4269
4270 /* If max_vf is non-zero, then we can use only a vectorization factor
4271 up to the max_vf we chose. So stick it into the safelen clause. */
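/* An already present safelen clause is only overridden when its value is
   an INTEGER_CST larger than MAX_VF (compare_tree_int returning 1), or
   when there is no safelen clause at all; the new OMP_CLAUSE_SAFELEN is
   prepended to the for statement's clause chain.  */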
4272 if (max_vf)
4273 {
4274 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
4275 OMP_CLAUSE_SAFELEN);
4276 if (c == NULL_TREE
4277 || (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) == INTEGER_CST
4278 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
4279 max_vf) == 1))
4280 {
4281 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
4282 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
4283 max_vf);
4284 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
4285 gimple_omp_for_set_clauses (ctx->stmt, c);
4286 }
4287 }
4288 }
4289
4290
4291 /* Generate code to implement the LASTPRIVATE clauses. This is used for
4292 both parallel and workshare constructs. PREDICATE may be NULL if it's
4293 always true. */
4294
4295 static void
4296 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
4297 omp_context *ctx)
4298 {
4299 tree x, c, label = NULL, orig_clauses = clauses;
4300 bool par_clauses = false;
4301 tree simduid = NULL, lastlane = NULL;
4302
4303 /* Early exit if there are no lastprivate or linear clauses. */
4304 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
4305 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
4306 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
4307 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
4308 break;
4309 if (clauses == NULL)
4310 {
4311 /* If this was a workshare clause, see if it had been combined
4312 with its parallel. In that case, look for the clauses on the
4313 parallel statement itself. */
4314 if (is_parallel_ctx (ctx))
4315 return;
4316
4317 ctx = ctx->outer;
4318 if (ctx == NULL || !is_parallel_ctx (ctx))
4319 return;
4320
4321 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
4322 OMP_CLAUSE_LASTPRIVATE);
4323 if (clauses == NULL)
4324 return;
4325 par_clauses = true;
4326 }
4327
4328 if (predicate)
4329 {
4330 gcond *stmt;
4331 tree label_true, arm1, arm2;
4332
4333 label = create_artificial_label (UNKNOWN_LOCATION);
4334 label_true = create_artificial_label (UNKNOWN_LOCATION);
4335 arm1 = TREE_OPERAND (predicate, 0);
4336 arm2 = TREE_OPERAND (predicate, 1);
4337 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
4338 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
4339 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
4340 label_true, label);
4341 gimple_seq_add_stmt (stmt_list, stmt);
4342 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
4343 }
4344
4345 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
4346 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
4347 {
4348 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
4349 if (simduid)
4350 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
4351 }
4352
4353 for (c = clauses; c ;)
4354 {
4355 tree var, new_var;
4356 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4357
4358 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
4359 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
4360 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
4361 {
4362 var = OMP_CLAUSE_DECL (c);
4363 new_var = lookup_decl (var, ctx);
4364
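/* If the privatized copy was turned into an "omp simd array" element,
   the value to copy out must come from the array slot written by the
   last executed lane; that lane is obtained via IFN_GOMP_SIMD_LAST_LANE
   and cached in LASTLANE.  */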
4365 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
4366 {
4367 tree val = DECL_VALUE_EXPR (new_var);
4368 if (TREE_CODE (val) == ARRAY_REF
4369 && VAR_P (TREE_OPERAND (val, 0))
4370 && lookup_attribute ("omp simd array",
4371 DECL_ATTRIBUTES (TREE_OPERAND (val,
4372 0))))
4373 {
4374 if (lastlane == NULL)
4375 {
4376 lastlane = create_tmp_var (unsigned_type_node);
4377 gcall *g
4378 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
4379 2, simduid,
4380 TREE_OPERAND (val, 1));
4381 gimple_call_set_lhs (g, lastlane);
4382 gimple_seq_add_stmt (stmt_list, g);
4383 }
4384 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
4385 TREE_OPERAND (val, 0), lastlane,
4386 NULL_TREE, NULL_TREE);
4387 }
4388 }
4389
4390 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
4391 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
4392 {
4393 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
4394 gimple_seq_add_seq (stmt_list,
4395 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
4396 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
4397 }
4398 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
4399 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
4400 {
4401 lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
4402 gimple_seq_add_seq (stmt_list,
4403 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
4404 OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
4405 }
4406
4407 x = build_outer_var_ref (var, ctx);
4408 if (is_reference (var))
4409 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4410 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
4411 gimplify_and_add (x, stmt_list);
4412 }
4413 c = OMP_CLAUSE_CHAIN (c);
4414 if (c == NULL && !par_clauses)
4415 {
4416 /* If this was a workshare clause, see if it had been combined
4417 with its parallel. In that case, continue looking for the
4418 clauses also on the parallel statement itself. */
4419 if (is_parallel_ctx (ctx))
4420 break;
4421
4422 ctx = ctx->outer;
4423 if (ctx == NULL || !is_parallel_ctx (ctx))
4424 break;
4425
4426 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
4427 OMP_CLAUSE_LASTPRIVATE);
4428 par_clauses = true;
4429 }
4430 }
4431
4432 if (label)
4433 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
4434 }
4435
4436 static void
4437 oacc_lower_reduction_var_helper (gimple_seq *stmt_seqp, omp_context *ctx,
4438 tree tid, tree var, tree new_var)
4439 {
4440 /* The atomic add at the end of the sum creates unnecessary
4441 write contention on accelerators. To work around this,
4442 create an array to store the partial reductions. Later, in
4443 lower_omp_for (for OpenACC), the values of the array will be
4444 combined. */
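
/* In pseudo code, the sequence appended to STMT_SEQP is roughly:

	array  = <receiver ref to the reduction array for VAR>;
	offset = sizeof (type) * (size_t) tid;
	ptr    = array + offset;
	*ptr   = new_var;  */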
4445
4446 tree t = NULL_TREE, array, x;
4447 tree type = get_base_type (var);
4448 gimple stmt;
4449
4450 /* Now insert the partial reductions into the array. */
4451
4452 /* Find the reduction array. */
4453
4454 tree ptype = build_pointer_type (type);
4455
4456 t = lookup_oacc_reduction (oacc_get_reduction_array_id (var), ctx);
4457 t = build_receiver_ref (t, false, ctx->outer);
4458
4459 array = create_tmp_var (ptype);
4460 gimplify_assign (array, t, stmt_seqp);
4461
4462 tree ptr = create_tmp_var (TREE_TYPE (array));
4463
4464 /* Compute the byte offset of this thread's slot in the array,
4465 i.e. offset = sizeof (type) * (size_t) tid, where tid is the
4466 current thread id obtained in lower_reduction_clauses. */
4467 tree offset = create_tmp_var (sizetype);
4468 gimplify_assign (offset, TYPE_SIZE_UNIT (type),
4469 stmt_seqp);
4470 t = create_tmp_var (sizetype);
4471 gimplify_assign (t, unshare_expr (fold_build1 (NOP_EXPR, sizetype, tid)),
4472 stmt_seqp);
4473 stmt = gimple_build_assign (offset, MULT_EXPR, offset, t);
4474 gimple_seq_add_stmt (stmt_seqp, stmt);
4475
4476 /* Compute the address of this thread's slot:
4477 ptr = array + offset. */
4478 ptr = create_tmp_var (ptype);
4479 stmt = gimple_build_assign (unshare_expr (ptr), POINTER_PLUS_EXPR, array,
4480 offset);
4481 gimple_seq_add_stmt (stmt_seqp, stmt);
4482
4483 /* Move the local sum to gfc$sum[i]. */
4484 x = unshare_expr (build_simple_mem_ref (ptr));
4485 stmt = gimplify_assign (x, new_var, stmt_seqp);
4486 }
4487
4488 /* Generate code to implement the REDUCTION clauses. */
4489
4490 static void
4491 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
4492 {
4493 gimple_seq sub_seq = NULL;
4494 gimple stmt;
4495 tree x, c, tid = NULL_TREE;
4496 int count = 0;
4497
4498 /* SIMD reductions are handled in lower_rec_input_clauses. */
4499 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
4500 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
4501 return;
4502
4503 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
4504 update in that case, otherwise use a lock. */
4505 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
4506 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
4507 {
4508 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4509 {
4510 /* Never use OMP_ATOMIC for array reductions or UDRs. */
4511 count = -1;
4512 break;
4513 }
4514 count++;
4515 }
4516
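/* At this point COUNT is -1 if any reduction uses a placeholder (a UDR or
   array reduction, which can never use OMP_ATOMIC), 1 if there is exactly
   one simple reduction (handled below with a single OMP_ATOMIC update),
   and 2 if there are at least two simple reductions, in which case the
   merge code is wrapped in GOMP_atomic_start/GOMP_atomic_end.  The
   OpenACC path below bypasses both schemes.  */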
4517 if (count == 0)
4518 return;
4519
4520 /* Initialize thread info for OpenACC. */
4521 if (is_gimple_omp_oacc (ctx->stmt))
4522 {
4523 /* Get the current thread id. */
4524 tree call = builtin_decl_explicit (BUILT_IN_GOACC_GET_THREAD_NUM);
4525 tid = create_tmp_var (TREE_TYPE (TREE_TYPE (call)));
4526 gimple stmt = gimple_build_call (call, 0);
4527 gimple_call_set_lhs (stmt, tid);
4528 gimple_seq_add_stmt (stmt_seqp, stmt);
4529 }
4530
4531 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4532 {
4533 tree var, ref, new_var;
4534 enum tree_code code;
4535 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4536
4537 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
4538 continue;
4539
4540 var = OMP_CLAUSE_DECL (c);
4541 new_var = lookup_decl (var, ctx);
4542 if (is_reference (var))
4543 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4544 ref = build_outer_var_ref (var, ctx);
4545 code = OMP_CLAUSE_REDUCTION_CODE (c);
4546
4547 /* reduction(-:var) sums up the partial results, so it acts
4548 identically to reduction(+:var). */
4549 if (code == MINUS_EXPR)
4550 code = PLUS_EXPR;
4551
4552 if (is_gimple_omp_oacc (ctx->stmt))
4553 {
4554 gcc_checking_assert (!OMP_CLAUSE_REDUCTION_PLACEHOLDER (c));
4555
4556 oacc_lower_reduction_var_helper (stmt_seqp, ctx, tid, var, new_var);
4557 }
4558 else if (count == 1)
4559 {
4560 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
4561
4562 addr = save_expr (addr);
4563 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
4564 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
4565 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
4566 gimplify_and_add (x, stmt_seqp);
4567 return;
4568 }
4569 else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
4570 {
4571 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
4572
4573 if (is_reference (var)
4574 && !useless_type_conversion_p (TREE_TYPE (placeholder),
4575 TREE_TYPE (ref)))
4576 ref = build_fold_addr_expr_loc (clause_loc, ref);
4577 SET_DECL_VALUE_EXPR (placeholder, ref);
4578 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
4579 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
4580 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
4581 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
4582 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
4583 }
4584 else
4585 {
4586 x = build2 (code, TREE_TYPE (ref), ref, new_var);
4587 ref = build_outer_var_ref (var, ctx);
4588 gimplify_assign (ref, x, &sub_seq);
4589 }
4590 }
4591
4592 if (is_gimple_omp_oacc (ctx->stmt))
4593 return;
4594
4595 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
4596 0);
4597 gimple_seq_add_stmt (stmt_seqp, stmt);
4598
4599 gimple_seq_add_seq (stmt_seqp, sub_seq);
4600
4601 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
4602 0);
4603 gimple_seq_add_stmt (stmt_seqp, stmt);
4604 }
4605
4606
4607 /* Generate code to implement the COPYPRIVATE clauses. */
4608
4609 static void
4610 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
4611 omp_context *ctx)
4612 {
4613 tree c;
4614
4615 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4616 {
4617 tree var, new_var, ref, x;
4618 bool by_ref;
4619 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4620
4621 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
4622 continue;
4623
4624 var = OMP_CLAUSE_DECL (c);
4625 by_ref = use_pointer_for_field (var, NULL);
4626
4627 ref = build_sender_ref (var, ctx);
4628 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
4629 if (by_ref)
4630 {
4631 x = build_fold_addr_expr_loc (clause_loc, new_var);
4632 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
4633 }
4634 gimplify_assign (ref, x, slist);
4635
4636 ref = build_receiver_ref (var, false, ctx);
4637 if (by_ref)
4638 {
4639 ref = fold_convert_loc (clause_loc,
4640 build_pointer_type (TREE_TYPE (new_var)),
4641 ref);
4642 ref = build_fold_indirect_ref_loc (clause_loc, ref);
4643 }
4644 if (is_reference (var))
4645 {
4646 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
4647 ref = build_simple_mem_ref_loc (clause_loc, ref);
4648 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
4649 }
4650 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
4651 gimplify_and_add (x, rlist);
4652 }
4653 }
4654
4655
4656 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
4657 and REDUCTION from the sender (aka parent) side. */
4658
4659 static void
4660 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
4661 omp_context *ctx)
4662 {
4663 tree c;
4664
4665 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
4666 {
4667 tree val, ref, x, var;
4668 bool by_ref, do_in = false, do_out = false;
4669 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
4670
4671 switch (OMP_CLAUSE_CODE (c))
4672 {
4673 case OMP_CLAUSE_PRIVATE:
4674 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
4675 break;
4676 continue;
4677 case OMP_CLAUSE_FIRSTPRIVATE:
4678 case OMP_CLAUSE_COPYIN:
4679 case OMP_CLAUSE_LASTPRIVATE:
4680 case OMP_CLAUSE_REDUCTION:
4681 case OMP_CLAUSE__LOOPTEMP_:
4682 break;
4683 default:
4684 continue;
4685 }
4686
4687 val = OMP_CLAUSE_DECL (c);
4688 var = lookup_decl_in_outer_ctx (val, ctx);
4689
4690 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
4691 && is_global_var (var))
4692 continue;
4693 if (is_variable_sized (val))
4694 continue;
4695 by_ref = use_pointer_for_field (val, NULL);
4696
4697 switch (OMP_CLAUSE_CODE (c))
4698 {
4699 case OMP_CLAUSE_PRIVATE:
4700 case OMP_CLAUSE_FIRSTPRIVATE:
4701 case OMP_CLAUSE_COPYIN:
4702 case OMP_CLAUSE__LOOPTEMP_:
4703 do_in = true;
4704 break;
4705
4706 case OMP_CLAUSE_LASTPRIVATE:
4707 if (by_ref || is_reference (val))
4708 {
4709 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
4710 continue;
4711 do_in = true;
4712 }
4713 else
4714 {
4715 do_out = true;
4716 if (lang_hooks.decls.omp_private_outer_ref (val))
4717 do_in = true;
4718 }
4719 break;
4720
4721 case OMP_CLAUSE_REDUCTION:
4722 do_in = true;
4723 do_out = !(by_ref || is_reference (val));
4724 break;
4725
4726 default:
4727 gcc_unreachable ();
4728 }
4729
4730 if (do_in)
4731 {
4732 ref = build_sender_ref (val, ctx);
4733 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
4734 gimplify_assign (ref, x, ilist);
4735 if (is_task_ctx (ctx))
4736 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
4737 }
4738
4739 if (do_out)
4740 {
4741 ref = build_sender_ref (val, ctx);
4742 gimplify_assign (var, ref, olist);
4743 }
4744 }
4745 }
4746
4747 /* Generate code to implement SHARED from the sender (aka parent)
4748 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4749 list things that got automatically shared. */
4750
4751 static void
4752 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
4753 {
4754 tree var, ovar, nvar, f, x, record_type;
4755
4756 if (ctx->record_type == NULL)
4757 return;
4758
4759 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
4760 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
4761 {
4762 ovar = DECL_ABSTRACT_ORIGIN (f);
4763 nvar = maybe_lookup_decl (ovar, ctx);
4764 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4765 continue;
4766
4767 /* If CTX is a nested parallel directive, find the immediately
4768 enclosing parallel or workshare construct that contains a
4769 mapping for OVAR. */
4770 var = lookup_decl_in_outer_ctx (ovar, ctx);
4771
4772 if (use_pointer_for_field (ovar, ctx))
4773 {
4774 x = build_sender_ref (ovar, ctx);
4775 var = build_fold_addr_expr (var);
4776 gimplify_assign (x, var, ilist);
4777 }
4778 else
4779 {
4780 x = build_sender_ref (ovar, ctx);
4781 gimplify_assign (x, var, ilist);
4782
4783 if (!TREE_READONLY (var)
4784 /* We don't need to receive a new reference to a result
4785 or parm decl. In fact we may not store to it as we will
4786 invalidate any pending RSO and generate wrong gimple
4787 during inlining. */
4788 && !((TREE_CODE (var) == RESULT_DECL
4789 || TREE_CODE (var) == PARM_DECL)
4790 && DECL_BY_REFERENCE (var)))
4791 {
4792 x = build_sender_ref (ovar, ctx);
4793 gimplify_assign (var, x, olist);
4794 }
4795 }
4796 }
4797 }
4798
4799
4800 /* A convenience function to build an empty GIMPLE_COND with just the
4801 condition. */
4802
4803 static gcond *
4804 gimple_build_cond_empty (tree cond)
4805 {
4806 enum tree_code pred_code;
4807 tree lhs, rhs;
4808
4809 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4810 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
4811 }
4812
4813
4814 /* Build the call to GOMP_parallel (or one of its combined
4815 parallel+workshare variants) that actually starts the parallel
4816 operation. REGION is the parallel region being expanded. BB is the
4817 block where the code is to be inserted. WS_ARGS is set if this is a
4818 call to a combined parallel+workshare construct; it contains the list
4819 of additional arguments needed by the workshare construct. */
4820
4821 static void
4822 expand_parallel_call (struct omp_region *region, basic_block bb,
4823 gomp_parallel *entry_stmt,
4824 vec<tree, va_gc> *ws_args)
4825 {
4826 tree t, t1, t2, val, cond, c, clauses, flags;
4827 gimple_stmt_iterator gsi;
4828 gimple stmt;
4829 enum built_in_function start_ix;
4830 int start_ix2;
4831 location_t clause_loc;
4832 vec<tree, va_gc> *args;
4833
4834 clauses = gimple_omp_parallel_clauses (entry_stmt);
4835
4836 /* Determine what flavor of GOMP_parallel we will be
4837 emitting. */
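/* For a combined parallel+loop region the schedule kind is used to index
   into the consecutive builtins GOMP_parallel_loop_{static,dynamic,guided,
   runtime}; OMP_CLAUSE_SCHEDULE_RUNTIME is mapped to offset 3 explicitly
   because OMP_CLAUSE_SCHEDULE_AUTO (which the assert below rules out)
   would otherwise occupy that slot in the clause enumeration.  */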
4838 start_ix = BUILT_IN_GOMP_PARALLEL;
4839 if (is_combined_parallel (region))
4840 {
4841 switch (region->inner->type)
4842 {
4843 case GIMPLE_OMP_FOR:
4844 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4845 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4846 + (region->inner->sched_kind
4847 == OMP_CLAUSE_SCHEDULE_RUNTIME
4848 ? 3 : region->inner->sched_kind));
4849 start_ix = (enum built_in_function)start_ix2;
4850 break;
4851 case GIMPLE_OMP_SECTIONS:
4852 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
4853 break;
4854 default:
4855 gcc_unreachable ();
4856 }
4857 }
4858
4859 /* By default, the value of NUM_THREADS is zero (selected at run time)
4860 and there is no conditional. */
4861 cond = NULL_TREE;
4862 val = build_int_cst (unsigned_type_node, 0);
4863 flags = build_int_cst (unsigned_type_node, 0);
4864
4865 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4866 if (c)
4867 cond = OMP_CLAUSE_IF_EXPR (c);
4868
4869 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4870 if (c)
4871 {
4872 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4873 clause_loc = OMP_CLAUSE_LOCATION (c);
4874 }
4875 else
4876 clause_loc = gimple_location (entry_stmt);
4877
4878 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4879 if (c)
4880 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4881
4882 /* Ensure 'val' is of the correct type. */
4883 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
4884
4885 /* If we found the clause 'if (cond)', build either
4886 (cond != 0) or (cond ? val : 1u). */
4887 if (cond)
4888 {
4889 cond = gimple_boolify (cond);
4890
4891 if (integer_zerop (val))
4892 val = fold_build2_loc (clause_loc,
4893 EQ_EXPR, unsigned_type_node, cond,
4894 build_int_cst (TREE_TYPE (cond), 0));
4895 else
4896 {
4897 basic_block cond_bb, then_bb, else_bb;
4898 edge e, e_then, e_else;
4899 tree tmp_then, tmp_else, tmp_join, tmp_var;
4900
4901 tmp_var = create_tmp_var (TREE_TYPE (val));
4902 if (gimple_in_ssa_p (cfun))
4903 {
4904 tmp_then = make_ssa_name (tmp_var);
4905 tmp_else = make_ssa_name (tmp_var);
4906 tmp_join = make_ssa_name (tmp_var);
4907 }
4908 else
4909 {
4910 tmp_then = tmp_var;
4911 tmp_else = tmp_var;
4912 tmp_join = tmp_var;
4913 }
4914
4915 e = split_block_after_labels (bb);
4916 cond_bb = e->src;
4917 bb = e->dest;
4918 remove_edge (e);
4919
4920 then_bb = create_empty_bb (cond_bb);
4921 else_bb = create_empty_bb (then_bb);
4922 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4923 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
4924
4925 stmt = gimple_build_cond_empty (cond);
4926 gsi = gsi_start_bb (cond_bb);
4927 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4928
4929 gsi = gsi_start_bb (then_bb);
4930 stmt = gimple_build_assign (tmp_then, val);
4931 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4932
4933 gsi = gsi_start_bb (else_bb);
4934 stmt = gimple_build_assign
4935 (tmp_else, build_int_cst (unsigned_type_node, 1));
4936 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4937
4938 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4939 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
4940 add_bb_to_loop (then_bb, cond_bb->loop_father);
4941 add_bb_to_loop (else_bb, cond_bb->loop_father);
4942 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4943 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
4944
4945 if (gimple_in_ssa_p (cfun))
4946 {
4947 gphi *phi = create_phi_node (tmp_join, bb);
4948 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4949 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
4950 }
4951
4952 val = tmp_join;
4953 }
4954
4955 gsi = gsi_start_bb (bb);
4956 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4957 false, GSI_CONTINUE_LINKING);
4958 }
4959
4960 gsi = gsi_last_bb (bb);
4961 t = gimple_omp_parallel_data_arg (entry_stmt);
4962 if (t == NULL)
4963 t1 = null_pointer_node;
4964 else
4965 t1 = build_fold_addr_expr (t);
4966 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4967
4968 vec_alloc (args, 4 + vec_safe_length (ws_args));
4969 args->quick_push (t2);
4970 args->quick_push (t1);
4971 args->quick_push (val);
4972 if (ws_args)
4973 args->splice (*ws_args);
4974 args->quick_push (flags);
4975
4976 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
4977 builtin_decl_explicit (start_ix), args);
4978
4979 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4980 false, GSI_CONTINUE_LINKING);
4981 }
4982
4983 /* Insert a call to the function named by the first element of WS_ARGS (its
4984 second element is the grain), using the information from ENTRY_STMT, into the basic_block BB. */
4985
4986 static void
4987 expand_cilk_for_call (basic_block bb, gomp_parallel *entry_stmt,
4988 vec <tree, va_gc> *ws_args)
4989 {
4990 tree t, t1, t2;
4991 gimple_stmt_iterator gsi;
4992 vec <tree, va_gc> *args;
4993
4994 gcc_assert (vec_safe_length (ws_args) == 2);
4995 tree func_name = (*ws_args)[0];
4996 tree grain = (*ws_args)[1];
4997
4998 tree clauses = gimple_omp_parallel_clauses (entry_stmt);
4999 tree count = find_omp_clause (clauses, OMP_CLAUSE__CILK_FOR_COUNT_);
5000 gcc_assert (count != NULL_TREE);
5001 count = OMP_CLAUSE_OPERAND (count, 0);
5002
5003 gsi = gsi_last_bb (bb);
5004 t = gimple_omp_parallel_data_arg (entry_stmt);
5005 if (t == NULL)
5006 t1 = null_pointer_node;
5007 else
5008 t1 = build_fold_addr_expr (t);
5009 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
5010
5011 vec_alloc (args, 4);
5012 args->quick_push (t2);
5013 args->quick_push (t1);
5014 args->quick_push (count);
5015 args->quick_push (grain);
5016 t = build_call_expr_loc_vec (UNKNOWN_LOCATION, func_name, args);
5017
5018 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false,
5019 GSI_CONTINUE_LINKING);
5020 }
5021
5022 /* Build the function call to GOMP_task to actually
5023 generate the task operation. BB is the block where the code is to be inserted. */
5024
5025 static void
5026 expand_task_call (basic_block bb, gomp_task *entry_stmt)
5027 {
5028 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
5029 gimple_stmt_iterator gsi;
5030 location_t loc = gimple_location (entry_stmt);
5031
5032 clauses = gimple_omp_task_clauses (entry_stmt);
5033
5034 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
5035 if (c)
5036 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
5037 else
5038 cond = boolean_true_node;
5039
5040 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
5041 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
5042 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
5043 flags = build_int_cst (unsigned_type_node,
5044 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
5045
5046 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
5047 if (c)
5048 {
5049 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
5050 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
5051 build_int_cst (unsigned_type_node, 2),
5052 build_int_cst (unsigned_type_node, 0));
5053 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
5054 }
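/* The FLAGS argument to GOMP_task therefore encodes:
	bit 0 - untied task,
	bit 1 - final task (added conditionally above),
	bit 2 - mergeable task,
	bit 3 - a depend clause is present.  */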
5055 if (depend)
5056 depend = OMP_CLAUSE_DECL (depend);
5057 else
5058 depend = build_int_cst (ptr_type_node, 0);
5059
5060 gsi = gsi_last_bb (bb);
5061 t = gimple_omp_task_data_arg (entry_stmt);
5062 if (t == NULL)
5063 t2 = null_pointer_node;
5064 else
5065 t2 = build_fold_addr_expr_loc (loc, t);
5066 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
5067 t = gimple_omp_task_copy_fn (entry_stmt);
5068 if (t == NULL)
5069 t3 = null_pointer_node;
5070 else
5071 t3 = build_fold_addr_expr_loc (loc, t);
5072
5073 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
5074 8, t1, t2, t3,
5075 gimple_omp_task_arg_size (entry_stmt),
5076 gimple_omp_task_arg_align (entry_stmt), cond, flags,
5077 depend);
5078
5079 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5080 false, GSI_CONTINUE_LINKING);
5081 }
5082
5083
5084 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
5085 catch handler and return it. This prevents programs from violating the
5086 structured block semantics with throws. */
5087
5088 static gimple_seq
5089 maybe_catch_exception (gimple_seq body)
5090 {
5091 gimple g;
5092 tree decl;
5093
5094 if (!flag_exceptions)
5095 return body;
5096
5097 if (lang_hooks.eh_protect_cleanup_actions != NULL)
5098 decl = lang_hooks.eh_protect_cleanup_actions ();
5099 else
5100 decl = builtin_decl_explicit (BUILT_IN_TRAP);
5101
5102 g = gimple_build_eh_must_not_throw (decl);
5103 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
5104 GIMPLE_TRY_CATCH);
5105
5106 return gimple_seq_alloc_with_stmt (g);
5107 }
5108
5109 /* Chain all the DECLs in V together by their DECL_CHAIN fields and return the head of the chain. */
5110
5111 static tree
5112 vec2chain (vec<tree, va_gc> *v)
5113 {
5114 tree chain = NULL_TREE, t;
5115 unsigned ix;
5116
5117 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
5118 {
5119 DECL_CHAIN (t) = chain;
5120 chain = t;
5121 }
5122
5123 return chain;
5124 }
5125
5126
5127 /* Remove barriers in REGION->EXIT's block. Note that this is only
5128 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
5129 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
5130 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
5131 removed. */
5132
5133 static void
5134 remove_exit_barrier (struct omp_region *region)
5135 {
5136 gimple_stmt_iterator gsi;
5137 basic_block exit_bb;
5138 edge_iterator ei;
5139 edge e;
5140 gimple stmt;
5141 int any_addressable_vars = -1;
5142
5143 exit_bb = region->exit;
5144
5145 /* If the parallel region doesn't return, we don't have REGION->EXIT
5146 block at all. */
5147 if (! exit_bb)
5148 return;
5149
5150 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
5151 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
5152 statements that can appear in between are extremely limited -- no
5153 memory operations at all. Here, we allow nothing at all, so the
5154 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
5155 gsi = gsi_last_bb (exit_bb);
5156 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
5157 gsi_prev (&gsi);
5158 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
5159 return;
5160
5161 FOR_EACH_EDGE (e, ei, exit_bb->preds)
5162 {
5163 gsi = gsi_last_bb (e->src);
5164 if (gsi_end_p (gsi))
5165 continue;
5166 stmt = gsi_stmt (gsi);
5167 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
5168 && !gimple_omp_return_nowait_p (stmt))
5169 {
5170 /* OpenMP 3.0 tasks unfortunately prevent this optimization
5171 in many cases. If there could be tasks queued, the barrier
5172 might be needed to let the tasks run before some local
5173 variable of the parallel that the task uses as shared
5174 runs out of scope. The task can be spawned either
5175 from within current function (this would be easy to check)
5176 or from some function it calls and gets passed an address
5177 of such a variable. */
5178 if (any_addressable_vars < 0)
5179 {
5180 gomp_parallel *parallel_stmt
5181 = as_a <gomp_parallel *> (last_stmt (region->entry));
5182 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
5183 tree local_decls, block, decl;
5184 unsigned ix;
5185
5186 any_addressable_vars = 0;
5187 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
5188 if (TREE_ADDRESSABLE (decl))
5189 {
5190 any_addressable_vars = 1;
5191 break;
5192 }
5193 for (block = gimple_block (stmt);
5194 !any_addressable_vars
5195 && block
5196 && TREE_CODE (block) == BLOCK;
5197 block = BLOCK_SUPERCONTEXT (block))
5198 {
5199 for (local_decls = BLOCK_VARS (block);
5200 local_decls;
5201 local_decls = DECL_CHAIN (local_decls))
5202 if (TREE_ADDRESSABLE (local_decls))
5203 {
5204 any_addressable_vars = 1;
5205 break;
5206 }
5207 if (block == gimple_block (parallel_stmt))
5208 break;
5209 }
5210 }
5211 if (!any_addressable_vars)
5212 gimple_omp_return_set_nowait (stmt);
5213 }
5214 }
5215 }
5216
5217 static void
5218 remove_exit_barriers (struct omp_region *region)
5219 {
5220 if (region->type == GIMPLE_OMP_PARALLEL)
5221 remove_exit_barrier (region);
5222
5223 if (region->inner)
5224 {
5225 region = region->inner;
5226 remove_exit_barriers (region);
5227 while (region->next)
5228 {
5229 region = region->next;
5230 remove_exit_barriers (region);
5231 }
5232 }
5233 }
5234
5235 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
5236 calls. These can't be declared as const functions, but
5237 within one parallel body they are constant, so they can be
5238 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
5239 which are declared const. Similarly for task body, except
5240 that in untied task omp_get_thread_num () can change at any task
5241 scheduling point. */
5242
5243 static void
5244 optimize_omp_library_calls (gimple entry_stmt)
5245 {
5246 basic_block bb;
5247 gimple_stmt_iterator gsi;
5248 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
5249 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
5250 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
5251 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
5252 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
5253 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
5254 OMP_CLAUSE_UNTIED) != NULL);
5255
5256 FOR_EACH_BB_FN (bb, cfun)
5257 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5258 {
5259 gimple call = gsi_stmt (gsi);
5260 tree decl;
5261
5262 if (is_gimple_call (call)
5263 && (decl = gimple_call_fndecl (call))
5264 && DECL_EXTERNAL (decl)
5265 && TREE_PUBLIC (decl)
5266 && DECL_INITIAL (decl) == NULL)
5267 {
5268 tree built_in;
5269
5270 if (DECL_NAME (decl) == thr_num_id)
5271 {
5272 /* In #pragma omp task untied omp_get_thread_num () can change
5273 during the execution of the task region. */
5274 if (untied_task)
5275 continue;
5276 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
5277 }
5278 else if (DECL_NAME (decl) == num_thr_id)
5279 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
5280 else
5281 continue;
5282
5283 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
5284 || gimple_call_num_args (call) != 0)
5285 continue;
5286
5287 if (flag_exceptions && !TREE_NOTHROW (decl))
5288 continue;
5289
5290 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
5291 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
5292 TREE_TYPE (TREE_TYPE (built_in))))
5293 continue;
5294
5295 gimple_call_set_fndecl (call, built_in);
5296 }
5297 }
5298 }
5299
5300 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
5301 regimplified. */
5302
5303 static tree
5304 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
5305 {
5306 tree t = *tp;
5307
5308 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
5309 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
5310 return t;
5311
5312 if (TREE_CODE (t) == ADDR_EXPR)
5313 recompute_tree_invariant_for_addr_expr (t);
5314
5315 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
5316 return NULL_TREE;
5317 }
5318
5319 /* Prepend TO = FROM assignment before *GSI_P. */
5320
5321 static void
5322 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
5323 {
5324 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
5325 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
5326 true, GSI_SAME_STMT);
5327 gimple stmt = gimple_build_assign (to, from);
5328 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
5329 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
5330 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
5331 {
5332 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
5333 gimple_regimplify_operands (stmt, &gsi);
5334 }
5335 }
5336
5337 /* Expand the OpenMP parallel or task directive starting at REGION. */
5338
5339 static void
5340 expand_omp_taskreg (struct omp_region *region)
5341 {
5342 basic_block entry_bb, exit_bb, new_bb;
5343 struct function *child_cfun;
5344 tree child_fn, block, t;
5345 gimple_stmt_iterator gsi;
5346 gimple entry_stmt, stmt;
5347 edge e;
5348 vec<tree, va_gc> *ws_args;
5349
5350 entry_stmt = last_stmt (region->entry);
5351 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
5352 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
5353
5354 entry_bb = region->entry;
5355 if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK)
5356 exit_bb = region->cont;
5357 else
5358 exit_bb = region->exit;
5359
5360 bool is_cilk_for
5361 = (flag_cilkplus
5362 && gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL
5363 && find_omp_clause (gimple_omp_parallel_clauses (entry_stmt),
5364 OMP_CLAUSE__CILK_FOR_COUNT_) != NULL_TREE);
5365
5366 if (is_cilk_for)
5367 /* If it is a _Cilk_for statement, it is modelled *like* a parallel for,
5368 and the inner statement contains the name of the built-in function
5369 and grain. */
5370 ws_args = region->inner->ws_args;
5371 else if (is_combined_parallel (region))
5372 ws_args = region->ws_args;
5373 else
5374 ws_args = NULL;
5375
5376 if (child_cfun->cfg)
5377 {
5378 /* Due to inlining, it may happen that we have already outlined
5379 the region, in which case all we need to do is make the
5380 sub-graph unreachable and emit the parallel call. */
5381 edge entry_succ_e, exit_succ_e;
5382
5383 entry_succ_e = single_succ_edge (entry_bb);
5384
5385 gsi = gsi_last_bb (entry_bb);
5386 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
5387 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
5388 gsi_remove (&gsi, true);
5389
5390 new_bb = entry_bb;
5391 if (exit_bb)
5392 {
5393 exit_succ_e = single_succ_edge (exit_bb);
5394 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
5395 }
5396 remove_edge_and_dominated_blocks (entry_succ_e);
5397 }
5398 else
5399 {
5400 unsigned srcidx, dstidx, num;
5401
5402 /* If the parallel region needs data sent from the parent
5403 function, then the very first statement (except possible
5404 tree profile counter updates) of the parallel body
5405 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
5406 &.OMP_DATA_O is passed as an argument to the child function,
5407 we need to replace it with the argument as seen by the child
5408 function.
5409
5410 In most cases, this will end up being the identity assignment
5411 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
5412 a function call that has been inlined, the original PARM_DECL
5413 .OMP_DATA_I may have been converted into a different local
5414 variable, in which case we need to keep the assignment. */
5415 if (gimple_omp_taskreg_data_arg (entry_stmt))
5416 {
5417 basic_block entry_succ_bb
5418 = single_succ_p (entry_bb) ? single_succ (entry_bb)
5419 : FALLTHRU_EDGE (entry_bb)->dest;
5420 tree arg, narg;
5421 gimple parcopy_stmt = NULL;
5422
5423 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
5424 {
5425 gimple stmt;
5426
5427 gcc_assert (!gsi_end_p (gsi));
5428 stmt = gsi_stmt (gsi);
5429 if (gimple_code (stmt) != GIMPLE_ASSIGN)
5430 continue;
5431
5432 if (gimple_num_ops (stmt) == 2)
5433 {
5434 tree arg = gimple_assign_rhs1 (stmt);
5435
5436 /* We're ignoring the subcode because we're
5437 effectively doing a STRIP_NOPS. */
5438
5439 if (TREE_CODE (arg) == ADDR_EXPR
5440 && TREE_OPERAND (arg, 0)
5441 == gimple_omp_taskreg_data_arg (entry_stmt))
5442 {
5443 parcopy_stmt = stmt;
5444 break;
5445 }
5446 }
5447 }
5448
5449 gcc_assert (parcopy_stmt != NULL);
5450 arg = DECL_ARGUMENTS (child_fn);
5451
5452 if (!gimple_in_ssa_p (cfun))
5453 {
5454 if (gimple_assign_lhs (parcopy_stmt) == arg)
5455 gsi_remove (&gsi, true);
5456 else
5457 {
5458 /* ?? Is setting the subcode really necessary ?? */
5459 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
5460 gimple_assign_set_rhs1 (parcopy_stmt, arg);
5461 }
5462 }
5463 else
5464 {
5465 /* If we are in ssa form, we must load the value from the default
5466 definition of the argument. That should not be defined now,
5467 since the argument is not used uninitialized. */
5468 gcc_assert (ssa_default_def (cfun, arg) == NULL);
5469 narg = make_ssa_name (arg, gimple_build_nop ());
5470 set_ssa_default_def (cfun, arg, narg);
5471 /* ?? Is setting the subcode really necessary ?? */
5472 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
5473 gimple_assign_set_rhs1 (parcopy_stmt, narg);
5474 update_stmt (parcopy_stmt);
5475 }
5476 }
5477
5478 /* Declare local variables needed in CHILD_CFUN. */
5479 block = DECL_INITIAL (child_fn);
5480 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
5481 /* The gimplifier could record temporaries in the parallel/task block
5482 rather than in the containing function's local_decls chain,
5483 which would mean cgraph missed finalizing them. Do it now. */
5484 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
5485 if (TREE_CODE (t) == VAR_DECL
5486 && TREE_STATIC (t)
5487 && !DECL_EXTERNAL (t))
5488 varpool_node::finalize_decl (t);
5489 DECL_SAVED_TREE (child_fn) = NULL;
5490 /* We'll create a CFG for child_fn, so no gimple body is needed. */
5491 gimple_set_body (child_fn, NULL);
5492 TREE_USED (block) = 1;
5493
5494 /* Reset DECL_CONTEXT on function arguments. */
5495 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
5496 DECL_CONTEXT (t) = child_fn;
5497
5498 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
5499 so that it can be moved to the child function. */
5500 gsi = gsi_last_bb (entry_bb);
5501 stmt = gsi_stmt (gsi);
5502 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
5503 || gimple_code (stmt) == GIMPLE_OMP_TASK));
5504 e = split_block (entry_bb, stmt);
5505 gsi_remove (&gsi, true);
5506 entry_bb = e->dest;
5507 edge e2 = NULL;
5508 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
5509 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5510 else
5511 {
5512 e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL);
5513 gcc_assert (e2->dest == region->exit);
5514 remove_edge (BRANCH_EDGE (entry_bb));
5515 set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src);
5516 gsi = gsi_last_bb (region->exit);
5517 gcc_assert (!gsi_end_p (gsi)
5518 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
5519 gsi_remove (&gsi, true);
5520 }
5521
5522 /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR. */
5523 if (exit_bb)
5524 {
5525 gsi = gsi_last_bb (exit_bb);
5526 gcc_assert (!gsi_end_p (gsi)
5527 && (gimple_code (gsi_stmt (gsi))
5528 == (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN)));
5529 stmt = gimple_build_return (NULL);
5530 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5531 gsi_remove (&gsi, true);
5532 }
5533
5534 /* Move the parallel region into CHILD_CFUN. */
5535
5536 if (gimple_in_ssa_p (cfun))
5537 {
5538 init_tree_ssa (child_cfun);
5539 init_ssa_operands (child_cfun);
5540 child_cfun->gimple_df->in_ssa_p = true;
5541 block = NULL_TREE;
5542 }
5543 else
5544 block = gimple_block (entry_stmt);
5545
5546 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
5547 if (exit_bb)
5548 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
5549 if (e2)
5550 {
5551 basic_block dest_bb = e2->dest;
5552 if (!exit_bb)
5553 make_edge (new_bb, dest_bb, EDGE_FALLTHRU);
5554 remove_edge (e2);
5555 set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb);
5556 }
5557 /* When the OMP expansion process cannot guarantee an up-to-date
5558 loop tree, arrange for the child function to fix up loops. */
5559 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
5560 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
5561
5562 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
5563 num = vec_safe_length (child_cfun->local_decls);
5564 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
5565 {
5566 t = (*child_cfun->local_decls)[srcidx];
5567 if (DECL_CONTEXT (t) == cfun->decl)
5568 continue;
5569 if (srcidx != dstidx)
5570 (*child_cfun->local_decls)[dstidx] = t;
5571 dstidx++;
5572 }
5573 if (dstidx != num)
5574 vec_safe_truncate (child_cfun->local_decls, dstidx);
5575
5576 /* Inform the callgraph about the new function. */
5577 child_cfun->curr_properties = cfun->curr_properties;
5578 child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
5579 child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
5580 cgraph_node *node = cgraph_node::get_create (child_fn);
5581 node->parallelized_function = 1;
5582 cgraph_node::add_new_function (child_fn, true);
5583
5584 /* Fix the callgraph edges for child_cfun. Those for cfun will be
5585 fixed in a following pass. */
5586 push_cfun (child_cfun);
5587 if (optimize)
5588 optimize_omp_library_calls (entry_stmt);
5589 cgraph_edge::rebuild_edges ();
5590
5591 /* Some EH regions might become dead, see PR34608. If
5592 pass_cleanup_cfg isn't the first pass to happen with the
5593 new child, these dead EH edges might cause problems.
5594 Clean them up now. */
5595 if (flag_exceptions)
5596 {
5597 basic_block bb;
5598 bool changed = false;
5599
5600 FOR_EACH_BB_FN (bb, cfun)
5601 changed |= gimple_purge_dead_eh_edges (bb);
5602 if (changed)
5603 cleanup_tree_cfg ();
5604 }
5605 if (gimple_in_ssa_p (cfun))
5606 update_ssa (TODO_update_ssa);
5607 #ifdef ENABLE_CHECKING
5608 if (!loops_state_satisfies_p (LOOPS_NEED_FIXUP))
5609 verify_loop_structure ();
5610 #endif
5611 pop_cfun ();
5612 }
5613
5614 /* Emit a library call to launch the children threads. */
5615 if (is_cilk_for)
5616 expand_cilk_for_call (new_bb,
5617 as_a <gomp_parallel *> (entry_stmt), ws_args);
5618 else if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
5619 expand_parallel_call (region, new_bb,
5620 as_a <gomp_parallel *> (entry_stmt), ws_args);
5621 else
5622 expand_task_call (new_bb, as_a <gomp_task *> (entry_stmt));
5623 if (gimple_in_ssa_p (cfun))
5624 update_ssa (TODO_update_ssa_only_virtuals);
5625 }
5626
5627
5628 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
5629 of the combined collapse > 1 loop constructs, generate code like:
5630 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
5631 if (cond3 is <)
5632 adj = STEP3 - 1;
5633 else
5634 adj = STEP3 + 1;
5635 count3 = (adj + N32 - N31) / STEP3;
5636 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
5637 if (cond2 is <)
5638 adj = STEP2 - 1;
5639 else
5640 adj = STEP2 + 1;
5641 count2 = (adj + N22 - N21) / STEP2;
5642 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
5643 if (cond1 is <)
5644 adj = STEP1 - 1;
5645 else
5646 adj = STEP1 + 1;
5647 count1 = (adj + N12 - N11) / STEP1;
5648 count = count1 * count2 * count3;
5649 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
5650 count = 0;
5651 and set ZERO_ITER_BB to that bb. If this isn't the outermost
5652 of the combined loop constructs, just initialize COUNTS array
5653 from the _looptemp_ clauses. */
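
/* For illustration only (a hypothetical example, not taken from the
   sources): with
     collapse(2)
     for (i = 0; i < 10; i += 3)	(cond1 is <, STEP1 = 3)
       for (j = 20; j > 10; j -= 4)	(cond2 is >, STEP2 = -4)
   the computation above yields
     count1 = ((3 - 1) + 10 - 0) / 3    = 4	(i = 0, 3, 6, 9)
     count2 = ((-4 + 1) + 10 - 20) / -4 = 3	(j = 20, 16, 12)
     count  = count1 * count2 = 12
   which is the total number of logical iterations handed to the
   runtime. */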
5654
5655 /* NOTE: It *could* be better to moosh all of the BBs together,
5656 creating one larger BB with all the computation and the unexpected
5657 jump at the end. I.e.
5658
5659 bool zero3, zero2, zero1, zero;
5660
5661 zero3 = N32 c3 N31;
5662 count3 = (N32 - N31) /[cl] STEP3;
5663 zero2 = N22 c2 N21;
5664 count2 = (N22 - N21) /[cl] STEP2;
5665 zero1 = N12 c1 N11;
5666 count1 = (N12 - N11) /[cl] STEP1;
5667 zero = zero3 || zero2 || zero1;
5668 count = count1 * count2 * count3;
5669 if (__builtin_expect(zero, false)) goto zero_iter_bb;
5670
5671 After all, we expect zero to be false, and thus we expect to have to
5672 evaluate all of the comparison expressions, so short-circuiting
5673 oughtn't be a win. Since the condition isn't protecting a
5674 denominator, we're not concerned about divide-by-zero, so we can
5675 fully evaluate count even if a numerator turned out to be wrong.
5676
5677 It seems like putting this all together would create much better
5678 scheduling opportunities, and less pressure on the chip's branch
5679 predictor. */
5680
5681 static void
5682 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5683 basic_block &entry_bb, tree *counts,
5684 basic_block &zero_iter_bb, int &first_zero_iter,
5685 basic_block &l2_dom_bb)
5686 {
5687 tree t, type = TREE_TYPE (fd->loop.v);
5688 edge e, ne;
5689 int i;
5690
5691 /* Collapsed loops need work for expansion into SSA form. */
5692 gcc_assert (!gimple_in_ssa_p (cfun));
5693
5694 if (gimple_omp_for_combined_into_p (fd->for_stmt)
5695 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
5696 {
5697 /* The first two _looptemp_ clauses are for istart/iend; counts[0]
5698 isn't supposed to be handled, as the inner loop doesn't
5699 use it. */
5700 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5701 OMP_CLAUSE__LOOPTEMP_);
5702 gcc_assert (innerc);
5703 for (i = 0; i < fd->collapse; i++)
5704 {
5705 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5706 OMP_CLAUSE__LOOPTEMP_);
5707 gcc_assert (innerc);
5708 if (i)
5709 counts[i] = OMP_CLAUSE_DECL (innerc);
5710 else
5711 counts[0] = NULL_TREE;
5712 }
5713 return;
5714 }
5715
5716 for (i = 0; i < fd->collapse; i++)
5717 {
5718 tree itype = TREE_TYPE (fd->loops[i].v);
5719
5720 if (SSA_VAR_P (fd->loop.n2)
5721 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
5722 fold_convert (itype, fd->loops[i].n1),
5723 fold_convert (itype, fd->loops[i].n2)))
5724 == NULL_TREE || !integer_onep (t)))
5725 {
5726 gcond *cond_stmt;
5727 tree n1, n2;
5728 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
5729 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
5730 true, GSI_SAME_STMT);
5731 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
5732 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
5733 true, GSI_SAME_STMT);
5734 cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
5735 NULL_TREE, NULL_TREE);
5736 gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
5737 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
5738 expand_omp_regimplify_p, NULL, NULL)
5739 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
5740 expand_omp_regimplify_p, NULL, NULL))
5741 {
5742 *gsi = gsi_for_stmt (cond_stmt);
5743 gimple_regimplify_operands (cond_stmt, gsi);
5744 }
5745 e = split_block (entry_bb, cond_stmt);
5746 if (zero_iter_bb == NULL)
5747 {
5748 gassign *assign_stmt;
5749 first_zero_iter = i;
5750 zero_iter_bb = create_empty_bb (entry_bb);
5751 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
5752 *gsi = gsi_after_labels (zero_iter_bb);
5753 assign_stmt = gimple_build_assign (fd->loop.n2,
5754 build_zero_cst (type));
5755 gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
5756 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
5757 entry_bb);
5758 }
5759 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
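	  /* The zero-iteration path is expected to be taken very rarely,
	     so give it only a tiny fraction of REG_BR_PROB_BASE.  */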
5760 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
5761 e->flags = EDGE_TRUE_VALUE;
5762 e->probability = REG_BR_PROB_BASE - ne->probability;
5763 if (l2_dom_bb == NULL)
5764 l2_dom_bb = entry_bb;
5765 entry_bb = e->dest;
5766 *gsi = gsi_last_bb (entry_bb);
5767 }
5768
5769 if (POINTER_TYPE_P (itype))
5770 itype = signed_type_for (itype);
5771 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
5772 ? -1 : 1));
5773 t = fold_build2 (PLUS_EXPR, itype,
5774 fold_convert (itype, fd->loops[i].step), t);
5775 t = fold_build2 (PLUS_EXPR, itype, t,
5776 fold_convert (itype, fd->loops[i].n2));
5777 t = fold_build2 (MINUS_EXPR, itype, t,
5778 fold_convert (itype, fd->loops[i].n1));
5779 /* ?? We could probably use CEIL_DIV_EXPR here instead of
5780 using TRUNC_DIV_EXPR and adjusting by hand, unless we can't
5781 generate the same code in the end because generically we
5782 don't know that the values involved must be negative for
5783 GT?? */
5784 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
5785 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5786 fold_build1 (NEGATE_EXPR, itype, t),
5787 fold_build1 (NEGATE_EXPR, itype,
5788 fold_convert (itype,
5789 fd->loops[i].step)));
5790 else
5791 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
5792 fold_convert (itype, fd->loops[i].step));
5793 t = fold_convert (type, t);
5794 if (TREE_CODE (t) == INTEGER_CST)
5795 counts[i] = t;
5796 else
5797 {
5798 counts[i] = create_tmp_reg (type, ".count");
5799 expand_omp_build_assign (gsi, counts[i], t);
5800 }
5801 if (SSA_VAR_P (fd->loop.n2))
5802 {
5803 if (i == 0)
5804 t = counts[0];
5805 else
5806 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5807 expand_omp_build_assign (gsi, fd->loop.n2, t);
5808 }
5809 }
5810 }
5811
5812
5813 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5814 T = V;
5815 V3 = N31 + (T % count3) * STEP3;
5816 T = T / count3;
5817 V2 = N21 + (T % count2) * STEP2;
5818 T = T / count2;
5819 V1 = N11 + T * STEP1;
5820 if this loop doesn't have an inner loop construct combined with it.
5821 If it does have an inner loop construct combined with it and the
5822 iteration count isn't known constant, store values from counts array
5823 into its _looptemp_ temporaries instead. */
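
/* Continuing the hypothetical collapse(2) example used above
   (i from 0 by 3 below 10, j from 20 by -4 above 10, count2 = 3),
   logical iteration V = 7 is decomposed as
     T  = 7
     V2 = 20 + (7 % 3) * -4 = 16
     T  = 7 / 3 = 2
     V1 = 0 + 2 * 3 = 6
   i.e. it corresponds to (i, j) = (6, 16). */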
5824
5825 static void
5826 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5827 tree *counts, gimple inner_stmt, tree startvar)
5828 {
5829 int i;
5830 if (gimple_omp_for_combined_p (fd->for_stmt))
5831 {
5832 /* If fd->loop.n2 is constant, then no propagation of the counts
5833 is needed; they are constant. */
5834 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5835 return;
5836
5837 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5838 ? gimple_omp_parallel_clauses (inner_stmt)
5839 : gimple_omp_for_clauses (inner_stmt);
5840 /* The first two _looptemp_ clauses are for istart/iend; counts[0]
5841 isn't supposed to be handled, as the inner loop doesn't
5842 use it. */
5843 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5844 gcc_assert (innerc);
5845 for (i = 0; i < fd->collapse; i++)
5846 {
5847 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5848 OMP_CLAUSE__LOOPTEMP_);
5849 gcc_assert (innerc);
5850 if (i)
5851 {
5852 tree tem = OMP_CLAUSE_DECL (innerc);
5853 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5854 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5855 false, GSI_CONTINUE_LINKING);
5856 gassign *stmt = gimple_build_assign (tem, t);
5857 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5858 }
5859 }
5860 return;
5861 }
5862
5863 tree type = TREE_TYPE (fd->loop.v);
5864 tree tem = create_tmp_reg (type, ".tem");
5865 gassign *stmt = gimple_build_assign (tem, startvar);
5866 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5867
5868 for (i = fd->collapse - 1; i >= 0; i--)
5869 {
5870 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5871 itype = vtype;
5872 if (POINTER_TYPE_P (vtype))
5873 itype = signed_type_for (vtype);
5874 if (i != 0)
5875 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5876 else
5877 t = tem;
5878 t = fold_convert (itype, t);
5879 t = fold_build2 (MULT_EXPR, itype, t,
5880 fold_convert (itype, fd->loops[i].step));
5881 if (POINTER_TYPE_P (vtype))
5882 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5883 else
5884 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5885 t = force_gimple_operand_gsi (gsi, t,
5886 DECL_P (fd->loops[i].v)
5887 && TREE_ADDRESSABLE (fd->loops[i].v),
5888 NULL_TREE, false,
5889 GSI_CONTINUE_LINKING);
5890 stmt = gimple_build_assign (fd->loops[i].v, t);
5891 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5892 if (i != 0)
5893 {
5894 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5895 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5896 false, GSI_CONTINUE_LINKING);
5897 stmt = gimple_build_assign (tem, t);
5898 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5899 }
5900 }
5901 }
5902
5903
5904 /* Helper function for expand_omp_for_*. Generate code like:
5905 L10:
5906 V3 += STEP3;
5907 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5908 L11:
5909 V3 = N31;
5910 V2 += STEP2;
5911 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5912 L12:
5913 V2 = N21;
5914 V1 += STEP1;
5915 goto BODY_BB; */
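
/* In effect this is an odometer-style increment of the collapsed
   iteration variables: the innermost variable is stepped, and whenever
   it runs off its range it is reset to its start value and the next
   outer variable is stepped.  In the hypothetical collapse(2) example
   above, after (i, j) = (6, 12) the generated code resets j to 20 and
   advances i to 9. */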
5916
5917 static basic_block
5918 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5919 basic_block body_bb)
5920 {
5921 basic_block last_bb, bb, collapse_bb = NULL;
5922 int i;
5923 gimple_stmt_iterator gsi;
5924 edge e;
5925 tree t;
5926 gimple stmt;
5927
5928 last_bb = cont_bb;
5929 for (i = fd->collapse - 1; i >= 0; i--)
5930 {
5931 tree vtype = TREE_TYPE (fd->loops[i].v);
5932
5933 bb = create_empty_bb (last_bb);
5934 add_bb_to_loop (bb, last_bb->loop_father);
5935 gsi = gsi_start_bb (bb);
5936
5937 if (i < fd->collapse - 1)
5938 {
5939 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5940 e->probability = REG_BR_PROB_BASE / 8;
5941
5942 t = fd->loops[i + 1].n1;
5943 t = force_gimple_operand_gsi (&gsi, t,
5944 DECL_P (fd->loops[i + 1].v)
5945 && TREE_ADDRESSABLE (fd->loops[i
5946 + 1].v),
5947 NULL_TREE, false,
5948 GSI_CONTINUE_LINKING);
5949 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5950 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5951 }
5952 else
5953 collapse_bb = bb;
5954
5955 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5956
5957 if (POINTER_TYPE_P (vtype))
5958 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5959 else
5960 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5961 t = force_gimple_operand_gsi (&gsi, t,
5962 DECL_P (fd->loops[i].v)
5963 && TREE_ADDRESSABLE (fd->loops[i].v),
5964 NULL_TREE, false, GSI_CONTINUE_LINKING);
5965 stmt = gimple_build_assign (fd->loops[i].v, t);
5966 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5967
5968 if (i > 0)
5969 {
5970 t = fd->loops[i].n2;
5971 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5972 false, GSI_CONTINUE_LINKING);
5973 tree v = fd->loops[i].v;
5974 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5975 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5976 false, GSI_CONTINUE_LINKING);
5977 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5978 stmt = gimple_build_cond_empty (t);
5979 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5980 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5981 e->probability = REG_BR_PROB_BASE * 7 / 8;
5982 }
5983 else
5984 make_edge (bb, body_bb, EDGE_FALLTHRU);
5985 last_bb = bb;
5986 }
5987
5988 return collapse_bb;
5989 }
5990
5991
5992 /* A subroutine of expand_omp_for. Generate code for a parallel
5993 loop with any schedule. Given parameters:
5994
5995 for (V = N1; V cond N2; V += STEP) BODY;
5996
5997 where COND is "<" or ">", we generate pseudocode
5998
5999 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
6000 if (more) goto L0; else goto L3;
6001 L0:
6002 V = istart0;
6003 iend = iend0;
6004 L1:
6005 BODY;
6006 V += STEP;
6007 if (V cond iend) goto L1; else goto L2;
6008 L2:
6009 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
6010 L3:
6011
6012 If this is a combined omp parallel loop, instead of the call to
6013 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
6014 If this is gimple_omp_for_combined_p loop, then instead of assigning
6015 V and iend in L0 we assign the first two _looptemp_ clause decls of the
6016 inner GIMPLE_OMP_FOR and V += STEP; and
6017 if (V cond iend) goto L1; else goto L2; are removed.
6018
6019 For collapsed loops, given parameters:
6020 collapse(3)
6021 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
6022 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
6023 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
6024 BODY;
6025
6026 we generate pseudocode
6027
6028 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
6029 if (cond3 is <)
6030 adj = STEP3 - 1;
6031 else
6032 adj = STEP3 + 1;
6033 count3 = (adj + N32 - N31) / STEP3;
6034 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
6035 if (cond2 is <)
6036 adj = STEP2 - 1;
6037 else
6038 adj = STEP2 + 1;
6039 count2 = (adj + N22 - N21) / STEP2;
6040 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
6041 if (cond1 is <)
6042 adj = STEP1 - 1;
6043 else
6044 adj = STEP1 + 1;
6045 count1 = (adj + N12 - N11) / STEP1;
6046 count = count1 * count2 * count3;
6047 goto Z1;
6048 Z0:
6049 count = 0;
6050 Z1:
6051 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
6052 if (more) goto L0; else goto L3;
6053 L0:
6054 V = istart0;
6055 T = V;
6056 V3 = N31 + (T % count3) * STEP3;
6057 T = T / count3;
6058 V2 = N21 + (T % count2) * STEP2;
6059 T = T / count2;
6060 V1 = N11 + T * STEP1;
6061 iend = iend0;
6062 L1:
6063 BODY;
6064 V += 1;
6065 if (V < iend) goto L10; else goto L2;
6066 L10:
6067 V3 += STEP3;
6068 if (V3 cond3 N32) goto L1; else goto L11;
6069 L11:
6070 V3 = N31;
6071 V2 += STEP2;
6072 if (V2 cond2 N22) goto L1; else goto L12;
6073 L12:
6074 V2 = N21;
6075 V1 += STEP1;
6076 goto L1;
6077 L2:
6078 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
6079 L3:
6080
6081 */
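
/* A rough C-level sketch of what each thread executes in the
   non-collapsed case (for illustration only; "foo" stands for the
   schedule-specific entry point and "cond" for the loop condition,
   exactly as in the pseudocode above):

     long istart0, iend0;
     if (GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0))
       do
	 {
	   long V;
	   for (V = istart0; V cond iend0; V += STEP)
	     BODY;
	 }
       while (GOMP_loop_foo_next (&istart0, &iend0));
     GOMP_loop_end ();   (or the _nowait/_cancel variant)  */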
6082
6083 static void
6084 expand_omp_for_generic (struct omp_region *region,
6085 struct omp_for_data *fd,
6086 enum built_in_function start_fn,
6087 enum built_in_function next_fn,
6088 gimple inner_stmt)
6089 {
6090 tree type, istart0, iend0, iend;
6091 tree t, vmain, vback, bias = NULL_TREE;
6092 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
6093 basic_block l2_bb = NULL, l3_bb = NULL;
6094 gimple_stmt_iterator gsi;
6095 gassign *assign_stmt;
6096 bool in_combined_parallel = is_combined_parallel (region);
6097 bool broken_loop = region->cont == NULL;
6098 edge e, ne;
6099 tree *counts = NULL;
6100 int i;
6101
6102 gcc_assert (!broken_loop || !in_combined_parallel);
6103 gcc_assert (fd->iter_type == long_integer_type_node
6104 || !in_combined_parallel);
6105
6106 type = TREE_TYPE (fd->loop.v);
6107 istart0 = create_tmp_var (fd->iter_type, ".istart0");
6108 iend0 = create_tmp_var (fd->iter_type, ".iend0");
6109 TREE_ADDRESSABLE (istart0) = 1;
6110 TREE_ADDRESSABLE (iend0) = 1;
6111
6112 /* See if we need to bias by LLONG_MIN. */
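  /* The runtime's unsigned long long entry points compare bounds as
     unsigned values, so a signed range that may straddle zero is
     shifted by TYPE_MIN_VALUE into the unsigned domain, preserving its
     ordering; the same bias is subtracted again below when istart0 and
     iend0 are converted back to the loop's type.  */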
6113 if (fd->iter_type == long_long_unsigned_type_node
6114 && TREE_CODE (type) == INTEGER_TYPE
6115 && !TYPE_UNSIGNED (type))
6116 {
6117 tree n1, n2;
6118
6119 if (fd->loop.cond_code == LT_EXPR)
6120 {
6121 n1 = fd->loop.n1;
6122 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
6123 }
6124 else
6125 {
6126 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
6127 n2 = fd->loop.n1;
6128 }
6129 if (TREE_CODE (n1) != INTEGER_CST
6130 || TREE_CODE (n2) != INTEGER_CST
6131 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
6132 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
6133 }
6134
6135 entry_bb = region->entry;
6136 cont_bb = region->cont;
6137 collapse_bb = NULL;
6138 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6139 gcc_assert (broken_loop
6140 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6141 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
6142 l1_bb = single_succ (l0_bb);
6143 if (!broken_loop)
6144 {
6145 l2_bb = create_empty_bb (cont_bb);
6146 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
6147 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6148 }
6149 else
6150 l2_bb = NULL;
6151 l3_bb = BRANCH_EDGE (entry_bb)->dest;
6152 exit_bb = region->exit;
6153
6154 gsi = gsi_last_bb (entry_bb);
6155
6156 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6157 if (fd->collapse > 1)
6158 {
6159 int first_zero_iter = -1;
6160 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
6161
6162 counts = XALLOCAVEC (tree, fd->collapse);
6163 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6164 zero_iter_bb, first_zero_iter,
6165 l2_dom_bb);
6166
6167 if (zero_iter_bb)
6168 {
6169 /* Some counts[i] vars might be uninitialized if
6170 some loop has zero iterations. But the body shouldn't
6171 be executed in that case, so just avoid uninit warnings. */
6172 for (i = first_zero_iter; i < fd->collapse; i++)
6173 if (SSA_VAR_P (counts[i]))
6174 TREE_NO_WARNING (counts[i]) = 1;
6175 gsi_prev (&gsi);
6176 e = split_block (entry_bb, gsi_stmt (gsi));
6177 entry_bb = e->dest;
6178 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
6179 gsi = gsi_last_bb (entry_bb);
6180 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
6181 get_immediate_dominator (CDI_DOMINATORS,
6182 zero_iter_bb));
6183 }
6184 }
6185 if (in_combined_parallel)
6186 {
6187 /* In a combined parallel loop, emit a call to
6188 GOMP_loop_foo_next. */
6189 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
6190 build_fold_addr_expr (istart0),
6191 build_fold_addr_expr (iend0));
6192 }
6193 else
6194 {
6195 tree t0, t1, t2, t3, t4;
6196 /* If this is not a combined parallel loop, emit a call to
6197 GOMP_loop_foo_start in ENTRY_BB. */
6198 t4 = build_fold_addr_expr (iend0);
6199 t3 = build_fold_addr_expr (istart0);
6200 t2 = fold_convert (fd->iter_type, fd->loop.step);
6201 t1 = fd->loop.n2;
6202 t0 = fd->loop.n1;
6203 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6204 {
6205 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6206 OMP_CLAUSE__LOOPTEMP_);
6207 gcc_assert (innerc);
6208 t0 = OMP_CLAUSE_DECL (innerc);
6209 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6210 OMP_CLAUSE__LOOPTEMP_);
6211 gcc_assert (innerc);
6212 t1 = OMP_CLAUSE_DECL (innerc);
6213 }
6214 if (POINTER_TYPE_P (TREE_TYPE (t0))
6215 && TYPE_PRECISION (TREE_TYPE (t0))
6216 != TYPE_PRECISION (fd->iter_type))
6217 {
6218 /* Avoid casting pointers to an integer of a different size. */
6219 tree itype = signed_type_for (type);
6220 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
6221 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
6222 }
6223 else
6224 {
6225 t1 = fold_convert (fd->iter_type, t1);
6226 t0 = fold_convert (fd->iter_type, t0);
6227 }
6228 if (bias)
6229 {
6230 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
6231 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
6232 }
6233 if (fd->iter_type == long_integer_type_node)
6234 {
6235 if (fd->chunk_size)
6236 {
6237 t = fold_convert (fd->iter_type, fd->chunk_size);
6238 t = build_call_expr (builtin_decl_explicit (start_fn),
6239 6, t0, t1, t2, t, t3, t4);
6240 }
6241 else
6242 t = build_call_expr (builtin_decl_explicit (start_fn),
6243 5, t0, t1, t2, t3, t4);
6244 }
6245 else
6246 {
6247 tree t5;
6248 tree c_bool_type;
6249 tree bfn_decl;
6250
6251 /* The GOMP_loop_ull_*start functions have an additional boolean
6252 argument, true for < loops and false for > loops.
6253 In Fortran, the C bool type can be different from
6254 boolean_type_node. */
6255 bfn_decl = builtin_decl_explicit (start_fn);
6256 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
6257 t5 = build_int_cst (c_bool_type,
6258 fd->loop.cond_code == LT_EXPR ? 1 : 0);
6259 if (fd->chunk_size)
6260 {
6261 tree bfn_decl = builtin_decl_explicit (start_fn);
6262 t = fold_convert (fd->iter_type, fd->chunk_size);
6263 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
6264 }
6265 else
6266 t = build_call_expr (builtin_decl_explicit (start_fn),
6267 6, t5, t0, t1, t2, t3, t4);
6268 }
6269 }
6270 if (TREE_TYPE (t) != boolean_type_node)
6271 t = fold_build2 (NE_EXPR, boolean_type_node,
6272 t, build_int_cst (TREE_TYPE (t), 0));
6273 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6274 true, GSI_SAME_STMT);
6275 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6276
6277 /* Remove the GIMPLE_OMP_FOR statement. */
6278 gsi_remove (&gsi, true);
6279
6280 /* Iteration setup for sequential loop goes in L0_BB. */
6281 tree startvar = fd->loop.v;
6282 tree endvar = NULL_TREE;
6283
6284 if (gimple_omp_for_combined_p (fd->for_stmt))
6285 {
6286 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
6287 && gimple_omp_for_kind (inner_stmt)
6288 == GF_OMP_FOR_KIND_SIMD);
6289 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
6290 OMP_CLAUSE__LOOPTEMP_);
6291 gcc_assert (innerc);
6292 startvar = OMP_CLAUSE_DECL (innerc);
6293 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6294 OMP_CLAUSE__LOOPTEMP_);
6295 gcc_assert (innerc);
6296 endvar = OMP_CLAUSE_DECL (innerc);
6297 }
6298
6299 gsi = gsi_start_bb (l0_bb);
6300 t = istart0;
6301 if (bias)
6302 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
6303 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
6304 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
6305 t = fold_convert (TREE_TYPE (startvar), t);
6306 t = force_gimple_operand_gsi (&gsi, t,
6307 DECL_P (startvar)
6308 && TREE_ADDRESSABLE (startvar),
6309 NULL_TREE, false, GSI_CONTINUE_LINKING);
6310 assign_stmt = gimple_build_assign (startvar, t);
6311 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6312
6313 t = iend0;
6314 if (bias)
6315 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
6316 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
6317 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
6318 t = fold_convert (TREE_TYPE (startvar), t);
6319 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6320 false, GSI_CONTINUE_LINKING);
6321 if (endvar)
6322 {
6323 assign_stmt = gimple_build_assign (endvar, iend);
6324 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6325 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
6326 assign_stmt = gimple_build_assign (fd->loop.v, iend);
6327 else
6328 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend);
6329 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6330 }
6331 if (fd->collapse > 1)
6332 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6333
6334 if (!broken_loop)
6335 {
6336 /* Code to control the increment and predicate for the sequential
6337 loop goes in the CONT_BB. */
6338 gsi = gsi_last_bb (cont_bb);
6339 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
6340 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
6341 vmain = gimple_omp_continue_control_use (cont_stmt);
6342 vback = gimple_omp_continue_control_def (cont_stmt);
6343
6344 if (!gimple_omp_for_combined_p (fd->for_stmt))
6345 {
6346 if (POINTER_TYPE_P (type))
6347 t = fold_build_pointer_plus (vmain, fd->loop.step);
6348 else
6349 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
6350 t = force_gimple_operand_gsi (&gsi, t,
6351 DECL_P (vback)
6352 && TREE_ADDRESSABLE (vback),
6353 NULL_TREE, true, GSI_SAME_STMT);
6354 assign_stmt = gimple_build_assign (vback, t);
6355 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6356
6357 t = build2 (fd->loop.cond_code, boolean_type_node,
6358 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
6359 iend);
6360 gcond *cond_stmt = gimple_build_cond_empty (t);
6361 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6362 }
6363
6364 /* Remove GIMPLE_OMP_CONTINUE. */
6365 gsi_remove (&gsi, true);
6366
6367 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6368 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
6369
6370 /* Emit code to get the next parallel iteration in L2_BB. */
6371 gsi = gsi_start_bb (l2_bb);
6372
6373 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
6374 build_fold_addr_expr (istart0),
6375 build_fold_addr_expr (iend0));
6376 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6377 false, GSI_CONTINUE_LINKING);
6378 if (TREE_TYPE (t) != boolean_type_node)
6379 t = fold_build2 (NE_EXPR, boolean_type_node,
6380 t, build_int_cst (TREE_TYPE (t), 0));
6381 gcond *cond_stmt = gimple_build_cond_empty (t);
6382 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
6383 }
6384
6385 /* Add the loop cleanup function. */
6386 gsi = gsi_last_bb (exit_bb);
6387 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6388 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
6389 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
6390 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
6391 else
6392 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
6393 gcall *call_stmt = gimple_build_call (t, 0);
6394 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
6395 gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
6396 gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
6397 gsi_remove (&gsi, true);
6398
6399 /* Connect the new blocks. */
6400 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
6401 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
6402
6403 if (!broken_loop)
6404 {
6405 gimple_seq phis;
6406
6407 e = find_edge (cont_bb, l3_bb);
6408 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
6409
6410 phis = phi_nodes (l3_bb);
6411 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
6412 {
6413 gimple phi = gsi_stmt (gsi);
6414 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
6415 PHI_ARG_DEF_FROM_EDGE (phi, e));
6416 }
6417 remove_edge (e);
6418
6419 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
6420 add_bb_to_loop (l2_bb, cont_bb->loop_father);
6421 e = find_edge (cont_bb, l1_bb);
6422 if (gimple_omp_for_combined_p (fd->for_stmt))
6423 {
6424 remove_edge (e);
6425 e = NULL;
6426 }
6427 else if (fd->collapse > 1)
6428 {
6429 remove_edge (e);
6430 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6431 }
6432 else
6433 e->flags = EDGE_TRUE_VALUE;
6434 if (e)
6435 {
6436 e->probability = REG_BR_PROB_BASE * 7 / 8;
6437 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
6438 }
6439 else
6440 {
6441 e = find_edge (cont_bb, l2_bb);
6442 e->flags = EDGE_FALLTHRU;
6443 }
6444 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
6445
6446 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
6447 recompute_dominator (CDI_DOMINATORS, l2_bb));
6448 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
6449 recompute_dominator (CDI_DOMINATORS, l3_bb));
6450 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
6451 recompute_dominator (CDI_DOMINATORS, l0_bb));
6452 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
6453 recompute_dominator (CDI_DOMINATORS, l1_bb));
6454
6455 struct loop *outer_loop = alloc_loop ();
6456 outer_loop->header = l0_bb;
6457 outer_loop->latch = l2_bb;
6458 add_loop (outer_loop, l0_bb->loop_father);
6459
6460 if (!gimple_omp_for_combined_p (fd->for_stmt))
6461 {
6462 struct loop *loop = alloc_loop ();
6463 loop->header = l1_bb;
6464 /* The loop may have multiple latches. */
6465 add_loop (loop, outer_loop);
6466 }
6467 }
6468 }
6469
6470
6471 /* A subroutine of expand_omp_for. Generate code for a parallel
6472 loop with static schedule and no specified chunk size. Given
6473 parameters:
6474
6475 for (V = N1; V cond N2; V += STEP) BODY;
6476
6477 where COND is "<" or ">", we generate pseudocode
6478
6479 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6480 if (cond is <)
6481 adj = STEP - 1;
6482 else
6483 adj = STEP + 1;
6484 if ((__typeof (V)) -1 > 0 && cond is >)
6485 n = -(adj + N2 - N1) / -STEP;
6486 else
6487 n = (adj + N2 - N1) / STEP;
6488 q = n / nthreads;
6489 tt = n % nthreads;
6490 if (threadid < tt) goto L3; else goto L4;
6491 L3:
6492 tt = 0;
6493 q = q + 1;
6494 L4:
6495 s0 = q * threadid + tt;
6496 e0 = s0 + q;
6497 V = s0 * STEP + N1;
6498 if (s0 >= e0) goto L2; else goto L0;
6499 L0:
6500 e = e0 * STEP + N1;
6501 L1:
6502 BODY;
6503 V += STEP;
6504 if (V cond e) goto L1;
6505 L2:
6506 */
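
/* For illustration, a hypothetical example of the partitioning above
   (not taken from the sources): with n = 10 iterations and nthreads = 4,
     q  = 10 / 4 = 2
     tt = 10 % 4 = 2
   threads 0 and 1 (threadid < tt) take q + 1 = 3 iterations each and
   threads 2 and 3 take 2, giving the ranges
     thread 0: s0 = 0,              e0 = 3
     thread 1: s0 = 3,              e0 = 6
     thread 2: s0 = 2 * 2 + 2 = 6,  e0 = 8
     thread 3: s0 = 2 * 3 + 2 = 8,  e0 = 10
   which exactly cover [0, 10). */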
6507
6508 static void
6509 expand_omp_for_static_nochunk (struct omp_region *region,
6510 struct omp_for_data *fd,
6511 gimple inner_stmt)
6512 {
6513 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
6514 tree type, itype, vmain, vback;
6515 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
6516 basic_block body_bb, cont_bb, collapse_bb = NULL;
6517 basic_block fin_bb;
6518 gimple_stmt_iterator gsi;
6519 edge ep;
6520 bool broken_loop = region->cont == NULL;
6521 tree *counts = NULL;
6522 tree n1, n2, step;
6523
6524 gcc_checking_assert ((gimple_omp_for_kind (fd->for_stmt)
6525 != GF_OMP_FOR_KIND_OACC_LOOP)
6526 || !inner_stmt);
6527
6528 itype = type = TREE_TYPE (fd->loop.v);
6529 if (POINTER_TYPE_P (type))
6530 itype = signed_type_for (type);
6531
6532 entry_bb = region->entry;
6533 cont_bb = region->cont;
6534 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6535 fin_bb = BRANCH_EDGE (entry_bb)->dest;
6536 gcc_assert (broken_loop
6537 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
6538 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
6539 body_bb = single_succ (seq_start_bb);
6540 if (!broken_loop)
6541 {
6542 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb
6543 || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb);
6544 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6545 }
6546 exit_bb = region->exit;
6547
6548 /* Iteration space partitioning goes in ENTRY_BB. */
6549 gsi = gsi_last_bb (entry_bb);
6550 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6551
6552 if (fd->collapse > 1)
6553 {
6554 int first_zero_iter = -1;
6555 basic_block l2_dom_bb = NULL;
6556
6557 counts = XALLOCAVEC (tree, fd->collapse);
6558 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6559 fin_bb, first_zero_iter,
6560 l2_dom_bb);
6561 t = NULL_TREE;
6562 }
6563 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6564 t = integer_one_node;
6565 else
6566 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6567 fold_convert (type, fd->loop.n1),
6568 fold_convert (type, fd->loop.n2));
6569 if (fd->collapse == 1
6570 && TYPE_UNSIGNED (type)
6571 && (t == NULL_TREE || !integer_onep (t)))
6572 {
6573 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6574 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6575 true, GSI_SAME_STMT);
6576 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6577 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6578 true, GSI_SAME_STMT);
6579 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6580 NULL_TREE, NULL_TREE);
6581 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6582 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6583 expand_omp_regimplify_p, NULL, NULL)
6584 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6585 expand_omp_regimplify_p, NULL, NULL))
6586 {
6587 gsi = gsi_for_stmt (cond_stmt);
6588 gimple_regimplify_operands (cond_stmt, &gsi);
6589 }
6590 ep = split_block (entry_bb, cond_stmt);
6591 ep->flags = EDGE_TRUE_VALUE;
6592 entry_bb = ep->dest;
6593 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6594 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
6595 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
6596 if (gimple_in_ssa_p (cfun))
6597 {
6598 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6599 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
6600 !gsi_end_p (gpi); gsi_next (&gpi))
6601 {
6602 gphi *phi = gpi.phi ();
6603 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6604 ep, UNKNOWN_LOCATION);
6605 }
6606 }
6607 gsi = gsi_last_bb (entry_bb);
6608 }
6609
6610 switch (gimple_omp_for_kind (fd->for_stmt))
6611 {
6612 case GF_OMP_FOR_KIND_FOR:
6613 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
6614 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6615 break;
6616 case GF_OMP_FOR_KIND_DISTRIBUTE:
6617 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
6618 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
6619 break;
6620 case GF_OMP_FOR_KIND_OACC_LOOP:
6621 nthreads = builtin_decl_explicit (BUILT_IN_GOACC_GET_NUM_THREADS);
6622 threadid = builtin_decl_explicit (BUILT_IN_GOACC_GET_THREAD_NUM);
6623 break;
6624 default:
6625 gcc_unreachable ();
6626 }
6627 nthreads = build_call_expr (nthreads, 0);
6628 nthreads = fold_convert (itype, nthreads);
6629 nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
6630 true, GSI_SAME_STMT);
6631 threadid = build_call_expr (threadid, 0);
6632 threadid = fold_convert (itype, threadid);
6633 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
6634 true, GSI_SAME_STMT);
6635
6636 n1 = fd->loop.n1;
6637 n2 = fd->loop.n2;
6638 step = fd->loop.step;
6639 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6640 {
6641 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6642 OMP_CLAUSE__LOOPTEMP_);
6643 gcc_assert (innerc);
6644 n1 = OMP_CLAUSE_DECL (innerc);
6645 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6646 OMP_CLAUSE__LOOPTEMP_);
6647 gcc_assert (innerc);
6648 n2 = OMP_CLAUSE_DECL (innerc);
6649 }
6650 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
6651 true, NULL_TREE, true, GSI_SAME_STMT);
6652 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
6653 true, NULL_TREE, true, GSI_SAME_STMT);
6654 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
6655 true, NULL_TREE, true, GSI_SAME_STMT);
6656
6657 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6658 t = fold_build2 (PLUS_EXPR, itype, step, t);
6659 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6660 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6661 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6662 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6663 fold_build1 (NEGATE_EXPR, itype, t),
6664 fold_build1 (NEGATE_EXPR, itype, step));
6665 else
6666 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6667 t = fold_convert (itype, t);
6668 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6669
6670 q = create_tmp_reg (itype, "q");
6671 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
6672 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6673 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
6674
6675 tt = create_tmp_reg (itype, "tt");
6676 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
6677 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
6678 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
6679
6680 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
6681 gcond *cond_stmt = gimple_build_cond_empty (t);
6682 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6683
6684 second_bb = split_block (entry_bb, cond_stmt)->dest;
6685 gsi = gsi_last_bb (second_bb);
6686 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6687
6688 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
6689 GSI_SAME_STMT);
6690 gassign *assign_stmt
6691 = gimple_build_assign (q, PLUS_EXPR, q, build_int_cst (itype, 1));
6692 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6693
6694 third_bb = split_block (second_bb, assign_stmt)->dest;
6695 gsi = gsi_last_bb (third_bb);
6696 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6697
6698 t = build2 (MULT_EXPR, itype, q, threadid);
6699 t = build2 (PLUS_EXPR, itype, t, tt);
6700 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6701
6702 t = fold_build2 (PLUS_EXPR, itype, s0, q);
6703 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
6704
6705 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
6706 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6707
6708 /* Remove the GIMPLE_OMP_FOR statement. */
6709 gsi_remove (&gsi, true);
6710
6711 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6712 gsi = gsi_start_bb (seq_start_bb);
6713
6714 tree startvar = fd->loop.v;
6715 tree endvar = NULL_TREE;
6716
6717 if (gimple_omp_for_combined_p (fd->for_stmt))
6718 {
6719 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6720 ? gimple_omp_parallel_clauses (inner_stmt)
6721 : gimple_omp_for_clauses (inner_stmt);
6722 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6723 gcc_assert (innerc);
6724 startvar = OMP_CLAUSE_DECL (innerc);
6725 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6726 OMP_CLAUSE__LOOPTEMP_);
6727 gcc_assert (innerc);
6728 endvar = OMP_CLAUSE_DECL (innerc);
6729 }
6730 t = fold_convert (itype, s0);
6731 t = fold_build2 (MULT_EXPR, itype, t, step);
6732 if (POINTER_TYPE_P (type))
6733 t = fold_build_pointer_plus (n1, t);
6734 else
6735 t = fold_build2 (PLUS_EXPR, type, t, n1);
6736 t = fold_convert (TREE_TYPE (startvar), t);
6737 t = force_gimple_operand_gsi (&gsi, t,
6738 DECL_P (startvar)
6739 && TREE_ADDRESSABLE (startvar),
6740 NULL_TREE, false, GSI_CONTINUE_LINKING);
6741 assign_stmt = gimple_build_assign (startvar, t);
6742 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6743
6744 t = fold_convert (itype, e0);
6745 t = fold_build2 (MULT_EXPR, itype, t, step);
6746 if (POINTER_TYPE_P (type))
6747 t = fold_build_pointer_plus (n1, t);
6748 else
6749 t = fold_build2 (PLUS_EXPR, type, t, n1);
6750 t = fold_convert (TREE_TYPE (startvar), t);
6751 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6752 false, GSI_CONTINUE_LINKING);
6753 if (endvar)
6754 {
6755 assign_stmt = gimple_build_assign (endvar, e);
6756 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6757 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6758 assign_stmt = gimple_build_assign (fd->loop.v, e);
6759 else
6760 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
6761 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
6762 }
6763 if (fd->collapse > 1)
6764 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
6765
6766 if (!broken_loop)
6767 {
6768 /* The code controlling the sequential loop replaces the
6769 GIMPLE_OMP_CONTINUE. */
6770 gsi = gsi_last_bb (cont_bb);
6771 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
6772 gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
6773 vmain = gimple_omp_continue_control_use (cont_stmt);
6774 vback = gimple_omp_continue_control_def (cont_stmt);
6775
6776 if (!gimple_omp_for_combined_p (fd->for_stmt))
6777 {
6778 if (POINTER_TYPE_P (type))
6779 t = fold_build_pointer_plus (vmain, step);
6780 else
6781 t = fold_build2 (PLUS_EXPR, type, vmain, step);
6782 t = force_gimple_operand_gsi (&gsi, t,
6783 DECL_P (vback)
6784 && TREE_ADDRESSABLE (vback),
6785 NULL_TREE, true, GSI_SAME_STMT);
6786 assign_stmt = gimple_build_assign (vback, t);
6787 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
6788
6789 t = build2 (fd->loop.cond_code, boolean_type_node,
6790 DECL_P (vback) && TREE_ADDRESSABLE (vback)
6791 ? t : vback, e);
6792 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
6793 }
6794
6795 /* Remove the GIMPLE_OMP_CONTINUE statement. */
6796 gsi_remove (&gsi, true);
6797
6798 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6799 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6800 }
6801
6802 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6803 gsi = gsi_last_bb (exit_bb);
6804 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
6805 {
6806 t = gimple_omp_return_lhs (gsi_stmt (gsi));
6807 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
6808 gcc_checking_assert (t == NULL_TREE);
6809 else
6810 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
6811 }
6812 gsi_remove (&gsi, true);
6813
6814 /* Connect all the blocks. */
6815 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
6816 ep->probability = REG_BR_PROB_BASE / 4 * 3;
6817 ep = find_edge (entry_bb, second_bb);
6818 ep->flags = EDGE_TRUE_VALUE;
6819 ep->probability = REG_BR_PROB_BASE / 4;
6820 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
6821 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
6822
6823 if (!broken_loop)
6824 {
6825 ep = find_edge (cont_bb, body_bb);
6826 if (ep == NULL)
6827 {
6828 ep = BRANCH_EDGE (cont_bb);
6829 gcc_assert (single_succ (ep->dest) == body_bb);
6830 }
6831 if (gimple_omp_for_combined_p (fd->for_stmt))
6832 {
6833 remove_edge (ep);
6834 ep = NULL;
6835 }
6836 else if (fd->collapse > 1)
6837 {
6838 remove_edge (ep);
6839 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6840 }
6841 else
6842 ep->flags = EDGE_TRUE_VALUE;
6843 find_edge (cont_bb, fin_bb)->flags
6844 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6845 }
6846
6847 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
6848 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
6849 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
6850
6851 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6852 recompute_dominator (CDI_DOMINATORS, body_bb));
6853 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6854 recompute_dominator (CDI_DOMINATORS, fin_bb));
6855
6856 struct loop *loop = body_bb->loop_father;
6857 if (loop != entry_bb->loop_father)
6858 {
6859 gcc_assert (loop->header == body_bb);
6860 gcc_assert (broken_loop
6861 || loop->latch == region->cont
6862 || single_pred (loop->latch) == region->cont);
6863 return;
6864 }
6865
6866 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
6867 {
6868 loop = alloc_loop ();
6869 loop->header = body_bb;
6870 if (collapse_bb == NULL)
6871 loop->latch = cont_bb;
6872 add_loop (loop, body_bb->loop_father);
6873 }
6874 }
6875
6876
6877 /* A subroutine of expand_omp_for. Generate code for a parallel
6878 loop with static schedule and a specified chunk size. Given
6879 parameters:
6880
6881 for (V = N1; V cond N2; V += STEP) BODY;
6882
6883 where COND is "<" or ">", we generate pseudocode
6884
6885 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6886 if (cond is <)
6887 adj = STEP - 1;
6888 else
6889 adj = STEP + 1;
6890 if ((__typeof (V)) -1 > 0 && cond is >)
6891 n = -(adj + N2 - N1) / -STEP;
6892 else
6893 n = (adj + N2 - N1) / STEP;
6894 trip = 0;
6895 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6896 here so that V is defined
6897 if the loop is not entered
6898 L0:
6899 s0 = (trip * nthreads + threadid) * CHUNK;
6900 e0 = min(s0 + CHUNK, n);
6901 if (s0 < n) goto L1; else goto L4;
6902 L1:
6903 V = s0 * STEP + N1;
6904 e = e0 * STEP + N1;
6905 L2:
6906 BODY;
6907 V += STEP;
6908 if (V cond e) goto L2; else goto L3;
6909 L3:
6910 trip += 1;
6911 goto L0;
6912 L4:
6913 */
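
/* For illustration, a hypothetical example (not taken from the
   sources): with n = 10, nthreads = 2 and CHUNK = 3, each thread keeps
   claiming chunks of 3 iterations in round-robin fashion:
     thread 0: trip 0 -> [0, 3),  trip 1 -> [6, 9)
     thread 1: trip 0 -> [3, 6),  trip 1 -> [9, 10)
   and both stop once s0 >= n. */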
6914
6915 static void
6916 expand_omp_for_static_chunk (struct omp_region *region,
6917 struct omp_for_data *fd, gimple inner_stmt)
6918 {
6919 tree n, s0, e0, e, t;
6920 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6921 tree type, itype, vmain, vback, vextra;
6922 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6923 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6924 gimple_stmt_iterator gsi;
6925 edge se;
6926 bool broken_loop = region->cont == NULL;
6927 tree *counts = NULL;
6928 tree n1, n2, step;
6929
6930 gcc_checking_assert ((gimple_omp_for_kind (fd->for_stmt)
6931 != GF_OMP_FOR_KIND_OACC_LOOP)
6932 || !inner_stmt);
6933
6934 itype = type = TREE_TYPE (fd->loop.v);
6935 if (POINTER_TYPE_P (type))
6936 itype = signed_type_for (type);
6937
6938 entry_bb = region->entry;
6939 se = split_block (entry_bb, last_stmt (entry_bb));
6940 entry_bb = se->src;
6941 iter_part_bb = se->dest;
6942 cont_bb = region->cont;
6943 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6944 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6945 gcc_assert (broken_loop
6946 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6947 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6948 body_bb = single_succ (seq_start_bb);
6949 if (!broken_loop)
6950 {
6951 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6952 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6953 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6954 }
6955 exit_bb = region->exit;
6956
6957 /* Trip and adjustment setup goes in ENTRY_BB. */
6958 gsi = gsi_last_bb (entry_bb);
6959 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6960
6961 if (fd->collapse > 1)
6962 {
6963 int first_zero_iter = -1;
6964 basic_block l2_dom_bb = NULL;
6965
6966 counts = XALLOCAVEC (tree, fd->collapse);
6967 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6968 fin_bb, first_zero_iter,
6969 l2_dom_bb);
6970 t = NULL_TREE;
6971 }
6972 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6973 t = integer_one_node;
6974 else
6975 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6976 fold_convert (type, fd->loop.n1),
6977 fold_convert (type, fd->loop.n2));
6978 if (fd->collapse == 1
6979 && TYPE_UNSIGNED (type)
6980 && (t == NULL_TREE || !integer_onep (t)))
6981 {
6982 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6983 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
6984 true, GSI_SAME_STMT);
6985 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6986 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
6987 true, GSI_SAME_STMT);
6988 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6989 NULL_TREE, NULL_TREE);
6990 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
6991 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
6992 expand_omp_regimplify_p, NULL, NULL)
6993 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
6994 expand_omp_regimplify_p, NULL, NULL))
6995 {
6996 gsi = gsi_for_stmt (cond_stmt);
6997 gimple_regimplify_operands (cond_stmt, &gsi);
6998 }
6999 se = split_block (entry_bb, cond_stmt);
7000 se->flags = EDGE_TRUE_VALUE;
7001 entry_bb = se->dest;
7002 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
7003 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
7004 se->probability = REG_BR_PROB_BASE / 2000 - 1;
7005 if (gimple_in_ssa_p (cfun))
7006 {
7007 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
7008 for (gphi_iterator gpi = gsi_start_phis (fin_bb);
7009 !gsi_end_p (gpi); gsi_next (&gpi))
7010 {
7011 gphi *phi = gpi.phi ();
7012 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
7013 se, UNKNOWN_LOCATION);
7014 }
7015 }
7016 gsi = gsi_last_bb (entry_bb);
7017 }
7018
7019 switch (gimple_omp_for_kind (fd->for_stmt))
7020 {
7021 case GF_OMP_FOR_KIND_FOR:
7022 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
7023 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
7024 break;
7025 case GF_OMP_FOR_KIND_DISTRIBUTE:
7026 nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS);
7027 threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM);
7028 break;
7029 case GF_OMP_FOR_KIND_OACC_LOOP:
7030 nthreads = builtin_decl_explicit (BUILT_IN_GOACC_GET_NUM_THREADS);
7031 threadid = builtin_decl_explicit (BUILT_IN_GOACC_GET_THREAD_NUM);
7032 break;
7033 default:
7034 gcc_unreachable ();
7035 }
7036 nthreads = build_call_expr (nthreads, 0);
7037 nthreads = fold_convert (itype, nthreads);
7038 nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE,
7039 true, GSI_SAME_STMT);
7040 threadid = build_call_expr (threadid, 0);
7041 threadid = fold_convert (itype, threadid);
7042 threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE,
7043 true, GSI_SAME_STMT);
7044
7045 n1 = fd->loop.n1;
7046 n2 = fd->loop.n2;
7047 step = fd->loop.step;
7048 if (gimple_omp_for_combined_into_p (fd->for_stmt))
7049 {
7050 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7051 OMP_CLAUSE__LOOPTEMP_);
7052 gcc_assert (innerc);
7053 n1 = OMP_CLAUSE_DECL (innerc);
7054 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7055 OMP_CLAUSE__LOOPTEMP_);
7056 gcc_assert (innerc);
7057 n2 = OMP_CLAUSE_DECL (innerc);
7058 }
7059 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
7060 true, NULL_TREE, true, GSI_SAME_STMT);
7061 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
7062 true, NULL_TREE, true, GSI_SAME_STMT);
7063 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
7064 true, NULL_TREE, true, GSI_SAME_STMT);
7065 fd->chunk_size
7066 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->chunk_size),
7067 true, NULL_TREE, true, GSI_SAME_STMT);
7068
7069 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
7070 t = fold_build2 (PLUS_EXPR, itype, step, t);
7071 t = fold_build2 (PLUS_EXPR, itype, t, n2);
7072 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
7073 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
7074 t = fold_build2 (TRUNC_DIV_EXPR, itype,
7075 fold_build1 (NEGATE_EXPR, itype, t),
7076 fold_build1 (NEGATE_EXPR, itype, step));
7077 else
7078 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
7079 t = fold_convert (itype, t);
7080 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7081 true, GSI_SAME_STMT);
7082
7083 trip_var = create_tmp_reg (itype, ".trip");
7084 if (gimple_in_ssa_p (cfun))
7085 {
7086 trip_init = make_ssa_name (trip_var);
7087 trip_main = make_ssa_name (trip_var);
7088 trip_back = make_ssa_name (trip_var);
7089 }
7090 else
7091 {
7092 trip_init = trip_var;
7093 trip_main = trip_var;
7094 trip_back = trip_var;
7095 }
7096
7097 gassign *assign_stmt
7098 = gimple_build_assign (trip_init, build_int_cst (itype, 0));
7099 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
7100
7101 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
7102 t = fold_build2 (MULT_EXPR, itype, t, step);
7103 if (POINTER_TYPE_P (type))
7104 t = fold_build_pointer_plus (n1, t);
7105 else
7106 t = fold_build2 (PLUS_EXPR, type, t, n1);
7107 vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7108 true, GSI_SAME_STMT);
7109
7110 /* Remove the GIMPLE_OMP_FOR. */
7111 gsi_remove (&gsi, true);
7112
7113 /* Iteration space partitioning goes in ITER_PART_BB. */
7114 gsi = gsi_last_bb (iter_part_bb);
7115
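/* In iteration units, this thread's next chunk is
   s0 = (trip * nthreads + threadid) * chunk_size and
   e0 = min (s0 + chunk_size, n), as computed below.  */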
7116 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
7117 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
7118 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
7119 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7120 false, GSI_CONTINUE_LINKING);
7121
7122 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
7123 t = fold_build2 (MIN_EXPR, itype, t, n);
7124 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7125 false, GSI_CONTINUE_LINKING);
7126
7127 t = build2 (LT_EXPR, boolean_type_node, s0, n);
7128 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
7129
7130 /* Setup code for sequential iteration goes in SEQ_START_BB. */
7131 gsi = gsi_start_bb (seq_start_bb);
7132
7133 tree startvar = fd->loop.v;
7134 tree endvar = NULL_TREE;
7135
7136 if (gimple_omp_for_combined_p (fd->for_stmt))
7137 {
7138 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
7139 ? gimple_omp_parallel_clauses (inner_stmt)
7140 : gimple_omp_for_clauses (inner_stmt);
7141 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
7142 gcc_assert (innerc);
7143 startvar = OMP_CLAUSE_DECL (innerc);
7144 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7145 OMP_CLAUSE__LOOPTEMP_);
7146 gcc_assert (innerc);
7147 endvar = OMP_CLAUSE_DECL (innerc);
7148 }
7149
7150 t = fold_convert (itype, s0);
7151 t = fold_build2 (MULT_EXPR, itype, t, step);
7152 if (POINTER_TYPE_P (type))
7153 t = fold_build_pointer_plus (n1, t);
7154 else
7155 t = fold_build2 (PLUS_EXPR, type, t, n1);
7156 t = fold_convert (TREE_TYPE (startvar), t);
7157 t = force_gimple_operand_gsi (&gsi, t,
7158 DECL_P (startvar)
7159 && TREE_ADDRESSABLE (startvar),
7160 NULL_TREE, false, GSI_CONTINUE_LINKING);
7161 assign_stmt = gimple_build_assign (startvar, t);
7162 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
7163
7164 t = fold_convert (itype, e0);
7165 t = fold_build2 (MULT_EXPR, itype, t, step);
7166 if (POINTER_TYPE_P (type))
7167 t = fold_build_pointer_plus (n1, t);
7168 else
7169 t = fold_build2 (PLUS_EXPR, type, t, n1);
7170 t = fold_convert (TREE_TYPE (startvar), t);
7171 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7172 false, GSI_CONTINUE_LINKING);
7173 if (endvar)
7174 {
7175 assign_stmt = gimple_build_assign (endvar, e);
7176 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
7177 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
7178 assign_stmt = gimple_build_assign (fd->loop.v, e);
7179 else
7180 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
7181 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
7182 }
7183 if (fd->collapse > 1)
7184 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
7185
7186 if (!broken_loop)
7187 {
7188 /* The code controlling the sequential loop goes in CONT_BB,
7189 replacing the GIMPLE_OMP_CONTINUE. */
7190 gsi = gsi_last_bb (cont_bb);
7191 gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
7192 vmain = gimple_omp_continue_control_use (cont_stmt);
7193 vback = gimple_omp_continue_control_def (cont_stmt);
7194
7195 if (!gimple_omp_for_combined_p (fd->for_stmt))
7196 {
7197 if (POINTER_TYPE_P (type))
7198 t = fold_build_pointer_plus (vmain, step);
7199 else
7200 t = fold_build2 (PLUS_EXPR, type, vmain, step);
7201 if (DECL_P (vback) && TREE_ADDRESSABLE (vback))
7202 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7203 true, GSI_SAME_STMT);
7204 assign_stmt = gimple_build_assign (vback, t);
7205 gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
7206
7207 t = build2 (fd->loop.cond_code, boolean_type_node,
7208 DECL_P (vback) && TREE_ADDRESSABLE (vback)
7209 ? t : vback, e);
7210 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
7211 }
7212
7213 /* Remove GIMPLE_OMP_CONTINUE. */
7214 gsi_remove (&gsi, true);
7215
7216 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
7217 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
7218
7219 /* Trip update code goes into TRIP_UPDATE_BB. */
7220 gsi = gsi_start_bb (trip_update_bb);
7221
7222 t = build_int_cst (itype, 1);
7223 t = build2 (PLUS_EXPR, itype, trip_main, t);
7224 assign_stmt = gimple_build_assign (trip_back, t);
7225 gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
7226 }
7227
7228 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
7229 gsi = gsi_last_bb (exit_bb);
7230 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
7231 {
7232 t = gimple_omp_return_lhs (gsi_stmt (gsi));
7233 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
7234 gcc_checking_assert (t == NULL_TREE);
7235 else
7236 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
7237 }
7238 gsi_remove (&gsi, true);
7239
7240 /* Connect the new blocks. */
7241 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
7242 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
7243
7244 if (!broken_loop)
7245 {
7246 se = find_edge (cont_bb, body_bb);
7247 if (gimple_omp_for_combined_p (fd->for_stmt))
7248 {
7249 remove_edge (se);
7250 se = NULL;
7251 }
7252 else if (fd->collapse > 1)
7253 {
7254 remove_edge (se);
7255 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
7256 }
7257 else
7258 se->flags = EDGE_TRUE_VALUE;
7259 find_edge (cont_bb, trip_update_bb)->flags
7260 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
7261
7262 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
7263 }
7264
7265 if (gimple_in_ssa_p (cfun))
7266 {
7267 gphi_iterator psi;
7268 gphi *phi;
7269 edge re, ene;
7270 edge_var_map *vm;
7271 size_t i;
7272
7273 gcc_assert (fd->collapse == 1 && !broken_loop);
7274
7275 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
7276 remove arguments of the phi nodes in fin_bb. We need to create
7277 appropriate phi nodes in iter_part_bb instead. */
7278 se = single_pred_edge (fin_bb);
7279 re = single_succ_edge (trip_update_bb);
7280 vec<edge_var_map> *head = redirect_edge_var_map_vector (re);
7281 ene = single_succ_edge (entry_bb);
7282
7283 psi = gsi_start_phis (fin_bb);
7284 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
7285 gsi_next (&psi), ++i)
7286 {
7287 gphi *nphi;
7288 source_location locus;
7289
7290 phi = psi.phi ();
7291 t = gimple_phi_result (phi);
7292 gcc_assert (t == redirect_edge_var_map_result (vm));
7293 nphi = create_phi_node (t, iter_part_bb);
7294
7295 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
7296 locus = gimple_phi_arg_location_from_edge (phi, se);
7297
7298 /* A special case -- fd->loop.v is not yet computed in
7299 iter_part_bb; we need to use vextra instead. */
7300 if (t == fd->loop.v)
7301 t = vextra;
7302 add_phi_arg (nphi, t, ene, locus);
7303 locus = redirect_edge_var_map_location (vm);
7304 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
7305 }
7306 gcc_assert (gsi_end_p (psi) && i == head->length ());
7307 redirect_edge_var_map_clear (re);
7308 while (1)
7309 {
7310 psi = gsi_start_phis (fin_bb);
7311 if (gsi_end_p (psi))
7312 break;
7313 remove_phi_node (&psi, false);
7314 }
7315
7316 /* Make phi node for trip. */
7317 phi = create_phi_node (trip_main, iter_part_bb);
7318 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
7319 UNKNOWN_LOCATION);
7320 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
7321 UNKNOWN_LOCATION);
7322 }
7323
7324 if (!broken_loop)
7325 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
7326 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
7327 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
7328 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
7329 recompute_dominator (CDI_DOMINATORS, fin_bb));
7330 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
7331 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
7332 set_immediate_dominator (CDI_DOMINATORS, body_bb,
7333 recompute_dominator (CDI_DOMINATORS, body_bb));
7334
7335 if (!broken_loop)
7336 {
7337 struct loop *trip_loop = alloc_loop ();
7338 trip_loop->header = iter_part_bb;
7339 trip_loop->latch = trip_update_bb;
7340 add_loop (trip_loop, iter_part_bb->loop_father);
7341
7342 if (!gimple_omp_for_combined_p (fd->for_stmt))
7343 {
7344 struct loop *loop = alloc_loop ();
7345 loop->header = body_bb;
7346 if (collapse_bb == NULL)
7347 loop->latch = cont_bb;
7348 add_loop (loop, trip_loop);
7349 }
7350 }
7351 }
7352
7353 /* A subroutine of expand_omp_for. Generate code for a _Cilk_for loop.
7354 Given parameters:
7355 for (V = N1; V cond N2; V += STEP) BODY;
7356
7357 where COND is "<" or ">" or "!=", we generate pseudocode
7358
7359 for (ind_var = low; ind_var < high; ind_var++)
7360 {
7361 V = n1 + (ind_var * STEP)
7362
7363 <BODY>
7364 }
7365
7366 In the above pseudocode, low and high are function parameters of the
7367 child function. In the function below, we insert a temporary
7368 variable that makes a call to two OMP functions that will not be
7369 found in the body of _Cilk_for (since OMP_FOR cannot be mixed
7370 with _Cilk_for). These functions are replaced with low and high
7371 by the function that handles taskreg. */
7372
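/* Purely illustrative sketch (the names and the exact child-function
   signature below are assumptions, not something this pass creates):
   source such as

     _Cilk_for (int i = 0; i < n; i++)
       a[i] = b[i] + c[i];

   is outlined into a child function whose __low and __high parameters
   bound the chunk of iterations the Cilk runtime hands to it, roughly

     static void child_fn (void *data, long __low, long __high);

   The code below digs __low and __high out of that child function's
   DECL_ARGUMENTS.  */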
7373
7374 static void
7375 expand_cilk_for (struct omp_region *region, struct omp_for_data *fd)
7376 {
7377 bool broken_loop = region->cont == NULL;
7378 basic_block entry_bb = region->entry;
7379 basic_block cont_bb = region->cont;
7380
7381 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
7382 gcc_assert (broken_loop
7383 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
7384 basic_block l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
7385 basic_block l1_bb, l2_bb;
7386
7387 if (!broken_loop)
7388 {
7389 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
7390 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
7391 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
7392 l2_bb = BRANCH_EDGE (entry_bb)->dest;
7393 }
7394 else
7395 {
7396 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
7397 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
7398 l2_bb = single_succ (l1_bb);
7399 }
7400 basic_block exit_bb = region->exit;
7401 basic_block l2_dom_bb = NULL;
7402
7403 gimple_stmt_iterator gsi = gsi_last_bb (entry_bb);
7404
7405 /* The statements below, up to the "tree high_val = ..." line, are pseudo
7406 statements used to pass information to expand_omp_taskreg.
7407 low_val and high_val will be replaced by the __low and __high
7408 parameters of the child function.
7409
7410 The call_exprs part is a place-holder; it is mainly used
7411 to mark unambiguously, for the top-level code, the spot
7412 where low and high should be put (reasoning given in the header
7413 comment). */
7414
7415 tree child_fndecl
7416 = gimple_omp_parallel_child_fn (
7417 as_a <gomp_parallel *> (last_stmt (region->outer->entry)));
7418 tree t, low_val = NULL_TREE, high_val = NULL_TREE;
7419 for (t = DECL_ARGUMENTS (child_fndecl); t; t = TREE_CHAIN (t))
7420 {
7421 if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__high"))
7422 high_val = t;
7423 else if (!strcmp (IDENTIFIER_POINTER (DECL_NAME (t)), "__low"))
7424 low_val = t;
7425 }
7426 gcc_assert (low_val && high_val);
7427
7428 tree type = TREE_TYPE (low_val);
7429 tree ind_var = create_tmp_reg (type, "__cilk_ind_var");
7430 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
7431
7432 /* Not needed in SSA form right now. */
7433 gcc_assert (!gimple_in_ssa_p (cfun));
7434 if (l2_dom_bb == NULL)
7435 l2_dom_bb = l1_bb;
7436
7437 tree n1 = low_val;
7438 tree n2 = high_val;
7439
7440 gimple stmt = gimple_build_assign (ind_var, n1);
7441
7442 /* Replace the GIMPLE_OMP_FOR statement. */
7443 gsi_replace (&gsi, stmt, true);
7444
7445 if (!broken_loop)
7446 {
7447 /* Code to control the increment goes in the CONT_BB. */
7448 gsi = gsi_last_bb (cont_bb);
7449 stmt = gsi_stmt (gsi);
7450 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
7451 stmt = gimple_build_assign (ind_var, PLUS_EXPR, ind_var,
7452 build_one_cst (type));
7453
7454 /* Replace GIMPLE_OMP_CONTINUE. */
7455 gsi_replace (&gsi, stmt, true);
7456 }
7457
7458 /* Emit the condition in L1_BB. */
7459 gsi = gsi_after_labels (l1_bb);
7460 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
7461 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
7462 fd->loop.step);
7463 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
7464 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
7465 fd->loop.n1, fold_convert (sizetype, t));
7466 else
7467 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
7468 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
7469 t = fold_convert (TREE_TYPE (fd->loop.v), t);
7470 expand_omp_build_assign (&gsi, fd->loop.v, t);
7471
7472 /* The condition is always '<' since the runtime will fill in the low
7473 and high values. */
7474 stmt = gimple_build_cond (LT_EXPR, ind_var, n2, NULL_TREE, NULL_TREE);
7475 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
7476
7477 /* Remove GIMPLE_OMP_RETURN. */
7478 gsi = gsi_last_bb (exit_bb);
7479 gsi_remove (&gsi, true);
7480
7481 /* Connect the new blocks. */
7482 remove_edge (FALLTHRU_EDGE (entry_bb));
7483
7484 edge e, ne;
7485 if (!broken_loop)
7486 {
7487 remove_edge (BRANCH_EDGE (entry_bb));
7488 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
7489
7490 e = BRANCH_EDGE (l1_bb);
7491 ne = FALLTHRU_EDGE (l1_bb);
7492 e->flags = EDGE_TRUE_VALUE;
7493 }
7494 else
7495 {
7496 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7497
7498 ne = single_succ_edge (l1_bb);
7499 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
7500
7501 }
7502 ne->flags = EDGE_FALSE_VALUE;
7503 e->probability = REG_BR_PROB_BASE * 7 / 8;
7504 ne->probability = REG_BR_PROB_BASE / 8;
7505
7506 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
7507 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
7508 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
7509
7510 if (!broken_loop)
7511 {
7512 struct loop *loop = alloc_loop ();
7513 loop->header = l1_bb;
7514 loop->latch = cont_bb;
7515 add_loop (loop, l1_bb->loop_father);
7516 loop->safelen = INT_MAX;
7517 }
7518
7519 /* Pick the correct library function based on the precision of the
7520 induction variable type. */
7521 tree lib_fun = NULL_TREE;
7522 if (TYPE_PRECISION (type) == 32)
7523 lib_fun = cilk_for_32_fndecl;
7524 else if (TYPE_PRECISION (type) == 64)
7525 lib_fun = cilk_for_64_fndecl;
7526 else
7527 gcc_unreachable ();
7528
7529 gcc_assert (fd->sched_kind == OMP_CLAUSE_SCHEDULE_CILKFOR);
7530
7531 /* WS_ARGS contains the library function flavor to call:
7532 __libcilkrts_cilk_for_64 or __libcilkrts_cilk_for_32, and the
7533 user-defined grain value. If the user does not define one, then zero
7534 is passed in by the parser. */
7535 vec_alloc (region->ws_args, 2);
7536 region->ws_args->quick_push (lib_fun);
7537 region->ws_args->quick_push (fd->chunk_size);
7538 }
7539
7540 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
7541 loop. Given parameters:
7542
7543 for (V = N1; V cond N2; V += STEP) BODY;
7544
7545 where COND is "<" or ">", we generate pseudocode
7546
7547 V = N1;
7548 goto L1;
7549 L0:
7550 BODY;
7551 V += STEP;
7552 L1:
7553 if (V cond N2) goto L0; else goto L2;
7554 L2:
7555
7556 For collapsed loops, given parameters:
7557 collapse(3)
7558 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
7559 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
7560 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
7561 BODY;
7562
7563 we generate pseudocode
7564
7565 if (cond3 is <)
7566 adj = STEP3 - 1;
7567 else
7568 adj = STEP3 + 1;
7569 count3 = (adj + N32 - N31) / STEP3;
7570 if (cond2 is <)
7571 adj = STEP2 - 1;
7572 else
7573 adj = STEP2 + 1;
7574 count2 = (adj + N22 - N21) / STEP2;
7575 if (cond1 is <)
7576 adj = STEP1 - 1;
7577 else
7578 adj = STEP1 + 1;
7579 count1 = (adj + N12 - N11) / STEP1;
7580 count = count1 * count2 * count3;
7581 V = 0;
7582 V1 = N11;
7583 V2 = N21;
7584 V3 = N31;
7585 goto L1;
7586 L0:
7587 BODY;
7588 V += 1;
7589 V3 += STEP3;
7590 V2 += (V3 cond3 N32) ? 0 : STEP2;
7591 V3 = (V3 cond3 N32) ? V3 : N31;
7592 V1 += (V2 cond2 N22) ? 0 : STEP1;
7593 V2 = (V2 cond2 N22) ? V2 : N21;
7594 L1:
7595 if (V < count) goto L0; else goto L2;
7596 L2:
7597
7598 */
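
/* As an illustrative example, a collapsed simd loop such as

     #pragma omp simd collapse(2)
     for (i = 0; i < n; i++)
       for (j = 0; j < m; j++)
         a[i][j] += 1;

   takes the second form above: a single linearized counter V runs from 0
   to count1 * count2, and V1/V2 are stepped and wrapped alongside it.  */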
7599
7600 static void
7601 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
7602 {
7603 tree type, t;
7604 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
7605 gimple_stmt_iterator gsi;
7606 gimple stmt;
7607 gcond *cond_stmt;
7608 bool broken_loop = region->cont == NULL;
7609 edge e, ne;
7610 tree *counts = NULL;
7611 int i;
7612 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7613 OMP_CLAUSE_SAFELEN);
7614 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7615 OMP_CLAUSE__SIMDUID_);
7616 tree n1, n2;
7617
7618 type = TREE_TYPE (fd->loop.v);
7619 entry_bb = region->entry;
7620 cont_bb = region->cont;
7621 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
7622 gcc_assert (broken_loop
7623 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
7624 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
7625 if (!broken_loop)
7626 {
7627 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
7628 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
7629 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
7630 l2_bb = BRANCH_EDGE (entry_bb)->dest;
7631 }
7632 else
7633 {
7634 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
7635 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
7636 l2_bb = single_succ (l1_bb);
7637 }
7638 exit_bb = region->exit;
7639 l2_dom_bb = NULL;
7640
7641 gsi = gsi_last_bb (entry_bb);
7642
7643 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
7644 /* Not needed in SSA form right now. */
7645 gcc_assert (!gimple_in_ssa_p (cfun));
7646 if (fd->collapse > 1)
7647 {
7648 int first_zero_iter = -1;
7649 basic_block zero_iter_bb = l2_bb;
7650
7651 counts = XALLOCAVEC (tree, fd->collapse);
7652 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
7653 zero_iter_bb, first_zero_iter,
7654 l2_dom_bb);
7655 }
7656 if (l2_dom_bb == NULL)
7657 l2_dom_bb = l1_bb;
7658
7659 n1 = fd->loop.n1;
7660 n2 = fd->loop.n2;
7661 if (gimple_omp_for_combined_into_p (fd->for_stmt))
7662 {
7663 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
7664 OMP_CLAUSE__LOOPTEMP_);
7665 gcc_assert (innerc);
7666 n1 = OMP_CLAUSE_DECL (innerc);
7667 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
7668 OMP_CLAUSE__LOOPTEMP_);
7669 gcc_assert (innerc);
7670 n2 = OMP_CLAUSE_DECL (innerc);
7671 expand_omp_build_assign (&gsi, fd->loop.v,
7672 fold_convert (type, n1));
7673 if (fd->collapse > 1)
7674 {
7675 gsi_prev (&gsi);
7676 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
7677 gsi_next (&gsi);
7678 }
7679 }
7680 else
7681 {
7682 expand_omp_build_assign (&gsi, fd->loop.v,
7683 fold_convert (type, fd->loop.n1));
7684 if (fd->collapse > 1)
7685 for (i = 0; i < fd->collapse; i++)
7686 {
7687 tree itype = TREE_TYPE (fd->loops[i].v);
7688 if (POINTER_TYPE_P (itype))
7689 itype = signed_type_for (itype);
7690 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
7691 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7692 }
7693 }
7694
7695 /* Remove the GIMPLE_OMP_FOR statement. */
7696 gsi_remove (&gsi, true);
7697
7698 if (!broken_loop)
7699 {
7700 /* Code to control the increment goes in the CONT_BB. */
7701 gsi = gsi_last_bb (cont_bb);
7702 stmt = gsi_stmt (gsi);
7703 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
7704
7705 if (POINTER_TYPE_P (type))
7706 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
7707 else
7708 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
7709 expand_omp_build_assign (&gsi, fd->loop.v, t);
7710
7711 if (fd->collapse > 1)
7712 {
7713 i = fd->collapse - 1;
7714 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
7715 {
7716 t = fold_convert (sizetype, fd->loops[i].step);
7717 t = fold_build_pointer_plus (fd->loops[i].v, t);
7718 }
7719 else
7720 {
7721 t = fold_convert (TREE_TYPE (fd->loops[i].v),
7722 fd->loops[i].step);
7723 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
7724 fd->loops[i].v, t);
7725 }
7726 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7727
7728 for (i = fd->collapse - 1; i > 0; i--)
7729 {
7730 tree itype = TREE_TYPE (fd->loops[i].v);
7731 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
7732 if (POINTER_TYPE_P (itype2))
7733 itype2 = signed_type_for (itype2);
7734 t = build3 (COND_EXPR, itype2,
7735 build2 (fd->loops[i].cond_code, boolean_type_node,
7736 fd->loops[i].v,
7737 fold_convert (itype, fd->loops[i].n2)),
7738 build_int_cst (itype2, 0),
7739 fold_convert (itype2, fd->loops[i - 1].step));
7740 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
7741 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
7742 else
7743 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
7744 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
7745
7746 t = build3 (COND_EXPR, itype,
7747 build2 (fd->loops[i].cond_code, boolean_type_node,
7748 fd->loops[i].v,
7749 fold_convert (itype, fd->loops[i].n2)),
7750 fd->loops[i].v,
7751 fold_convert (itype, fd->loops[i].n1));
7752 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
7753 }
7754 }
7755
7756 /* Remove GIMPLE_OMP_CONTINUE. */
7757 gsi_remove (&gsi, true);
7758 }
7759
7760 /* Emit the condition in L1_BB. */
7761 gsi = gsi_start_bb (l1_bb);
7762
7763 t = fold_convert (type, n2);
7764 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
7765 false, GSI_CONTINUE_LINKING);
7766 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
7767 cond_stmt = gimple_build_cond_empty (t);
7768 gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
7769 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p,
7770 NULL, NULL)
7771 || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p,
7772 NULL, NULL))
7773 {
7774 gsi = gsi_for_stmt (cond_stmt);
7775 gimple_regimplify_operands (cond_stmt, &gsi);
7776 }
7777
7778 /* Remove GIMPLE_OMP_RETURN. */
7779 gsi = gsi_last_bb (exit_bb);
7780 gsi_remove (&gsi, true);
7781
7782 /* Connect the new blocks. */
7783 remove_edge (FALLTHRU_EDGE (entry_bb));
7784
7785 if (!broken_loop)
7786 {
7787 remove_edge (BRANCH_EDGE (entry_bb));
7788 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
7789
7790 e = BRANCH_EDGE (l1_bb);
7791 ne = FALLTHRU_EDGE (l1_bb);
7792 e->flags = EDGE_TRUE_VALUE;
7793 }
7794 else
7795 {
7796 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7797
7798 ne = single_succ_edge (l1_bb);
7799 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
7800
7801 }
7802 ne->flags = EDGE_FALSE_VALUE;
7803 e->probability = REG_BR_PROB_BASE * 7 / 8;
7804 ne->probability = REG_BR_PROB_BASE / 8;
7805
7806 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
7807 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
7808 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
7809
7810 if (!broken_loop)
7811 {
7812 struct loop *loop = alloc_loop ();
7813 loop->header = l1_bb;
7814 loop->latch = cont_bb;
7815 add_loop (loop, l1_bb->loop_father);
7816 if (safelen == NULL_TREE)
7817 loop->safelen = INT_MAX;
7818 else
7819 {
7820 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
7821 if (TREE_CODE (safelen) != INTEGER_CST)
7822 loop->safelen = 0;
7823 else if (!tree_fits_uhwi_p (safelen)
7824 || tree_to_uhwi (safelen) > INT_MAX)
7825 loop->safelen = INT_MAX;
7826 else
7827 loop->safelen = tree_to_uhwi (safelen);
7828 if (loop->safelen == 1)
7829 loop->safelen = 0;
7830 }
7831 if (simduid)
7832 {
7833 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
7834 cfun->has_simduid_loops = true;
7835 }
7836 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
7837 the loop. */
7838 if ((flag_tree_loop_vectorize
7839 || (!global_options_set.x_flag_tree_loop_vectorize
7840 && !global_options_set.x_flag_tree_vectorize))
7841 && flag_tree_loop_optimize
7842 && loop->safelen > 1)
7843 {
7844 loop->force_vectorize = true;
7845 cfun->has_force_vectorize_loops = true;
7846 }
7847 }
7848 else if (simduid)
7849 cfun->has_simduid_loops = true;
7850 }
7851
7852
7853 /* Expand the OMP loop defined by REGION. */
7854
7855 static void
7856 expand_omp_for (struct omp_region *region, gimple inner_stmt)
7857 {
7858 struct omp_for_data fd;
7859 struct omp_for_data_loop *loops;
7860
7861 loops
7862 = (struct omp_for_data_loop *)
7863 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
7864 * sizeof (struct omp_for_data_loop));
7865 extract_omp_for_data (as_a <gomp_for *> (last_stmt (region->entry)),
7866 &fd, loops);
7867 region->sched_kind = fd.sched_kind;
7868
7869 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
7870 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7871 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
7872 if (region->cont)
7873 {
7874 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
7875 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7876 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
7877 }
7878 else
7879 /* If there isn't a continue then this is a degerate case where
7880 /* If there isn't a continue then this is a degenerate case where
7881 original loops from being detected. Fix that up. */
7882 loops_state_set (LOOPS_NEED_FIXUP);
7883
7884 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD)
7885 expand_omp_simd (region, &fd);
7886 else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
7887 expand_cilk_for (region, &fd);
7888 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
7889 && !fd.have_ordered)
7890 {
7891 if (fd.chunk_size == NULL)
7892 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
7893 else
7894 expand_omp_for_static_chunk (region, &fd, inner_stmt);
7895 }
7896 else
7897 {
7898 int fn_index, start_ix, next_ix;
7899
7900 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
7901 == GF_OMP_FOR_KIND_FOR);
7902 if (fd.chunk_size == NULL
7903 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
7904 fd.chunk_size = integer_zero_node;
7905 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
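/* The runtime entry points are found by offsetting from the static
   variants: fn_index 0/1/2/3 selects static/dynamic/guided/runtime,
   and have_ordered moves to the GOMP_loop_ordered_* family.  */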
7906 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
7907 ? 3 : fd.sched_kind;
7908 fn_index += fd.have_ordered * 4;
7909 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
7910 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
7911 if (fd.iter_type == long_long_unsigned_type_node)
7912 {
7913 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
7914 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
7915 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
7916 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
7917 }
7918 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
7919 (enum built_in_function) next_ix, inner_stmt);
7920 }
7921
7922 if (gimple_in_ssa_p (cfun))
7923 update_ssa (TODO_update_ssa_only_virtuals);
7924 }
7925
7926
7927 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
7928
7929 v = GOMP_sections_start (n);
7930 L0:
7931 switch (v)
7932 {
7933 case 0:
7934 goto L2;
7935 case 1:
7936 section 1;
7937 goto L1;
7938 case 2:
7939 ...
7940 case n:
7941 ...
7942 default:
7943 abort ();
7944 }
7945 L1:
7946 v = GOMP_sections_next ();
7947 goto L0;
7948 L2:
7949 reduction;
7950
7951 If this is a combined parallel sections, replace the call to
7952 GOMP_sections_start with a call to GOMP_sections_next. */
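
/* As a concrete (purely illustrative) example,

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   yields a switch of the shape above with foo () behind case 1 and
   bar () behind case 2; case 0 is taken when GOMP_sections_next
   reports that no work is left.  */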
7953
7954 static void
7955 expand_omp_sections (struct omp_region *region)
7956 {
7957 tree t, u, vin = NULL, vmain, vnext, l2;
7958 unsigned len;
7959 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
7960 gimple_stmt_iterator si, switch_si;
7961 gomp_sections *sections_stmt;
7962 gimple stmt;
7963 gomp_continue *cont;
7964 edge_iterator ei;
7965 edge e;
7966 struct omp_region *inner;
7967 unsigned i, casei;
7968 bool exit_reachable = region->cont != NULL;
7969
7970 gcc_assert (region->exit != NULL);
7971 entry_bb = region->entry;
7972 l0_bb = single_succ (entry_bb);
7973 l1_bb = region->cont;
7974 l2_bb = region->exit;
7975 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
7976 l2 = gimple_block_label (l2_bb);
7977 else
7978 {
7979 /* This can happen if there are reductions. */
7980 len = EDGE_COUNT (l0_bb->succs);
7981 gcc_assert (len > 0);
7982 e = EDGE_SUCC (l0_bb, len - 1);
7983 si = gsi_last_bb (e->dest);
7984 l2 = NULL_TREE;
7985 if (gsi_end_p (si)
7986 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7987 l2 = gimple_block_label (e->dest);
7988 else
7989 FOR_EACH_EDGE (e, ei, l0_bb->succs)
7990 {
7991 si = gsi_last_bb (e->dest);
7992 if (gsi_end_p (si)
7993 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
7994 {
7995 l2 = gimple_block_label (e->dest);
7996 break;
7997 }
7998 }
7999 }
8000 if (exit_reachable)
8001 default_bb = create_empty_bb (l1_bb->prev_bb);
8002 else
8003 default_bb = create_empty_bb (l0_bb);
8004
8005 /* We will build a switch() with enough cases for all the
8006 GIMPLE_OMP_SECTION regions, a '0' case to handle running out of work,
8007 and a default case to abort if something goes wrong. */
8008 len = EDGE_COUNT (l0_bb->succs);
8009
8010 /* Use vec::quick_push on label_vec throughout, since we know the size
8011 in advance. */
8012 auto_vec<tree> label_vec (len);
8013
8014 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
8015 GIMPLE_OMP_SECTIONS statement. */
8016 si = gsi_last_bb (entry_bb);
8017 sections_stmt = as_a <gomp_sections *> (gsi_stmt (si));
8018 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
8019 vin = gimple_omp_sections_control (sections_stmt);
8020 if (!is_combined_parallel (region))
8021 {
8022 /* If we are not inside a combined parallel+sections region,
8023 call GOMP_sections_start. */
8024 t = build_int_cst (unsigned_type_node, len - 1);
8025 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
8026 stmt = gimple_build_call (u, 1, t);
8027 }
8028 else
8029 {
8030 /* Otherwise, call GOMP_sections_next. */
8031 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
8032 stmt = gimple_build_call (u, 0);
8033 }
8034 gimple_call_set_lhs (stmt, vin);
8035 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
8036 gsi_remove (&si, true);
8037
8038 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
8039 L0_BB. */
8040 switch_si = gsi_last_bb (l0_bb);
8041 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
8042 if (exit_reachable)
8043 {
8044 cont = as_a <gomp_continue *> (last_stmt (l1_bb));
8045 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
8046 vmain = gimple_omp_continue_control_use (cont);
8047 vnext = gimple_omp_continue_control_def (cont);
8048 }
8049 else
8050 {
8051 vmain = vin;
8052 vnext = NULL_TREE;
8053 }
8054
8055 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
8056 label_vec.quick_push (t);
8057 i = 1;
8058
8059 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
8060 for (inner = region->inner, casei = 1;
8061 inner;
8062 inner = inner->next, i++, casei++)
8063 {
8064 basic_block s_entry_bb, s_exit_bb;
8065
8066 /* Skip optional reduction region. */
8067 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
8068 {
8069 --i;
8070 --casei;
8071 continue;
8072 }
8073
8074 s_entry_bb = inner->entry;
8075 s_exit_bb = inner->exit;
8076
8077 t = gimple_block_label (s_entry_bb);
8078 u = build_int_cst (unsigned_type_node, casei);
8079 u = build_case_label (u, NULL, t);
8080 label_vec.quick_push (u);
8081
8082 si = gsi_last_bb (s_entry_bb);
8083 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
8084 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
8085 gsi_remove (&si, true);
8086 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
8087
8088 if (s_exit_bb == NULL)
8089 continue;
8090
8091 si = gsi_last_bb (s_exit_bb);
8092 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
8093 gsi_remove (&si, true);
8094
8095 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
8096 }
8097
8098 /* Error handling code goes in DEFAULT_BB. */
8099 t = gimple_block_label (default_bb);
8100 u = build_case_label (NULL, NULL, t);
8101 make_edge (l0_bb, default_bb, 0);
8102 add_bb_to_loop (default_bb, current_loops->tree_root);
8103
8104 stmt = gimple_build_switch (vmain, u, label_vec);
8105 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
8106 gsi_remove (&switch_si, true);
8107
8108 si = gsi_start_bb (default_bb);
8109 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
8110 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
8111
8112 if (exit_reachable)
8113 {
8114 tree bfn_decl;
8115
8116 /* Code to get the next section goes in L1_BB. */
8117 si = gsi_last_bb (l1_bb);
8118 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
8119
8120 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
8121 stmt = gimple_build_call (bfn_decl, 0);
8122 gimple_call_set_lhs (stmt, vnext);
8123 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
8124 gsi_remove (&si, true);
8125
8126 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
8127 }
8128
8129 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
8130 si = gsi_last_bb (l2_bb);
8131 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
8132 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
8133 else if (gimple_omp_return_lhs (gsi_stmt (si)))
8134 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
8135 else
8136 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
8137 stmt = gimple_build_call (t, 0);
8138 if (gimple_omp_return_lhs (gsi_stmt (si)))
8139 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
8140 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
8141 gsi_remove (&si, true);
8142
8143 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
8144 }
8145
8146
8147 /* Expand code for an OpenMP single directive. We've already expanded
8148 much of the code; here we simply place the GOMP_barrier call. */
8149
8150 static void
8151 expand_omp_single (struct omp_region *region)
8152 {
8153 basic_block entry_bb, exit_bb;
8154 gimple_stmt_iterator si;
8155
8156 entry_bb = region->entry;
8157 exit_bb = region->exit;
8158
8159 si = gsi_last_bb (entry_bb);
8160 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
8161 gsi_remove (&si, true);
8162 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
8163
8164 si = gsi_last_bb (exit_bb);
8165 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
8166 {
8167 tree t = gimple_omp_return_lhs (gsi_stmt (si));
8168 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
8169 }
8170 gsi_remove (&si, true);
8171 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
8172 }
8173
8174
8175 /* Generic expansion for OpenMP synchronization directives: master,
8176 ordered and critical. All we need to do here is remove the entry
8177 and exit markers for REGION. */
8178
8179 static void
8180 expand_omp_synch (struct omp_region *region)
8181 {
8182 basic_block entry_bb, exit_bb;
8183 gimple_stmt_iterator si;
8184
8185 entry_bb = region->entry;
8186 exit_bb = region->exit;
8187
8188 si = gsi_last_bb (entry_bb);
8189 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
8190 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
8191 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
8192 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
8193 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
8194 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
8195 gsi_remove (&si, true);
8196 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
8197
8198 if (exit_bb)
8199 {
8200 si = gsi_last_bb (exit_bb);
8201 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
8202 gsi_remove (&si, true);
8203 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
8204 }
8205 }
8206
8207 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
8208 operation as a normal volatile load. */
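
/* (Illustratively, this is the path taken for "#pragma omp atomic read",
   i.e. "v = x;", when the sized __atomic_load_N builtin is available.)  */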
8209
8210 static bool
8211 expand_omp_atomic_load (basic_block load_bb, tree addr,
8212 tree loaded_val, int index)
8213 {
8214 enum built_in_function tmpbase;
8215 gimple_stmt_iterator gsi;
8216 basic_block store_bb;
8217 location_t loc;
8218 gimple stmt;
8219 tree decl, call, type, itype;
8220
8221 gsi = gsi_last_bb (load_bb);
8222 stmt = gsi_stmt (gsi);
8223 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
8224 loc = gimple_location (stmt);
8225
8226 /* ??? If the target does not implement atomic_load_optab[mode], and mode
8227 is smaller than word size, then expand_atomic_load assumes that the load
8228 is atomic. We could avoid the builtin entirely in this case. */
8229
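/* Skip the generic _N entry: INDEX is log2 of the access size, so this
   picks the matching 1-, 2-, 4-, 8- or 16-byte __atomic_load variant.  */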
8230 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
8231 decl = builtin_decl_explicit (tmpbase);
8232 if (decl == NULL_TREE)
8233 return false;
8234
8235 type = TREE_TYPE (loaded_val);
8236 itype = TREE_TYPE (TREE_TYPE (decl));
8237
8238 call = build_call_expr_loc (loc, decl, 2, addr,
8239 build_int_cst (NULL,
8240 gimple_omp_atomic_seq_cst_p (stmt)
8241 ? MEMMODEL_SEQ_CST
8242 : MEMMODEL_RELAXED));
8243 if (!useless_type_conversion_p (type, itype))
8244 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
8245 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
8246
8247 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
8248 gsi_remove (&gsi, true);
8249
8250 store_bb = single_succ (load_bb);
8251 gsi = gsi_last_bb (store_bb);
8252 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
8253 gsi_remove (&gsi, true);
8254
8255 if (gimple_in_ssa_p (cfun))
8256 update_ssa (TODO_update_ssa_no_phi);
8257
8258 return true;
8259 }
8260
8261 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
8262 operation as a normal volatile store. */
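
/* (Illustratively, this handles "#pragma omp atomic write", i.e. "x = expr;";
   when the old value is also needed, as in an atomic capture, the store
   becomes an __atomic_exchange_N instead.)  */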
8263
8264 static bool
8265 expand_omp_atomic_store (basic_block load_bb, tree addr,
8266 tree loaded_val, tree stored_val, int index)
8267 {
8268 enum built_in_function tmpbase;
8269 gimple_stmt_iterator gsi;
8270 basic_block store_bb = single_succ (load_bb);
8271 location_t loc;
8272 gimple stmt;
8273 tree decl, call, type, itype;
8274 machine_mode imode;
8275 bool exchange;
8276
8277 gsi = gsi_last_bb (load_bb);
8278 stmt = gsi_stmt (gsi);
8279 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
8280
8281 /* If the load value is needed, then this isn't a store but an exchange. */
8282 exchange = gimple_omp_atomic_need_value_p (stmt);
8283
8284 gsi = gsi_last_bb (store_bb);
8285 stmt = gsi_stmt (gsi);
8286 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
8287 loc = gimple_location (stmt);
8288
8289 /* ??? If the target does not implement atomic_store_optab[mode], and mode
8290 is smaller than word size, then expand_atomic_store assumes that the store
8291 is atomic. We could avoid the builtin entirely in this case. */
8292
8293 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
8294 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
8295 decl = builtin_decl_explicit (tmpbase);
8296 if (decl == NULL_TREE)
8297 return false;
8298
8299 type = TREE_TYPE (stored_val);
8300
8301 /* Dig out the type of the function's second argument. */
8302 itype = TREE_TYPE (decl);
8303 itype = TYPE_ARG_TYPES (itype);
8304 itype = TREE_CHAIN (itype);
8305 itype = TREE_VALUE (itype);
8306 imode = TYPE_MODE (itype);
8307
8308 if (exchange && !can_atomic_exchange_p (imode, true))
8309 return false;
8310
8311 if (!useless_type_conversion_p (itype, type))
8312 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
8313 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
8314 build_int_cst (NULL,
8315 gimple_omp_atomic_seq_cst_p (stmt)
8316 ? MEMMODEL_SEQ_CST
8317 : MEMMODEL_RELAXED));
8318 if (exchange)
8319 {
8320 if (!useless_type_conversion_p (type, itype))
8321 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
8322 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
8323 }
8324
8325 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
8326 gsi_remove (&gsi, true);
8327
8328 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
8329 gsi = gsi_last_bb (load_bb);
8330 gsi_remove (&gsi, true);
8331
8332 if (gimple_in_ssa_p (cfun))
8333 update_ssa (TODO_update_ssa_no_phi);
8334
8335 return true;
8336 }
8337
8338 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
8339 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
8340 size of the data type, and thus usable to find the index of the builtin
8341 decl. Returns false if the expression is not of the proper form. */
8342
8343 static bool
8344 expand_omp_atomic_fetch_op (basic_block load_bb,
8345 tree addr, tree loaded_val,
8346 tree stored_val, int index)
8347 {
8348 enum built_in_function oldbase, newbase, tmpbase;
8349 tree decl, itype, call;
8350 tree lhs, rhs;
8351 basic_block store_bb = single_succ (load_bb);
8352 gimple_stmt_iterator gsi;
8353 gimple stmt;
8354 location_t loc;
8355 enum tree_code code;
8356 bool need_old, need_new;
8357 machine_mode imode;
8358 bool seq_cst;
8359
8360 /* We expect to find the following sequences:
8361
8362 load_bb:
8363 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
8364
8365 store_bb:
8366 val = tmp OP something; (or: something OP tmp)
8367 GIMPLE_OMP_STORE (val)
8368
8369 ???FIXME: Allow a more flexible sequence.
8370 Perhaps use data flow to pick the statements.
8371
8372 */
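
/* For instance (illustrative only), for

     #pragma omp atomic
     x += 1;

   loaded_val holds the old value of x, stored_val is "loaded_val + 1",
   and the PLUS_EXPR case below maps the update onto __atomic_fetch_add.  */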
8373
8374 gsi = gsi_after_labels (store_bb);
8375 stmt = gsi_stmt (gsi);
8376 loc = gimple_location (stmt);
8377 if (!is_gimple_assign (stmt))
8378 return false;
8379 gsi_next (&gsi);
8380 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
8381 return false;
8382 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
8383 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
8384 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
8385 gcc_checking_assert (!need_old || !need_new);
8386
8387 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
8388 return false;
8389
8390 /* Check for one of the supported fetch-op operations. */
8391 code = gimple_assign_rhs_code (stmt);
8392 switch (code)
8393 {
8394 case PLUS_EXPR:
8395 case POINTER_PLUS_EXPR:
8396 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
8397 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
8398 break;
8399 case MINUS_EXPR:
8400 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
8401 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
8402 break;
8403 case BIT_AND_EXPR:
8404 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
8405 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
8406 break;
8407 case BIT_IOR_EXPR:
8408 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
8409 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
8410 break;
8411 case BIT_XOR_EXPR:
8412 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
8413 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
8414 break;
8415 default:
8416 return false;
8417 }
8418
8419 /* Make sure the expression is of the proper form. */
8420 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
8421 rhs = gimple_assign_rhs2 (stmt);
8422 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
8423 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
8424 rhs = gimple_assign_rhs1 (stmt);
8425 else
8426 return false;
8427
8428 tmpbase = ((enum built_in_function)
8429 ((need_new ? newbase : oldbase) + index + 1));
8430 decl = builtin_decl_explicit (tmpbase);
8431 if (decl == NULL_TREE)
8432 return false;
8433 itype = TREE_TYPE (TREE_TYPE (decl));
8434 imode = TYPE_MODE (itype);
8435
8436 /* We could test all of the various optabs involved, but the fact of the
8437 matter is that (with the exception of i486 vs i586 and xadd) all targets
8438 that support any atomic operation optab also implement compare-and-swap.
8439 Let optabs.c take care of expanding any compare-and-swap loop. */
8440 if (!can_compare_and_swap_p (imode, true))
8441 return false;
8442
8443 gsi = gsi_last_bb (load_bb);
8444 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
8445
8446 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
8447 It only requires that the operation happen atomically. Thus we can
8448 use the RELAXED memory model. */
8449 call = build_call_expr_loc (loc, decl, 3, addr,
8450 fold_convert_loc (loc, itype, rhs),
8451 build_int_cst (NULL,
8452 seq_cst ? MEMMODEL_SEQ_CST
8453 : MEMMODEL_RELAXED));
8454
8455 if (need_old || need_new)
8456 {
8457 lhs = need_old ? loaded_val : stored_val;
8458 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
8459 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
8460 }
8461 else
8462 call = fold_convert_loc (loc, void_type_node, call);
8463 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
8464 gsi_remove (&gsi, true);
8465
8466 gsi = gsi_last_bb (store_bb);
8467 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
8468 gsi_remove (&gsi, true);
8469 gsi = gsi_last_bb (store_bb);
8470 gsi_remove (&gsi, true);
8471
8472 if (gimple_in_ssa_p (cfun))
8473 update_ssa (TODO_update_ssa_no_phi);
8474
8475 return true;
8476 }
8477
8478 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
8479
8480 oldval = *addr;
8481 repeat:
8482 newval = rhs; // with oldval replacing *addr in rhs
8483 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
8484 if (oldval != newval)
8485 goto repeat;
8486
8487 INDEX is log2 of the size of the data type, and thus usable to find the
8488 index of the builtin decl. */
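
/* Illustrative sketch only (as_bits/as_double are hypothetical stand-ins
   for the VIEW_CONVERT_EXPRs built below): for an atomic update of a
   'double' d, there is no floating-point compare-and-swap, so the loop
   works on an integer image of the value:

     uint64_t prev, newv, old = *(uint64_t *) &d;
     do
       {
         prev = old;
         newv = as_bits (as_double (prev) + 1.0);
         old = __sync_val_compare_and_swap ((uint64_t *) &d, prev, newv);
       }
     while (old != prev);  */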
8489
8490 static bool
8491 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
8492 tree addr, tree loaded_val, tree stored_val,
8493 int index)
8494 {
8495 tree loadedi, storedi, initial, new_storedi, old_vali;
8496 tree type, itype, cmpxchg, iaddr;
8497 gimple_stmt_iterator si;
8498 basic_block loop_header = single_succ (load_bb);
8499 gimple phi, stmt;
8500 edge e;
8501 enum built_in_function fncode;
8502
8503 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
8504 order to use the RELAXED memory model effectively. */
8505 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
8506 + index + 1);
8507 cmpxchg = builtin_decl_explicit (fncode);
8508 if (cmpxchg == NULL_TREE)
8509 return false;
8510 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
8511 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
8512
8513 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
8514 return false;
8515
8516 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
8517 si = gsi_last_bb (load_bb);
8518 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
8519
8520 /* For floating-point values, we'll need to view-convert them to integers
8521 so that we can perform the atomic compare and swap. Simplify the
8522 following code by always setting up the "i"ntegral variables. */
8523 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
8524 {
8525 tree iaddr_val;
8526
8527 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
8528 true));
8529 iaddr_val
8530 = force_gimple_operand_gsi (&si,
8531 fold_convert (TREE_TYPE (iaddr), addr),
8532 false, NULL_TREE, true, GSI_SAME_STMT);
8533 stmt = gimple_build_assign (iaddr, iaddr_val);
8534 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8535 loadedi = create_tmp_var (itype);
8536 if (gimple_in_ssa_p (cfun))
8537 loadedi = make_ssa_name (loadedi);
8538 }
8539 else
8540 {
8541 iaddr = addr;
8542 loadedi = loaded_val;
8543 }
8544
8545 fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
8546 tree loaddecl = builtin_decl_explicit (fncode);
8547 if (loaddecl)
8548 initial
8549 = fold_convert (TREE_TYPE (TREE_TYPE (iaddr)),
8550 build_call_expr (loaddecl, 2, iaddr,
8551 build_int_cst (NULL_TREE,
8552 MEMMODEL_RELAXED)));
8553 else
8554 initial = build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)), iaddr,
8555 build_int_cst (TREE_TYPE (iaddr), 0));
8556
8557 initial
8558 = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true,
8559 GSI_SAME_STMT);
8560
8561 /* Move the value to the LOADEDI temporary. */
8562 if (gimple_in_ssa_p (cfun))
8563 {
8564 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
8565 phi = create_phi_node (loadedi, loop_header);
8566 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
8567 initial);
8568 }
8569 else
8570 gsi_insert_before (&si,
8571 gimple_build_assign (loadedi, initial),
8572 GSI_SAME_STMT);
8573 if (loadedi != loaded_val)
8574 {
8575 gimple_stmt_iterator gsi2;
8576 tree x;
8577
8578 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
8579 gsi2 = gsi_start_bb (loop_header);
8580 if (gimple_in_ssa_p (cfun))
8581 {
8582 gassign *stmt;
8583 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8584 true, GSI_SAME_STMT);
8585 stmt = gimple_build_assign (loaded_val, x);
8586 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
8587 }
8588 else
8589 {
8590 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
8591 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
8592 true, GSI_SAME_STMT);
8593 }
8594 }
8595 gsi_remove (&si, true);
8596
8597 si = gsi_last_bb (store_bb);
8598 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8599
8600 if (iaddr == addr)
8601 storedi = stored_val;
8602 else
8603 storedi =
8604 force_gimple_operand_gsi (&si,
8605 build1 (VIEW_CONVERT_EXPR, itype,
8606 stored_val), true, NULL_TREE, true,
8607 GSI_SAME_STMT);
8608
8609 /* Build the compare&swap statement. */
8610 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
8611 new_storedi = force_gimple_operand_gsi (&si,
8612 fold_convert (TREE_TYPE (loadedi),
8613 new_storedi),
8614 true, NULL_TREE,
8615 true, GSI_SAME_STMT);
8616
8617 if (gimple_in_ssa_p (cfun))
8618 old_vali = loadedi;
8619 else
8620 {
8621 old_vali = create_tmp_var (TREE_TYPE (loadedi));
8622 stmt = gimple_build_assign (old_vali, loadedi);
8623 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8624
8625 stmt = gimple_build_assign (loadedi, new_storedi);
8626 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8627 }
8628
8629 /* Note that we always perform the comparison as an integer, even for
8630 floating point. This allows the atomic operation to properly
8631 succeed even with NaNs and -0.0. */
8632 stmt = gimple_build_cond_empty
8633 (build2 (NE_EXPR, boolean_type_node,
8634 new_storedi, old_vali));
8635 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8636
8637 /* Update cfg. */
8638 e = single_succ_edge (store_bb);
8639 e->flags &= ~EDGE_FALLTHRU;
8640 e->flags |= EDGE_FALSE_VALUE;
8641
8642 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
8643
8644 /* Copy the new value to loadedi (we already did that before the condition
8645 if we are not in SSA). */
8646 if (gimple_in_ssa_p (cfun))
8647 {
8648 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
8649 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
8650 }
8651
8652 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
8653 gsi_remove (&si, true);
8654
8655 struct loop *loop = alloc_loop ();
8656 loop->header = loop_header;
8657 loop->latch = store_bb;
8658 add_loop (loop, loop_header->loop_father);
8659
8660 if (gimple_in_ssa_p (cfun))
8661 update_ssa (TODO_update_ssa_no_phi);
8662
8663 return true;
8664 }
8665
8666 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
8667
8668 GOMP_atomic_start ();
8669 *addr = rhs;
8670 GOMP_atomic_end ();
8671
8672 The result is not globally atomic, but works so long as all parallel
8673 references are within #pragma omp atomic directives. According to
8674 responses received from omp@openmp.org, this appears to be within spec,
8675 which makes sense, since that's how several other compilers handle
8676 this situation as well.
8677 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
8678 expanding. STORED_VAL is the operand of the matching
8679 GIMPLE_OMP_ATOMIC_STORE.
8680
8681 We replace
8682 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
8683 loaded_val = *addr;
8684
8685 and replace
8686 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
8687 *addr = stored_val;
8688 */
8689
8690 static bool
8691 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
8692 tree addr, tree loaded_val, tree stored_val)
8693 {
8694 gimple_stmt_iterator si;
8695 gassign *stmt;
8696 tree t;
8697
8698 si = gsi_last_bb (load_bb);
8699 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
8700
8701 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
8702 t = build_call_expr (t, 0);
8703 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8704
8705 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
8706 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8707 gsi_remove (&si, true);
8708
8709 si = gsi_last_bb (store_bb);
8710 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
8711
8712 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
8713 stored_val);
8714 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
8715
8716 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
8717 t = build_call_expr (t, 0);
8718 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
8719 gsi_remove (&si, true);
8720
8721 if (gimple_in_ssa_p (cfun))
8722 update_ssa (TODO_update_ssa_no_phi);
8723 return true;
8724 }
8725
8726 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
8727 using expand_omp_atomic_fetch_op. If that fails, we try to
8728 call expand_omp_atomic_pipeline, and if that fails too, the
8729 ultimate fallback is wrapping the operation in a mutex
8730 (expand_omp_atomic_mutex). REGION is the atomic region built
8731 by build_omp_regions_1(). */
8732
8733 static void
8734 expand_omp_atomic (struct omp_region *region)
8735 {
8736 basic_block load_bb = region->entry, store_bb = region->exit;
8737 gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb));
8738 gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb));
8739 tree loaded_val = gimple_omp_atomic_load_lhs (load);
8740 tree addr = gimple_omp_atomic_load_rhs (load);
8741 tree stored_val = gimple_omp_atomic_store_val (store);
8742 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
8743 HOST_WIDE_INT index;
8744
8745 /* Make sure the type is one of the supported sizes. */
8746 index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
8747 index = exact_log2 (index);
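/* INDEX is now log2 of the byte size, so 0..4 covers the 1-, 2-, 4-, 8-
   and 16-byte variants of the __atomic/__sync builtins.  */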
8748 if (index >= 0 && index <= 4)
8749 {
8750 unsigned int align = TYPE_ALIGN_UNIT (type);
8751
8752 /* __sync builtins require strict data alignment. */
8753 if (exact_log2 (align) >= index)
8754 {
8755 /* Atomic load. */
8756 if (loaded_val == stored_val
8757 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8758 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8759 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8760 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
8761 return;
8762
8763 /* Atomic store. */
8764 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
8765 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
8766 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
8767 && store_bb == single_succ (load_bb)
8768 && first_stmt (store_bb) == store
8769 && expand_omp_atomic_store (load_bb, addr, loaded_val,
8770 stored_val, index))
8771 return;
8772
8773 /* When possible, use specialized atomic update functions. */
8774 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
8775 && store_bb == single_succ (load_bb)
8776 && expand_omp_atomic_fetch_op (load_bb, addr,
8777 loaded_val, stored_val, index))
8778 return;
8779
8780 /* If we don't have specialized __sync builtins, try and implement
8781 as a compare and swap loop. */
8782 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
8783 loaded_val, stored_val, index))
8784 return;
8785 }
8786 }
8787
8788 /* The ultimate fallback is wrapping the operation in a mutex. */
8789 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
8790 }
8791
8792
8793 /* Expand the GIMPLE_OMP_TARGET starting at REGION. */
8794
8795 static void
8796 expand_omp_target (struct omp_region *region)
8797 {
8798 basic_block entry_bb, exit_bb, new_bb;
8799 struct function *child_cfun;
8800 tree child_fn, block, t;
8801 gimple_stmt_iterator gsi;
8802 gomp_target *entry_stmt;
8803 gimple stmt;
8804 edge e;
8805 bool offloaded, data_region;
8806
8807 entry_stmt = as_a <gomp_target *> (last_stmt (region->entry));
8808 new_bb = region->entry;
8809
8810 offloaded = is_gimple_omp_offloaded (entry_stmt);
8811 switch (gimple_omp_target_kind (entry_stmt))
8812 {
8813 case GF_OMP_TARGET_KIND_REGION:
8814 case GF_OMP_TARGET_KIND_UPDATE:
8815 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
8816 case GF_OMP_TARGET_KIND_OACC_KERNELS:
8817 case GF_OMP_TARGET_KIND_OACC_UPDATE:
8818 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
8819 data_region = false;
8820 break;
8821 case GF_OMP_TARGET_KIND_DATA:
8822 case GF_OMP_TARGET_KIND_OACC_DATA:
8823 data_region = true;
8824 break;
8825 default:
8826 gcc_unreachable ();
8827 }
8828
8829 child_fn = NULL_TREE;
8830 child_cfun = NULL;
8831 if (offloaded)
8832 {
8833 child_fn = gimple_omp_target_child_fn (entry_stmt);
8834 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
8835 }
8836
8837 /* Supported by expand_omp_taskreg, but not here. */
8838 if (child_cfun != NULL)
8839 gcc_checking_assert (!child_cfun->cfg);
8840 gcc_checking_assert (!gimple_in_ssa_p (cfun));
8841
8842 entry_bb = region->entry;
8843 exit_bb = region->exit;
8844
8845 if (offloaded)
8846 {
8847 unsigned srcidx, dstidx, num;
8848
8849 /* If the offloading region needs data sent from the parent
8850 function, then the very first statement (except possible
8851 tree profile counter updates) of the offloading body
8852 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
8853 &.OMP_DATA_O is passed as an argument to the child function,
8854 we need to replace it with the argument as seen by the child
8855 function.
8856
8857 In most cases, this will end up being the identity assignment
8858 .OMP_DATA_I = .OMP_DATA_I. However, if the offloading body had
8859 a function call that has been inlined, the original PARM_DECL
8860 .OMP_DATA_I may have been converted into a different local
8861	 variable, in which case we need to keep the assignment.  */
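      /* For orientation, a sketch of the statement searched for below
	 (the .OMP_DATA_* names are the artificial decls created by this
	 pass):

	   .OMP_DATA_I = &.OMP_DATA_O;

	 where .OMP_DATA_O stands for the sender decl in the parent, i.e.
	 the first element of the TREE_VEC returned by
	 gimple_omp_target_data_arg.  */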
8862 tree data_arg = gimple_omp_target_data_arg (entry_stmt);
8863 if (data_arg)
8864 {
8865 basic_block entry_succ_bb = single_succ (entry_bb);
8866 gimple_stmt_iterator gsi;
8867 tree arg;
8868 gimple tgtcopy_stmt = NULL;
8869 tree sender = TREE_VEC_ELT (data_arg, 0);
8870
8871 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
8872 {
8873 gcc_assert (!gsi_end_p (gsi));
8874 stmt = gsi_stmt (gsi);
8875 if (gimple_code (stmt) != GIMPLE_ASSIGN)
8876 continue;
8877
8878 if (gimple_num_ops (stmt) == 2)
8879 {
8880 tree arg = gimple_assign_rhs1 (stmt);
8881
8882 /* We're ignoring the subcode because we're
8883 effectively doing a STRIP_NOPS. */
8884
8885 if (TREE_CODE (arg) == ADDR_EXPR
8886 && TREE_OPERAND (arg, 0) == sender)
8887 {
8888 tgtcopy_stmt = stmt;
8889 break;
8890 }
8891 }
8892 }
8893
8894 gcc_assert (tgtcopy_stmt != NULL);
8895 arg = DECL_ARGUMENTS (child_fn);
8896
8897 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
8898 gsi_remove (&gsi, true);
8899 }
8900
8901 /* Declare local variables needed in CHILD_CFUN. */
8902 block = DECL_INITIAL (child_fn);
8903 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
8904 /* The gimplifier could record temporaries in the offloading block
8905	 rather than in the containing function's local_decls chain,
8906 which would mean cgraph missed finalizing them. Do it now. */
8907 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
8908 if (TREE_CODE (t) == VAR_DECL
8909 && TREE_STATIC (t)
8910 && !DECL_EXTERNAL (t))
8911 varpool_node::finalize_decl (t);
8912 DECL_SAVED_TREE (child_fn) = NULL;
8913 /* We'll create a CFG for child_fn, so no gimple body is needed. */
8914 gimple_set_body (child_fn, NULL);
8915 TREE_USED (block) = 1;
8916
8917 /* Reset DECL_CONTEXT on function arguments. */
8918 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
8919 DECL_CONTEXT (t) = child_fn;
8920
8921 /* Split ENTRY_BB at GIMPLE_*,
8922 so that it can be moved to the child function. */
8923 gsi = gsi_last_bb (entry_bb);
8924 stmt = gsi_stmt (gsi);
8925 gcc_assert (stmt
8926 && gimple_code (stmt) == gimple_code (entry_stmt));
8927 e = split_block (entry_bb, stmt);
8928 gsi_remove (&gsi, true);
8929 entry_bb = e->dest;
8930 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
8931
8932 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
8933 if (exit_bb)
8934 {
8935 gsi = gsi_last_bb (exit_bb);
8936 gcc_assert (!gsi_end_p (gsi)
8937 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
8938 stmt = gimple_build_return (NULL);
8939 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
8940 gsi_remove (&gsi, true);
8941 }
8942
8943 /* Move the offloading region into CHILD_CFUN. */
8944
8945 block = gimple_block (entry_stmt);
8946
8947 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
8948 if (exit_bb)
8949 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
8950 /* When the OMP expansion process cannot guarantee an up-to-date
8951	 loop tree, arrange for the child function to fix up loops.  */
8952 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
8953 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
8954
8955 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
8956 num = vec_safe_length (child_cfun->local_decls);
8957 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
8958 {
8959 t = (*child_cfun->local_decls)[srcidx];
8960 if (DECL_CONTEXT (t) == cfun->decl)
8961 continue;
8962 if (srcidx != dstidx)
8963 (*child_cfun->local_decls)[dstidx] = t;
8964 dstidx++;
8965 }
8966 if (dstidx != num)
8967 vec_safe_truncate (child_cfun->local_decls, dstidx);
8968
8969 /* Inform the callgraph about the new function. */
8970 child_cfun->curr_properties = cfun->curr_properties;
8971 child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
8972 child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
8973 cgraph_node *node = cgraph_node::get_create (child_fn);
8974 node->parallelized_function = 1;
8975 cgraph_node::add_new_function (child_fn, true);
8976
8977 #ifdef ENABLE_OFFLOADING
8978 /* Add the new function to the offload table. */
8979 vec_safe_push (offload_funcs, child_fn);
8980 #endif
8981
8982 /* Fix the callgraph edges for child_cfun. Those for cfun will be
8983 fixed in a following pass. */
8984 push_cfun (child_cfun);
8985 cgraph_edge::rebuild_edges ();
8986
8987 #ifdef ENABLE_OFFLOADING
8988 /* Prevent IPA from removing child_fn as unreachable, since there are no
8989 refs from the parent function to child_fn in offload LTO mode. */
8990 cgraph_node::get (child_fn)->mark_force_output ();
8991 #endif
8992
8993 /* Some EH regions might become dead, see PR34608. If
8994 pass_cleanup_cfg isn't the first pass to happen with the
8995 new child, these dead EH edges might cause problems.
8996 Clean them up now. */
8997 if (flag_exceptions)
8998 {
8999 basic_block bb;
9000 bool changed = false;
9001
9002 FOR_EACH_BB_FN (bb, cfun)
9003 changed |= gimple_purge_dead_eh_edges (bb);
9004 if (changed)
9005 cleanup_tree_cfg ();
9006 }
9007 #ifdef ENABLE_CHECKING
9008 if (!loops_state_satisfies_p (LOOPS_NEED_FIXUP))
9009 verify_loop_structure ();
9010 #endif
9011 pop_cfun ();
9012 }
9013
9014 /* Emit a library call to launch the offloading region, or do data
9015 transfers. */
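  /* A sketch for orientation only (the precise libgomp prototypes are
     declared via omp-builtins.def and may differ between releases): for an
     OpenMP target region the call built below looks roughly like

       GOMP_target (device, child_fn, unused_ptr,
		    mapnum, hostaddrs, sizes, kinds);

     where unused_ptr is the unused const void * mentioned further down and
     mapnum, hostaddrs, sizes and kinds correspond to T1..T4.  */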
9016 tree t1, t2, t3, t4, device, cond, c, clauses;
9017 enum built_in_function start_ix;
9018 location_t clause_loc;
9019
9020 switch (gimple_omp_target_kind (entry_stmt))
9021 {
9022 case GF_OMP_TARGET_KIND_REGION:
9023 start_ix = BUILT_IN_GOMP_TARGET;
9024 break;
9025 case GF_OMP_TARGET_KIND_DATA:
9026 start_ix = BUILT_IN_GOMP_TARGET_DATA;
9027 break;
9028 case GF_OMP_TARGET_KIND_UPDATE:
9029 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
9030 break;
9031 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
9032 case GF_OMP_TARGET_KIND_OACC_KERNELS:
9033 start_ix = BUILT_IN_GOACC_PARALLEL;
9034 break;
9035 case GF_OMP_TARGET_KIND_OACC_DATA:
9036 start_ix = BUILT_IN_GOACC_DATA_START;
9037 break;
9038 case GF_OMP_TARGET_KIND_OACC_UPDATE:
9039 start_ix = BUILT_IN_GOACC_UPDATE;
9040 break;
9041 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
9042 start_ix = BUILT_IN_GOACC_ENTER_EXIT_DATA;
9043 break;
9044 default:
9045 gcc_unreachable ();
9046 }
9047
9048 clauses = gimple_omp_target_clauses (entry_stmt);
9049
9050   /* By default, the value of DEVICE is GOMP_DEVICE_ICV (let the runtime
9051 library choose) and there is no conditional. */
9052 cond = NULL_TREE;
9053 device = build_int_cst (integer_type_node, GOMP_DEVICE_ICV);
9054
9055 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
9056 if (c)
9057 cond = OMP_CLAUSE_IF_EXPR (c);
9058
9059 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
9060 if (c)
9061 {
9062 /* Even if we pass it to all library function calls, it is currently only
9063 defined/used for the OpenMP target ones. */
9064 gcc_checking_assert (start_ix == BUILT_IN_GOMP_TARGET
9065 || start_ix == BUILT_IN_GOMP_TARGET_DATA
9066 || start_ix == BUILT_IN_GOMP_TARGET_UPDATE);
9067
9068 device = OMP_CLAUSE_DEVICE_ID (c);
9069 clause_loc = OMP_CLAUSE_LOCATION (c);
9070 }
9071 else
9072 clause_loc = gimple_location (entry_stmt);
9073
9074 /* Ensure 'device' is of the correct type. */
9075 device = fold_convert_loc (clause_loc, integer_type_node, device);
9076
9077 /* If we found the clause 'if (cond)', build
9078 (cond ? device : GOMP_DEVICE_HOST_FALLBACK). */
9079 if (cond)
9080 {
9081 cond = gimple_boolify (cond);
9082
9083 basic_block cond_bb, then_bb, else_bb;
9084 edge e;
9085 tree tmp_var;
9086
9087 tmp_var = create_tmp_var (TREE_TYPE (device));
9088 if (offloaded)
9089 e = split_block_after_labels (new_bb);
9090 else
9091 {
9092 gsi = gsi_last_bb (new_bb);
9093 gsi_prev (&gsi);
9094 e = split_block (new_bb, gsi_stmt (gsi));
9095 }
9096 cond_bb = e->src;
9097 new_bb = e->dest;
9098 remove_edge (e);
9099
9100 then_bb = create_empty_bb (cond_bb);
9101 else_bb = create_empty_bb (then_bb);
9102 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
9103 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
9104
9105 stmt = gimple_build_cond_empty (cond);
9106 gsi = gsi_last_bb (cond_bb);
9107 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
9108
9109 gsi = gsi_start_bb (then_bb);
9110 stmt = gimple_build_assign (tmp_var, device);
9111 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
9112
9113 gsi = gsi_start_bb (else_bb);
9114 stmt = gimple_build_assign (tmp_var,
9115 build_int_cst (integer_type_node,
9116 GOMP_DEVICE_HOST_FALLBACK));
9117 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
9118
9119 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
9120 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
9121 add_bb_to_loop (then_bb, cond_bb->loop_father);
9122 add_bb_to_loop (else_bb, cond_bb->loop_father);
9123 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
9124 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
9125
9126 device = tmp_var;
9127 }
9128
9129 gsi = gsi_last_bb (new_bb);
9130 t = gimple_omp_target_data_arg (entry_stmt);
9131 if (t == NULL)
9132 {
9133 t1 = size_zero_node;
9134 t2 = build_zero_cst (ptr_type_node);
9135 t3 = t2;
9136 t4 = t2;
9137 }
9138 else
9139 {
9140 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
9141 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
9142 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
9143 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
9144 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
9145 }
9146
9147 gimple g;
9148   /* The maximum number of arguments used by any start_ix, without varargs.  */
9149 auto_vec<tree, 11> args;
9150 args.quick_push (device);
9151 if (offloaded)
9152 args.quick_push (build_fold_addr_expr (child_fn));
9153 switch (start_ix)
9154 {
9155 case BUILT_IN_GOMP_TARGET:
9156 case BUILT_IN_GOMP_TARGET_DATA:
9157 case BUILT_IN_GOMP_TARGET_UPDATE:
9158 /* This const void * is part of the current ABI, but we're not actually
9159 using it. */
9160 args.quick_push (build_zero_cst (ptr_type_node));
9161 break;
9162 case BUILT_IN_GOACC_DATA_START:
9163 case BUILT_IN_GOACC_ENTER_EXIT_DATA:
9164 case BUILT_IN_GOACC_PARALLEL:
9165 case BUILT_IN_GOACC_UPDATE:
9166 break;
9167 default:
9168 gcc_unreachable ();
9169 }
9170 args.quick_push (t1);
9171 args.quick_push (t2);
9172 args.quick_push (t3);
9173 args.quick_push (t4);
9174 switch (start_ix)
9175 {
9176 case BUILT_IN_GOACC_DATA_START:
9177 case BUILT_IN_GOMP_TARGET:
9178 case BUILT_IN_GOMP_TARGET_DATA:
9179 case BUILT_IN_GOMP_TARGET_UPDATE:
9180 break;
9181 case BUILT_IN_GOACC_PARALLEL:
9182 {
9183 tree t_num_gangs, t_num_workers, t_vector_length;
9184
9185 /* Default values for num_gangs, num_workers, and vector_length. */
9186 t_num_gangs = t_num_workers = t_vector_length
9187 = fold_convert_loc (gimple_location (entry_stmt),
9188 integer_type_node, integer_one_node);
9189 /* ..., but if present, use the value specified by the respective
9190	     clause, making sure that they are of the correct type.  */
9191 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_GANGS);
9192 if (c)
9193 t_num_gangs = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
9194 integer_type_node,
9195 OMP_CLAUSE_NUM_GANGS_EXPR (c));
9196 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_WORKERS);
9197 if (c)
9198 t_num_workers = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
9199 integer_type_node,
9200 OMP_CLAUSE_NUM_WORKERS_EXPR (c));
9201 c = find_omp_clause (clauses, OMP_CLAUSE_VECTOR_LENGTH);
9202 if (c)
9203 t_vector_length = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
9204 integer_type_node,
9205 OMP_CLAUSE_VECTOR_LENGTH_EXPR (c));
9206 args.quick_push (t_num_gangs);
9207 args.quick_push (t_num_workers);
9208 args.quick_push (t_vector_length);
9209 }
9210 /* FALLTHRU */
9211 case BUILT_IN_GOACC_ENTER_EXIT_DATA:
9212 case BUILT_IN_GOACC_UPDATE:
9213 {
9214 tree t_async;
9215 int t_wait_idx;
9216
9217 /* Default values for t_async. */
9218 t_async = fold_convert_loc (gimple_location (entry_stmt),
9219 integer_type_node,
9220 build_int_cst (integer_type_node,
9221 GOMP_ASYNC_SYNC));
9222 /* ..., but if present, use the value specified by the respective
9223	     clause, making sure that it is of the correct type.  */
9224 c = find_omp_clause (clauses, OMP_CLAUSE_ASYNC);
9225 if (c)
9226 t_async = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
9227 integer_type_node,
9228 OMP_CLAUSE_ASYNC_EXPR (c));
9229
9230 args.quick_push (t_async);
9231 /* Save the index, and... */
9232 t_wait_idx = args.length ();
9233 /* ... push a default value. */
9234 args.quick_push (fold_convert_loc (gimple_location (entry_stmt),
9235 integer_type_node,
9236 integer_zero_node));
9237 c = find_omp_clause (clauses, OMP_CLAUSE_WAIT);
9238 if (c)
9239 {
9240 int n = 0;
9241
9242 for (; c; c = OMP_CLAUSE_CHAIN (c))
9243 {
9244 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WAIT)
9245 {
9246 args.safe_push (fold_convert_loc (OMP_CLAUSE_LOCATION (c),
9247 integer_type_node,
9248 OMP_CLAUSE_WAIT_EXPR (c)));
9249 n++;
9250 }
9251 }
9252
9253 /* Now that we know the number, replace the default value. */
9254 args.ordered_remove (t_wait_idx);
9255 args.quick_insert (t_wait_idx,
9256 fold_convert_loc (gimple_location (entry_stmt),
9257 integer_type_node,
9258 build_int_cst (integer_type_node, n)));
9259 }
9260 }
9261 break;
9262 default:
9263 gcc_unreachable ();
9264 }
9265
9266 g = gimple_build_call_vec (builtin_decl_explicit (start_ix), args);
9267 gimple_set_location (g, gimple_location (entry_stmt));
9268 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
9269 if (!offloaded)
9270 {
9271 g = gsi_stmt (gsi);
9272 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
9273 gsi_remove (&gsi, true);
9274 }
9275 if (data_region
9276 && region->exit)
9277 {
9278 gsi = gsi_last_bb (region->exit);
9279 g = gsi_stmt (gsi);
9280 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
9281 gsi_remove (&gsi, true);
9282 }
9283 }
9284
9285
9286 /* Expand the parallel region tree rooted at REGION. Expansion
9287 proceeds in depth-first order. Innermost regions are expanded
9288 first. This way, parallel regions that require a new function to
9289 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
9290 internal dependencies in their body. */
9291
9292 static void
9293 expand_omp (struct omp_region *region)
9294 {
9295 while (region)
9296 {
9297 location_t saved_location;
9298 gimple inner_stmt = NULL;
9299
9300 /* First, determine whether this is a combined parallel+workshare
9301 region. */
9302 if (region->type == GIMPLE_OMP_PARALLEL)
9303 determine_parallel_type (region);
9304
9305 if (region->type == GIMPLE_OMP_FOR
9306 && gimple_omp_for_combined_p (last_stmt (region->entry)))
9307 inner_stmt = last_stmt (region->inner->entry);
9308
9309 if (region->inner)
9310 expand_omp (region->inner);
9311
9312 saved_location = input_location;
9313 if (gimple_has_location (last_stmt (region->entry)))
9314 input_location = gimple_location (last_stmt (region->entry));
9315
9316 switch (region->type)
9317 {
9318 case GIMPLE_OMP_PARALLEL:
9319 case GIMPLE_OMP_TASK:
9320 expand_omp_taskreg (region);
9321 break;
9322
9323 case GIMPLE_OMP_FOR:
9324 expand_omp_for (region, inner_stmt);
9325 break;
9326
9327 case GIMPLE_OMP_SECTIONS:
9328 expand_omp_sections (region);
9329 break;
9330
9331 case GIMPLE_OMP_SECTION:
9332 /* Individual omp sections are handled together with their
9333 parent GIMPLE_OMP_SECTIONS region. */
9334 break;
9335
9336 case GIMPLE_OMP_SINGLE:
9337 expand_omp_single (region);
9338 break;
9339
9340 case GIMPLE_OMP_MASTER:
9341 case GIMPLE_OMP_TASKGROUP:
9342 case GIMPLE_OMP_ORDERED:
9343 case GIMPLE_OMP_CRITICAL:
9344 case GIMPLE_OMP_TEAMS:
9345 expand_omp_synch (region);
9346 break;
9347
9348 case GIMPLE_OMP_ATOMIC_LOAD:
9349 expand_omp_atomic (region);
9350 break;
9351
9352 case GIMPLE_OMP_TARGET:
9353 expand_omp_target (region);
9354 break;
9355
9356 default:
9357 gcc_unreachable ();
9358 }
9359
9360 input_location = saved_location;
9361 region = region->next;
9362 }
9363 }
9364
9365
9366 /* Helper for build_omp_regions. Scan the dominator tree starting at
9367 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
9368    true, the function ends once a single tree is built (otherwise, a whole
9369 forest of OMP constructs may be built). */
9370
9371 static void
9372 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
9373 bool single_tree)
9374 {
9375 gimple_stmt_iterator gsi;
9376 gimple stmt;
9377 basic_block son;
9378
9379 gsi = gsi_last_bb (bb);
9380 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
9381 {
9382 struct omp_region *region;
9383 enum gimple_code code;
9384
9385 stmt = gsi_stmt (gsi);
9386 code = gimple_code (stmt);
9387 if (code == GIMPLE_OMP_RETURN)
9388 {
9389 /* STMT is the return point out of region PARENT. Mark it
9390 as the exit point and make PARENT the immediately
9391 enclosing region. */
9392 gcc_assert (parent);
9393 region = parent;
9394 region->exit = bb;
9395 parent = parent->outer;
9396 }
9397 else if (code == GIMPLE_OMP_ATOMIC_STORE)
9398 {
9399	      /* GIMPLE_OMP_ATOMIC_STORE is analogous to
9400 GIMPLE_OMP_RETURN, but matches with
9401 GIMPLE_OMP_ATOMIC_LOAD. */
9402 gcc_assert (parent);
9403 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
9404 region = parent;
9405 region->exit = bb;
9406 parent = parent->outer;
9407 }
9408 else if (code == GIMPLE_OMP_CONTINUE)
9409 {
9410 gcc_assert (parent);
9411 parent->cont = bb;
9412 }
9413 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
9414 {
9415 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
9416 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
9417 }
9418 else
9419 {
9420 region = new_omp_region (bb, code, parent);
9421 /* Otherwise... */
9422 if (code == GIMPLE_OMP_TARGET)
9423 {
9424 switch (gimple_omp_target_kind (stmt))
9425 {
9426 case GF_OMP_TARGET_KIND_REGION:
9427 case GF_OMP_TARGET_KIND_DATA:
9428 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
9429 case GF_OMP_TARGET_KIND_OACC_KERNELS:
9430 case GF_OMP_TARGET_KIND_OACC_DATA:
9431 break;
9432 case GF_OMP_TARGET_KIND_UPDATE:
9433 case GF_OMP_TARGET_KIND_OACC_UPDATE:
9434 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
9435 /* ..., other than for those stand-alone directives... */
9436 region = NULL;
9437 break;
9438 default:
9439 gcc_unreachable ();
9440 }
9441 }
9442 /* ..., this directive becomes the parent for a new region. */
9443 if (region)
9444 parent = region;
9445 }
9446 }
9447
9448 if (single_tree && !parent)
9449 return;
9450
9451 for (son = first_dom_son (CDI_DOMINATORS, bb);
9452 son;
9453 son = next_dom_son (CDI_DOMINATORS, son))
9454 build_omp_regions_1 (son, parent, single_tree);
9455 }
9456
9457 /* Build the tree of OMP regions rooted at ROOT, storing it in
9458 root_omp_region. */
9459
9460 static void
9461 build_omp_regions_root (basic_block root)
9462 {
9463 gcc_assert (root_omp_region == NULL);
9464 build_omp_regions_1 (root, NULL, true);
9465 gcc_assert (root_omp_region != NULL);
9466 }
9467
9468 /* Expands omp construct (and its subconstructs) starting in HEAD. */
9469
9470 void
9471 omp_expand_local (basic_block head)
9472 {
9473 build_omp_regions_root (head);
9474 if (dump_file && (dump_flags & TDF_DETAILS))
9475 {
9476 fprintf (dump_file, "\nOMP region tree\n\n");
9477 dump_omp_region (dump_file, root_omp_region, 0);
9478 fprintf (dump_file, "\n");
9479 }
9480
9481 remove_exit_barriers (root_omp_region);
9482 expand_omp (root_omp_region);
9483
9484 free_omp_regions ();
9485 }
9486
9487 /* Scan the CFG and build a tree of OMP regions, storing its root in
9488    root_omp_region.  */
9489
9490 static void
9491 build_omp_regions (void)
9492 {
9493 gcc_assert (root_omp_region == NULL);
9494 calculate_dominance_info (CDI_DOMINATORS);
9495 build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false);
9496 }
9497
9498 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
9499
9500 static unsigned int
9501 execute_expand_omp (void)
9502 {
9503 build_omp_regions ();
9504
9505 if (!root_omp_region)
9506 return 0;
9507
9508 if (dump_file)
9509 {
9510 fprintf (dump_file, "\nOMP region tree\n\n");
9511 dump_omp_region (dump_file, root_omp_region, 0);
9512 fprintf (dump_file, "\n");
9513 }
9514
9515 remove_exit_barriers (root_omp_region);
9516
9517 expand_omp (root_omp_region);
9518
9519 #ifdef ENABLE_CHECKING
9520 if (!loops_state_satisfies_p (LOOPS_NEED_FIXUP))
9521 verify_loop_structure ();
9522 #endif
9523 cleanup_tree_cfg ();
9524
9525 free_omp_regions ();
9526
9527 return 0;
9528 }
9529
9530 /* OMP expansion -- the default pass, run before creation of SSA form. */
9531
9532 static const pass_data pass_data_expand_omp =
9533 {
9534 GIMPLE_PASS, /* type */
9535 "ompexp", /* name */
9536 OPTGROUP_NONE, /* optinfo_flags */
9537 TV_NONE, /* tv_id */
9538 PROP_gimple_any, /* properties_required */
9539 PROP_gimple_eomp, /* properties_provided */
9540 0, /* properties_destroyed */
9541 0, /* todo_flags_start */
9542 0, /* todo_flags_finish */
9543 };
9544
9545 class pass_expand_omp GCC_FINAL : public gimple_opt_pass
9546 {
9547 public:
9548 pass_expand_omp (gcc::context *ctxt)
9549 : gimple_opt_pass (pass_data_expand_omp, ctxt)
9550 {}
9551
9552 /* opt_pass methods: */
9553 virtual unsigned int execute (function *)
9554 {
9555 bool gate = ((flag_cilkplus != 0 || flag_openacc != 0 || flag_openmp != 0
9556 || flag_openmp_simd != 0)
9557 && !seen_error ());
9558
9559 /* This pass always runs, to provide PROP_gimple_eomp.
9560 But often, there is nothing to do. */
9561 if (!gate)
9562 return 0;
9563
9564 return execute_expand_omp ();
9565 }
9566
9567 }; // class pass_expand_omp
9568
9569 gimple_opt_pass *
9570 make_pass_expand_omp (gcc::context *ctxt)
9571 {
9572 return new pass_expand_omp (ctxt);
9573 }
9574
9575 static const pass_data pass_data_expand_omp_ssa =
9576 {
9577 GIMPLE_PASS, /* type */
9578 "ompexpssa", /* name */
9579 OPTGROUP_NONE, /* optinfo_flags */
9580 TV_NONE, /* tv_id */
9581 PROP_cfg | PROP_ssa, /* properties_required */
9582 PROP_gimple_eomp, /* properties_provided */
9583 0, /* properties_destroyed */
9584 0, /* todo_flags_start */
9585 TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */
9586 };
9587
9588 class pass_expand_omp_ssa GCC_FINAL : public gimple_opt_pass
9589 {
9590 public:
9591 pass_expand_omp_ssa (gcc::context *ctxt)
9592 : gimple_opt_pass (pass_data_expand_omp_ssa, ctxt)
9593 {}
9594
9595 /* opt_pass methods: */
9596 virtual bool gate (function *fun)
9597 {
9598 return !(fun->curr_properties & PROP_gimple_eomp);
9599 }
9600 virtual unsigned int execute (function *) { return execute_expand_omp (); }
9601
9602 }; // class pass_expand_omp_ssa
9603
9604 gimple_opt_pass *
9605 make_pass_expand_omp_ssa (gcc::context *ctxt)
9606 {
9607 return new pass_expand_omp_ssa (ctxt);
9608 }
9609 \f
9610 /* Routines to lower OMP directives into OMP-GIMPLE. */
9611
9612 /* Helper function to perform a (potentially COMPLEX_TYPE) operation and
9613    convert it to gimple.  */
9614 static void
9615 oacc_gimple_assign (tree dest, tree_code op, tree src, gimple_seq *seq)
9616 {
9617 gimple stmt;
9618
9619 if (TREE_CODE (TREE_TYPE (dest)) != COMPLEX_TYPE)
9620 {
9621 stmt = gimple_build_assign (dest, op, dest, src);
9622 gimple_seq_add_stmt (seq, stmt);
9623 return;
9624 }
9625
9626 tree t = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9627 tree rdest = fold_build1 (REALPART_EXPR, TREE_TYPE (TREE_TYPE (dest)), dest);
9628 gimplify_assign (t, rdest, seq);
9629 rdest = t;
9630
9631 t = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9632 tree idest = fold_build1 (IMAGPART_EXPR, TREE_TYPE (TREE_TYPE (dest)), dest);
9633 gimplify_assign (t, idest, seq);
9634 idest = t;
9635
9636 t = create_tmp_var (TREE_TYPE (TREE_TYPE (src)));
9637 tree rsrc = fold_build1 (REALPART_EXPR, TREE_TYPE (TREE_TYPE (src)), src);
9638 gimplify_assign (t, rsrc, seq);
9639 rsrc = t;
9640
9641 t = create_tmp_var (TREE_TYPE (TREE_TYPE (src)));
9642 tree isrc = fold_build1 (IMAGPART_EXPR, TREE_TYPE (TREE_TYPE (src)), src);
9643 gimplify_assign (t, isrc, seq);
9644 isrc = t;
9645
9646 tree r = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9647 tree i = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9648 tree result;
9649
9650 if (op == PLUS_EXPR)
9651 {
9652 stmt = gimple_build_assign (r, op, rdest, rsrc);
9653 gimple_seq_add_stmt (seq, stmt);
9654
9655 stmt = gimple_build_assign (i, op, idest, isrc);
9656 gimple_seq_add_stmt (seq, stmt);
9657 }
9658 else if (op == MULT_EXPR)
9659 {
9660 /* Let x = a + ib = dest, y = c + id = src.
9661 x * y = (ac - bd) + i(ad + bc) */
9662 tree ac = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9663 tree bd = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9664 tree ad = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9665 tree bc = create_tmp_var (TREE_TYPE (TREE_TYPE (dest)));
9666
9667 stmt = gimple_build_assign (ac, MULT_EXPR, rdest, rsrc);
9668 gimple_seq_add_stmt (seq, stmt);
9669
9670 stmt = gimple_build_assign (bd, MULT_EXPR, idest, isrc);
9671 gimple_seq_add_stmt (seq, stmt);
9672
9673 stmt = gimple_build_assign (r, MINUS_EXPR, ac, bd);
9674 gimple_seq_add_stmt (seq, stmt);
9675
9676 stmt = gimple_build_assign (ad, MULT_EXPR, rdest, isrc);
9677 gimple_seq_add_stmt (seq, stmt);
9678
9679       stmt = gimple_build_assign (bc, MULT_EXPR, idest, rsrc);
9680 gimple_seq_add_stmt (seq, stmt);
9681
9682 stmt = gimple_build_assign (i, PLUS_EXPR, ad, bc);
9683 gimple_seq_add_stmt (seq, stmt);
9684 }
9685 else
9686 gcc_unreachable ();
9687
9688 result = build2 (COMPLEX_EXPR, TREE_TYPE (dest), r, i);
9689 gimplify_assign (dest, result, seq);
9690 }
9691
9692 /* Helper function to initialize local data for the reduction arrays.
9693 The reduction arrays need to be placed inside the calling function
9694    for accelerators, or else the host won't be able to perform the final
9695 reduction. */
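/* A sketch only, names hypothetical: for each reduction variable VAR the
   loop below emits, roughly,

     size  = nthreads * sizeof (base type of VAR);
     array = __builtin_alloca (size);

   and appends a GOMP_MAP_FORCE_FROM map clause for ARRAY to the enclosing
   OpenACC parallel's clause chain, so the host can read the partial
   results back.  */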
9696
9697 static void
9698 oacc_initialize_reduction_data (tree clauses, tree nthreads,
9699 gimple_seq *stmt_seqp, omp_context *ctx)
9700 {
9701 tree c, t, oc;
9702 gimple stmt;
9703 omp_context *octx;
9704
9705 /* Find the innermost OpenACC parallel context. */
9706 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET
9707 && (gimple_omp_target_kind (ctx->stmt)
9708 == GF_OMP_TARGET_KIND_OACC_PARALLEL))
9709 octx = ctx;
9710 else
9711 octx = ctx->outer;
9712 gcc_checking_assert (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
9713 && (gimple_omp_target_kind (octx->stmt)
9714 == GF_OMP_TARGET_KIND_OACC_PARALLEL));
9715
9716 /* Extract the clauses. */
9717 oc = gimple_omp_target_clauses (octx->stmt);
9718
9719 /* Find the last outer clause. */
9720 for (; oc && OMP_CLAUSE_CHAIN (oc); oc = OMP_CLAUSE_CHAIN (oc))
9721 ;
9722
9723 /* Allocate arrays for each reduction variable. */
9724 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9725 {
9726 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
9727 continue;
9728
9729 tree var = OMP_CLAUSE_DECL (c);
9730 tree type = get_base_type (var);
9731 tree array = lookup_oacc_reduction (oacc_get_reduction_array_id (var),
9732 ctx);
9733 tree size, call;
9734
9735 /* Calculate size of the reduction array. */
9736 t = create_tmp_var (TREE_TYPE (nthreads));
9737 stmt = gimple_build_assign (t, MULT_EXPR, nthreads,
9738 fold_convert (TREE_TYPE (nthreads),
9739 TYPE_SIZE_UNIT (type)));
9740 gimple_seq_add_stmt (stmt_seqp, stmt);
9741
9742 size = create_tmp_var (sizetype);
9743 gimplify_assign (size, fold_build1 (NOP_EXPR, sizetype, t), stmt_seqp);
9744
9745 /* Now allocate memory for it. */
9746 call = unshare_expr (builtin_decl_explicit (BUILT_IN_ALLOCA));
9747 stmt = gimple_build_call (call, 1, size);
9748 gimple_call_set_lhs (stmt, array);
9749 gimple_seq_add_stmt (stmt_seqp, stmt);
9750
9751 /* Map this array into the accelerator. */
9752
9753 /* Add the reduction array to the list of clauses. */
9754 tree x = array;
9755 t = build_omp_clause (gimple_location (ctx->stmt), OMP_CLAUSE_MAP);
9756 OMP_CLAUSE_SET_MAP_KIND (t, GOMP_MAP_FORCE_FROM);
9757 OMP_CLAUSE_DECL (t) = x;
9758 OMP_CLAUSE_CHAIN (t) = NULL;
9759 if (oc)
9760 OMP_CLAUSE_CHAIN (oc) = t;
9761 else
9762 gimple_omp_target_set_clauses (as_a <gomp_target *> (octx->stmt), t);
9763 OMP_CLAUSE_SIZE (t) = size;
9764 oc = t;
9765 }
9766 }
9767
9768 /* Helper function to process the array of partial reductions.  NTHREADS
9769    indicates the number of threads.  Unfortunately, GOACC_GET_NUM_THREADS
9770    cannot be used here, because nthreads on the host may differ from
9771    nthreads on the accelerator.  */
9772
9773 static void
9774 oacc_finalize_reduction_data (tree clauses, tree nthreads,
9775 gimple_seq *stmt_seqp, omp_context *ctx)
9776 {
9777 tree c, x, var, array, loop_header, loop_body, loop_exit, type;
9778 gimple stmt;
9779
9780 /* Create for loop.
9781
9782 let var = the original reduction variable
9783 let array = reduction variable array
9784
9785 for (i = 0; i < nthreads; i++)
9786 var op= array[i]
9787 */
9788
9789 loop_header = create_artificial_label (UNKNOWN_LOCATION);
9790 loop_body = create_artificial_label (UNKNOWN_LOCATION);
9791 loop_exit = create_artificial_label (UNKNOWN_LOCATION);
9792
9793 /* Create and initialize an index variable. */
9794 tree ix = create_tmp_var (sizetype);
9795 gimplify_assign (ix, fold_build1 (NOP_EXPR, sizetype, integer_zero_node),
9796 stmt_seqp);
9797
9798 /* Insert the loop header label here. */
9799 gimple_seq_add_stmt (stmt_seqp, gimple_build_label (loop_header));
9800
9801 /* Exit loop if ix >= nthreads. */
9802 x = create_tmp_var (sizetype);
9803 gimplify_assign (x, fold_build1 (NOP_EXPR, sizetype, nthreads), stmt_seqp);
9804 stmt = gimple_build_cond (GE_EXPR, ix, x, loop_exit, loop_body);
9805 gimple_seq_add_stmt (stmt_seqp, stmt);
9806
9807 /* Insert the loop body label here. */
9808 gimple_seq_add_stmt (stmt_seqp, gimple_build_label (loop_body));
9809
9810 /* Collapse each reduction array, one element at a time. */
9811 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9812 {
9813 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
9814 continue;
9815
9816 tree_code reduction_code = OMP_CLAUSE_REDUCTION_CODE (c);
9817
9818 /* reduction(-:var) sums up the partial results, so it acts
9819 identically to reduction(+:var). */
9820 if (reduction_code == MINUS_EXPR)
9821 reduction_code = PLUS_EXPR;
9822
9823 /* Set up reduction variable var. */
9824 var = OMP_CLAUSE_DECL (c);
9825 type = get_base_type (var);
9826 array = lookup_oacc_reduction (oacc_get_reduction_array_id
9827 (OMP_CLAUSE_DECL (c)), ctx);
9828
9829 /* Calculate the array offset. */
9830 tree offset = create_tmp_var (sizetype);
9831 gimplify_assign (offset, TYPE_SIZE_UNIT (type), stmt_seqp);
9832 stmt = gimple_build_assign (offset, MULT_EXPR, offset, ix);
9833 gimple_seq_add_stmt (stmt_seqp, stmt);
9834
9835 tree ptr = create_tmp_var (TREE_TYPE (array));
9836 stmt = gimple_build_assign (ptr, POINTER_PLUS_EXPR, array, offset);
9837 gimple_seq_add_stmt (stmt_seqp, stmt);
9838
9839 /* Extract array[ix] into mem. */
9840 tree mem = create_tmp_var (type);
9841 gimplify_assign (mem, build_simple_mem_ref (ptr), stmt_seqp);
9842
9843 /* Find the original reduction variable. */
9844 if (is_reference (var))
9845 var = build_simple_mem_ref (var);
9846
9847 tree t = create_tmp_var (type);
9848
9849 x = lang_hooks.decls.omp_clause_assign_op (c, t, var);
9850       gimplify_and_add (unshare_expr (x), stmt_seqp);
9851
9852 /* var = var op mem */
9853 switch (OMP_CLAUSE_REDUCTION_CODE (c))
9854 {
9855 case TRUTH_ANDIF_EXPR:
9856 case TRUTH_ORIF_EXPR:
9857 t = fold_build2 (OMP_CLAUSE_REDUCTION_CODE (c), integer_type_node,
9858 t, mem);
9859 gimplify_and_add (t, stmt_seqp);
9860 break;
9861 default:
9862 /* The lhs isn't a gimple_reg when var is COMPLEX_TYPE. */
9863 oacc_gimple_assign (t, OMP_CLAUSE_REDUCTION_CODE (c), mem,
9864 stmt_seqp);
9865 }
9866
9867 t = fold_build1 (NOP_EXPR, TREE_TYPE (var), t);
9868 x = lang_hooks.decls.omp_clause_assign_op (c, var, t);
9869       gimplify_and_add (unshare_expr (x), stmt_seqp);
9870 }
9871
9872 /* Increment the induction variable. */
9873 tree one = fold_build1 (NOP_EXPR, sizetype, integer_one_node);
9874 stmt = gimple_build_assign (ix, PLUS_EXPR, ix, one);
9875 gimple_seq_add_stmt (stmt_seqp, stmt);
9876
9877 /* Go back to the top of the loop. */
9878 gimple_seq_add_stmt (stmt_seqp, gimple_build_goto (loop_header));
9879
9880 /* Place the loop exit label here. */
9881 gimple_seq_add_stmt (stmt_seqp, gimple_build_label (loop_exit));
9882 }
9883
9884 /* Scan through all of the gimple stmts searching for a GIMPLE_OMP_FOR, and
9885    scan that for reductions.  */
9886
9887 static void
9888 oacc_process_reduction_data (gimple_seq *body, gimple_seq *in_stmt_seqp,
9889 gimple_seq *out_stmt_seqp, omp_context *ctx)
9890 {
9891 gimple_stmt_iterator gsi;
9892 gimple_seq inner = NULL;
9893
9894 /* A collapse clause may have inserted a new bind block. */
9895 gsi = gsi_start (*body);
9896 while (!gsi_end_p (gsi))
9897 {
9898 gimple stmt = gsi_stmt (gsi);
9899 if (gbind *bind_stmt = dyn_cast <gbind *> (stmt))
9900 {
9901 inner = gimple_bind_body (bind_stmt);
9902 body = &inner;
9903 gsi = gsi_start (*body);
9904 }
9905 else if (dyn_cast <gomp_for *> (stmt))
9906 break;
9907 else
9908 gsi_next (&gsi);
9909 }
9910
9911 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
9912 {
9913 tree clauses, nthreads, t, c, acc_device, acc_device_host, call,
9914 enter, exit;
9915 bool reduction_found = false;
9916
9917 gimple stmt = gsi_stmt (gsi);
9918
9919 switch (gimple_code (stmt))
9920 {
9921 case GIMPLE_OMP_FOR:
9922 clauses = gimple_omp_for_clauses (stmt);
9923
9924 /* Search for a reduction clause. */
9925 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9926 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
9927 {
9928 reduction_found = true;
9929 break;
9930 }
9931
9932 if (!reduction_found)
9933 break;
9934
9935 ctx = maybe_lookup_ctx (stmt);
9936 t = NULL_TREE;
9937
9938 /* Extract the number of threads. */
9939 nthreads = create_tmp_var (sizetype);
9940 t = oacc_max_threads (ctx);
9941 gimplify_assign (nthreads, t, in_stmt_seqp);
9942
9943	  /* Determine if this kernel will be executed on the host.  */
9944 call = builtin_decl_explicit (BUILT_IN_ACC_GET_DEVICE_TYPE);
9945 acc_device = create_tmp_var (integer_type_node, ".acc_device_type");
9946 stmt = gimple_build_call (call, 0);
9947 gimple_call_set_lhs (stmt, acc_device);
9948 gimple_seq_add_stmt (in_stmt_seqp, stmt);
9949
9950 /* Set nthreads = 1 for ACC_DEVICE_TYPE=host. */
9951 acc_device_host = create_tmp_var (integer_type_node,
9952 ".acc_device_host");
9953 gimplify_assign (acc_device_host,
9954 build_int_cst (integer_type_node,
9955 GOMP_DEVICE_HOST),
9956 in_stmt_seqp);
9957
9958 enter = create_artificial_label (UNKNOWN_LOCATION);
9959 exit = create_artificial_label (UNKNOWN_LOCATION);
9960
9961 stmt = gimple_build_cond (EQ_EXPR, acc_device, acc_device_host,
9962 enter, exit);
9963 gimple_seq_add_stmt (in_stmt_seqp, stmt);
9964 gimple_seq_add_stmt (in_stmt_seqp, gimple_build_label (enter));
9965 gimplify_assign (nthreads, fold_build1 (NOP_EXPR, sizetype,
9966 integer_one_node),
9967 in_stmt_seqp);
9968 gimple_seq_add_stmt (in_stmt_seqp, gimple_build_label (exit));
9969
9970 oacc_initialize_reduction_data (clauses, nthreads, in_stmt_seqp,
9971 ctx);
9972 oacc_finalize_reduction_data (clauses, nthreads, out_stmt_seqp, ctx);
9973 break;
9974 default:
9975 // Scan for other directives which support reduction here.
9976 break;
9977 }
9978 }
9979 }
9980
9981 /* If ctx is a worksharing context inside of a cancellable parallel
9982 region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
9983 and conditional branch to parallel's cancel_label to handle
9984 cancellation in the implicit barrier. */
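/* Shape sketch only (label and temporary names hypothetical):

     GIMPLE_OMP_RETURN <lhs = D.res>;
     if (D.res != 0) goto <cancel_label>; else goto <fallthru>;
     <fallthru>:  */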
9985
9986 static void
9987 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
9988 {
9989 gimple omp_return = gimple_seq_last_stmt (*body);
9990 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
9991 if (gimple_omp_return_nowait_p (omp_return))
9992 return;
9993 if (ctx->outer
9994 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
9995 && ctx->outer->cancellable)
9996 {
9997 tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
9998 tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
9999 tree lhs = create_tmp_var (c_bool_type);
10000 gimple_omp_return_set_lhs (omp_return, lhs);
10001 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
10002 gimple g = gimple_build_cond (NE_EXPR, lhs,
10003 fold_convert (c_bool_type,
10004 boolean_false_node),
10005 ctx->outer->cancel_label, fallthru_label);
10006 gimple_seq_add_stmt (body, g);
10007 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
10008 }
10009 }
10010
10011 /* Lower the OpenMP sections directive in the current statement in GSI_P.
10012 CTX is the enclosing OMP context for the current statement. */
10013
10014 static void
10015 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10016 {
10017 tree block, control;
10018 gimple_stmt_iterator tgsi;
10019 gomp_sections *stmt;
10020 gimple t;
10021 gbind *new_stmt, *bind;
10022 gimple_seq ilist, dlist, olist, new_body;
10023
10024 stmt = as_a <gomp_sections *> (gsi_stmt (*gsi_p));
10025
10026 push_gimplify_context ();
10027
10028 dlist = NULL;
10029 ilist = NULL;
10030 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
10031 &ilist, &dlist, ctx, NULL);
10032
10033 new_body = gimple_omp_body (stmt);
10034 gimple_omp_set_body (stmt, NULL);
10035 tgsi = gsi_start (new_body);
10036 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
10037 {
10038 omp_context *sctx;
10039 gimple sec_start;
10040
10041 sec_start = gsi_stmt (tgsi);
10042 sctx = maybe_lookup_ctx (sec_start);
10043 gcc_assert (sctx);
10044
10045 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
10046 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
10047 GSI_CONTINUE_LINKING);
10048 gimple_omp_set_body (sec_start, NULL);
10049
10050 if (gsi_one_before_end_p (tgsi))
10051 {
10052 gimple_seq l = NULL;
10053 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
10054 &l, ctx);
10055 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
10056 gimple_omp_section_set_last (sec_start);
10057 }
10058
10059 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
10060 GSI_CONTINUE_LINKING);
10061 }
10062
10063 block = make_node (BLOCK);
10064 bind = gimple_build_bind (NULL, new_body, block);
10065
10066 olist = NULL;
10067 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
10068
10069 block = make_node (BLOCK);
10070 new_stmt = gimple_build_bind (NULL, NULL, block);
10071 gsi_replace (gsi_p, new_stmt, true);
10072
10073 pop_gimplify_context (new_stmt);
10074 gimple_bind_append_vars (new_stmt, ctx->block_vars);
10075 BLOCK_VARS (block) = gimple_bind_vars (bind);
10076 if (BLOCK_VARS (block))
10077 TREE_USED (block) = 1;
10078
10079 new_body = NULL;
10080 gimple_seq_add_seq (&new_body, ilist);
10081 gimple_seq_add_stmt (&new_body, stmt);
10082 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
10083 gimple_seq_add_stmt (&new_body, bind);
10084
10085 control = create_tmp_var (unsigned_type_node, ".section");
10086 t = gimple_build_omp_continue (control, control);
10087 gimple_omp_sections_set_control (stmt, control);
10088 gimple_seq_add_stmt (&new_body, t);
10089
10090 gimple_seq_add_seq (&new_body, olist);
10091 if (ctx->cancellable)
10092 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
10093 gimple_seq_add_seq (&new_body, dlist);
10094
10095 new_body = maybe_catch_exception (new_body);
10096
10097 t = gimple_build_omp_return
10098 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
10099 OMP_CLAUSE_NOWAIT));
10100 gimple_seq_add_stmt (&new_body, t);
10101 maybe_add_implicit_barrier_cancel (ctx, &new_body);
10102
10103 gimple_bind_set_body (new_stmt, new_body);
10104 }
10105
10106
10107 /* A subroutine of lower_omp_single. Expand the simple form of
10108 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
10109
10110 if (GOMP_single_start ())
10111 BODY;
10112 [ GOMP_barrier (); ] -> unless 'nowait' is present.
10113
10114 FIXME. It may be better to delay expanding the logic of this until
10115 pass_expand_omp. The expanded logic may make the job more difficult
10116    for a synchronization analysis pass.  */
10117
10118 static void
10119 lower_omp_single_simple (gomp_single *single_stmt, gimple_seq *pre_p)
10120 {
10121 location_t loc = gimple_location (single_stmt);
10122 tree tlabel = create_artificial_label (loc);
10123 tree flabel = create_artificial_label (loc);
10124 gimple call, cond;
10125 tree lhs, decl;
10126
10127 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
10128 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)));
10129 call = gimple_build_call (decl, 0);
10130 gimple_call_set_lhs (call, lhs);
10131 gimple_seq_add_stmt (pre_p, call);
10132
10133 cond = gimple_build_cond (EQ_EXPR, lhs,
10134 fold_convert_loc (loc, TREE_TYPE (lhs),
10135 boolean_true_node),
10136 tlabel, flabel);
10137 gimple_seq_add_stmt (pre_p, cond);
10138 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
10139 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
10140 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
10141 }
10142
10143
10144 /* A subroutine of lower_omp_single. Expand the simple form of
10145 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
10146
10147 #pragma omp single copyprivate (a, b, c)
10148
10149 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
10150
10151 {
10152 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
10153 {
10154 BODY;
10155 copyout.a = a;
10156 copyout.b = b;
10157 copyout.c = c;
10158 GOMP_single_copy_end (&copyout);
10159 }
10160 else
10161 {
10162 a = copyout_p->a;
10163 b = copyout_p->b;
10164 c = copyout_p->c;
10165 }
10166 GOMP_barrier ();
10167 }
10168
10169 FIXME. It may be better to delay expanding the logic of this until
10170 pass_expand_omp. The expanded logic may make the job more difficult
10171    for a synchronization analysis pass.  */
10172
10173 static void
10174 lower_omp_single_copy (gomp_single *single_stmt, gimple_seq *pre_p,
10175 omp_context *ctx)
10176 {
10177 tree ptr_type, t, l0, l1, l2, bfn_decl;
10178 gimple_seq copyin_seq;
10179 location_t loc = gimple_location (single_stmt);
10180
10181 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
10182
10183 ptr_type = build_pointer_type (ctx->record_type);
10184 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
10185
10186 l0 = create_artificial_label (loc);
10187 l1 = create_artificial_label (loc);
10188 l2 = create_artificial_label (loc);
10189
10190 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
10191 t = build_call_expr_loc (loc, bfn_decl, 0);
10192 t = fold_convert_loc (loc, ptr_type, t);
10193 gimplify_assign (ctx->receiver_decl, t, pre_p);
10194
10195 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
10196 build_int_cst (ptr_type, 0));
10197 t = build3 (COND_EXPR, void_type_node, t,
10198 build_and_jump (&l0), build_and_jump (&l1));
10199 gimplify_and_add (t, pre_p);
10200
10201 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
10202
10203 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
10204
10205 copyin_seq = NULL;
10206 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
10207 &copyin_seq, ctx);
10208
10209 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
10210 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
10211 t = build_call_expr_loc (loc, bfn_decl, 1, t);
10212 gimplify_and_add (t, pre_p);
10213
10214 t = build_and_jump (&l2);
10215 gimplify_and_add (t, pre_p);
10216
10217 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
10218
10219 gimple_seq_add_seq (pre_p, copyin_seq);
10220
10221 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
10222 }
10223
10224
10225 /* Expand code for an OpenMP single directive. */
10226
10227 static void
10228 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10229 {
10230 tree block;
10231 gimple t;
10232 gomp_single *single_stmt = as_a <gomp_single *> (gsi_stmt (*gsi_p));
10233 gbind *bind;
10234 gimple_seq bind_body, bind_body_tail = NULL, dlist;
10235
10236 push_gimplify_context ();
10237
10238 block = make_node (BLOCK);
10239 bind = gimple_build_bind (NULL, NULL, block);
10240 gsi_replace (gsi_p, bind, true);
10241 bind_body = NULL;
10242 dlist = NULL;
10243 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
10244 &bind_body, &dlist, ctx, NULL);
10245 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
10246
10247 gimple_seq_add_stmt (&bind_body, single_stmt);
10248
10249 if (ctx->record_type)
10250 lower_omp_single_copy (single_stmt, &bind_body, ctx);
10251 else
10252 lower_omp_single_simple (single_stmt, &bind_body);
10253
10254 gimple_omp_set_body (single_stmt, NULL);
10255
10256 gimple_seq_add_seq (&bind_body, dlist);
10257
10258 bind_body = maybe_catch_exception (bind_body);
10259
10260 t = gimple_build_omp_return
10261 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
10262 OMP_CLAUSE_NOWAIT));
10263 gimple_seq_add_stmt (&bind_body_tail, t);
10264 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
10265 if (ctx->record_type)
10266 {
10267 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
10268 tree clobber = build_constructor (ctx->record_type, NULL);
10269 TREE_THIS_VOLATILE (clobber) = 1;
10270 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
10271 clobber), GSI_SAME_STMT);
10272 }
10273 gimple_seq_add_seq (&bind_body, bind_body_tail);
10274 gimple_bind_set_body (bind, bind_body);
10275
10276 pop_gimplify_context (bind);
10277
10278 gimple_bind_append_vars (bind, ctx->block_vars);
10279 BLOCK_VARS (block) = ctx->block_vars;
10280 if (BLOCK_VARS (block))
10281 TREE_USED (block) = 1;
10282 }
10283
10284
10285 /* Expand code for an OpenMP master directive. */
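/* Sketch only; the lowered shape built below is essentially

     if (omp_get_thread_num () != 0) goto lab;
     BODY;
   lab:
     GIMPLE_OMP_RETURN;  */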
10286
10287 static void
10288 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10289 {
10290 tree block, lab = NULL, x, bfn_decl;
10291 gimple stmt = gsi_stmt (*gsi_p);
10292 gbind *bind;
10293 location_t loc = gimple_location (stmt);
10294 gimple_seq tseq;
10295
10296 push_gimplify_context ();
10297
10298 block = make_node (BLOCK);
10299 bind = gimple_build_bind (NULL, NULL, block);
10300 gsi_replace (gsi_p, bind, true);
10301 gimple_bind_add_stmt (bind, stmt);
10302
10303 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
10304 x = build_call_expr_loc (loc, bfn_decl, 0);
10305 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
10306 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
10307 tseq = NULL;
10308 gimplify_and_add (x, &tseq);
10309 gimple_bind_add_seq (bind, tseq);
10310
10311 lower_omp (gimple_omp_body_ptr (stmt), ctx);
10312 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
10313 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
10314 gimple_omp_set_body (stmt, NULL);
10315
10316 gimple_bind_add_stmt (bind, gimple_build_label (lab));
10317
10318 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
10319
10320 pop_gimplify_context (bind);
10321
10322 gimple_bind_append_vars (bind, ctx->block_vars);
10323 BLOCK_VARS (block) = ctx->block_vars;
10324 }
10325
10326
10327 /* Expand code for an OpenMP taskgroup directive. */
10328
10329 static void
10330 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10331 {
10332 gimple stmt = gsi_stmt (*gsi_p);
10333 gcall *x;
10334 gbind *bind;
10335 tree block = make_node (BLOCK);
10336
10337 bind = gimple_build_bind (NULL, NULL, block);
10338 gsi_replace (gsi_p, bind, true);
10339 gimple_bind_add_stmt (bind, stmt);
10340
10341 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
10342 0);
10343 gimple_bind_add_stmt (bind, x);
10344
10345 lower_omp (gimple_omp_body_ptr (stmt), ctx);
10346 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
10347 gimple_omp_set_body (stmt, NULL);
10348
10349 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
10350
10351 gimple_bind_append_vars (bind, ctx->block_vars);
10352 BLOCK_VARS (block) = ctx->block_vars;
10353 }
10354
10355
10356 /* Expand code for an OpenMP ordered directive. */
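/* Sketch only; the body is bracketed by the libgomp entry points

     GOMP_ordered_start ();
     BODY;
     GOMP_ordered_end ();

   followed by a GIMPLE_OMP_RETURN.  */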
10357
10358 static void
10359 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10360 {
10361 tree block;
10362 gimple stmt = gsi_stmt (*gsi_p);
10363 gcall *x;
10364 gbind *bind;
10365
10366 push_gimplify_context ();
10367
10368 block = make_node (BLOCK);
10369 bind = gimple_build_bind (NULL, NULL, block);
10370 gsi_replace (gsi_p, bind, true);
10371 gimple_bind_add_stmt (bind, stmt);
10372
10373 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
10374 0);
10375 gimple_bind_add_stmt (bind, x);
10376
10377 lower_omp (gimple_omp_body_ptr (stmt), ctx);
10378 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
10379 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
10380 gimple_omp_set_body (stmt, NULL);
10381
10382 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
10383 gimple_bind_add_stmt (bind, x);
10384
10385 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
10386
10387 pop_gimplify_context (bind);
10388
10389 gimple_bind_append_vars (bind, ctx->block_vars);
10390 BLOCK_VARS (block) = gimple_bind_vars (bind);
10391 }
10392
10393
10394 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
10395 substitution of a couple of function calls. But in the NAMED case,
10396    it requires that languages coordinate a symbol name.  It is therefore
10397 best put here in common code. */
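/* Sketch of the named case (the mutex symbol follows the
   .gomp_critical_user_ naming used in this function):

     #pragma omp critical (foo)
     BODY;

   becomes, roughly,

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     BODY;
     GOMP_critical_name_end (&.gomp_critical_user_foo);  */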
10398
10399 static GTY(()) hash_map<tree, tree> *critical_name_mutexes;
10400
10401 static void
10402 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10403 {
10404 tree block;
10405 tree name, lock, unlock;
10406 gomp_critical *stmt = as_a <gomp_critical *> (gsi_stmt (*gsi_p));
10407 gbind *bind;
10408 location_t loc = gimple_location (stmt);
10409 gimple_seq tbody;
10410
10411 name = gimple_omp_critical_name (stmt);
10412 if (name)
10413 {
10414 tree decl;
10415
10416 if (!critical_name_mutexes)
10417 critical_name_mutexes = hash_map<tree, tree>::create_ggc (10);
10418
10419 tree *n = critical_name_mutexes->get (name);
10420 if (n == NULL)
10421 {
10422 char *new_str;
10423
10424 decl = create_tmp_var_raw (ptr_type_node);
10425
10426 new_str = ACONCAT ((".gomp_critical_user_",
10427 IDENTIFIER_POINTER (name), NULL));
10428 DECL_NAME (decl) = get_identifier (new_str);
10429 TREE_PUBLIC (decl) = 1;
10430 TREE_STATIC (decl) = 1;
10431 DECL_COMMON (decl) = 1;
10432 DECL_ARTIFICIAL (decl) = 1;
10433 DECL_IGNORED_P (decl) = 1;
10434
10435 varpool_node::finalize_decl (decl);
10436
10437 critical_name_mutexes->put (name, decl);
10438 }
10439 else
10440 decl = *n;
10441
10442	 /* If '#pragma omp critical' is inside an offloaded region or
10443	    inside a function marked as offloadable, the symbol must be
10444 marked as offloadable too. */
10445 omp_context *octx;
10446 if (cgraph_node::get (current_function_decl)->offloadable)
10447 varpool_node::get_create (decl)->offloadable = 1;
10448 else
10449 for (octx = ctx->outer; octx; octx = octx->outer)
10450 if (is_gimple_omp_offloaded (octx->stmt))
10451 {
10452 varpool_node::get_create (decl)->offloadable = 1;
10453 break;
10454 }
10455
10456 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
10457 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
10458
10459 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
10460 unlock = build_call_expr_loc (loc, unlock, 1,
10461 build_fold_addr_expr_loc (loc, decl));
10462 }
10463 else
10464 {
10465 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
10466 lock = build_call_expr_loc (loc, lock, 0);
10467
10468 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
10469 unlock = build_call_expr_loc (loc, unlock, 0);
10470 }
10471
10472 push_gimplify_context ();
10473
10474 block = make_node (BLOCK);
10475 bind = gimple_build_bind (NULL, NULL, block);
10476 gsi_replace (gsi_p, bind, true);
10477 gimple_bind_add_stmt (bind, stmt);
10478
10479 tbody = gimple_bind_body (bind);
10480 gimplify_and_add (lock, &tbody);
10481 gimple_bind_set_body (bind, tbody);
10482
10483 lower_omp (gimple_omp_body_ptr (stmt), ctx);
10484 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
10485 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
10486 gimple_omp_set_body (stmt, NULL);
10487
10488 tbody = gimple_bind_body (bind);
10489 gimplify_and_add (unlock, &tbody);
10490 gimple_bind_set_body (bind, tbody);
10491
10492 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
10493
10494 pop_gimplify_context (bind);
10495 gimple_bind_append_vars (bind, ctx->block_vars);
10496 BLOCK_VARS (block) = gimple_bind_vars (bind);
10497 }
10498
10499
10500 /* A subroutine of lower_omp_for. Generate code to emit the predicate
10501 for a lastprivate clause. Given a loop control predicate of (V
10502 cond N2), we gate the clause on (!(V cond N2)). The lowered form
10503 is appended to *DLIST, iterator initialization is appended to
10504 *BODY_P. */
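/* A hedged sketch: for

     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++) ...

   the copy-out of the private copy (name hypothetical) is gated on the
   negated loop predicate, roughly

     if (i >= n)
       x = x.priv;

   with EQ_EXPR used instead of GE_EXPR when the step is known to be
   +-1.  */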
10505
10506 static void
10507 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
10508 gimple_seq *dlist, struct omp_context *ctx)
10509 {
10510 tree clauses, cond, vinit;
10511 enum tree_code cond_code;
10512 gimple_seq stmts;
10513
10514 cond_code = fd->loop.cond_code;
10515 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
10516
10517 /* When possible, use a strict equality expression. This can let VRP
10518 type optimizations deduce the value and remove a copy. */
10519 if (tree_fits_shwi_p (fd->loop.step))
10520 {
10521 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
10522 if (step == 1 || step == -1)
10523 cond_code = EQ_EXPR;
10524 }
10525
10526 tree n2 = fd->loop.n2;
10527 if (fd->collapse > 1
10528 && TREE_CODE (n2) != INTEGER_CST
10529 && gimple_omp_for_combined_into_p (fd->for_stmt)
10530 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_FOR)
10531 {
10532 gomp_for *gfor = as_a <gomp_for *> (ctx->outer->stmt);
10533 if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_FOR)
10534 {
10535 struct omp_for_data outer_fd;
10536 extract_omp_for_data (gfor, &outer_fd, NULL);
10537 n2 = fold_convert (TREE_TYPE (n2), outer_fd.loop.n2);
10538 }
10539 }
10540 cond = build2 (cond_code, boolean_type_node, fd->loop.v, n2);
10541
10542 clauses = gimple_omp_for_clauses (fd->for_stmt);
10543 stmts = NULL;
10544 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
10545 if (!gimple_seq_empty_p (stmts))
10546 {
10547 gimple_seq_add_seq (&stmts, *dlist);
10548 *dlist = stmts;
10549
10550 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
10551 vinit = fd->loop.n1;
10552 if (cond_code == EQ_EXPR
10553 && tree_fits_shwi_p (fd->loop.n2)
10554 && ! integer_zerop (fd->loop.n2))
10555 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
10556 else
10557 vinit = unshare_expr (vinit);
10558
10559 /* Initialize the iterator variable, so that threads that don't execute
10560 any iterations don't execute the lastprivate clauses by accident. */
10561 gimplify_assign (fd->loop.v, vinit, body_p);
10562 }
10563 }
10564
10565
10566 /* Lower code for an OMP loop directive. */
10567
10568 static void
10569 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
10570 {
10571 tree *rhs_p, block;
10572 struct omp_for_data fd, *fdp = NULL;
10573 gomp_for *stmt = as_a <gomp_for *> (gsi_stmt (*gsi_p));
10574 gbind *new_stmt;
10575 gimple_seq omp_for_body, body, dlist;
10576 size_t i;
10577
10578 push_gimplify_context ();
10579
10580 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
10581
10582 block = make_node (BLOCK);
10583 new_stmt = gimple_build_bind (NULL, NULL, block);
10584 /* Replace at gsi right away, so that 'stmt' is no longer a member
10585 of a sequence, as we're going to add it to a different
10586 one below. */
10587 gsi_replace (gsi_p, new_stmt, true);
10588
10589 /* Move declaration of temporaries in the loop body before we make
10590 it go away. */
10591 omp_for_body = gimple_omp_body (stmt);
10592 if (!gimple_seq_empty_p (omp_for_body)
10593 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
10594 {
10595 gbind *inner_bind
10596 = as_a <gbind *> (gimple_seq_first_stmt (omp_for_body));
10597 tree vars = gimple_bind_vars (inner_bind);
10598 gimple_bind_append_vars (new_stmt, vars);
10599 /* bind_vars/BLOCK_VARS are being moved to new_stmt/block; don't
10600 keep them on the inner_bind and its block. */
10601 gimple_bind_set_vars (inner_bind, NULL_TREE);
10602 if (gimple_bind_block (inner_bind))
10603 BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
10604 }
10605
10606 if (gimple_omp_for_combined_into_p (stmt))
10607 {
10608 extract_omp_for_data (stmt, &fd, NULL);
10609 fdp = &fd;
10610
10611 /* We need two temporaries with fd.loop.v type (istart/iend)
10612 and then (fd.collapse - 1) temporaries with the same
10613 type for count2 ... countN-1 vars if not constant. */
10614 size_t count = 2;
10615 tree type = fd.iter_type;
10616 if (fd.collapse > 1
10617 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
10618 count += fd.collapse - 1;
10619 bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
10620 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
10621 tree clauses = *pc;
10622 if (parallel_for)
10623 outerc
10624 = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
10625 OMP_CLAUSE__LOOPTEMP_);
10626 for (i = 0; i < count; i++)
10627 {
10628 tree temp;
10629 if (parallel_for)
10630 {
10631 gcc_assert (outerc);
10632 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
10633 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
10634 OMP_CLAUSE__LOOPTEMP_);
10635 }
10636 else
10637 {
10638 temp = create_tmp_var (type);
10639 insert_decl_map (&ctx->outer->cb, temp, temp);
10640 }
10641 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
10642 OMP_CLAUSE_DECL (*pc) = temp;
10643 pc = &OMP_CLAUSE_CHAIN (*pc);
10644 }
10645 *pc = clauses;
10646 }
10647
10648 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
10649 dlist = NULL;
10650 body = NULL;
10651 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
10652 fdp);
10653 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
10654
10655 lower_omp (gimple_omp_body_ptr (stmt), ctx);
10656
10657 /* Lower the header expressions. At this point, we can assume that
10658 the header is of the form:
10659
10660 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
10661
10662 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
10663 using the .omp_data_s mapping, if needed. */
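/* For example (illustrative only): if VAL2 is a non-invariant expression
   such as a mapped variable N, the loop below evaluates it into a fresh
   formal temporary emitted in BODY ahead of the GIMPLE_OMP_FOR and
   rewrites the header to use that temporary, so the header itself ends up
   referencing only invariants and simple temporaries.  */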
10664 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
10665 {
10666 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
10667 if (!is_gimple_min_invariant (*rhs_p))
10668 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
10669
10670 rhs_p = gimple_omp_for_final_ptr (stmt, i);
10671 if (!is_gimple_min_invariant (*rhs_p))
10672 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
10673
10674 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
10675 if (!is_gimple_min_invariant (*rhs_p))
10676 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
10677 }
10678
10679 /* Once lowered, extract the bounds and clauses. */
10680 extract_omp_for_data (stmt, &fd, NULL);
10681
10682 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
10683
10684 gimple_seq_add_stmt (&body, stmt);
10685 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
10686
10687 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
10688 fd.loop.v));
10689
10690 /* After the loop, add exit clauses. */
10691 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
10692
10693 if (ctx->cancellable)
10694 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
10695
10696 gimple_seq_add_seq (&body, dlist);
10697
10698 body = maybe_catch_exception (body);
10699
10700 /* Region exit marker goes at the end of the loop body. */
10701 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
10702 maybe_add_implicit_barrier_cancel (ctx, &body);
10703 pop_gimplify_context (new_stmt);
10704
10705 gimple_bind_append_vars (new_stmt, ctx->block_vars);
10706 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
10707 if (BLOCK_VARS (block))
10708 TREE_USED (block) = 1;
10709
10710 gimple_bind_set_body (new_stmt, body);
10711 gimple_omp_set_body (stmt, NULL);
10712 gimple_omp_for_set_pre_body (stmt, NULL);
10713 }
10714
10715 /* Callback for walk_stmts. Check if the current statement only contains
10716 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
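/* Used by lower_omp_taskreg: if the walk over the parallel body finds
   exactly one such workshare construct and nothing else of interest
   (e.g. #pragma omp parallel wrapping a single #pragma omp for), the
   enclosing parallel is marked as a combined parallel workshare.  */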
10717
10718 static tree
10719 check_combined_parallel (gimple_stmt_iterator *gsi_p,
10720 bool *handled_ops_p,
10721 struct walk_stmt_info *wi)
10722 {
10723 int *info = (int *) wi->info;
10724 gimple stmt = gsi_stmt (*gsi_p);
10725
10726 *handled_ops_p = true;
10727 switch (gimple_code (stmt))
10728 {
10729 WALK_SUBSTMTS;
10730
10731 case GIMPLE_OMP_FOR:
10732 case GIMPLE_OMP_SECTIONS:
10733 *info = *info == 0 ? 1 : -1;
10734 break;
10735 default:
10736 *info = -1;
10737 break;
10738 }
10739 return NULL;
10740 }
10741
10742 struct omp_taskcopy_context
10743 {
10744 /* This field must be at the beginning, as we do "inheritance": the
10745 callback functions for tree-inline.c (e.g., task_copyfn_copy_decl)
10746 receive a copy_body_data pointer that is cast back to an
10747 omp_taskcopy_context pointer. */
10748 copy_body_data cb;
10749 omp_context *ctx;
10750 };
10751
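/* copy_decl callback (installed as tcctx.cb.copy_decl below) used while
   remapping types for the task copyfn: variables that have a field in the
   task's sender record (sfield_map) get a fresh temporary; every other
   decl is returned unchanged.  */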
10752 static tree
10753 task_copyfn_copy_decl (tree var, copy_body_data *cb)
10754 {
10755 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
10756
10757 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
10758 return create_tmp_var (TREE_TYPE (var));
10759
10760 return var;
10761 }
10762
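/* Build a remapped copy of record type ORIG_TYPE for use in the task
   copyfn: each field is duplicated and its type, size and offset are
   remapped through TCCTX->cb, so that variably modified components refer
   to decls valid in the copyfn itself.  */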
10763 static tree
10764 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
10765 {
10766 tree name, new_fields = NULL, type, f;
10767
10768 type = lang_hooks.types.make_type (RECORD_TYPE);
10769 name = DECL_NAME (TYPE_NAME (orig_type));
10770 name = build_decl (gimple_location (tcctx->ctx->stmt),
10771 TYPE_DECL, name, type);
10772 TYPE_NAME (type) = name;
10773
10774 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
10775 {
10776 tree new_f = copy_node (f);
10777 DECL_CONTEXT (new_f) = type;
10778 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
10779 TREE_CHAIN (new_f) = new_fields;
10780 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
10781 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
10782 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
10783 &tcctx->cb, NULL);
10784 new_fields = new_f;
10785 tcctx->cb.decl_map->put (f, new_f);
10786 }
10787 TYPE_FIELDS (type) = nreverse (new_fields);
10788 layout_type (type);
10789 return type;
10790 }
10791
10792 /* Create the task copyfn: the helper the runtime calls to copy shared
   variable pointers and copy-construct firstprivate vars from the parent's
   data block into the task's own block.  */
10793
10794 static void
10795 create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
10796 {
10797 struct function *child_cfun;
10798 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
10799 tree record_type, srecord_type, bind, list;
10800 bool record_needs_remap = false, srecord_needs_remap = false;
10801 splay_tree_node n;
10802 struct omp_taskcopy_context tcctx;
10803 location_t loc = gimple_location (task_stmt);
10804
10805 child_fn = gimple_omp_task_copy_fn (task_stmt);
10806 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
10807 gcc_assert (child_cfun->cfg == NULL);
10808 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
10809
10810 /* Reset DECL_CONTEXT on function arguments. */
10811 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
10812 DECL_CONTEXT (t) = child_fn;
10813
10814 /* Populate the function. */
10815 push_gimplify_context ();
10816 push_cfun (child_cfun);
10817
10818 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
10819 TREE_SIDE_EFFECTS (bind) = 1;
10820 list = NULL;
10821 DECL_SAVED_TREE (child_fn) = bind;
10822 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
10823
10824 /* Remap src and dst argument types if needed. */
10825 record_type = ctx->record_type;
10826 srecord_type = ctx->srecord_type;
10827 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
10828 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
10829 {
10830 record_needs_remap = true;
10831 break;
10832 }
10833 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
10834 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
10835 {
10836 srecord_needs_remap = true;
10837 break;
10838 }
10839
10840 if (record_needs_remap || srecord_needs_remap)
10841 {
10842 memset (&tcctx, '\0', sizeof (tcctx));
10843 tcctx.cb.src_fn = ctx->cb.src_fn;
10844 tcctx.cb.dst_fn = child_fn;
10845 tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
10846 gcc_checking_assert (tcctx.cb.src_node);
10847 tcctx.cb.dst_node = tcctx.cb.src_node;
10848 tcctx.cb.src_cfun = ctx->cb.src_cfun;
10849 tcctx.cb.copy_decl = task_copyfn_copy_decl;
10850 tcctx.cb.eh_lp_nr = 0;
10851 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
10852 tcctx.cb.decl_map = new hash_map<tree, tree>;
10853 tcctx.ctx = ctx;
10854
10855 if (record_needs_remap)
10856 record_type = task_copyfn_remap_type (&tcctx, record_type);
10857 if (srecord_needs_remap)
10858 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
10859 }
10860 else
10861 tcctx.cb.decl_map = NULL;
10862
10863 arg = DECL_ARGUMENTS (child_fn);
10864 TREE_TYPE (arg) = build_pointer_type (record_type);
10865 sarg = DECL_CHAIN (arg);
10866 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
10867
10868 /* First pass: initialize temporaries used in record_type and srecord_type
10869 sizes and field offsets. */
10870 if (tcctx.cb.decl_map)
10871 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
10872 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
10873 {
10874 tree *p;
10875
10876 decl = OMP_CLAUSE_DECL (c);
10877 p = tcctx.cb.decl_map->get (decl);
10878 if (p == NULL)
10879 continue;
10880 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
10881 sf = (tree) n->value;
10882 sf = *tcctx.cb.decl_map->get (sf);
10883 src = build_simple_mem_ref_loc (loc, sarg);
10884 src = omp_build_component_ref (src, sf);
10885 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
10886 append_to_statement_list (t, &list);
10887 }
10888
10889 /* Second pass: copy shared var pointers and copy construct non-VLA
10890 firstprivate vars. */
10891 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
10892 switch (OMP_CLAUSE_CODE (c))
10893 {
10894 case OMP_CLAUSE_SHARED:
10895 decl = OMP_CLAUSE_DECL (c);
10896 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
10897 if (n == NULL)
10898 break;
10899 f = (tree) n->value;
10900 if (tcctx.cb.decl_map)
10901 f = *tcctx.cb.decl_map->get (f);
10902 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
10903 sf = (tree) n->value;
10904 if (tcctx.cb.decl_map)
10905 sf = *tcctx.cb.decl_map->get (sf);
10906 src = build_simple_mem_ref_loc (loc, sarg);
10907 src = omp_build_component_ref (src, sf);
10908 dst = build_simple_mem_ref_loc (loc, arg);
10909 dst = omp_build_component_ref (dst, f);
10910 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
10911 append_to_statement_list (t, &list);
10912 break;
10913 case OMP_CLAUSE_FIRSTPRIVATE:
10914 decl = OMP_CLAUSE_DECL (c);
10915 if (is_variable_sized (decl))
10916 break;
10917 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
10918 if (n == NULL)
10919 break;
10920 f = (tree) n->value;
10921 if (tcctx.cb.decl_map)
10922 f = *tcctx.cb.decl_map->get (f);
10923 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
10924 if (n != NULL)
10925 {
10926 sf = (tree) n->value;
10927 if (tcctx.cb.decl_map)
10928 sf = *tcctx.cb.decl_map->get (sf);
10929 src = build_simple_mem_ref_loc (loc, sarg);
10930 src = omp_build_component_ref (src, sf);
10931 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
10932 src = build_simple_mem_ref_loc (loc, src);
10933 }
10934 else
10935 src = decl;
10936 dst = build_simple_mem_ref_loc (loc, arg);
10937 dst = omp_build_component_ref (dst, f);
10938 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
10939 append_to_statement_list (t, &list);
10940 break;
10941 case OMP_CLAUSE_PRIVATE:
10942 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
10943 break;
10944 decl = OMP_CLAUSE_DECL (c);
10945 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
10946 f = (tree) n->value;
10947 if (tcctx.cb.decl_map)
10948 f = *tcctx.cb.decl_map->get (f);
10949 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
10950 if (n != NULL)
10951 {
10952 sf = (tree) n->value;
10953 if (tcctx.cb.decl_map)
10954 sf = *tcctx.cb.decl_map->get (sf);
10955 src = build_simple_mem_ref_loc (loc, sarg);
10956 src = omp_build_component_ref (src, sf);
10957 if (use_pointer_for_field (decl, NULL))
10958 src = build_simple_mem_ref_loc (loc, src);
10959 }
10960 else
10961 src = decl;
10962 dst = build_simple_mem_ref_loc (loc, arg);
10963 dst = omp_build_component_ref (dst, f);
10964 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
10965 append_to_statement_list (t, &list);
10966 break;
10967 default:
10968 break;
10969 }
10970
10971 /* Last pass: handle VLA firstprivates. */
10972 if (tcctx.cb.decl_map)
10973 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
10974 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
10975 {
10976 tree ind, ptr, df;
10977
10978 decl = OMP_CLAUSE_DECL (c);
10979 if (!is_variable_sized (decl))
10980 continue;
10981 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
10982 if (n == NULL)
10983 continue;
10984 f = (tree) n->value;
10985 f = *tcctx.cb.decl_map->get (f);
10986 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
10987 ind = DECL_VALUE_EXPR (decl);
10988 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
10989 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
10990 n = splay_tree_lookup (ctx->sfield_map,
10991 (splay_tree_key) TREE_OPERAND (ind, 0));
10992 sf = (tree) n->value;
10993 sf = *tcctx.cb.decl_map->get (sf);
10994 src = build_simple_mem_ref_loc (loc, sarg);
10995 src = omp_build_component_ref (src, sf);
10996 src = build_simple_mem_ref_loc (loc, src);
10997 dst = build_simple_mem_ref_loc (loc, arg);
10998 dst = omp_build_component_ref (dst, f);
10999 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
11000 append_to_statement_list (t, &list);
11001 n = splay_tree_lookup (ctx->field_map,
11002 (splay_tree_key) TREE_OPERAND (ind, 0));
11003 df = (tree) n->value;
11004 df = *tcctx.cb.decl_map->get (df);
11005 ptr = build_simple_mem_ref_loc (loc, arg);
11006 ptr = omp_build_component_ref (ptr, df);
11007 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
11008 build_fold_addr_expr_loc (loc, dst));
11009 append_to_statement_list (t, &list);
11010 }
11011
11012 t = build1 (RETURN_EXPR, void_type_node, NULL);
11013 append_to_statement_list (t, &list);
11014
11015 if (tcctx.cb.decl_map)
11016 delete tcctx.cb.decl_map;
11017 pop_gimplify_context (NULL);
11018 BIND_EXPR_BODY (bind) = list;
11019 pop_cfun ();
11020 }
11021
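/* Lower the OMP_CLAUSE_DEPEND clauses of task STMT into the dependence
   array handed to the runtime: element 0 holds the total number of depend
   items, element 1 the number of out/inout items, followed by the
   addresses of the out/inout items and then of the in items.  The
   statements building the array are appended to *ISEQ and the clobber
   killing it afterwards to *OSEQ; a new OMP_CLAUSE_DEPEND pointing at the
   array is prepended to the task's clauses.  For illustration,

     #pragma omp task depend(out: a) depend(in: b, c)

   produces (roughly) the array { 3, 1, &a, &b, &c }.  */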
11022 static void
11023 lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
11024 {
11025 tree c, clauses;
11026 gimple g;
11027 size_t n_in = 0, n_out = 0, idx = 2, i;
11028
11029 clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
11030 OMP_CLAUSE_DEPEND);
11031 gcc_assert (clauses);
11032 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
11033 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
11034 switch (OMP_CLAUSE_DEPEND_KIND (c))
11035 {
11036 case OMP_CLAUSE_DEPEND_IN:
11037 n_in++;
11038 break;
11039 case OMP_CLAUSE_DEPEND_OUT:
11040 case OMP_CLAUSE_DEPEND_INOUT:
11041 n_out++;
11042 break;
11043 default:
11044 gcc_unreachable ();
11045 }
11046 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
11047 tree array = create_tmp_var (type);
11048 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
11049 NULL_TREE);
11050 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
11051 gimple_seq_add_stmt (iseq, g);
11052 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
11053 NULL_TREE);
11054 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
11055 gimple_seq_add_stmt (iseq, g);
11056 for (i = 0; i < 2; i++)
11057 {
11058 if ((i ? n_in : n_out) == 0)
11059 continue;
11060 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
11061 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
11062 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
11063 {
11064 tree t = OMP_CLAUSE_DECL (c);
11065 t = fold_convert (ptr_type_node, t);
11066 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
11067 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
11068 NULL_TREE, NULL_TREE);
11069 g = gimple_build_assign (r, t);
11070 gimple_seq_add_stmt (iseq, g);
11071 }
11072 }
11073 tree *p = gimple_omp_task_clauses_ptr (stmt);
11074 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
11075 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
11076 OMP_CLAUSE_CHAIN (c) = *p;
11077 *p = c;
11078 tree clobber = build_constructor (type, NULL);
11079 TREE_THIS_VOLATILE (clobber) = 1;
11080 g = gimple_build_assign (array, clobber);
11081 gimple_seq_add_stmt (oseq, g);
11082 }
11083
11084 /* Lower the OpenMP parallel or task directive in the current statement
11085 in GSI_P. CTX holds context information for the directive. */
11086
11087 static void
11088 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
11089 {
11090 tree clauses;
11091 tree child_fn, t;
11092 gimple stmt = gsi_stmt (*gsi_p);
11093 gbind *par_bind, *bind, *dep_bind = NULL;
11094 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
11095 location_t loc = gimple_location (stmt);
11096
11097 clauses = gimple_omp_taskreg_clauses (stmt);
11098 par_bind
11099 = as_a <gbind *> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
11100 par_body = gimple_bind_body (par_bind);
11101 child_fn = ctx->cb.dst_fn;
11102 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
11103 && !gimple_omp_parallel_combined_p (stmt))
11104 {
11105 struct walk_stmt_info wi;
11106 int ws_num = 0;
11107
11108 memset (&wi, 0, sizeof (wi));
11109 wi.info = &ws_num;
11110 wi.val_only = true;
11111 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
11112 if (ws_num == 1)
11113 gimple_omp_parallel_set_combined_p (stmt, true);
11114 }
11115 gimple_seq dep_ilist = NULL;
11116 gimple_seq dep_olist = NULL;
11117 if (gimple_code (stmt) == GIMPLE_OMP_TASK
11118 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
11119 {
11120 push_gimplify_context ();
11121 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
11122 lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
11123 }
11124
11125 if (ctx->srecord_type)
11126 create_task_copyfn (as_a <gomp_task *> (stmt), ctx);
11127
11128 push_gimplify_context ();
11129
11130 par_olist = NULL;
11131 par_ilist = NULL;
11132 par_rlist = NULL;
11133 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
11134 lower_omp (&par_body, ctx);
11135 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
11136 lower_reduction_clauses (clauses, &par_rlist, ctx);
11137
11138 /* Declare all the variables created by mapping and the variables
11139 declared in the scope of the parallel body. */
11140 record_vars_into (ctx->block_vars, child_fn);
11141 record_vars_into (gimple_bind_vars (par_bind), child_fn);
11142
11143 if (ctx->record_type)
11144 {
11145 ctx->sender_decl
11146 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
11147 : ctx->record_type, ".omp_data_o");
11148 DECL_NAMELESS (ctx->sender_decl) = 1;
11149 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
11150 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
11151 }
11152
11153 olist = NULL;
11154 ilist = NULL;
11155 lower_send_clauses (clauses, &ilist, &olist, ctx);
11156 lower_send_shared_vars (&ilist, &olist, ctx);
11157
11158 if (ctx->record_type)
11159 {
11160 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
11161 TREE_THIS_VOLATILE (clobber) = 1;
11162 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
11163 clobber));
11164 }
11165
11166 /* Once all the expansions are done, sequence all the different
11167 fragments inside gimple_omp_body. */
11168
11169 new_body = NULL;
11170
11171 if (ctx->record_type)
11172 {
11173 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
11174 /* fixup_child_record_type might have changed receiver_decl's type. */
11175 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
11176 gimple_seq_add_stmt (&new_body,
11177 gimple_build_assign (ctx->receiver_decl, t));
11178 }
11179
11180 gimple_seq_add_seq (&new_body, par_ilist);
11181 gimple_seq_add_seq (&new_body, par_body);
11182 gimple_seq_add_seq (&new_body, par_rlist);
11183 if (ctx->cancellable)
11184 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
11185 gimple_seq_add_seq (&new_body, par_olist);
11186 new_body = maybe_catch_exception (new_body);
11187 if (gimple_code (stmt) == GIMPLE_OMP_TASK)
11188 gimple_seq_add_stmt (&new_body,
11189 gimple_build_omp_continue (integer_zero_node,
11190 integer_zero_node));
11191 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
11192 gimple_omp_set_body (stmt, new_body);
11193
11194 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
11195 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
11196 gimple_bind_add_seq (bind, ilist);
11197 gimple_bind_add_stmt (bind, stmt);
11198 gimple_bind_add_seq (bind, olist);
11199
11200 pop_gimplify_context (NULL);
11201
11202 if (dep_bind)
11203 {
11204 gimple_bind_add_seq (dep_bind, dep_ilist);
11205 gimple_bind_add_stmt (dep_bind, bind);
11206 gimple_bind_add_seq (dep_bind, dep_olist);
11207 pop_gimplify_context (dep_bind);
11208 }
11209 }
11210
11211 /* Lower the GIMPLE_OMP_TARGET in the current statement
11212 in GSI_P. CTX holds context information for the directive. */
11213
11214 static void
11215 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
11216 {
11217 tree clauses;
11218 tree child_fn, t, c;
11219 gomp_target *stmt = as_a <gomp_target *> (gsi_stmt (*gsi_p));
11220 gbind *tgt_bind, *bind;
11221 gimple_seq tgt_body, olist, ilist, orlist, irlist, new_body;
11222 location_t loc = gimple_location (stmt);
11223 bool offloaded, data_region;
11224 unsigned int map_cnt = 0;
11225
11226 offloaded = is_gimple_omp_offloaded (stmt);
11227 switch (gimple_omp_target_kind (stmt))
11228 {
11229 case GF_OMP_TARGET_KIND_REGION:
11230 case GF_OMP_TARGET_KIND_UPDATE:
11231 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
11232 case GF_OMP_TARGET_KIND_OACC_KERNELS:
11233 case GF_OMP_TARGET_KIND_OACC_UPDATE:
11234 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
11235 data_region = false;
11236 break;
11237 case GF_OMP_TARGET_KIND_DATA:
11238 case GF_OMP_TARGET_KIND_OACC_DATA:
11239 data_region = true;
11240 break;
11241 default:
11242 gcc_unreachable ();
11243 }
11244
11245 clauses = gimple_omp_target_clauses (stmt);
11246
11247 tgt_bind = NULL;
11248 tgt_body = NULL;
11249 if (offloaded)
11250 {
11251 tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
11252 tgt_body = gimple_bind_body (tgt_bind);
11253 }
11254 else if (data_region)
11255 tgt_body = gimple_omp_body (stmt);
11256 child_fn = ctx->cb.dst_fn;
11257
11258 push_gimplify_context ();
11259
11260 irlist = NULL;
11261 orlist = NULL;
11262 if (offloaded
11263 && is_gimple_omp_oacc (stmt))
11264 oacc_process_reduction_data (&tgt_body, &irlist, &orlist, ctx);
11265
11266 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
11267 switch (OMP_CLAUSE_CODE (c))
11268 {
11269 tree var, x;
11270
11271 default:
11272 break;
11273 case OMP_CLAUSE_MAP:
11274 #ifdef ENABLE_CHECKING
11275 /* First check what we're prepared to handle in the following. */
11276 switch (OMP_CLAUSE_MAP_KIND (c))
11277 {
11278 case GOMP_MAP_ALLOC:
11279 case GOMP_MAP_TO:
11280 case GOMP_MAP_FROM:
11281 case GOMP_MAP_TOFROM:
11282 case GOMP_MAP_POINTER:
11283 case GOMP_MAP_TO_PSET:
11284 break;
11285 case GOMP_MAP_FORCE_ALLOC:
11286 case GOMP_MAP_FORCE_TO:
11287 case GOMP_MAP_FORCE_FROM:
11288 case GOMP_MAP_FORCE_TOFROM:
11289 case GOMP_MAP_FORCE_PRESENT:
11290 case GOMP_MAP_FORCE_DEALLOC:
11291 case GOMP_MAP_FORCE_DEVICEPTR:
11292 gcc_assert (is_gimple_omp_oacc (stmt));
11293 break;
11294 default:
11295 gcc_unreachable ();
11296 }
11297 #endif
11298 /* FALLTHRU */
11299 case OMP_CLAUSE_TO:
11300 case OMP_CLAUSE_FROM:
11301 var = OMP_CLAUSE_DECL (c);
11302 if (!DECL_P (var))
11303 {
11304 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
11305 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
11306 map_cnt++;
11307 continue;
11308 }
11309
11310 if (DECL_SIZE (var)
11311 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
11312 {
11313 tree var2 = DECL_VALUE_EXPR (var);
11314 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
11315 var2 = TREE_OPERAND (var2, 0);
11316 gcc_assert (DECL_P (var2));
11317 var = var2;
11318 }
11319
11320 if (!maybe_lookup_field (var, ctx))
11321 continue;
11322
11323 if (offloaded)
11324 {
11325 x = build_receiver_ref (var, true, ctx);
11326 tree new_var = lookup_decl (var, ctx);
11327 if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
11328 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
11329 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
11330 x = build_simple_mem_ref (x);
11331 SET_DECL_VALUE_EXPR (new_var, x);
11332 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
11333 }
11334 map_cnt++;
11335 }
11336
11337 if (offloaded)
11338 {
11339 target_nesting_level++;
11340 lower_omp (&tgt_body, ctx);
11341 target_nesting_level--;
11342 }
11343 else if (data_region)
11344 lower_omp (&tgt_body, ctx);
11345
11346 if (offloaded)
11347 {
11348 /* Declare all the variables created by mapping and the variables
11349 declared in the scope of the target body. */
11350 record_vars_into (ctx->block_vars, child_fn);
11351 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
11352 }
11353
11354 olist = NULL;
11355 ilist = NULL;
11356 if (ctx->record_type)
11357 {
11358 ctx->sender_decl
11359 = create_tmp_var (ctx->record_type, ".omp_data_arr");
11360 DECL_NAMELESS (ctx->sender_decl) = 1;
11361 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
11362 t = make_tree_vec (3);
11363 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
11364 TREE_VEC_ELT (t, 1)
11365 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
11366 ".omp_data_sizes");
11367 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
11368 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
11369 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
11370 tree tkind_type;
11371 int talign_shift;
11372 if (is_gimple_omp_oacc (stmt))
11373 {
11374 tkind_type = short_unsigned_type_node;
11375 talign_shift = 8;
11376 }
11377 else
11378 {
11379 tkind_type = unsigned_char_type_node;
11380 talign_shift = 3;
11381 }
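/* Each element of .omp_data_kinds describes one mapping: the low
   talign_shift bits hold the GOMP_MAP_* kind and the bits above hold
   ceil_log2 of the required alignment.  For illustration, an OpenMP
   map(tofrom:) of an 8-byte aligned object is encoded as
   GOMP_MAP_TOFROM | (3 << 3).  */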
11382 TREE_VEC_ELT (t, 2)
11383 = create_tmp_var (build_array_type_nelts (tkind_type, map_cnt),
11384 ".omp_data_kinds");
11385 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
11386 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
11387 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
11388 gimple_omp_target_set_data_arg (stmt, t);
11389
11390 vec<constructor_elt, va_gc> *vsize;
11391 vec<constructor_elt, va_gc> *vkind;
11392 vec_alloc (vsize, map_cnt);
11393 vec_alloc (vkind, map_cnt);
11394 unsigned int map_idx = 0;
11395
11396 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
11397 switch (OMP_CLAUSE_CODE (c))
11398 {
11399 tree ovar, nc;
11400
11401 default:
11402 break;
11403 case OMP_CLAUSE_MAP:
11404 case OMP_CLAUSE_TO:
11405 case OMP_CLAUSE_FROM:
11406 nc = c;
11407 ovar = OMP_CLAUSE_DECL (c);
11408 if (!DECL_P (ovar))
11409 {
11410 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
11411 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
11412 {
11413 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
11414 == get_base_address (ovar));
11415 nc = OMP_CLAUSE_CHAIN (c);
11416 ovar = OMP_CLAUSE_DECL (nc);
11417 }
11418 else
11419 {
11420 tree x = build_sender_ref (ovar, ctx);
11421 tree v
11422 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
11423 gimplify_assign (x, v, &ilist);
11424 nc = NULL_TREE;
11425 }
11426 }
11427 else
11428 {
11429 if (DECL_SIZE (ovar)
11430 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
11431 {
11432 tree ovar2 = DECL_VALUE_EXPR (ovar);
11433 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
11434 ovar2 = TREE_OPERAND (ovar2, 0);
11435 gcc_assert (DECL_P (ovar2));
11436 ovar = ovar2;
11437 }
11438 if (!maybe_lookup_field (ovar, ctx))
11439 continue;
11440 }
11441
11442 unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
11443 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
11444 talign = DECL_ALIGN_UNIT (ovar);
11445 if (nc)
11446 {
11447 tree var = lookup_decl_in_outer_ctx (ovar, ctx);
11448 tree x = build_sender_ref (ovar, ctx);
11449 if (maybe_lookup_oacc_reduction (var, ctx))
11450 {
11451 gcc_checking_assert (offloaded
11452 && is_gimple_omp_oacc (stmt));
11453 gimplify_assign (x, var, &ilist);
11454 }
11455 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
11456 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
11457 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
11458 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
11459 {
11460 gcc_assert (offloaded);
11461 tree avar
11462 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)));
11463 mark_addressable (avar);
11464 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
11465 talign = DECL_ALIGN_UNIT (avar);
11466 avar = build_fold_addr_expr (avar);
11467 gimplify_assign (x, avar, &ilist);
11468 }
11469 else if (is_gimple_reg (var))
11470 {
11471 gcc_assert (offloaded);
11472 tree avar = create_tmp_var (TREE_TYPE (var));
11473 mark_addressable (avar);
11474 enum gomp_map_kind map_kind = OMP_CLAUSE_MAP_KIND (c);
11475 if (GOMP_MAP_COPY_TO_P (map_kind)
11476 || map_kind == GOMP_MAP_POINTER
11477 || map_kind == GOMP_MAP_TO_PSET
11478 || map_kind == GOMP_MAP_FORCE_DEVICEPTR)
11479 gimplify_assign (avar, var, &ilist);
11480 avar = build_fold_addr_expr (avar);
11481 gimplify_assign (x, avar, &ilist);
11482 if ((GOMP_MAP_COPY_FROM_P (map_kind)
11483 || map_kind == GOMP_MAP_FORCE_DEVICEPTR)
11484 && !TYPE_READONLY (TREE_TYPE (var)))
11485 {
11486 x = build_sender_ref (ovar, ctx);
11487 x = build_simple_mem_ref (x);
11488 gimplify_assign (var, x, &olist);
11489 }
11490 }
11491 else
11492 {
11493 var = build_fold_addr_expr (var);
11494 gimplify_assign (x, var, &ilist);
11495 }
11496 }
11497 tree s = OMP_CLAUSE_SIZE (c);
11498 if (s == NULL_TREE)
11499 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
11500 s = fold_convert (size_type_node, s);
11501 tree purpose = size_int (map_idx++);
11502 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
11503 if (TREE_CODE (s) != INTEGER_CST)
11504 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
11505
11506 unsigned HOST_WIDE_INT tkind;
11507 switch (OMP_CLAUSE_CODE (c))
11508 {
11509 case OMP_CLAUSE_MAP:
11510 tkind = OMP_CLAUSE_MAP_KIND (c);
11511 break;
11512 case OMP_CLAUSE_TO:
11513 tkind = GOMP_MAP_TO;
11514 break;
11515 case OMP_CLAUSE_FROM:
11516 tkind = GOMP_MAP_FROM;
11517 break;
11518 default:
11519 gcc_unreachable ();
11520 }
11521 gcc_checking_assert (tkind
11522 < (HOST_WIDE_INT_C (1U) << talign_shift));
11523 talign = ceil_log2 (talign);
11524 tkind |= talign << talign_shift;
11525 gcc_checking_assert (tkind
11526 <= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
11527 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
11528 build_int_cstu (tkind_type, tkind));
11529 if (nc && nc != c)
11530 c = nc;
11531 }
11532
11533 gcc_assert (map_idx == map_cnt);
11534
11535 DECL_INITIAL (TREE_VEC_ELT (t, 1))
11536 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
11537 DECL_INITIAL (TREE_VEC_ELT (t, 2))
11538 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
11539 if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
11540 {
11541 gimple_seq initlist = NULL;
11542 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
11543 TREE_VEC_ELT (t, 1)),
11544 &initlist, true, NULL_TREE);
11545 gimple_seq_add_seq (&ilist, initlist);
11546
11547 tree clobber = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)),
11548 NULL);
11549 TREE_THIS_VOLATILE (clobber) = 1;
11550 gimple_seq_add_stmt (&olist,
11551 gimple_build_assign (TREE_VEC_ELT (t, 1),
11552 clobber));
11553 }
11554
11555 tree clobber = build_constructor (ctx->record_type, NULL);
11556 TREE_THIS_VOLATILE (clobber) = 1;
11557 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
11558 clobber));
11559 }
11560
11561 /* Once all the expansions are done, sequence all the different
11562 fragments inside gimple_omp_body. */
11563
11564 new_body = NULL;
11565
11566 if (offloaded
11567 && ctx->record_type)
11568 {
11569 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
11570 /* fixup_child_record_type might have changed receiver_decl's type. */
11571 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
11572 gimple_seq_add_stmt (&new_body,
11573 gimple_build_assign (ctx->receiver_decl, t));
11574 }
11575
11576 if (offloaded)
11577 {
11578 gimple_seq_add_seq (&new_body, tgt_body);
11579 new_body = maybe_catch_exception (new_body);
11580 }
11581 else if (data_region)
11582 new_body = tgt_body;
11583 if (offloaded || data_region)
11584 {
11585 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
11586 gimple_omp_set_body (stmt, new_body);
11587 }
11588
11589 bind = gimple_build_bind (NULL, NULL,
11590 tgt_bind ? gimple_bind_block (tgt_bind)
11591 : NULL_TREE);
11592 gsi_replace (gsi_p, bind, true);
11593 gimple_bind_add_seq (bind, irlist);
11594 gimple_bind_add_seq (bind, ilist);
11595 gimple_bind_add_stmt (bind, stmt);
11596 gimple_bind_add_seq (bind, olist);
11597 gimple_bind_add_seq (bind, orlist);
11598
11599 pop_gimplify_context (NULL);
11600 }
11601
11602 /* Lower code for an OpenMP teams directive. */
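/* In outline (illustrative only), #pragma omp teams num_teams(N)
   thread_limit(M) BODY becomes a bind containing the privatization
   sequence, the GIMPLE_OMP_TEAMS statement, a call to GOMP_teams (N, M),
   BODY, the reduction and destruction sequences, and a final OMP
   return.  */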
11603
11604 static void
11605 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
11606 {
11607 gomp_teams *teams_stmt = as_a <gomp_teams *> (gsi_stmt (*gsi_p));
11608 push_gimplify_context ();
11609
11610 tree block = make_node (BLOCK);
11611 gbind *bind = gimple_build_bind (NULL, NULL, block);
11612 gsi_replace (gsi_p, bind, true);
11613 gimple_seq bind_body = NULL;
11614 gimple_seq dlist = NULL;
11615 gimple_seq olist = NULL;
11616
11617 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
11618 OMP_CLAUSE_NUM_TEAMS);
11619 if (num_teams == NULL_TREE)
11620 num_teams = build_int_cst (unsigned_type_node, 0);
11621 else
11622 {
11623 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
11624 num_teams = fold_convert (unsigned_type_node, num_teams);
11625 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
11626 }
11627 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
11628 OMP_CLAUSE_THREAD_LIMIT);
11629 if (thread_limit == NULL_TREE)
11630 thread_limit = build_int_cst (unsigned_type_node, 0);
11631 else
11632 {
11633 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
11634 thread_limit = fold_convert (unsigned_type_node, thread_limit);
11635 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
11636 fb_rvalue);
11637 }
11638
11639 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
11640 &bind_body, &dlist, ctx, NULL);
11641 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
11642 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
11643 gimple_seq_add_stmt (&bind_body, teams_stmt);
11644
11645 location_t loc = gimple_location (teams_stmt);
11646 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
11647 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
11648 gimple_set_location (call, loc);
11649 gimple_seq_add_stmt (&bind_body, call);
11650
11651 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
11652 gimple_omp_set_body (teams_stmt, NULL);
11653 gimple_seq_add_seq (&bind_body, olist);
11654 gimple_seq_add_seq (&bind_body, dlist);
11655 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
11656 gimple_bind_set_body (bind, bind_body);
11657
11658 pop_gimplify_context (bind);
11659
11660 gimple_bind_append_vars (bind, ctx->block_vars);
11661 BLOCK_VARS (block) = ctx->block_vars;
11662 if (BLOCK_VARS (block))
11663 TREE_USED (block) = 1;
11664 }
11665
11666
11667 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
11668 regimplified. If DATA is non-NULL, lower_omp_1 is outside
11669 of OMP context, but with task_shared_vars set. */
11670
11671 static tree
11672 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
11673 void *data)
11674 {
11675 tree t = *tp;
11676
11677 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
11678 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
11679 return t;
11680
11681 if (task_shared_vars
11682 && DECL_P (t)
11683 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
11684 return t;
11685
11686 /* If a global variable has been privatized, TREE_CONSTANT on
11687 ADDR_EXPR might be wrong. */
11688 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
11689 recompute_tree_invariant_for_addr_expr (t);
11690
11691 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
11692 return NULL_TREE;
11693 }
11694
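/* Worker for lower_omp: lower the statement at GSI_P within context CTX.
   CTX is the innermost enclosing OMP context, or NULL when lowering
   statements outside of any construct (then mainly references to
   task_shared_vars need regimplifying).  */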
11695 static void
11696 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
11697 {
11698 gimple stmt = gsi_stmt (*gsi_p);
11699 struct walk_stmt_info wi;
11700 gcall *call_stmt;
11701
11702 if (gimple_has_location (stmt))
11703 input_location = gimple_location (stmt);
11704
11705 if (task_shared_vars)
11706 memset (&wi, '\0', sizeof (wi));
11707
11708 /* If we have issued syntax errors, avoid doing any heavy lifting.
11709 Just replace the OMP directives with a NOP to avoid
11710 confusing RTL expansion. */
11711 if (seen_error () && is_gimple_omp (stmt))
11712 {
11713 gsi_replace (gsi_p, gimple_build_nop (), true);
11714 return;
11715 }
11716
11717 switch (gimple_code (stmt))
11718 {
11719 case GIMPLE_COND:
11720 {
11721 gcond *cond_stmt = as_a <gcond *> (stmt);
11722 if ((ctx || task_shared_vars)
11723 && (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
11724 lower_omp_regimplify_p,
11725 ctx ? NULL : &wi, NULL)
11726 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
11727 lower_omp_regimplify_p,
11728 ctx ? NULL : &wi, NULL)))
11729 gimple_regimplify_operands (cond_stmt, gsi_p);
11730 }
11731 break;
11732 case GIMPLE_CATCH:
11733 lower_omp (gimple_catch_handler_ptr (as_a <gcatch *> (stmt)), ctx);
11734 break;
11735 case GIMPLE_EH_FILTER:
11736 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
11737 break;
11738 case GIMPLE_TRY:
11739 lower_omp (gimple_try_eval_ptr (stmt), ctx);
11740 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
11741 break;
11742 case GIMPLE_TRANSACTION:
11743 lower_omp (gimple_transaction_body_ptr (
11744 as_a <gtransaction *> (stmt)),
11745 ctx);
11746 break;
11747 case GIMPLE_BIND:
11748 lower_omp (gimple_bind_body_ptr (as_a <gbind *> (stmt)), ctx);
11749 break;
11750 case GIMPLE_OMP_PARALLEL:
11751 case GIMPLE_OMP_TASK:
11752 ctx = maybe_lookup_ctx (stmt);
11753 gcc_assert (ctx);
11754 if (ctx->cancellable)
11755 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
11756 lower_omp_taskreg (gsi_p, ctx);
11757 break;
11758 case GIMPLE_OMP_FOR:
11759 ctx = maybe_lookup_ctx (stmt);
11760 gcc_assert (ctx);
11761 if (ctx->cancellable)
11762 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
11763 lower_omp_for (gsi_p, ctx);
11764 break;
11765 case GIMPLE_OMP_SECTIONS:
11766 ctx = maybe_lookup_ctx (stmt);
11767 gcc_assert (ctx);
11768 if (ctx->cancellable)
11769 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
11770 lower_omp_sections (gsi_p, ctx);
11771 break;
11772 case GIMPLE_OMP_SINGLE:
11773 ctx = maybe_lookup_ctx (stmt);
11774 gcc_assert (ctx);
11775 lower_omp_single (gsi_p, ctx);
11776 break;
11777 case GIMPLE_OMP_MASTER:
11778 ctx = maybe_lookup_ctx (stmt);
11779 gcc_assert (ctx);
11780 lower_omp_master (gsi_p, ctx);
11781 break;
11782 case GIMPLE_OMP_TASKGROUP:
11783 ctx = maybe_lookup_ctx (stmt);
11784 gcc_assert (ctx);
11785 lower_omp_taskgroup (gsi_p, ctx);
11786 break;
11787 case GIMPLE_OMP_ORDERED:
11788 ctx = maybe_lookup_ctx (stmt);
11789 gcc_assert (ctx);
11790 lower_omp_ordered (gsi_p, ctx);
11791 break;
11792 case GIMPLE_OMP_CRITICAL:
11793 ctx = maybe_lookup_ctx (stmt);
11794 gcc_assert (ctx);
11795 lower_omp_critical (gsi_p, ctx);
11796 break;
11797 case GIMPLE_OMP_ATOMIC_LOAD:
11798 if ((ctx || task_shared_vars)
11799 && walk_tree (gimple_omp_atomic_load_rhs_ptr (
11800 as_a <gomp_atomic_load *> (stmt)),
11801 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
11802 gimple_regimplify_operands (stmt, gsi_p);
11803 break;
11804 case GIMPLE_OMP_TARGET:
11805 ctx = maybe_lookup_ctx (stmt);
11806 gcc_assert (ctx);
11807 lower_omp_target (gsi_p, ctx);
11808 break;
11809 case GIMPLE_OMP_TEAMS:
11810 ctx = maybe_lookup_ctx (stmt);
11811 gcc_assert (ctx);
11812 lower_omp_teams (gsi_p, ctx);
11813 break;
11814 case GIMPLE_CALL:
11815 tree fndecl;
11816 call_stmt = as_a <gcall *> (stmt);
11817 fndecl = gimple_call_fndecl (call_stmt);
11818 if (fndecl
11819 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
11820 switch (DECL_FUNCTION_CODE (fndecl))
11821 {
11822 case BUILT_IN_GOMP_BARRIER:
11823 if (ctx == NULL)
11824 break;
11825 /* FALLTHRU */
11826 case BUILT_IN_GOMP_CANCEL:
11827 case BUILT_IN_GOMP_CANCELLATION_POINT:
11828 omp_context *cctx;
11829 cctx = ctx;
11830 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
11831 cctx = cctx->outer;
11832 gcc_assert (gimple_call_lhs (call_stmt) == NULL_TREE);
11833 if (!cctx->cancellable)
11834 {
11835 if (DECL_FUNCTION_CODE (fndecl)
11836 == BUILT_IN_GOMP_CANCELLATION_POINT)
11837 {
11838 stmt = gimple_build_nop ();
11839 gsi_replace (gsi_p, stmt, false);
11840 }
11841 break;
11842 }
11843 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
11844 {
11845 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
11846 gimple_call_set_fndecl (call_stmt, fndecl);
11847 gimple_call_set_fntype (call_stmt, TREE_TYPE (fndecl));
11848 }
11849 tree lhs;
11850 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)));
11851 gimple_call_set_lhs (call_stmt, lhs);
11852 tree fallthru_label;
11853 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
11854 gimple g;
11855 g = gimple_build_label (fallthru_label);
11856 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
11857 g = gimple_build_cond (NE_EXPR, lhs,
11858 fold_convert (TREE_TYPE (lhs),
11859 boolean_false_node),
11860 cctx->cancel_label, fallthru_label);
11861 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
11862 break;
11863 default:
11864 break;
11865 }
11866 /* FALLTHRU */
11867 default:
11868 if ((ctx || task_shared_vars)
11869 && walk_gimple_op (stmt, lower_omp_regimplify_p,
11870 ctx ? NULL : &wi))
11871 {
11872 /* Just remove clobbers; this should happen only if we have
11873 "privatized" local addressable variables in SIMD regions.
11874 The clobber isn't needed in that case, and gimplifying the address
11875 of the ARRAY_REF into a pointer and creating a MEM_REF based
11876 clobber would create worse code than we get with the clobber
11877 dropped. */
11878 if (gimple_clobber_p (stmt))
11879 {
11880 gsi_replace (gsi_p, gimple_build_nop (), true);
11881 break;
11882 }
11883 gimple_regimplify_operands (stmt, gsi_p);
11884 }
11885 break;
11886 }
11887 }
11888
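/* Lower all OMP constructs in the statement sequence *BODY within context
   CTX, then fold statements inside offloading and taskreg regions, which
   gimplification deliberately left unfolded.  */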
11889 static void
11890 lower_omp (gimple_seq *body, omp_context *ctx)
11891 {
11892 location_t saved_location = input_location;
11893 gimple_stmt_iterator gsi;
11894 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
11895 lower_omp_1 (&gsi, ctx);
11896 /* During gimplification, we haven't folded statements inside offloading
11897 or taskreg regions (gimplify.c:maybe_fold_stmt); do that now. */
11898 if (target_nesting_level || taskreg_nesting_level)
11899 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
11900 fold_stmt (&gsi);
11901 input_location = saved_location;
11902 }
11903 \f
11904 /* Main entry point. */
11905
11906 static unsigned int
11907 execute_lower_omp (void)
11908 {
11909 gimple_seq body;
11910 int i;
11911 omp_context *ctx;
11912
11913 /* This pass always runs, to provide PROP_gimple_lomp.
11914 But often, there is nothing to do. */
11915 if (flag_cilkplus == 0 && flag_openacc == 0 && flag_openmp == 0
11916 && flag_openmp_simd == 0)
11917 return 0;
11918
11919 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
11920 delete_omp_context);
11921
11922 body = gimple_body (current_function_decl);
11923 scan_omp (&body, NULL);
11924 gcc_assert (taskreg_nesting_level == 0);
11925 FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
11926 finish_taskreg_scan (ctx);
11927 taskreg_contexts.release ();
11928
11929 if (all_contexts->root)
11930 {
11931 if (task_shared_vars)
11932 push_gimplify_context ();
11933 lower_omp (&body, NULL);
11934 if (task_shared_vars)
11935 pop_gimplify_context (NULL);
11936 }
11937
11938 if (all_contexts)
11939 {
11940 splay_tree_delete (all_contexts);
11941 all_contexts = NULL;
11942 }
11943 BITMAP_FREE (task_shared_vars);
11944 return 0;
11945 }
11946
11947 static const pass_data pass_data_lower_omp =
11948 {
11949 GIMPLE_PASS, /* type */
11950 "omplower", /* name */
11951 OPTGROUP_NONE, /* optinfo_flags */
11952 TV_NONE, /* tv_id */
11953 PROP_gimple_any, /* properties_required */
11954 PROP_gimple_lomp, /* properties_provided */
11955 0, /* properties_destroyed */
11956 0, /* todo_flags_start */
11957 0, /* todo_flags_finish */
11958 };
11959
11960 class pass_lower_omp GCC_FINAL : public gimple_opt_pass
11961 {
11962 public:
11963 pass_lower_omp (gcc::context *ctxt)
11964 : gimple_opt_pass (pass_data_lower_omp, ctxt)
11965 {}
11966
11967 /* opt_pass methods: */
11968 virtual unsigned int execute (function *) { return execute_lower_omp (); }
11969
11970 }; // class pass_lower_omp
11971
11972 gimple_opt_pass *
11973 make_pass_lower_omp (gcc::context *ctxt)
11974 {
11975 return new pass_lower_omp (ctxt);
11976 }
11977 \f
11978 /* The following is a utility to diagnose structured block violations.
11979 It is not part of the "omplower" pass, as that's invoked too late. It
11980 should be invoked by the respective front ends after gimplification. */
11981
11982 static splay_tree all_labels;
11983
11984 /* Check for mismatched contexts and generate an error if needed. Return
11985 true if an error is detected. */
11986
11987 static bool
11988 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
11989 gimple branch_ctx, gimple label_ctx)
11990 {
11991 gcc_checking_assert (!branch_ctx || is_gimple_omp (branch_ctx));
11992 gcc_checking_assert (!label_ctx || is_gimple_omp (label_ctx));
11993
11994 if (label_ctx == branch_ctx)
11995 return false;
11996
11997 const char* kind = NULL;
11998
11999 if (flag_cilkplus)
12000 {
12001 if ((branch_ctx
12002 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
12003 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
12004 || (label_ctx
12005 && gimple_code (label_ctx) == GIMPLE_OMP_FOR
12006 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
12007 kind = "Cilk Plus";
12008 }
12009 if (flag_openacc)
12010 {
12011 if ((branch_ctx && is_gimple_omp_oacc (branch_ctx))
12012 || (label_ctx && is_gimple_omp_oacc (label_ctx)))
12013 {
12014 gcc_checking_assert (kind == NULL);
12015 kind = "OpenACC";
12016 }
12017 }
12018 if (kind == NULL)
12019 {
12020 gcc_checking_assert (flag_openmp);
12021 kind = "OpenMP";
12022 }
12023
12024 /*
12025 Previously we kept track of the label's entire context in diagnose_sb_[12]
12026 so we could traverse it and issue a correct "exit" or "enter" error
12027 message upon a structured block violation.
12028
12029 We built the context by building a list with tree_cons'ing, but there is
12030 no easy counterpart in gimple tuples. It seems like far too much work
12031 for issuing exit/enter error messages. If someone really misses the
12032 distinct error message... patches welcome.
12033 */
12034
12035 #if 0
12036 /* Try to avoid confusing the user by producing an error message
12037 with correct "exit" or "enter" verbiage. We prefer "exit"
12038 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
12039 if (branch_ctx == NULL)
12040 exit_p = false;
12041 else
12042 {
12043 while (label_ctx)
12044 {
12045 if (TREE_VALUE (label_ctx) == branch_ctx)
12046 {
12047 exit_p = false;
12048 break;
12049 }
12050 label_ctx = TREE_CHAIN (label_ctx);
12051 }
12052 }
12053
12054 if (exit_p)
12055 error ("invalid exit from %s structured block", kind);
12056 else
12057 error ("invalid entry to %s structured block", kind);
12058 #endif
12059
12060 /* If it's obvious we have an invalid entry, be specific about the error. */
12061 if (branch_ctx == NULL)
12062 error ("invalid entry to %s structured block", kind);
12063 else
12064 {
12065 /* Otherwise, be vague and lazy, but efficient. */
12066 error ("invalid branch to/from %s structured block", kind);
12067 }
12068
12069 gsi_replace (gsi_p, gimple_build_nop (), false);
12070 return true;
12071 }
12072
12073 /* Pass 1: Create a minimal tree of structured blocks, and record
12074 where each label is found. */
12075
12076 static tree
12077 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
12078 struct walk_stmt_info *wi)
12079 {
12080 gimple context = (gimple) wi->info;
12081 gimple inner_context;
12082 gimple stmt = gsi_stmt (*gsi_p);
12083
12084 *handled_ops_p = true;
12085
12086 switch (gimple_code (stmt))
12087 {
12088 WALK_SUBSTMTS;
12089
12090 case GIMPLE_OMP_PARALLEL:
12091 case GIMPLE_OMP_TASK:
12092 case GIMPLE_OMP_SECTIONS:
12093 case GIMPLE_OMP_SINGLE:
12094 case GIMPLE_OMP_SECTION:
12095 case GIMPLE_OMP_MASTER:
12096 case GIMPLE_OMP_ORDERED:
12097 case GIMPLE_OMP_CRITICAL:
12098 case GIMPLE_OMP_TARGET:
12099 case GIMPLE_OMP_TEAMS:
12100 case GIMPLE_OMP_TASKGROUP:
12101 /* The minimal context here is just the current OMP construct. */
12102 inner_context = stmt;
12103 wi->info = inner_context;
12104 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
12105 wi->info = context;
12106 break;
12107
12108 case GIMPLE_OMP_FOR:
12109 inner_context = stmt;
12110 wi->info = inner_context;
12111 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
12112 walk them. */
12113 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
12114 diagnose_sb_1, NULL, wi);
12115 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
12116 wi->info = context;
12117 break;
12118
12119 case GIMPLE_LABEL:
12120 splay_tree_insert (all_labels,
12121 (splay_tree_key) gimple_label_label (
12122 as_a <glabel *> (stmt)),
12123 (splay_tree_value) context);
12124 break;
12125
12126 default:
12127 break;
12128 }
12129
12130 return NULL_TREE;
12131 }
12132
12133 /* Pass 2: Check each branch and see if its context differs from
12134 the destination label's context. */
12135
12136 static tree
12137 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
12138 struct walk_stmt_info *wi)
12139 {
12140 gimple context = (gimple) wi->info;
12141 splay_tree_node n;
12142 gimple stmt = gsi_stmt (*gsi_p);
12143
12144 *handled_ops_p = true;
12145
12146 switch (gimple_code (stmt))
12147 {
12148 WALK_SUBSTMTS;
12149
12150 case GIMPLE_OMP_PARALLEL:
12151 case GIMPLE_OMP_TASK:
12152 case GIMPLE_OMP_SECTIONS:
12153 case GIMPLE_OMP_SINGLE:
12154 case GIMPLE_OMP_SECTION:
12155 case GIMPLE_OMP_MASTER:
12156 case GIMPLE_OMP_ORDERED:
12157 case GIMPLE_OMP_CRITICAL:
12158 case GIMPLE_OMP_TARGET:
12159 case GIMPLE_OMP_TEAMS:
12160 case GIMPLE_OMP_TASKGROUP:
12161 wi->info = stmt;
12162 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
12163 wi->info = context;
12164 break;
12165
12166 case GIMPLE_OMP_FOR:
12167 wi->info = stmt;
12168 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
12169 walk them. */
12170 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
12171 diagnose_sb_2, NULL, wi);
12172 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
12173 wi->info = context;
12174 break;
12175
12176 case GIMPLE_COND:
12177 {
12178 gcond *cond_stmt = as_a <gcond *> (stmt);
12179 tree lab = gimple_cond_true_label (cond_stmt);
12180 if (lab)
12181 {
12182 n = splay_tree_lookup (all_labels,
12183 (splay_tree_key) lab);
12184 diagnose_sb_0 (gsi_p, context,
12185 n ? (gimple) n->value : NULL);
12186 }
12187 lab = gimple_cond_false_label (cond_stmt);
12188 if (lab)
12189 {
12190 n = splay_tree_lookup (all_labels,
12191 (splay_tree_key) lab);
12192 diagnose_sb_0 (gsi_p, context,
12193 n ? (gimple) n->value : NULL);
12194 }
12195 }
12196 break;
12197
12198 case GIMPLE_GOTO:
12199 {
12200 tree lab = gimple_goto_dest (stmt);
12201 if (TREE_CODE (lab) != LABEL_DECL)
12202 break;
12203
12204 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
12205 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
12206 }
12207 break;
12208
12209 case GIMPLE_SWITCH:
12210 {
12211 gswitch *switch_stmt = as_a <gswitch *> (stmt);
12212 unsigned int i;
12213 for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
12214 {
12215 tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
12216 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
12217 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
12218 break;
12219 }
12220 }
12221 break;
12222
12223 case GIMPLE_RETURN:
12224 diagnose_sb_0 (gsi_p, context, NULL);
12225 break;
12226
12227 default:
12228 break;
12229 }
12230
12231 return NULL_TREE;
12232 }
12233
12234 /* Called from tree-cfg.c::make_edges to create cfg edges for all relevant
12235 GIMPLE_* codes. */
12236 bool
12237 make_gimple_omp_edges (basic_block bb, struct omp_region **region,
12238 int *region_idx)
12239 {
12240 gimple last = last_stmt (bb);
12241 enum gimple_code code = gimple_code (last);
12242 struct omp_region *cur_region = *region;
12243 bool fallthru = false;
12244
12245 switch (code)
12246 {
12247 case GIMPLE_OMP_PARALLEL:
12248 case GIMPLE_OMP_TASK:
12249 case GIMPLE_OMP_FOR:
12250 case GIMPLE_OMP_SINGLE:
12251 case GIMPLE_OMP_TEAMS:
12252 case GIMPLE_OMP_MASTER:
12253 case GIMPLE_OMP_TASKGROUP:
12254 case GIMPLE_OMP_ORDERED:
12255 case GIMPLE_OMP_CRITICAL:
12256 case GIMPLE_OMP_SECTION:
12257 cur_region = new_omp_region (bb, code, cur_region);
12258 fallthru = true;
12259 break;
12260
12261 case GIMPLE_OMP_TARGET:
12262 cur_region = new_omp_region (bb, code, cur_region);
12263 fallthru = true;
12264 switch (gimple_omp_target_kind (last))
12265 {
12266 case GF_OMP_TARGET_KIND_REGION:
12267 case GF_OMP_TARGET_KIND_DATA:
12268 case GF_OMP_TARGET_KIND_OACC_PARALLEL:
12269 case GF_OMP_TARGET_KIND_OACC_KERNELS:
12270 case GF_OMP_TARGET_KIND_OACC_DATA:
12271 break;
12272 case GF_OMP_TARGET_KIND_UPDATE:
12273 case GF_OMP_TARGET_KIND_OACC_UPDATE:
12274 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
12275 cur_region = cur_region->outer;
12276 break;
12277 default:
12278 gcc_unreachable ();
12279 }
12280 break;
12281
12282 case GIMPLE_OMP_SECTIONS:
12283 cur_region = new_omp_region (bb, code, cur_region);
12284 fallthru = true;
12285 break;
12286
12287 case GIMPLE_OMP_SECTIONS_SWITCH:
12288 fallthru = false;
12289 break;
12290
12291 case GIMPLE_OMP_ATOMIC_LOAD:
12292 case GIMPLE_OMP_ATOMIC_STORE:
12293 fallthru = true;
12294 break;
12295
12296 case GIMPLE_OMP_RETURN:
12297 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
12298 somewhere other than the next block. This will be
12299 created later. */
12300 cur_region->exit = bb;
12301 if (cur_region->type == GIMPLE_OMP_TASK)
12302 /* Add an edge corresponding to not scheduling the task
12303 immediately. */
12304 make_edge (cur_region->entry, bb, EDGE_ABNORMAL);
12305 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
12306 cur_region = cur_region->outer;
12307 break;
12308
12309 case GIMPLE_OMP_CONTINUE:
12310 cur_region->cont = bb;
12311 switch (cur_region->type)
12312 {
12313 case GIMPLE_OMP_FOR:
12314 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
12315 successor edges as abnormal to prevent splitting
12316 them. */
12317 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
12318 /* Make the loopback edge. */
12319 make_edge (bb, single_succ (cur_region->entry),
12320 EDGE_ABNORMAL);
12321
12322 /* Create an edge from GIMPLE_OMP_FOR to exit, which
12323 corresponds to the case that the body of the loop
12324 is not executed at all. */
12325 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
12326 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
12327 fallthru = false;
12328 break;
12329
12330 case GIMPLE_OMP_SECTIONS:
12331 /* Wire up the edges into and out of the nested sections. */
12332 {
12333 basic_block switch_bb = single_succ (cur_region->entry);
12334
12335 struct omp_region *i;
12336 for (i = cur_region->inner; i ; i = i->next)
12337 {
12338 gcc_assert (i->type == GIMPLE_OMP_SECTION);
12339 make_edge (switch_bb, i->entry, 0);
12340 make_edge (i->exit, bb, EDGE_FALLTHRU);
12341 }
12342
12343 /* Make the loopback edge to the block with
12344 GIMPLE_OMP_SECTIONS_SWITCH. */
12345 make_edge (bb, switch_bb, 0);
12346
12347 /* Make the edge from the switch to exit. */
12348 make_edge (switch_bb, bb->next_bb, 0);
12349 fallthru = false;
12350 }
12351 break;
12352
12353 case GIMPLE_OMP_TASK:
12354 fallthru = true;
12355 break;
12356
12357 default:
12358 gcc_unreachable ();
12359 }
12360 break;
12361
12362 default:
12363 gcc_unreachable ();
12364 }
12365
12366 if (*region != cur_region)
12367 {
12368 *region = cur_region;
12369 if (cur_region)
12370 *region_idx = cur_region->entry->index;
12371 else
12372 *region_idx = 0;
12373 }
12374
12375 return fallthru;
12376 }
12377
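/* Walk the current function body and diagnose invalid branches into or
   out of OMP structured blocks (the actual checking is done by the
   diagnose_sb_1 and diagnose_sb_2 callbacks above).  As an illustrative
   example (hypothetical code, not from this file), the pass rejects

       #pragma omp parallel
       {
         if (cond)
           goto fail;   /* jump leaves the structured block */
       }
     fail:;

   because the branch crosses the boundary of the structured block.  */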
12378 static unsigned int
12379 diagnose_omp_structured_block_errors (void)
12380 {
12381 struct walk_stmt_info wi;
12382 gimple_seq body = gimple_body (current_function_decl);
12383
12384 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
12385
12386 memset (&wi, 0, sizeof (wi));
12387 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
12388
12389 memset (&wi, 0, sizeof (wi));
12390 wi.want_locations = true;
12391 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
12392
12393 gimple_set_body (current_function_decl, body);
12394
12395 splay_tree_delete (all_labels);
12396 all_labels = NULL;
12397
12398 return 0;
12399 }
12400
12401 static const pass_data pass_data_diagnose_omp_blocks =
12402 {
12403 GIMPLE_PASS, /* type */
12404 "*diagnose_omp_blocks", /* name */
12405 OPTGROUP_NONE, /* optinfo_flags */
12406 TV_NONE, /* tv_id */
12407 PROP_gimple_any, /* properties_required */
12408 0, /* properties_provided */
12409 0, /* properties_destroyed */
12410 0, /* todo_flags_start */
12411 0, /* todo_flags_finish */
12412 };
12413
12414 class pass_diagnose_omp_blocks GCC_FINAL : public gimple_opt_pass
12415 {
12416 public:
12417 pass_diagnose_omp_blocks (gcc::context *ctxt)
12418 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
12419 {}
12420
12421 /* opt_pass methods: */
12422 virtual bool gate (function *)
12423 {
12424 return flag_cilkplus || flag_openacc || flag_openmp;
12425 }
12426 virtual unsigned int execute (function *)
12427 {
12428 return diagnose_omp_structured_block_errors ();
12429 }
12430
12431 }; // class pass_diagnose_omp_blocks
12432
12433 gimple_opt_pass *
12434 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
12435 {
12436 return new pass_diagnose_omp_blocks (ctxt);
12437 }
12438 \f
12439 /* SIMD clone supporting code. */
12440
12441 /* Allocate a fresh `simd_clone' and return it. NARGS is the number
12442 of arguments to reserve space for. */
12443
12444 static struct cgraph_simd_clone *
12445 simd_clone_struct_alloc (int nargs)
12446 {
12447 struct cgraph_simd_clone *clone_info;
12448 size_t len = (sizeof (struct cgraph_simd_clone)
12449 + nargs * sizeof (struct cgraph_simd_clone_arg));
12450 clone_info = (struct cgraph_simd_clone *)
12451 ggc_internal_cleared_alloc (len);
12452 return clone_info;
12453 }
12454
12455 /* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
12456
12457 static inline void
12458 simd_clone_struct_copy (struct cgraph_simd_clone *to,
12459 struct cgraph_simd_clone *from)
12460 {
12461 memcpy (to, from, (sizeof (struct cgraph_simd_clone)
12462 + ((from->nargs - from->inbranch)
12463 * sizeof (struct cgraph_simd_clone_arg))));
12464 }
12465
12466 /* Return vector of parameter types of function FNDECL. This uses
12467 TYPE_ARG_TYPES if available, otherwise falls back to the types in
12468 the DECL_ARGUMENTS chain. */
12469
12470 vec<tree>
12471 simd_clone_vector_of_formal_parm_types (tree fndecl)
12472 {
12473 if (TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
12474 return ipa_get_vector_of_formal_parm_types (TREE_TYPE (fndecl));
12475 vec<tree> args = ipa_get_vector_of_formal_parms (fndecl);
12476 unsigned int i;
12477 tree arg;
12478 FOR_EACH_VEC_ELT (args, i, arg)
12479 args[i] = TREE_TYPE (args[i]);
12480 return args;
12481 }
12482
12483 /* Given a simd function in NODE, extract the simd specific
12484 information from the OMP clauses passed in CLAUSES, and return
12485 the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
12486 is set to TRUE if the `inbranch' or `notinbranch' clause was specified,
12487 otherwise set to FALSE. */
12488
12489 static struct cgraph_simd_clone *
12490 simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
12491 bool *inbranch_specified)
12492 {
12493 vec<tree> args = simd_clone_vector_of_formal_parm_types (node->decl);
12494 tree t;
12495 int n;
12496 *inbranch_specified = false;
12497
12498 n = args.length ();
12499 if (n > 0 && args.last () == void_type_node)
12500 n--;
12501
12502 /* To distinguish from an OpenMP simd clone, Cilk Plus functions to
12503 be cloned carry a distinctive artificial "cilk simd function"
12504 attribute in addition to "omp declare simd". */
12505 bool cilk_clone
12506 = (flag_cilkplus
12507 && lookup_attribute ("cilk simd function",
12508 DECL_ATTRIBUTES (node->decl)));
12509
12510 /* Allocate one more than needed just in case this is an in-branch
12511 clone which will require a mask argument. */
12512 struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
12513 clone_info->nargs = n;
12514 clone_info->cilk_elemental = cilk_clone;
12515
12516 if (!clauses)
12517 {
12518 args.release ();
12519 return clone_info;
12520 }
12521 clauses = TREE_VALUE (clauses);
12522 if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
12523 return clone_info;
12524
12525 for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
12526 {
12527 switch (OMP_CLAUSE_CODE (t))
12528 {
12529 case OMP_CLAUSE_INBRANCH:
12530 clone_info->inbranch = 1;
12531 *inbranch_specified = true;
12532 break;
12533 case OMP_CLAUSE_NOTINBRANCH:
12534 clone_info->inbranch = 0;
12535 *inbranch_specified = true;
12536 break;
12537 case OMP_CLAUSE_SIMDLEN:
12538 clone_info->simdlen
12539 = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
12540 break;
12541 case OMP_CLAUSE_LINEAR:
12542 {
12543 tree decl = OMP_CLAUSE_DECL (t);
12544 tree step = OMP_CLAUSE_LINEAR_STEP (t);
12545 int argno = TREE_INT_CST_LOW (decl);
12546 if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
12547 {
12548 clone_info->args[argno].arg_type
12549 = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
12550 clone_info->args[argno].linear_step = tree_to_shwi (step);
12551 gcc_assert (clone_info->args[argno].linear_step >= 0
12552 && clone_info->args[argno].linear_step < n);
12553 }
12554 else
12555 {
12556 if (POINTER_TYPE_P (args[argno]))
12557 step = fold_convert (ssizetype, step);
12558 if (!tree_fits_shwi_p (step))
12559 {
12560 warning_at (OMP_CLAUSE_LOCATION (t), 0,
12561 "ignoring large linear step");
12562 args.release ();
12563 return NULL;
12564 }
12565 else if (integer_zerop (step))
12566 {
12567 warning_at (OMP_CLAUSE_LOCATION (t), 0,
12568 "ignoring zero linear step");
12569 args.release ();
12570 return NULL;
12571 }
12572 else
12573 {
12574 clone_info->args[argno].arg_type
12575 = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
12576 clone_info->args[argno].linear_step = tree_to_shwi (step);
12577 }
12578 }
12579 break;
12580 }
12581 case OMP_CLAUSE_UNIFORM:
12582 {
12583 tree decl = OMP_CLAUSE_DECL (t);
12584 int argno = tree_to_uhwi (decl);
12585 clone_info->args[argno].arg_type
12586 = SIMD_CLONE_ARG_TYPE_UNIFORM;
12587 break;
12588 }
12589 case OMP_CLAUSE_ALIGNED:
12590 {
12591 tree decl = OMP_CLAUSE_DECL (t);
12592 int argno = tree_to_uhwi (decl);
12593 clone_info->args[argno].alignment
12594 = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
12595 break;
12596 }
12597 default:
12598 break;
12599 }
12600 }
12601 args.release ();
12602 return clone_info;
12603 }
12604
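/* As an illustrative example (hypothetical code, not from this file),
   for a declaration such as

       #pragma omp declare simd simdlen(4) uniform(b) linear(c:1) \
               aligned(b:32) notinbranch
       int foo (int a, int *b, int c);

   the routine above records simdlen = 4, marks argument 1 as
   SIMD_CLONE_ARG_TYPE_UNIFORM with alignment 32, marks argument 2 as
   SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP with linear_step 1, and sets
   *INBRANCH_SPECIFIED because notinbranch was given.  */
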
12605 /* Given a SIMD clone in NODE, calculate the characteristic data
12606 type and return the corresponding type. The characteristic data
12607 type is computed as described in the Intel Vector ABI. */
12608
12609 static tree
12610 simd_clone_compute_base_data_type (struct cgraph_node *node,
12611 struct cgraph_simd_clone *clone_info)
12612 {
12613 tree type = integer_type_node;
12614 tree fndecl = node->decl;
12615
12616 /* a) For a non-void function, the characteristic data type is the
12617 return type. */
12618 if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
12619 type = TREE_TYPE (TREE_TYPE (fndecl));
12620
12621 /* b) If the function has any non-uniform, non-linear parameters,
12622 then the characteristic data type is the type of the first
12623 such parameter. */
12624 else
12625 {
12626 vec<tree> map = simd_clone_vector_of_formal_parm_types (fndecl);
12627 for (unsigned int i = 0; i < clone_info->nargs; ++i)
12628 if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
12629 {
12630 type = map[i];
12631 break;
12632 }
12633 map.release ();
12634 }
12635
12636 /* c) If the characteristic data type determined by a) or b) above
12637 is struct, union, or class type which is pass-by-value (except
12638 for the type that maps to the built-in complex data type), the
12639 characteristic data type is int. */
12640 if (RECORD_OR_UNION_TYPE_P (type)
12641 && !aggregate_value_p (type, NULL)
12642 && TREE_CODE (type) != COMPLEX_TYPE)
12643 return integer_type_node;
12644
12645 /* d) If none of the above three classes is applicable, the
12646 characteristic data type is int. */
12647
12648 return type;
12649
12650 /* e) For Intel Xeon Phi native and offload compilation, if the
12651 resulting characteristic data type is an 8-bit or 16-bit integer
12652 data type, the characteristic data type is int. */
12653 /* Well, we don't handle Xeon Phi yet. */
12654 }
12655
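/* Compute the vector-ABI-style mangled name for the clone of NODE
   described by CLONE_INFO, or NULL_TREE if a clone with the same mangled
   name already exists.  The name encodes the ISA letter, the mask ('M'
   for inbranch, 'N' for notinbranch), the simdlen and one letter per
   argument.  For instance (illustrative only; the ISA letter and default
   simdlen come from the target hook), a notinbranch simdlen(4) clone of

       int foo (int a, int *b, int c);

   with b uniform and c linear with step 1 might be mangled as
   "_ZGVbN4vul_foo".  */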
12656 static tree
12657 simd_clone_mangle (struct cgraph_node *node,
12658 struct cgraph_simd_clone *clone_info)
12659 {
12660 char vecsize_mangle = clone_info->vecsize_mangle;
12661 char mask = clone_info->inbranch ? 'M' : 'N';
12662 unsigned int simdlen = clone_info->simdlen;
12663 unsigned int n;
12664 pretty_printer pp;
12665
12666 gcc_assert (vecsize_mangle && simdlen);
12667
12668 pp_string (&pp, "_ZGV");
12669 pp_character (&pp, vecsize_mangle);
12670 pp_character (&pp, mask);
12671 pp_decimal_int (&pp, simdlen);
12672
12673 for (n = 0; n < clone_info->nargs; ++n)
12674 {
12675 struct cgraph_simd_clone_arg arg = clone_info->args[n];
12676
12677 if (arg.arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
12678 pp_character (&pp, 'u');
12679 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
12680 {
12681 gcc_assert (arg.linear_step != 0);
12682 pp_character (&pp, 'l');
12683 if (arg.linear_step > 1)
12684 pp_unsigned_wide_integer (&pp, arg.linear_step);
12685 else if (arg.linear_step < 0)
12686 {
12687 pp_character (&pp, 'n');
12688 pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
12689 arg.linear_step));
12690 }
12691 }
12692 else if (arg.arg_type == SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
12693 {
12694 pp_character (&pp, 's');
12695 pp_unsigned_wide_integer (&pp, arg.linear_step);
12696 }
12697 else
12698 pp_character (&pp, 'v');
12699 if (arg.alignment)
12700 {
12701 pp_character (&pp, 'a');
12702 pp_decimal_int (&pp, arg.alignment);
12703 }
12704 }
12705
12706 pp_underscore (&pp);
12707 const char *str = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl));
12708 if (*str == '*')
12709 ++str;
12710 pp_string (&pp, str);
12711 str = pp_formatted_text (&pp);
12712
12713 /* If there already is a SIMD clone with the same mangled name, don't
12714 add another one. This can happen e.g. for
12715 #pragma omp declare simd
12716 #pragma omp declare simd simdlen(8)
12717 int foo (int, int);
12718 if the simdlen is assumed to be 8 for the first one, etc. */
12719 for (struct cgraph_node *clone = node->simd_clones; clone;
12720 clone = clone->simdclone->next_clone)
12721 if (strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (clone->decl)),
12722 str) == 0)
12723 return NULL_TREE;
12724
12725 return get_identifier (str);
12726 }
12727
12728 /* Create a simd clone of OLD_NODE and return it. */
12729
12730 static struct cgraph_node *
12731 simd_clone_create (struct cgraph_node *old_node)
12732 {
12733 struct cgraph_node *new_node;
12734 if (old_node->definition)
12735 {
12736 if (!old_node->has_gimple_body_p ())
12737 return NULL;
12738 old_node->get_body ();
12739 new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
12740 false, NULL, NULL,
12741 "simdclone");
12742 }
12743 else
12744 {
12745 tree old_decl = old_node->decl;
12746 tree new_decl = copy_node (old_node->decl);
12747 DECL_NAME (new_decl) = clone_function_name (old_decl, "simdclone");
12748 SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
12749 SET_DECL_RTL (new_decl, NULL);
12750 DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
12751 DECL_STATIC_DESTRUCTOR (new_decl) = 0;
12752 new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
12753 symtab->call_cgraph_insertion_hooks (new_node);
12754 }
12755 if (new_node == NULL)
12756 return new_node;
12757
12758 TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
12759
12760 /* The function cgraph_function_versioning () will force the new
12761 symbol local. Undo this, and inherit external visibility from
12762 the old node. */
12763 new_node->local.local = old_node->local.local;
12764 new_node->externally_visible = old_node->externally_visible;
12765
12766 return new_node;
12767 }
12768
12769 /* Adjust the return type of the given function to its appropriate
12770 vector counterpart. Returns a simd array to be used throughout the
12771 function as a return value. */
12772
12773 static tree
12774 simd_clone_adjust_return_type (struct cgraph_node *node)
12775 {
12776 tree fndecl = node->decl;
12777 tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
12778 unsigned int veclen;
12779 tree t;
12780
12781 /* Adjust the function return type. */
12782 if (orig_rettype == void_type_node)
12783 return NULL_TREE;
12784 TREE_TYPE (fndecl) = build_distinct_type_copy (TREE_TYPE (fndecl));
12785 t = TREE_TYPE (TREE_TYPE (fndecl));
12786 if (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t))
12787 veclen = node->simdclone->vecsize_int;
12788 else
12789 veclen = node->simdclone->vecsize_float;
12790 veclen /= GET_MODE_BITSIZE (TYPE_MODE (t));
12791 if (veclen > node->simdclone->simdlen)
12792 veclen = node->simdclone->simdlen;
12793 if (POINTER_TYPE_P (t))
12794 t = pointer_sized_int_node;
12795 if (veclen == node->simdclone->simdlen)
12796 t = build_vector_type (t, node->simdclone->simdlen);
12797 else
12798 {
12799 t = build_vector_type (t, veclen);
12800 t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
12801 }
12802 TREE_TYPE (TREE_TYPE (fndecl)) = t;
12803 if (!node->definition)
12804 return NULL_TREE;
12805
12806 t = DECL_RESULT (fndecl);
12807 /* Adjust the DECL_RESULT. */
12808 gcc_assert (TREE_TYPE (t) != void_type_node);
12809 TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
12810 relayout_decl (t);
12811
12812 tree atype = build_array_type_nelts (orig_rettype,
12813 node->simdclone->simdlen);
12814 if (veclen != node->simdclone->simdlen)
12815 return build1 (VIEW_CONVERT_EXPR, atype, t);
12816
12817 /* Set up a SIMD array to use as the return value. */
12818 tree retval = create_tmp_var_raw (atype, "retval");
12819 gimple_add_tmp_var (retval);
12820 return retval;
12821 }
12822
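/* As an illustrative example (numbers are hypothetical and depend on the
   target), with simdlen 8, a float return type and a 128-bit vector size,
   veclen is 4, so the function above rewrites the return type to an array
   of two vector(4) float and returns a VIEW_CONVERT_EXPR of DECL_RESULT
   to an 8-element array of float; if veclen equals simdlen, the return
   type becomes a single vector and a separate "retval" simd array is
   created and returned instead.  */
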
12823 /* Each vector argument has a corresponding array to be used locally
12824 as part of the eventual loop. Create such temporary array and
12825 return it.
12826
12827 PREFIX is the prefix to be used for the temporary.
12828
12829 TYPE is the inner element type.
12830
12831 SIMDLEN is the number of elements. */
12832
12833 static tree
12834 create_tmp_simd_array (const char *prefix, tree type, int simdlen)
12835 {
12836 tree atype = build_array_type_nelts (type, simdlen);
12837 tree avar = create_tmp_var_raw (atype, prefix);
12838 gimple_add_tmp_var (avar);
12839 return avar;
12840 }
12841
12842 /* Modify the function argument types to their corresponding vector
12843 counterparts if appropriate. Also, create one array for each simd
12844 argument to be used locally when using the function arguments as
12845 part of the loop.
12846
12847 NODE is the function whose arguments are to be adjusted.
12848
12849 Returns an adjustment vector that will be filled describing how the
12850 argument types will be adjusted. */
12851
12852 static ipa_parm_adjustment_vec
12853 simd_clone_adjust_argument_types (struct cgraph_node *node)
12854 {
12855 vec<tree> args;
12856 ipa_parm_adjustment_vec adjustments;
12857
12858 if (node->definition)
12859 args = ipa_get_vector_of_formal_parms (node->decl);
12860 else
12861 args = simd_clone_vector_of_formal_parm_types (node->decl);
12862 adjustments.create (args.length ());
12863 unsigned i, j, veclen;
12864 struct ipa_parm_adjustment adj;
12865 for (i = 0; i < node->simdclone->nargs; ++i)
12866 {
12867 memset (&adj, 0, sizeof (adj));
12868 tree parm = args[i];
12869 tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
12870 adj.base_index = i;
12871 adj.base = parm;
12872
12873 node->simdclone->args[i].orig_arg = node->definition ? parm : NULL_TREE;
12874 node->simdclone->args[i].orig_type = parm_type;
12875
12876 if (node->simdclone->args[i].arg_type != SIMD_CLONE_ARG_TYPE_VECTOR)
12877 {
12878 /* No adjustment necessary for scalar arguments. */
12879 adj.op = IPA_PARM_OP_COPY;
12880 }
12881 else
12882 {
12883 if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
12884 veclen = node->simdclone->vecsize_int;
12885 else
12886 veclen = node->simdclone->vecsize_float;
12887 veclen /= GET_MODE_BITSIZE (TYPE_MODE (parm_type));
12888 if (veclen > node->simdclone->simdlen)
12889 veclen = node->simdclone->simdlen;
12890 adj.arg_prefix = "simd";
12891 if (POINTER_TYPE_P (parm_type))
12892 adj.type = build_vector_type (pointer_sized_int_node, veclen);
12893 else
12894 adj.type = build_vector_type (parm_type, veclen);
12895 node->simdclone->args[i].vector_type = adj.type;
12896 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
12897 {
12898 adjustments.safe_push (adj);
12899 if (j == veclen)
12900 {
12901 memset (&adj, 0, sizeof (adj));
12902 adj.op = IPA_PARM_OP_NEW;
12903 adj.arg_prefix = "simd";
12904 adj.base_index = i;
12905 adj.type = node->simdclone->args[i].vector_type;
12906 }
12907 }
12908
12909 if (node->definition)
12910 node->simdclone->args[i].simd_array
12911 = create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
12912 parm_type, node->simdclone->simdlen);
12913 }
12914 adjustments.safe_push (adj);
12915 }
12916
12917 if (node->simdclone->inbranch)
12918 {
12919 tree base_type
12920 = simd_clone_compute_base_data_type (node->simdclone->origin,
12921 node->simdclone);
12922
12923 memset (&adj, 0, sizeof (adj));
12924 adj.op = IPA_PARM_OP_NEW;
12925 adj.arg_prefix = "mask";
12926
12927 adj.base_index = i;
12928 if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
12929 veclen = node->simdclone->vecsize_int;
12930 else
12931 veclen = node->simdclone->vecsize_float;
12932 veclen /= GET_MODE_BITSIZE (TYPE_MODE (base_type));
12933 if (veclen > node->simdclone->simdlen)
12934 veclen = node->simdclone->simdlen;
12935 if (POINTER_TYPE_P (base_type))
12936 adj.type = build_vector_type (pointer_sized_int_node, veclen);
12937 else
12938 adj.type = build_vector_type (base_type, veclen);
12939 adjustments.safe_push (adj);
12940
12941 for (j = veclen; j < node->simdclone->simdlen; j += veclen)
12942 adjustments.safe_push (adj);
12943
12944 /* We have previously allocated one extra entry for the mask. Use
12945 it and fill it. */
12946 struct cgraph_simd_clone *sc = node->simdclone;
12947 sc->nargs++;
12948 if (node->definition)
12949 {
12950 sc->args[i].orig_arg
12951 = build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
12952 sc->args[i].simd_array
12953 = create_tmp_simd_array ("mask", base_type, sc->simdlen);
12954 }
12955 sc->args[i].orig_type = base_type;
12956 sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
12957 }
12958
12959 if (node->definition)
12960 ipa_modify_formal_parameters (node->decl, adjustments);
12961 else
12962 {
12963 tree new_arg_types = NULL_TREE, new_reversed;
12964 bool last_parm_void = false;
12965 if (args.length () > 0 && args.last () == void_type_node)
12966 last_parm_void = true;
12967
12968 gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
12969 j = adjustments.length ();
12970 for (i = 0; i < j; i++)
12971 {
12972 struct ipa_parm_adjustment *adj = &adjustments[i];
12973 tree ptype;
12974 if (adj->op == IPA_PARM_OP_COPY)
12975 ptype = args[adj->base_index];
12976 else
12977 ptype = adj->type;
12978 new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
12979 }
12980 new_reversed = nreverse (new_arg_types);
12981 if (last_parm_void)
12982 {
12983 if (new_reversed)
12984 TREE_CHAIN (new_arg_types) = void_list_node;
12985 else
12986 new_reversed = void_list_node;
12987 }
12988
12989 tree new_type = build_distinct_type_copy (TREE_TYPE (node->decl));
12990 TYPE_ARG_TYPES (new_type) = new_reversed;
12991 TREE_TYPE (node->decl) = new_type;
12992
12993 adjustments.release ();
12994 }
12995 args.release ();
12996 return adjustments;
12997 }
12998
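/* Illustrative example (hypothetical target parameters): for a vector
   argument of type int with simdlen 8 and a 128-bit integer vector size,
   veclen is 4, so the routine above replaces the single scalar parameter
   with two parameters of type vector(4) int and, for definitions, creates
   an 8-element simd array holding the per-lane values; uniform and linear
   arguments are left alone (IPA_PARM_OP_COPY).  */
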
12999 /* Initialize and copy the function arguments in NODE to their
13000 corresponding local simd arrays. Returns a fresh gimple_seq with
13001 the instruction sequence generated. */
13002
13003 static gimple_seq
13004 simd_clone_init_simd_arrays (struct cgraph_node *node,
13005 ipa_parm_adjustment_vec adjustments)
13006 {
13007 gimple_seq seq = NULL;
13008 unsigned i = 0, j = 0, k;
13009
13010 for (tree arg = DECL_ARGUMENTS (node->decl);
13011 arg;
13012 arg = DECL_CHAIN (arg), i++, j++)
13013 {
13014 if (adjustments[j].op == IPA_PARM_OP_COPY)
13015 continue;
13016
13017 node->simdclone->args[i].vector_arg = arg;
13018
13019 tree array = node->simdclone->args[i].simd_array;
13020 if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)) == node->simdclone->simdlen)
13021 {
13022 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
13023 tree ptr = build_fold_addr_expr (array);
13024 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
13025 build_int_cst (ptype, 0));
13026 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
13027 gimplify_and_add (t, &seq);
13028 }
13029 else
13030 {
13031 unsigned int simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg));
13032 tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
13033 for (k = 0; k < node->simdclone->simdlen; k += simdlen)
13034 {
13035 tree ptr = build_fold_addr_expr (array);
13036 int elemsize;
13037 if (k)
13038 {
13039 arg = DECL_CHAIN (arg);
13040 j++;
13041 }
13042 elemsize
13043 = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (arg))));
13044 tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
13045 build_int_cst (ptype, k * elemsize));
13046 t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
13047 gimplify_and_add (t, &seq);
13048 }
13049 }
13050 }
13051 return seq;
13052 }
13053
13054 /* Callback info for ipa_simd_modify_stmt_ops below. */
13055
13056 struct modify_stmt_info {
13057 ipa_parm_adjustment_vec adjustments;
13058 gimple stmt;
13059 /* True if the parent statement was modified by
13060 ipa_simd_modify_stmt_ops. */
13061 bool modified;
13062 };
13063
13064 /* Callback for walk_gimple_op.
13065
13066 Adjust operands from a given statement as specified in the
13067 adjustments vector in the callback data. */
13068
13069 static tree
13070 ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
13071 {
13072 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
13073 struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
13074 tree *orig_tp = tp;
13075 if (TREE_CODE (*tp) == ADDR_EXPR)
13076 tp = &TREE_OPERAND (*tp, 0);
13077 struct ipa_parm_adjustment *cand = NULL;
13078 if (TREE_CODE (*tp) == PARM_DECL)
13079 cand = ipa_get_adjustment_candidate (&tp, NULL, info->adjustments, true);
13080 else
13081 {
13082 if (TYPE_P (*tp))
13083 *walk_subtrees = 0;
13084 }
13085
13086 tree repl = NULL_TREE;
13087 if (cand)
13088 repl = unshare_expr (cand->new_decl);
13089 else
13090 {
13091 if (tp != orig_tp)
13092 {
13093 *walk_subtrees = 0;
13094 bool modified = info->modified;
13095 info->modified = false;
13096 walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
13097 if (!info->modified)
13098 {
13099 info->modified = modified;
13100 return NULL_TREE;
13101 }
13102 info->modified = modified;
13103 repl = *tp;
13104 }
13105 else
13106 return NULL_TREE;
13107 }
13108
13109 if (tp != orig_tp)
13110 {
13111 repl = build_fold_addr_expr (repl);
13112 gimple stmt;
13113 if (is_gimple_debug (info->stmt))
13114 {
13115 tree vexpr = make_node (DEBUG_EXPR_DECL);
13116 stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
13117 DECL_ARTIFICIAL (vexpr) = 1;
13118 TREE_TYPE (vexpr) = TREE_TYPE (repl);
13119 DECL_MODE (vexpr) = TYPE_MODE (TREE_TYPE (repl));
13120 repl = vexpr;
13121 }
13122 else
13123 {
13124 stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl)), repl);
13125 repl = gimple_assign_lhs (stmt);
13126 }
13127 gimple_stmt_iterator gsi = gsi_for_stmt (info->stmt);
13128 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
13129 *orig_tp = repl;
13130 }
13131 else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
13132 {
13133 tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
13134 *tp = vce;
13135 }
13136 else
13137 *tp = repl;
13138
13139 info->modified = true;
13140 return NULL_TREE;
13141 }
13142
13143 /* Traverse the function body and perform all modifications as
13144 described in ADJUSTMENTS. At function return, ADJUSTMENTS will be
13145 modified such that the replacement/reduction value will now be an
13146 offset into the corresponding simd_array.
13147
13148 This function will replace all function argument uses with their
13149 corresponding simd array elements, and adjust the return values
13150 accordingly. */
13151
13152 static void
13153 ipa_simd_modify_function_body (struct cgraph_node *node,
13154 ipa_parm_adjustment_vec adjustments,
13155 tree retval_array, tree iter)
13156 {
13157 basic_block bb;
13158 unsigned int i, j, l;
13159
13160 /* Re-use the adjustments array, but this time use it to replace
13161 every function argument use to an offset into the corresponding
13162 simd_array. */
13163 for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
13164 {
13165 if (!node->simdclone->args[i].vector_arg)
13166 continue;
13167
13168 tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
13169 tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
13170 adjustments[j].new_decl
13171 = build4 (ARRAY_REF,
13172 basetype,
13173 node->simdclone->args[i].simd_array,
13174 iter,
13175 NULL_TREE, NULL_TREE);
13176 if (adjustments[j].op == IPA_PARM_OP_NONE
13177 && TYPE_VECTOR_SUBPARTS (vectype) < node->simdclone->simdlen)
13178 j += node->simdclone->simdlen / TYPE_VECTOR_SUBPARTS (vectype) - 1;
13179 }
13180
13181 l = adjustments.length ();
13182 for (i = 1; i < num_ssa_names; i++)
13183 {
13184 tree name = ssa_name (i);
13185 if (name
13186 && SSA_NAME_VAR (name)
13187 && TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL)
13188 {
13189 for (j = 0; j < l; j++)
13190 if (SSA_NAME_VAR (name) == adjustments[j].base
13191 && adjustments[j].new_decl)
13192 {
13193 tree base_var;
13194 if (adjustments[j].new_ssa_base == NULL_TREE)
13195 {
13196 base_var
13197 = copy_var_decl (adjustments[j].base,
13198 DECL_NAME (adjustments[j].base),
13199 TREE_TYPE (adjustments[j].base));
13200 adjustments[j].new_ssa_base = base_var;
13201 }
13202 else
13203 base_var = adjustments[j].new_ssa_base;
13204 if (SSA_NAME_IS_DEFAULT_DEF (name))
13205 {
13206 bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
13207 gimple_stmt_iterator gsi = gsi_after_labels (bb);
13208 tree new_decl = unshare_expr (adjustments[j].new_decl);
13209 set_ssa_default_def (cfun, adjustments[j].base, NULL_TREE);
13210 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
13211 SSA_NAME_IS_DEFAULT_DEF (name) = 0;
13212 gimple stmt = gimple_build_assign (name, new_decl);
13213 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
13214 }
13215 else
13216 SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
13217 }
13218 }
13219 }
13220
13221 struct modify_stmt_info info;
13222 info.adjustments = adjustments;
13223
13224 FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
13225 {
13226 gimple_stmt_iterator gsi;
13227
13228 gsi = gsi_start_bb (bb);
13229 while (!gsi_end_p (gsi))
13230 {
13231 gimple stmt = gsi_stmt (gsi);
13232 info.stmt = stmt;
13233 struct walk_stmt_info wi;
13234
13235 memset (&wi, 0, sizeof (wi));
13236 info.modified = false;
13237 wi.info = &info;
13238 walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);
13239
13240 if (greturn *return_stmt = dyn_cast <greturn *> (stmt))
13241 {
13242 tree retval = gimple_return_retval (return_stmt);
13243 if (!retval)
13244 {
13245 gsi_remove (&gsi, true);
13246 continue;
13247 }
13248
13249 /* Replace `return foo' with `retval_array[iter] = foo'. */
13250 tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
13251 retval_array, iter, NULL, NULL);
13252 stmt = gimple_build_assign (ref, retval);
13253 gsi_replace (&gsi, stmt, true);
13254 info.modified = true;
13255 }
13256
13257 if (info.modified)
13258 {
13259 update_stmt (stmt);
13260 if (maybe_clean_eh_stmt (stmt))
13261 gimple_purge_dead_eh_edges (gimple_bb (stmt));
13262 }
13263 gsi_next (&gsi);
13264 }
13265 }
13266 }
13267
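/* For instance (an illustrative sketch), after this rewrite a use of an
   original vector parameter in the clone body is replaced by an ARRAY_REF
   into its simd array indexed by ITER, and a statement "return foo;"
   becomes "retval_array[ITER] = foo;", so that the loop added by
   simd_clone_adjust below iterates over the individual lanes.  */
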
13268 /* Adjust the argument types in NODE to their appropriate vector
13269 counterparts. */
13270
13271 static void
13272 simd_clone_adjust (struct cgraph_node *node)
13273 {
13274 push_cfun (DECL_STRUCT_FUNCTION (node->decl));
13275
13276 targetm.simd_clone.adjust (node);
13277
13278 tree retval = simd_clone_adjust_return_type (node);
13279 ipa_parm_adjustment_vec adjustments
13280 = simd_clone_adjust_argument_types (node);
13281
13282 push_gimplify_context ();
13283
13284 gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);
13285
13286 /* Adjust all uses of vector arguments accordingly. Adjust all
13287 return values accordingly. */
13288 tree iter = create_tmp_var (unsigned_type_node, "iter");
13289 tree iter1 = make_ssa_name (iter);
13290 tree iter2 = make_ssa_name (iter);
13291 ipa_simd_modify_function_body (node, adjustments, retval, iter1);
13292
13293 /* Initialize the iteration variable. */
13294 basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
13295 basic_block body_bb = split_block_after_labels (entry_bb)->dest;
13296 gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
13297 /* Insert the SIMD array and iv initialization at function
13298 entry. */
13299 gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);
13300
13301 pop_gimplify_context (NULL);
13302
13303 /* Create a new BB right before the original exit BB, to hold the
13304 iteration increment and the condition/branch. */
13305 basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
13306 basic_block incr_bb = create_empty_bb (orig_exit);
13307 add_bb_to_loop (incr_bb, body_bb->loop_father);
13308 /* The succ of orig_exit was EXIT_BLOCK_PTR_FOR_FN (cfun), with an empty
13309 flag. Set it now to be a FALLTHRU_EDGE. */
13310 gcc_assert (EDGE_COUNT (orig_exit->succs) == 1);
13311 EDGE_SUCC (orig_exit, 0)->flags |= EDGE_FALLTHRU;
13312 for (unsigned i = 0;
13313 i < EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds); ++i)
13314 {
13315 edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), i);
13316 redirect_edge_succ (e, incr_bb);
13317 }
13318 edge e = make_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
13319 e->probability = REG_BR_PROB_BASE;
13320 gsi = gsi_last_bb (incr_bb);
13321 gimple g = gimple_build_assign (iter2, PLUS_EXPR, iter1,
13322 build_int_cst (unsigned_type_node, 1));
13323 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
13324
13325 /* Mostly annotate the loop for the vectorizer (the rest is done below). */
13326 struct loop *loop = alloc_loop ();
13327 cfun->has_force_vectorize_loops = true;
13328 loop->safelen = node->simdclone->simdlen;
13329 loop->force_vectorize = true;
13330 loop->header = body_bb;
13331
13332 /* Branch around the body if the mask applies. */
13333 if (node->simdclone->inbranch)
13334 {
13335 gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
13336 tree mask_array
13337 = node->simdclone->args[node->simdclone->nargs - 1].simd_array;
13338 tree mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
13339 tree aref = build4 (ARRAY_REF,
13340 TREE_TYPE (TREE_TYPE (mask_array)),
13341 mask_array, iter1,
13342 NULL, NULL);
13343 g = gimple_build_assign (mask, aref);
13344 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
13345 int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (aref)));
13346 if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
13347 {
13348 aref = build1 (VIEW_CONVERT_EXPR,
13349 build_nonstandard_integer_type (bitsize, 0), mask);
13350 mask = make_ssa_name (TREE_TYPE (aref));
13351 g = gimple_build_assign (mask, aref);
13352 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
13353 }
13354
13355 g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
13356 NULL, NULL);
13357 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
13358 make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
13359 FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
13360 }
13361
13362 /* Generate the condition. */
13363 g = gimple_build_cond (LT_EXPR,
13364 iter2,
13365 build_int_cst (unsigned_type_node,
13366 node->simdclone->simdlen),
13367 NULL, NULL);
13368 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
13369 e = split_block (incr_bb, gsi_stmt (gsi));
13370 basic_block latch_bb = e->dest;
13371 basic_block new_exit_bb;
13372 new_exit_bb = split_block_after_labels (latch_bb)->dest;
13373 loop->latch = latch_bb;
13374
13375 redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);
13376
13377 make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
13378 /* The successor of incr_bb is already pointing to latch_bb; just
13379 change the flags.
13380 make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */
13381 FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;
13382
13383 gphi *phi = create_phi_node (iter1, body_bb);
13384 edge preheader_edge = find_edge (entry_bb, body_bb);
13385 edge latch_edge = single_succ_edge (latch_bb);
13386 add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
13387 UNKNOWN_LOCATION);
13388 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
13389
13390 /* Generate the new return. */
13391 gsi = gsi_last_bb (new_exit_bb);
13392 if (retval
13393 && TREE_CODE (retval) == VIEW_CONVERT_EXPR
13394 && TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
13395 retval = TREE_OPERAND (retval, 0);
13396 else if (retval)
13397 {
13398 retval = build1 (VIEW_CONVERT_EXPR,
13399 TREE_TYPE (TREE_TYPE (node->decl)),
13400 retval);
13401 retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
13402 false, GSI_CONTINUE_LINKING);
13403 }
13404 g = gimple_build_return (retval);
13405 gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
13406
13407 /* Handle aligned clauses by replacing default defs of the aligned
13408 uniform args with __builtin_assume_aligned (arg_N(D), alignment)
13409 lhs. Handle linear by adding PHIs. */
13410 for (unsigned i = 0; i < node->simdclone->nargs; i++)
13411 if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
13412 && (TREE_ADDRESSABLE (node->simdclone->args[i].orig_arg)
13413 || !is_gimple_reg_type
13414 (TREE_TYPE (node->simdclone->args[i].orig_arg))))
13415 {
13416 tree orig_arg = node->simdclone->args[i].orig_arg;
13417 if (is_gimple_reg_type (TREE_TYPE (orig_arg)))
13418 iter1 = make_ssa_name (TREE_TYPE (orig_arg));
13419 else
13420 {
13421 iter1 = create_tmp_var_raw (TREE_TYPE (orig_arg));
13422 gimple_add_tmp_var (iter1);
13423 }
13424 gsi = gsi_after_labels (entry_bb);
13425 g = gimple_build_assign (iter1, orig_arg);
13426 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
13427 gsi = gsi_after_labels (body_bb);
13428 g = gimple_build_assign (orig_arg, iter1);
13429 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
13430 }
13431 else if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
13432 && DECL_BY_REFERENCE (node->simdclone->args[i].orig_arg)
13433 && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
13434 == REFERENCE_TYPE
13435 && TREE_ADDRESSABLE
13436 (TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg))))
13437 {
13438 tree orig_arg = node->simdclone->args[i].orig_arg;
13439 tree def = ssa_default_def (cfun, orig_arg);
13440 if (def && !has_zero_uses (def))
13441 {
13442 iter1 = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (orig_arg)));
13443 gimple_add_tmp_var (iter1);
13444 gsi = gsi_after_labels (entry_bb);
13445 g = gimple_build_assign (iter1, build_simple_mem_ref (def));
13446 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
13447 gsi = gsi_after_labels (body_bb);
13448 g = gimple_build_assign (build_simple_mem_ref (def), iter1);
13449 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
13450 }
13451 }
13452 else if (node->simdclone->args[i].alignment
13453 && node->simdclone->args[i].arg_type
13454 == SIMD_CLONE_ARG_TYPE_UNIFORM
13455 && (node->simdclone->args[i].alignment
13456 & (node->simdclone->args[i].alignment - 1)) == 0
13457 && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
13458 == POINTER_TYPE)
13459 {
13460 unsigned int alignment = node->simdclone->args[i].alignment;
13461 tree orig_arg = node->simdclone->args[i].orig_arg;
13462 tree def = ssa_default_def (cfun, orig_arg);
13463 if (def && !has_zero_uses (def))
13464 {
13465 tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
13466 gimple_seq seq = NULL;
13467 bool need_cvt = false;
13468 gcall *call
13469 = gimple_build_call (fn, 2, def, size_int (alignment));
13470 g = call;
13471 if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
13472 ptr_type_node))
13473 need_cvt = true;
13474 tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg);
13475 gimple_call_set_lhs (g, t);
13476 gimple_seq_add_stmt_without_update (&seq, g);
13477 if (need_cvt)
13478 {
13479 t = make_ssa_name (orig_arg);
13480 g = gimple_build_assign (t, NOP_EXPR, gimple_call_lhs (g));
13481 gimple_seq_add_stmt_without_update (&seq, g);
13482 }
13483 gsi_insert_seq_on_edge_immediate
13484 (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
13485
13486 entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
13487 int freq = compute_call_stmt_bb_frequency (current_function_decl,
13488 entry_bb);
13489 node->create_edge (cgraph_node::get_create (fn),
13490 call, entry_bb->count, freq);
13491
13492 imm_use_iterator iter;
13493 use_operand_p use_p;
13494 gimple use_stmt;
13495 tree repl = gimple_get_lhs (g);
13496 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
13497 if (is_gimple_debug (use_stmt) || use_stmt == call)
13498 continue;
13499 else
13500 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
13501 SET_USE (use_p, repl);
13502 }
13503 }
13504 else if (node->simdclone->args[i].arg_type
13505 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
13506 {
13507 tree orig_arg = node->simdclone->args[i].orig_arg;
13508 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
13509 || POINTER_TYPE_P (TREE_TYPE (orig_arg)));
13510 tree def = NULL_TREE;
13511 if (TREE_ADDRESSABLE (orig_arg))
13512 {
13513 def = make_ssa_name (TREE_TYPE (orig_arg));
13514 iter1 = make_ssa_name (TREE_TYPE (orig_arg));
13515 iter2 = make_ssa_name (TREE_TYPE (orig_arg));
13516 gsi = gsi_after_labels (entry_bb);
13517 g = gimple_build_assign (def, orig_arg);
13518 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
13519 }
13520 else
13521 {
13522 def = ssa_default_def (cfun, orig_arg);
13523 if (!def || has_zero_uses (def))
13524 def = NULL_TREE;
13525 else
13526 {
13527 iter1 = make_ssa_name (orig_arg);
13528 iter2 = make_ssa_name (orig_arg);
13529 }
13530 }
13531 if (def)
13532 {
13533 phi = create_phi_node (iter1, body_bb);
13534 add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
13535 add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
13536 enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
13537 ? PLUS_EXPR : POINTER_PLUS_EXPR;
13538 tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
13539 ? TREE_TYPE (orig_arg) : sizetype;
13540 tree addcst
13541 = build_int_cst (addtype, node->simdclone->args[i].linear_step);
13542 g = gimple_build_assign (iter2, code, iter1, addcst);
13543 gsi = gsi_last_bb (incr_bb);
13544 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
13545
13546 imm_use_iterator iter;
13547 use_operand_p use_p;
13548 gimple use_stmt;
13549 if (TREE_ADDRESSABLE (orig_arg))
13550 {
13551 gsi = gsi_after_labels (body_bb);
13552 g = gimple_build_assign (orig_arg, iter1);
13553 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
13554 }
13555 else
13556 FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
13557 if (use_stmt == phi)
13558 continue;
13559 else
13560 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
13561 SET_USE (use_p, iter1);
13562 }
13563 }
13564
13565 calculate_dominance_info (CDI_DOMINATORS);
13566 add_loop (loop, loop->header->loop_father);
13567 update_ssa (TODO_update_ssa);
13568
13569 pop_cfun ();
13570 }
13571
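/* Schematically (an illustrative sketch, not literal GIMPLE), the clone
   body produced above has the shape

       iter = 0;
       <copy vector arguments into their simd arrays>
     body:
       <original body, with argument uses and returns redirected to the
        simd arrays; skipped for masked-off lanes when inbranch>
     incr:
       iter = iter + 1;
       if (iter < simdlen) goto body;
     exit:
       return VIEW_CONVERT_EXPR <vector return type> (retval array);

   annotated with safelen and force_vectorize so the vectorizer can turn
   the loop back into straight-line vector code.  */
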
13572 /* If the function in NODE is tagged as an elemental SIMD function,
13573 create the appropriate SIMD clones. */
13574
13575 static void
13576 expand_simd_clones (struct cgraph_node *node)
13577 {
13578 tree attr = lookup_attribute ("omp declare simd",
13579 DECL_ATTRIBUTES (node->decl));
13580 if (attr == NULL_TREE
13581 || node->global.inlined_to
13582 || lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
13583 return;
13584
13585 /* Ignore
13586 #pragma omp declare simd
13587 extern int foo ();
13588 in C, where we don't know the argument types at all. */
13589 if (!node->definition
13590 && TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
13591 return;
13592
13593 do
13594 {
13595 /* Start with parsing the "omp declare simd" attribute(s). */
13596 bool inbranch_clause_specified;
13597 struct cgraph_simd_clone *clone_info
13598 = simd_clone_clauses_extract (node, TREE_VALUE (attr),
13599 &inbranch_clause_specified);
13600 if (clone_info == NULL)
13601 continue;
13602
13603 int orig_simdlen = clone_info->simdlen;
13604 tree base_type = simd_clone_compute_base_data_type (node, clone_info);
13605 /* The target can return 0 (no simd clones should be created),
13606 1 (just one ISA of simd clones should be created) or higher
13607 count of ISA variants. In that case, clone_info is initialized
13608 for the first ISA variant. */
13609 int count
13610 = targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
13611 base_type, 0);
13612 if (count == 0)
13613 continue;
13614
13615 /* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
13616 also create one inbranch and one !inbranch clone of it. */
13617 for (int i = 0; i < count * 2; i++)
13618 {
13619 struct cgraph_simd_clone *clone = clone_info;
13620 if (inbranch_clause_specified && (i & 1) != 0)
13621 continue;
13622
13623 if (i != 0)
13624 {
13625 clone = simd_clone_struct_alloc (clone_info->nargs
13626 + ((i & 1) != 0));
13627 simd_clone_struct_copy (clone, clone_info);
13628 /* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
13629 and simd_clone_adjust_argument_types did to the first
13630 clone's info. */
13631 clone->nargs -= clone_info->inbranch;
13632 clone->simdlen = orig_simdlen;
13633 /* And call the target hook again to get the right ISA. */
13634 targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
13635 base_type,
13636 i / 2);
13637 if ((i & 1) != 0)
13638 clone->inbranch = 1;
13639 }
13640
13641 /* simd_clone_mangle might fail if such a clone has been created
13642 already. */
13643 tree id = simd_clone_mangle (node, clone);
13644 if (id == NULL_TREE)
13645 continue;
13646
13647 /* Only when we are sure we want to create the clone actually
13648 clone the function (or definitions) or create another
13649 extern FUNCTION_DECL (for prototypes without definitions). */
13650 struct cgraph_node *n = simd_clone_create (node);
13651 if (n == NULL)
13652 continue;
13653
13654 n->simdclone = clone;
13655 clone->origin = node;
13656 clone->next_clone = NULL;
13657 if (node->simd_clones == NULL)
13658 {
13659 clone->prev_clone = n;
13660 node->simd_clones = n;
13661 }
13662 else
13663 {
13664 clone->prev_clone = node->simd_clones->simdclone->prev_clone;
13665 clone->prev_clone->simdclone->next_clone = n;
13666 node->simd_clones->simdclone->prev_clone = n;
13667 }
13668 symtab->change_decl_assembler_name (n->decl, id);
13669 /* And finally adjust the return type, parameters and for
13670 definitions also function body. */
13671 if (node->definition)
13672 simd_clone_adjust (n);
13673 else
13674 {
13675 simd_clone_adjust_return_type (n);
13676 simd_clone_adjust_argument_types (n);
13677 }
13678 }
13679 }
13680 while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
13681 }
13682
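/* As an illustrative example (clone names and simdlen depend on the
   target hook), for

       #pragma omp declare simd
       int foo (int a, int b);

   without an inbranch/notinbranch clause, the loop above asks the target
   for each ISA variant and creates both a masked and an unmasked clone
   per variant, e.g. something like _ZGVbM4vv_foo and _ZGVbN4vv_foo for
   one ISA, and similarly for the remaining variants, all chained on
   node->simd_clones.  */
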
13683 /* Entry point for IPA simd clone creation pass. */
13684
13685 static unsigned int
13686 ipa_omp_simd_clone (void)
13687 {
13688 struct cgraph_node *node;
13689 FOR_EACH_FUNCTION (node)
13690 expand_simd_clones (node);
13691 return 0;
13692 }
13693
13694 static const pass_data pass_data_omp_simd_clone =
13695 {
13696 SIMPLE_IPA_PASS, /* type */
13697 "simdclone", /* name */
13698 OPTGROUP_NONE, /* optinfo_flags */
13699 TV_NONE, /* tv_id */
13700 ( PROP_ssa | PROP_cfg ), /* properties_required */
13701 0, /* properties_provided */
13702 0, /* properties_destroyed */
13703 0, /* todo_flags_start */
13704 0, /* todo_flags_finish */
13705 };
13706
13707 class pass_omp_simd_clone GCC_FINAL : public simple_ipa_opt_pass
13708 {
13709 public:
13710 pass_omp_simd_clone(gcc::context *ctxt)
13711 : simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
13712 {}
13713
13714 /* opt_pass methods: */
13715 virtual bool gate (function *);
13716 virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
13717 };
13718
13719 bool
13720 pass_omp_simd_clone::gate (function *)
13721 {
13722 return ((flag_openmp || flag_openmp_simd
13723 || flag_cilkplus
13724 || (in_lto_p && !flag_wpa))
13725 && (targetm.simd_clone.compute_vecsize_and_simdlen != NULL));
13726 }
13727
13728 simple_ipa_opt_pass *
13729 make_pass_omp_simd_clone (gcc::context *ctxt)
13730 {
13731 return new pass_omp_simd_clone (ctxt);
13732 }
13733
13734 /* Helper function for the omp_finish_file routine. Takes decls from V_DECLS
13735 and adds their addresses and sizes to the constructor vector V_CTOR. */
13736 static void
13737 add_decls_addresses_to_decl_constructor (vec<tree, va_gc> *v_decls,
13738 vec<constructor_elt, va_gc> *v_ctor)
13739 {
13740 unsigned len = vec_safe_length (v_decls);
13741 for (unsigned i = 0; i < len; i++)
13742 {
13743 tree it = (*v_decls)[i];
13744 bool is_function = TREE_CODE (it) != VAR_DECL;
13745
13746 CONSTRUCTOR_APPEND_ELT (v_ctor, NULL_TREE, build_fold_addr_expr (it));
13747 if (!is_function)
13748 CONSTRUCTOR_APPEND_ELT (v_ctor, NULL_TREE,
13749 fold_convert (const_ptr_type_node,
13750 DECL_SIZE_UNIT (it)));
13751 }
13752 }
13753
13754 /* Create new symbols containing (address, size) pairs for global variables
13755 marked with the "omp declare target" attribute, as well as addresses for
13756 functions that are outlined offloading regions. */
13757 void
13758 omp_finish_file (void)
13759 {
13760 unsigned num_funcs = vec_safe_length (offload_funcs);
13761 unsigned num_vars = vec_safe_length (offload_vars);
13762
13763 if (num_funcs == 0 && num_vars == 0)
13764 return;
13765
13766 if (targetm_common.have_named_sections)
13767 {
13768 vec<constructor_elt, va_gc> *v_f, *v_v;
13769 vec_alloc (v_f, num_funcs);
13770 vec_alloc (v_v, num_vars * 2);
13771
13772 add_decls_addresses_to_decl_constructor (offload_funcs, v_f);
13773 add_decls_addresses_to_decl_constructor (offload_vars, v_v);
13774
13775 tree vars_decl_type = build_array_type_nelts (pointer_sized_int_node,
13776 num_vars * 2);
13777 tree funcs_decl_type = build_array_type_nelts (pointer_sized_int_node,
13778 num_funcs);
13779 TYPE_ALIGN (vars_decl_type) = TYPE_ALIGN (pointer_sized_int_node);
13780 TYPE_ALIGN (funcs_decl_type) = TYPE_ALIGN (pointer_sized_int_node);
13781 tree ctor_v = build_constructor (vars_decl_type, v_v);
13782 tree ctor_f = build_constructor (funcs_decl_type, v_f);
13783 TREE_CONSTANT (ctor_v) = TREE_CONSTANT (ctor_f) = 1;
13784 TREE_STATIC (ctor_v) = TREE_STATIC (ctor_f) = 1;
13785 tree funcs_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
13786 get_identifier (".offload_func_table"),
13787 funcs_decl_type);
13788 tree vars_decl = build_decl (UNKNOWN_LOCATION, VAR_DECL,
13789 get_identifier (".offload_var_table"),
13790 vars_decl_type);
13791 TREE_STATIC (funcs_decl) = TREE_STATIC (vars_decl) = 1;
13792 /* Do not align tables more than TYPE_ALIGN (pointer_sized_int_node),
13793 otherwise a joint table in a binary will contain padding between
13794 tables from multiple object files. */
13795 DECL_USER_ALIGN (funcs_decl) = DECL_USER_ALIGN (vars_decl) = 1;
13796 DECL_ALIGN (funcs_decl) = TYPE_ALIGN (funcs_decl_type);
13797 DECL_ALIGN (vars_decl) = TYPE_ALIGN (vars_decl_type);
13798 DECL_INITIAL (funcs_decl) = ctor_f;
13799 DECL_INITIAL (vars_decl) = ctor_v;
13800 set_decl_section_name (funcs_decl, OFFLOAD_FUNC_TABLE_SECTION_NAME);
13801 set_decl_section_name (vars_decl, OFFLOAD_VAR_TABLE_SECTION_NAME);
13802
13803 varpool_node::finalize_decl (vars_decl);
13804 varpool_node::finalize_decl (funcs_decl);
13805 }
13806 else
13807 {
13808 for (unsigned i = 0; i < num_funcs; i++)
13809 {
13810 tree it = (*offload_funcs)[i];
13811 targetm.record_offload_symbol (it);
13812 }
13813 for (unsigned i = 0; i < num_vars; i++)
13814 {
13815 tree it = (*offload_vars)[i];
13816 targetm.record_offload_symbol (it);
13817 }
13818 }
13819 }
13820
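/* Schematically (an illustrative sketch), on targets with named sections
   the function above emits

       .offload_func_table: { &fn0, &fn1, ... }
       .offload_var_table:  { &var0, sizeof (var0), &var1, sizeof (var1), ... }

   both with pointer-size alignment so that tables from different object
   files can be concatenated by the linker without padding.  */
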
13821 #include "gt-omp-low.h"