1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 2, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to the Free
22 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
23 02110-1301, USA. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "tree-gimple.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic.h"
35 #include "tree-flow.h"
36 #include "timevar.h"
37 #include "flags.h"
38 #include "function.h"
39 #include "expr.h"
40 #include "toplev.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44
45
46 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
47 phases. The first phase scans the function looking for OMP statements
48 and then for variables that must be replaced to satisfy data sharing
49 clauses. The second phase expands code for the constructs, as well as
50 re-gimplifying things when variables have been replaced with complex
51 expressions.
52
53 Final code generation is done by pass_expand_omp. The flowgraph is
54 scanned for parallel regions which are then moved to a new
55 function, to be invoked by the thread library. */
56
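/* As a rough sketch (simplified; the actual expansion is performed by
   the routines below), a directive such as

     #pragma omp parallel shared (a)
       body;

   ends up as an outlined child function plus explicit libgomp calls:

     .omp_data_o.a = a;
     GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
     foo.omp_fn.0 (&.omp_data_o);
     GOMP_parallel_end ();  */
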
57 /* Parallel region information. Every parallel and workshare
58 directive is enclosed between two markers, the OMP_* directive
59 and a corresponding OMP_RETURN_EXPR statement. */
60
61 struct omp_region GTY(())
62 {
63 /* The enclosing region. */
64 struct omp_region *outer;
65
66 /* First child region. */
67 struct omp_region *inner;
68
69 /* Next peer region. */
70 struct omp_region *next;
71
72 /* Entry point to this region. */
73 tree entry;
74
75 /* Exit label from this region. */
76 tree exit;
77
78 /* Region number. */
79 int num;
80
81 /* True if this is a combined parallel+workshare region. */
82 bool is_combined_parallel;
83
84 /* If this is a combined parallel+workshare region, this is a list
85 of additional arguments needed by the combined parallel+workshare
86 library call. */
87 tree ws_args;
88 };
89
90
91 /* Context structure. Used to store information about each parallel
92 directive in the code. */
93
94 typedef struct omp_context
95 {
96 /* This field must be at the beginning, as we do "inheritance": Some
97 callback functions for tree-inline.c (e.g., omp_copy_decl)
98 receive a copy_body_data pointer that is upcast to an
99 omp_context pointer. */
100 copy_body_data cb;
101
102 /* The tree of contexts corresponding to the encountered constructs. */
103 struct omp_context *outer;
104 tree stmt;
105
106 /* Map variables to fields in a structure that allows communication
107 between sending and receiving threads. */
108 splay_tree field_map;
109 tree record_type;
110 tree sender_decl;
111 tree receiver_decl;
112
113 /* A chain of variables to add to the top-level block surrounding the
114 construct. In the case of a parallel, this is in the child function. */
115 tree block_vars;
116
117 /* What to do with variables with implicitly determined sharing
118 attributes. */
119 enum omp_clause_default_kind default_kind;
120
121 /* Nesting depth of this context. Used to beautify error messages re
122 invalid gotos. The outermost ctx is depth 1, with depth 0 being
123 reserved for the main body of the function. */
124 int depth;
125
126 /* True if this parallel directive is nested within another. */
127 bool is_nested;
128 } omp_context;
129
130
131 /* A structure describing the main elements of a parallel loop. */
132
133 struct omp_for_data
134 {
135 tree v, n1, n2, step, chunk_size, for_stmt;
136 enum tree_code cond_code;
137 tree pre;
138 bool have_nowait, have_ordered;
139 enum omp_clause_schedule_kind sched_kind;
140 };
141
142
143 static splay_tree all_contexts;
144 static int parallel_nesting_level;
145 static splay_tree omp_regions;
146 static struct omp_region *root_omp_region;
147
148 static void scan_omp (tree *, omp_context *);
149 static void lower_omp (tree *, omp_context *);
150
151 /* Find an OpenMP clause of type KIND within CLAUSES. */
152
153 static tree
154 find_omp_clause (tree clauses, enum tree_code kind)
155 {
156 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
157 if (OMP_CLAUSE_CODE (clauses) == kind)
158 return clauses;
159
160 return NULL_TREE;
161 }
162
163 /* Return true if CTX is for an omp parallel. */
164
165 static inline bool
166 is_parallel_ctx (omp_context *ctx)
167 {
168 return TREE_CODE (ctx->stmt) == OMP_PARALLEL;
169 }
170
171
172 /* Return true if REGION is a combined parallel+workshare region. */
173
174 static inline bool
175 is_combined_parallel (struct omp_region *region)
176 {
177 return region->is_combined_parallel;
178 }
179
180
181 /* Extract the header elements of parallel loop FOR_STMT and store
182 them into *FD. */
183
184 static void
185 extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
186 {
187 tree t;
188
189 fd->for_stmt = for_stmt;
190 fd->pre = NULL;
191
192 t = OMP_FOR_INIT (for_stmt);
193 gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
194 fd->v = TREE_OPERAND (t, 0);
195 gcc_assert (DECL_P (fd->v));
196 gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
197 fd->n1 = TREE_OPERAND (t, 1);
198
199 t = OMP_FOR_COND (for_stmt);
200 fd->cond_code = TREE_CODE (t);
201 gcc_assert (TREE_OPERAND (t, 0) == fd->v);
202 fd->n2 = TREE_OPERAND (t, 1);
203 switch (fd->cond_code)
204 {
205 case LT_EXPR:
206 case GT_EXPR:
207 break;
208 case LE_EXPR:
209 fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
210 build_int_cst (TREE_TYPE (fd->n2), 1));
211 fd->cond_code = LT_EXPR;
212 break;
213 case GE_EXPR:
214 fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
215 build_int_cst (TREE_TYPE (fd->n2), 1));
216 fd->cond_code = GT_EXPR;
217 break;
218 default:
219 gcc_unreachable ();
220 }
221
222 t = OMP_FOR_INCR (fd->for_stmt);
223 gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
224 gcc_assert (TREE_OPERAND (t, 0) == fd->v);
225 t = TREE_OPERAND (t, 1);
226 gcc_assert (TREE_OPERAND (t, 0) == fd->v);
227 switch (TREE_CODE (t))
228 {
229 case PLUS_EXPR:
230 fd->step = TREE_OPERAND (t, 1);
231 break;
232 case MINUS_EXPR:
233 fd->step = TREE_OPERAND (t, 1);
234 fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step);
235 break;
236 default:
237 gcc_unreachable ();
238 }
239
240 fd->have_nowait = fd->have_ordered = false;
241 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
242 fd->chunk_size = NULL_TREE;
243
244 for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
245 switch (OMP_CLAUSE_CODE (t))
246 {
247 case OMP_CLAUSE_NOWAIT:
248 fd->have_nowait = true;
249 break;
250 case OMP_CLAUSE_ORDERED:
251 fd->have_ordered = true;
252 break;
253 case OMP_CLAUSE_SCHEDULE:
254 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
255 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
256 break;
257 default:
258 break;
259 }
260
261 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
262 gcc_assert (fd->chunk_size == NULL);
263 else if (fd->chunk_size == NULL)
264 {
265 /* We only need to compute a default chunk size for ordered
266 static loops and dynamic loops. */
267 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)
268 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
269 ? integer_zero_node : integer_one_node;
270 }
271 }
272
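/* For illustration (a sketch, not emitted code): with the
   normalization above, the loop

     #pragma omp for
     for (i = 0; i <= N; i++)

   is recorded as v = i, n1 = 0, n2 = N + 1, step = 1 and
   cond_code = LT_EXPR, so later phases only see the strict
   comparisons LT_EXPR and GT_EXPR; likewise i >= M becomes
   i > M - 1, and a MINUS_EXPR increment is folded into a
   negated step.  */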
273
274 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
275 is immediately dominated by PAR_ENTRY_BB, return true if there
276 are no data dependencies that would prevent expanding the parallel
277 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
278
279 When expanding a combined parallel+workshare region, the call to
280 the child function may need additional arguments in the case of
281 OMP_FOR regions. In some cases, these arguments are computed out
282 of variables passed in from the parent to the child via 'struct
283 .omp_data_s'. For instance:
284
285 #pragma omp parallel for schedule (guided, i * 4)
286 for (j ...)
287
288 Is lowered into:
289
290 # BLOCK 2 (PAR_ENTRY_BB)
291 .omp_data_o.i = i;
292 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
293
294 # BLOCK 3 (WS_ENTRY_BB)
295 .omp_data_i = &.omp_data_o;
296 D.1667 = .omp_data_i->i;
297 D.1598 = D.1667 * 4;
298 #pragma omp for schedule (guided, D.1598)
299
300 When we outline the parallel region, the call to the child function
301 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
302 that value is computed *after* the call site. So, in principle we
303 cannot do the transformation.
304
305 To see whether the code in WS_ENTRY_BB blocks the combined
306 parallel+workshare call, we collect all the variables used in the
307 OMP_FOR header and check whether they appear on the LHS of any
308 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
309 call.
310
311 FIXME. If we had the SSA form built at this point, we could merely
312 hoist the code in block 3 into block 2 and be done with it. But at
313 this point we don't have dataflow information and though we could
314 hack something up here, it is really not worth the aggravation. */
315
316 static bool
317 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
318 {
319 struct omp_for_data fd;
320 tree par_stmt, ws_stmt;
321
322 par_stmt = last_stmt (par_entry_bb);
323 ws_stmt = last_stmt (ws_entry_bb);
324
325 if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
326 return true;
327
328 gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);
329
330 extract_omp_for_data (ws_stmt, &fd);
331
332 /* FIXME. We give up too easily here. If any of these arguments
333 are not constants, they will likely involve variables that have
334 been mapped into fields of .omp_data_s for sharing with the child
335 function. With appropriate data flow, it would be possible to
336 see through this. */
337 if (!is_gimple_min_invariant (fd.n1)
338 || !is_gimple_min_invariant (fd.n2)
339 || !is_gimple_min_invariant (fd.step)
340 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
341 return false;
342
343 return true;
344 }
345
346
347 /* Collect additional arguments needed to emit a combined
348 parallel+workshare call. WS_STMT is the workshare directive being
349 expanded. */
350
351 static tree
352 get_ws_args_for (tree ws_stmt)
353 {
354 tree t;
355
356 if (TREE_CODE (ws_stmt) == OMP_FOR)
357 {
358 struct omp_for_data fd;
359 tree ws_args;
360
361 extract_omp_for_data (ws_stmt, &fd);
362
363 ws_args = NULL_TREE;
364 if (fd.chunk_size)
365 {
366 t = fold_convert (long_integer_type_node, fd.chunk_size);
367 ws_args = tree_cons (NULL, t, ws_args);
368 }
369
370 t = fold_convert (long_integer_type_node, fd.step);
371 ws_args = tree_cons (NULL, t, ws_args);
372
373 t = fold_convert (long_integer_type_node, fd.n2);
374 ws_args = tree_cons (NULL, t, ws_args);
375
376 t = fold_convert (long_integer_type_node, fd.n1);
377 ws_args = tree_cons (NULL, t, ws_args);
378
379 return ws_args;
380 }
381 else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
382 {
383 basic_block bb = bb_for_stmt (ws_stmt);
384 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs));
385 t = tree_cons (NULL, t, NULL);
386 return t;
387 }
388
389 gcc_unreachable ();
390 }
391
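/* For instance (a sketch): given

     #pragma omp parallel for schedule (dynamic, 4)
     for (i = a; i < b; i += 2)

   the list built above is (a, b, 2, 4) -- lower bound, upper bound,
   step and chunk size, each converted to long -- to be appended to
   the combined GOMP_parallel_loop_*_start call.  */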
392
393 /* Discover whether REGION is a combined parallel+workshare region. */
394
395 static void
396 determine_parallel_type (struct omp_region *region)
397 {
398 basic_block par_entry_bb, par_exit_bb;
399 basic_block ws_entry_bb, ws_exit_bb;
400
401 if (region == NULL || region->inner == NULL)
402 return;
403
404 /* We only support parallel+for and parallel+sections. */
405 if (TREE_CODE (region->entry) != OMP_PARALLEL
406 || (TREE_CODE (region->inner->entry) != OMP_FOR
407 && TREE_CODE (region->inner->entry) != OMP_SECTIONS))
408 return;
409
410 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
411 WS_EXIT_BB -> PAR_EXIT_BB. */
412 par_entry_bb = bb_for_stmt (region->entry);
413 par_exit_bb = bb_for_stmt (region->exit);
414
415 ws_entry_bb = bb_for_stmt (region->inner->entry);
416 ws_exit_bb = bb_for_stmt (region->inner->exit);
417
418 if (single_succ (par_entry_bb) == ws_entry_bb
419 && single_succ (ws_exit_bb) == par_exit_bb
420 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb))
421 {
422 if (TREE_CODE (region->inner->entry) == OMP_FOR)
423 {
424 /* If this is a combined parallel loop, we need to determine
425 whether or not to use the combined library calls. There
426 are two cases where we do not apply the transformation:
427 static loops and any kind of ordered loop. In the first
428 case, we already open code the loop so there is no need
429 to do anything else. In the latter case, the combined
430 parallel loop call would still need extra synchronization
431 to implement ordered semantics, so there would not be any
432 gain in using the combined call. */
433 tree clauses = OMP_FOR_CLAUSES (region->inner->entry);
434 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
435 if (c == NULL
436 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
437 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
438 {
439 region->is_combined_parallel = false;
440 region->inner->is_combined_parallel = false;
441 return;
442 }
443 }
444
445 region->is_combined_parallel = true;
446 region->inner->is_combined_parallel = true;
447 region->ws_args = get_ws_args_for (region->inner->entry);
448 }
449 }
450
451
452 /* Return true if EXPR is variable sized. */
453
454 static inline bool
455 is_variable_sized (tree expr)
456 {
457 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
458 }
459
460 /* Return true if DECL is a reference type. */
461
462 static inline bool
463 is_reference (tree decl)
464 {
465 return lang_hooks.decls.omp_privatize_by_reference (decl);
466 }
467
468 /* Lookup variables in the decl or field splay trees. The "maybe" form
469 allows the variable not to have been entered; otherwise we assert
470 that the variable must have been entered. */
471
472 static inline tree
473 lookup_decl (tree var, omp_context *ctx)
474 {
475 splay_tree_node n;
476 n = splay_tree_lookup (ctx->cb.decl_map, (splay_tree_key) var);
477 return (tree) n->value;
478 }
479
480 static inline tree
481 maybe_lookup_decl (tree var, omp_context *ctx)
482 {
483 splay_tree_node n;
484 n = splay_tree_lookup (ctx->cb.decl_map, (splay_tree_key) var);
485 return n ? (tree) n->value : NULL_TREE;
486 }
487
488 static inline tree
489 lookup_field (tree var, omp_context *ctx)
490 {
491 splay_tree_node n;
492 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
493 return (tree) n->value;
494 }
495
496 static inline tree
497 maybe_lookup_field (tree var, omp_context *ctx)
498 {
499 splay_tree_node n;
500 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
501 return n ? (tree) n->value : NULL_TREE;
502 }
503
504 /* Return true if DECL should be copied by pointer. SHARED_P is true
505 if DECL is to be shared. */
506
507 static bool
508 use_pointer_for_field (tree decl, bool shared_p)
509 {
510 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
511 return true;
512
513 /* We can only use copy-in/copy-out semantics for shared variables
514 when we know the value is not accessible from an outer scope. */
515 if (shared_p)
516 {
517 /* ??? Trivially accessible from anywhere. But why would we even
518 be passing an address in this case? Should we simply assert
519 this to be false, or should we have a cleanup pass that removes
520 these from the list of mappings? */
521 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
522 return true;
523
524 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
525 without analyzing the expression whether or not its location
526 is accessible to anyone else. In the case of nested parallel
527 regions it certainly may be. */
528 if (DECL_HAS_VALUE_EXPR_P (decl))
529 return true;
530
531 /* Do not use copy-in/copy-out for variables that have their
532 address taken. */
533 if (TREE_ADDRESSABLE (decl))
534 return true;
535 }
536
537 return false;
538 }
539
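/* For example (a sketch): a local 'int i' named in a shared clause can
   use copy-in/copy-out, so it is stored by value in .omp_data_s.  An
   aggregate, a static variable, or anything whose address is taken
   must be passed by pointer instead, since other code may observe the
   original location.  */
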
540 /* Construct a new automatic decl similar to VAR. */
541
542 static tree
543 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
544 {
545 tree copy = build_decl (VAR_DECL, name, type);
546
547 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
548 DECL_COMPLEX_GIMPLE_REG_P (copy) = DECL_COMPLEX_GIMPLE_REG_P (var);
549 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
550 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
551 TREE_USED (copy) = 1;
552 DECL_CONTEXT (copy) = current_function_decl;
553 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
554
555 TREE_CHAIN (copy) = ctx->block_vars;
556 ctx->block_vars = copy;
557
558 return copy;
559 }
560
561 static tree
562 omp_copy_decl_1 (tree var, omp_context *ctx)
563 {
564 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
565 }
566
567 /* Build tree nodes to access the field for VAR on the receiver side. */
568
569 static tree
570 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
571 {
572 tree x, field = lookup_field (var, ctx);
573
574 /* If the receiver record type was remapped in the child function,
575 remap the field into the new record type. */
576 x = maybe_lookup_field (field, ctx);
577 if (x != NULL)
578 field = x;
579
580 x = build_fold_indirect_ref (ctx->receiver_decl);
581 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
582 if (by_ref)
583 x = build_fold_indirect_ref (x);
584
585 return x;
586 }
587
588 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
589 of a parallel, this is a component reference; for workshare constructs
590 this is some variable. */
591
592 static tree
593 build_outer_var_ref (tree var, omp_context *ctx)
594 {
595 tree x;
596
597 if (is_global_var (var))
598 x = var;
599 else if (is_variable_sized (var))
600 {
601 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
602 x = build_outer_var_ref (x, ctx);
603 x = build_fold_indirect_ref (x);
604 }
605 else if (is_parallel_ctx (ctx))
606 {
607 bool by_ref = use_pointer_for_field (var, false);
608 x = build_receiver_ref (var, by_ref, ctx);
609 }
610 else if (ctx->outer)
611 x = lookup_decl (var, ctx->outer);
612 else
613 gcc_unreachable ();
614
615 if (is_reference (var))
616 x = build_fold_indirect_ref (x);
617
618 return x;
619 }
620
621 /* Build tree nodes to access the field for VAR on the sender side. */
622
623 static tree
624 build_sender_ref (tree var, omp_context *ctx)
625 {
626 tree field = lookup_field (var, ctx);
627 return build3 (COMPONENT_REF, TREE_TYPE (field),
628 ctx->sender_decl, field, NULL);
629 }
630
631 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
632
633 static void
634 install_var_field (tree var, bool by_ref, omp_context *ctx)
635 {
636 tree field, type;
637
638 gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
639
640 type = TREE_TYPE (var);
641 if (by_ref)
642 type = build_pointer_type (type);
643
644 field = build_decl (FIELD_DECL, DECL_NAME (var), type);
645
646 /* Remember what variable this field was created for. This has the
647 side effect of making dwarf2out ignore this member, so to keep the
648 debug information useful we clear it later, in delete_omp_context. */
649 DECL_ABSTRACT_ORIGIN (field) = var;
650
651 insert_field_into_struct (ctx->record_type, field);
652
653 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
654 (splay_tree_value) field);
655 }
656
657 static tree
658 install_var_local (tree var, omp_context *ctx)
659 {
660 tree new_var = omp_copy_decl_1 (var, ctx);
661 insert_decl_map (&ctx->cb, var, new_var);
662 return new_var;
663 }
664
665 /* Adjust the replacement for DECL in CTX for the new context. This means
666 copying the DECL_VALUE_EXPR, and fixing up the type. */
667
668 static void
669 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
670 {
671 tree new_decl, size;
672
673 new_decl = lookup_decl (decl, ctx);
674
675 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
676
677 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
678 && DECL_HAS_VALUE_EXPR_P (decl))
679 {
680 tree ve = DECL_VALUE_EXPR (decl);
681 walk_tree (&ve, copy_body_r, &ctx->cb, NULL);
682 SET_DECL_VALUE_EXPR (new_decl, ve);
683 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
684 }
685
686 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
687 {
688 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
689 if (size == error_mark_node)
690 size = TYPE_SIZE (TREE_TYPE (new_decl));
691 DECL_SIZE (new_decl) = size;
692
693 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
694 if (size == error_mark_node)
695 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
696 DECL_SIZE_UNIT (new_decl) = size;
697 }
698 }
699
700 /* The callback for remap_decl. Search all containing contexts for a
701 mapping of the variable; this avoids having to duplicate the splay
702 tree ahead of time. We know a mapping doesn't already exist in the
703 given context. Create new mappings to implement default semantics. */
704
705 static tree
706 omp_copy_decl (tree var, copy_body_data *cb)
707 {
708 omp_context *ctx = (omp_context *) cb;
709 tree new_var;
710
711 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
712 return var;
713
714 if (TREE_CODE (var) == LABEL_DECL)
715 {
716 new_var = create_artificial_label ();
717 DECL_CONTEXT (new_var) = current_function_decl;
718 insert_decl_map (&ctx->cb, var, new_var);
719 return new_var;
720 }
721
722 while (!is_parallel_ctx (ctx))
723 {
724 ctx = ctx->outer;
725 if (ctx == NULL)
726 return var;
727 new_var = maybe_lookup_decl (var, ctx);
728 if (new_var)
729 return new_var;
730 }
731
732 return error_mark_node;
733 }
734
735
736 /* Return the parallel region associated with STMT. */
737
738 static inline struct omp_region *
739 lookup_omp_region (tree stmt)
740 {
741 splay_tree_node n = splay_tree_lookup (omp_regions, (splay_tree_key) stmt);
742 return n ? (struct omp_region *) n->value : NULL;
743 }
744
745
746 /* Debugging dumps for parallel regions. */
747 void dump_omp_region (FILE *, struct omp_region *, int);
748 void debug_omp_region (struct omp_region *);
749 void debug_all_omp_regions (void);
750
751 /* Dump the parallel region tree rooted at REGION. */
752
753 void
754 dump_omp_region (FILE *file, struct omp_region *region, int indent)
755 {
756 fprintf (file, "%*s", indent, "");
757 print_generic_stmt (file, region->entry, TDF_SLIM);
758
759 if (region->inner)
760 dump_omp_region (file, region->inner, indent + 4);
761
762 fprintf (file, "%*s", indent, "");
763 if (region->exit)
764 print_generic_stmt (file, region->exit, 0);
765 else
766 fprintf (file, "[no exit marker]\n");
767
768 if (region->next)
769 {
770 fprintf (file, "\n");
771 dump_omp_region (file, region->next, indent);
772 }
773 }
774
775 void
776 debug_omp_region (struct omp_region *region)
777 {
778 dump_omp_region (stderr, region, 0);
779 }
780
781 void
782 debug_all_omp_regions (void)
783 {
784 dump_omp_region (stderr, root_omp_region, 0);
785 }
786
787
788 /* Create a new parallel region starting at STMT inside region PARENT. */
789
790 static struct omp_region *
791 new_omp_region (tree stmt, struct omp_region *parent)
792 {
793 struct omp_region *region = ggc_alloc_cleared (sizeof (*region));
794 static int num = 0;
795
796 region->outer = parent;
797 region->entry = stmt;
798 region->num = num++;
799
800 if (parent)
801 {
802 /* This is a nested region. Add it to the list of inner
803 regions in PARENT. */
804 region->next = parent->inner;
805 parent->inner = region;
806 }
807 else if (omp_regions)
808 {
809 /* This is a toplevel region. Add it to the list of toplevel
810 regions in ROOT_OMP_REGION. */
811 region->next = root_omp_region;
812 root_omp_region = region;
813 }
814 else
815 {
816 /* Create a new root region with the first region we find. */
817 root_omp_region = region;
818 omp_regions = splay_tree_new (splay_tree_compare_pointers, 0, 0);
819 }
820
821 splay_tree_insert (omp_regions, (splay_tree_key) stmt,
822 (splay_tree_value) region);
823
824 return region;
825 }
826
827
828 /* Create a new context, with OUTER_CTX being the surrounding context. */
829
830 static omp_context *
831 new_omp_context (tree stmt, omp_context *outer_ctx)
832 {
833 omp_context *ctx = XCNEW (omp_context);
834
835 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
836 (splay_tree_value) ctx);
837 ctx->stmt = stmt;
838
839 if (outer_ctx)
840 {
841 ctx->outer = outer_ctx;
842 ctx->cb = outer_ctx->cb;
843 ctx->cb.block = NULL;
844 ctx->depth = outer_ctx->depth + 1;
845 }
846 else
847 {
848 ctx->cb.src_fn = current_function_decl;
849 ctx->cb.dst_fn = current_function_decl;
850 ctx->cb.src_node = cgraph_node (current_function_decl);
851 ctx->cb.dst_node = ctx->cb.src_node;
852 ctx->cb.src_cfun = cfun;
853 ctx->cb.copy_decl = omp_copy_decl;
854 ctx->cb.eh_region = -1;
855 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
856 ctx->depth = 1;
857 }
858
859 ctx->cb.decl_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
860
861 return ctx;
862 }
863
864 /* Destroy an omp_context data structure. Called through the splay tree
865 value delete callback. */
866
867 static void
868 delete_omp_context (splay_tree_value value)
869 {
870 omp_context *ctx = (omp_context *) value;
871
872 splay_tree_delete (ctx->cb.decl_map);
873
874 if (ctx->field_map)
875 splay_tree_delete (ctx->field_map);
876
877 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it now,
878 lest it produce corrupt debug information. */
879 if (ctx->record_type)
880 {
881 tree t;
882 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
883 DECL_ABSTRACT_ORIGIN (t) = NULL;
884 }
885
886 XDELETE (ctx);
887 }
888
889 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
890 context. */
891
892 static void
893 fixup_child_record_type (omp_context *ctx)
894 {
895 tree f, type = ctx->record_type;
896
897 /* ??? It isn't sufficient to just call remap_type here, because
898 variably_modified_type_p doesn't work the way we expect for
899 record types. Testing each field for whether it needs remapping
900 and creating a new record by hand works, however. */
901 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
902 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
903 break;
904 if (f)
905 {
906 tree name, new_fields = NULL;
907
908 type = lang_hooks.types.make_type (RECORD_TYPE);
909 name = DECL_NAME (TYPE_NAME (ctx->record_type));
910 name = build_decl (TYPE_DECL, name, type);
911 TYPE_NAME (type) = name;
912
913 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
914 {
915 tree new_f = copy_node (f);
916 DECL_CONTEXT (new_f) = type;
917 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
918 TREE_CHAIN (new_f) = new_fields;
919 new_fields = new_f;
920
921 /* Arrange to be able to look up the receiver field
922 given the sender field. */
923 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
924 (splay_tree_value) new_f);
925 }
926 TYPE_FIELDS (type) = nreverse (new_fields);
927 layout_type (type);
928 }
929
930 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
931 }
932
933 /* Instantiate decls as necessary in CTX to satisfy the data sharing
934 specified by CLAUSES. */
935
936 static void
937 scan_sharing_clauses (tree clauses, omp_context *ctx)
938 {
939 tree c, decl;
940 bool scan_array_reductions = false;
941
942 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
943 {
944 bool by_ref;
945
946 switch (OMP_CLAUSE_CODE (c))
947 {
948 case OMP_CLAUSE_PRIVATE:
949 decl = OMP_CLAUSE_DECL (c);
950 if (!is_variable_sized (decl))
951 install_var_local (decl, ctx);
952 break;
953
954 case OMP_CLAUSE_SHARED:
955 gcc_assert (is_parallel_ctx (ctx));
956 decl = OMP_CLAUSE_DECL (c);
957 gcc_assert (!is_variable_sized (decl));
958 by_ref = use_pointer_for_field (decl, true);
959 if (! TREE_READONLY (decl)
960 || TREE_ADDRESSABLE (decl)
961 || by_ref
962 || is_reference (decl))
963 {
964 install_var_field (decl, by_ref, ctx);
965 install_var_local (decl, ctx);
966 break;
967 }
968 /* We don't need to copy const scalar vars back. */
969 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
970 goto do_private;
971
972 case OMP_CLAUSE_LASTPRIVATE:
973 /* Let the corresponding firstprivate clause create
974 the variable. */
975 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
976 break;
977 /* FALLTHRU */
978
979 case OMP_CLAUSE_FIRSTPRIVATE:
980 case OMP_CLAUSE_REDUCTION:
981 decl = OMP_CLAUSE_DECL (c);
982 do_private:
983 if (is_variable_sized (decl))
984 break;
985 else if (is_parallel_ctx (ctx))
986 {
987 by_ref = use_pointer_for_field (decl, false);
988 install_var_field (decl, by_ref, ctx);
989 }
990 install_var_local (decl, ctx);
991 break;
992
993 case OMP_CLAUSE_COPYPRIVATE:
994 if (ctx->outer)
995 scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer);
996 /* FALLTHRU */
997
998 case OMP_CLAUSE_COPYIN:
999 decl = OMP_CLAUSE_DECL (c);
1000 by_ref = use_pointer_for_field (decl, false);
1001 install_var_field (decl, by_ref, ctx);
1002 break;
1003
1004 case OMP_CLAUSE_DEFAULT:
1005 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1006 break;
1007
1008 case OMP_CLAUSE_IF:
1009 case OMP_CLAUSE_NUM_THREADS:
1010 case OMP_CLAUSE_SCHEDULE:
1011 if (ctx->outer)
1012 scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1013 break;
1014
1015 case OMP_CLAUSE_NOWAIT:
1016 case OMP_CLAUSE_ORDERED:
1017 break;
1018
1019 default:
1020 gcc_unreachable ();
1021 }
1022 }
1023
1024 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1025 {
1026 switch (OMP_CLAUSE_CODE (c))
1027 {
1028 case OMP_CLAUSE_LASTPRIVATE:
1029 /* Let the corresponding firstprivate clause create
1030 the variable. */
1031 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1032 break;
1033 /* FALLTHRU */
1034
1035 case OMP_CLAUSE_PRIVATE:
1036 case OMP_CLAUSE_FIRSTPRIVATE:
1037 case OMP_CLAUSE_REDUCTION:
1038 decl = OMP_CLAUSE_DECL (c);
1039 if (is_variable_sized (decl))
1040 install_var_local (decl, ctx);
1041 fixup_remapped_decl (decl, ctx,
1042 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1043 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1044 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1045 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1046 scan_array_reductions = true;
1047 break;
1048
1049 case OMP_CLAUSE_SHARED:
1050 decl = OMP_CLAUSE_DECL (c);
1051 fixup_remapped_decl (decl, ctx, false);
1052 break;
1053
1054 case OMP_CLAUSE_COPYPRIVATE:
1055 case OMP_CLAUSE_COPYIN:
1056 case OMP_CLAUSE_DEFAULT:
1057 case OMP_CLAUSE_IF:
1058 case OMP_CLAUSE_NUM_THREADS:
1059 case OMP_CLAUSE_SCHEDULE:
1060 case OMP_CLAUSE_NOWAIT:
1061 case OMP_CLAUSE_ORDERED:
1062 break;
1063
1064 default:
1065 gcc_unreachable ();
1066 }
1067 }
1068
1069 if (scan_array_reductions)
1070 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1071 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1072 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1073 {
1074 scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx);
1075 scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx);
1076 }
1077 }
1078
1079 /* Create a new name for the omp child function. Returns an identifier. */
1080
1081 static GTY(()) unsigned int tmp_ompfn_id_num;
1082
1083 static tree
1084 create_omp_child_function_name (void)
1085 {
1086 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1087 size_t len = IDENTIFIER_LENGTH (name);
1088 char *tmp_name, *prefix;
1089
1090 prefix = alloca (len + sizeof ("_omp_fn"));
1091 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1092 strcpy (prefix + len, "_omp_fn");
1093 #ifndef NO_DOT_IN_LABEL
1094 prefix[len] = '.';
1095 #elif !defined NO_DOLLAR_IN_LABEL
1096 prefix[len] = '$';
1097 #endif
1098 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1099 return get_identifier (tmp_name);
1100 }
1101
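/* E.g. (a sketch): within a function 'foo', successive child functions
   are named foo.omp_fn.0, foo.omp_fn.1, and so on, with '$' or '_'
   substituted for the '.' on targets that do not allow dots in
   labels.  */
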
1102 /* Build a decl for the omp child function. It will not contain a body
1103 yet, just the bare decl. */
1104
1105 static void
1106 create_omp_child_function (omp_context *ctx)
1107 {
1108 tree decl, type, name, t;
1109
1110 name = create_omp_child_function_name ();
1111 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1112
1113 decl = build_decl (FUNCTION_DECL, name, type);
1114 decl = lang_hooks.decls.pushdecl (decl);
1115
1116 ctx->cb.dst_fn = decl;
1117
1118 TREE_STATIC (decl) = 1;
1119 TREE_USED (decl) = 1;
1120 DECL_ARTIFICIAL (decl) = 1;
1121 DECL_IGNORED_P (decl) = 0;
1122 TREE_PUBLIC (decl) = 0;
1123 DECL_UNINLINABLE (decl) = 1;
1124 DECL_EXTERNAL (decl) = 0;
1125 DECL_CONTEXT (decl) = NULL_TREE;
1126 DECL_INITIAL (decl) = make_node (BLOCK);
1127
1128 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1129 DECL_ARTIFICIAL (t) = 1;
1130 DECL_IGNORED_P (t) = 1;
1131 DECL_RESULT (decl) = t;
1132
1133 t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1134 DECL_ARTIFICIAL (t) = 1;
1135 DECL_ARG_TYPE (t) = ptr_type_node;
1136 DECL_CONTEXT (t) = current_function_decl;
1137 TREE_USED (t) = 1;
1138 DECL_ARGUMENTS (decl) = t;
1139 ctx->receiver_decl = t;
1140
1141 /* Allocate memory for the function structure. The call to
1142 allocate_struct_function clobbers CFUN, so we need to restore
1143 it afterward. */
1144 allocate_struct_function (decl);
1145 DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
1146 cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
1147 cfun = ctx->cb.src_cfun;
1148 }
1149
1150
1151 /* Scan an OpenMP parallel directive. */
1152
1153 static void
1154 scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
1155 {
1156 omp_context *ctx;
1157 tree name;
1158
1159 /* Ignore parallel directives with empty bodies, unless there
1160 are copyin clauses. */
1161 if (optimize > 0
1162 && empty_body_p (OMP_PARALLEL_BODY (*stmt_p))
1163 && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL)
1164 {
1165 *stmt_p = build_empty_stmt ();
1166 return;
1167 }
1168
1169 ctx = new_omp_context (*stmt_p, outer_ctx);
1170 if (parallel_nesting_level > 1)
1171 ctx->is_nested = true;
1172 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1173 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1174 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1175 name = create_tmp_var_name (".omp_data_s");
1176 name = build_decl (TYPE_DECL, name, ctx->record_type);
1177 TYPE_NAME (ctx->record_type) = name;
1178 create_omp_child_function (ctx);
1179 OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn;
1180
1181 scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
1182 scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx);
1183
1184 if (TYPE_FIELDS (ctx->record_type) == NULL)
1185 ctx->record_type = ctx->receiver_decl = NULL;
1186 else
1187 {
1188 layout_type (ctx->record_type);
1189 fixup_child_record_type (ctx);
1190 }
1191 }
1192
1193
1194 /* Scan an OpenMP loop directive. */
1195
1196 static void
1197 scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
1198 {
1199 omp_context *ctx;
1200 tree stmt;
1201
1202 stmt = *stmt_p;
1203 ctx = new_omp_context (stmt, outer_ctx);
1204
1205 scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx);
1206
1207 scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
1208 scan_omp (&OMP_FOR_INIT (stmt), ctx);
1209 scan_omp (&OMP_FOR_COND (stmt), ctx);
1210 scan_omp (&OMP_FOR_INCR (stmt), ctx);
1211 scan_omp (&OMP_FOR_BODY (stmt), ctx);
1212 }
1213
1214 /* Scan an OpenMP sections directive. */
1215
1216 static void
1217 scan_omp_sections (tree *stmt_p, omp_context *outer_ctx)
1218 {
1219 tree stmt;
1220 omp_context *ctx;
1221
1222 stmt = *stmt_p;
1223 ctx = new_omp_context (stmt, outer_ctx);
1224 scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx);
1225 scan_omp (&OMP_SECTIONS_BODY (stmt), ctx);
1226 }
1227
1228 /* Scan an OpenMP single directive. */
1229
1230 static void
1231 scan_omp_single (tree *stmt_p, omp_context *outer_ctx)
1232 {
1233 tree stmt = *stmt_p;
1234 omp_context *ctx;
1235 tree name;
1236
1237 ctx = new_omp_context (stmt, outer_ctx);
1238 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1239 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1240 name = create_tmp_var_name (".omp_copy_s");
1241 name = build_decl (TYPE_DECL, name, ctx->record_type);
1242 TYPE_NAME (ctx->record_type) = name;
1243
1244 scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx);
1245 scan_omp (&OMP_SINGLE_BODY (stmt), ctx);
1246
1247 if (TYPE_FIELDS (ctx->record_type) == NULL)
1248 ctx->record_type = NULL;
1249 else
1250 layout_type (ctx->record_type);
1251 }
1252
1253
1254 /* Callback for walk_stmts used to scan for OpenMP directives at TP. */
1255
1256 static tree
1257 scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
1258 {
1259 struct walk_stmt_info *wi = data;
1260 omp_context *ctx = wi->info;
1261 tree t = *tp;
1262
1263 if (EXPR_HAS_LOCATION (t))
1264 input_location = EXPR_LOCATION (t);
1265
1266 *walk_subtrees = 0;
1267 switch (TREE_CODE (t))
1268 {
1269 case OMP_PARALLEL:
1270 parallel_nesting_level++;
1271 scan_omp_parallel (tp, ctx);
1272 parallel_nesting_level--;
1273 break;
1274
1275 case OMP_FOR:
1276 scan_omp_for (tp, ctx);
1277 break;
1278
1279 case OMP_SECTIONS:
1280 scan_omp_sections (tp, ctx);
1281 break;
1282
1283 case OMP_SINGLE:
1284 scan_omp_single (tp, ctx);
1285 break;
1286
1287 case OMP_SECTION:
1288 case OMP_MASTER:
1289 case OMP_ORDERED:
1290 case OMP_CRITICAL:
1291 ctx = new_omp_context (*tp, ctx);
1292 scan_omp (&OMP_BODY (*tp), ctx);
1293 break;
1294
1295 case BIND_EXPR:
1296 {
1297 tree var;
1298 *walk_subtrees = 1;
1299
1300 for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var))
1301 insert_decl_map (&ctx->cb, var, var);
1302 }
1303 break;
1304
1305 case VAR_DECL:
1306 case PARM_DECL:
1307 case LABEL_DECL:
1308 if (ctx)
1309 *tp = remap_decl (t, &ctx->cb);
1310 break;
1311
1312 default:
1313 if (ctx && TYPE_P (t))
1314 *tp = remap_type (t, &ctx->cb);
1315 else if (!DECL_P (t))
1316 *walk_subtrees = 1;
1317 break;
1318 }
1319
1320 return NULL_TREE;
1321 }
1322
1323
1324 /* Scan all the statements starting at STMT_P. CTX contains context
1325 information about the OpenMP directives and clauses found during
1326 the scan. */
1327
1328 static void
1329 scan_omp (tree *stmt_p, omp_context *ctx)
1330 {
1331 location_t saved_location;
1332 struct walk_stmt_info wi;
1333
1334 memset (&wi, 0, sizeof (wi));
1335 wi.callback = scan_omp_1;
1336 wi.info = ctx;
1337 wi.want_bind_expr = (ctx != NULL);
1338 wi.want_locations = true;
1339
1340 saved_location = input_location;
1341 walk_stmts (&wi, stmt_p);
1342 input_location = saved_location;
1343 }
1344 \f
1345 /* Re-gimplification and code generation routines. */
1346
1347 /* Build a call to GOMP_barrier. */
1348
1349 static void
1350 build_omp_barrier (tree *stmt_list)
1351 {
1352 tree t;
1353
1354 t = built_in_decls[BUILT_IN_GOMP_BARRIER];
1355 t = build_function_call_expr (t, NULL);
1356 gimplify_and_add (t, stmt_list);
1357 }
1358
1359 /* If a context was created for STMT when it was scanned, return it. */
1360
1361 static omp_context *
1362 maybe_lookup_ctx (tree stmt)
1363 {
1364 splay_tree_node n;
1365 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
1366 return n ? (omp_context *) n->value : NULL;
1367 }
1368
1369
1370 /* Find the mapping for DECL in CTX or the immediately enclosing
1371 context that has a mapping for DECL.
1372
1373 If CTX is a nested parallel directive, we may have to use the decl
1374 mappings created in CTX's parent context. Suppose that we have the
1375 following parallel nesting (variable UIDs showed for clarity):
1376
1377 iD.1562 = 0;
1378 #omp parallel shared(iD.1562) -> outer parallel
1379 iD.1562 = iD.1562 + 1;
1380
1381 #omp parallel shared (iD.1562) -> inner parallel
1382 iD.1562 = iD.1562 - 1;
1383
1384 Each parallel structure will create a distinct .omp_data_s structure
1385 for copying iD.1562 in/out of the directive:
1386
1387 outer parallel .omp_data_s.1.i -> iD.1562
1388 inner parallel .omp_data_s.2.i -> iD.1562
1389
1390 A shared variable mapping will produce a copy-out operation before
1391 the parallel directive and a copy-in operation after it. So, in
1392 this case we would have:
1393
1394 iD.1562 = 0;
1395 .omp_data_o.1.i = iD.1562;
1396 #omp parallel shared(iD.1562) -> outer parallel
1397 .omp_data_i.1 = &.omp_data_o.1
1398 .omp_data_i.1->i = .omp_data_i.1->i + 1;
1399
1400 .omp_data_o.2.i = iD.1562; -> **
1401 #omp parallel shared(iD.1562) -> inner parallel
1402 .omp_data_i.2 = &.omp_data_o.2
1403 .omp_data_i.2->i = .omp_data_i.2->i - 1;
1404
1405
1406 ** This is a problem. The symbol iD.1562 cannot be referenced
1407 inside the body of the outer parallel region. But since we are
1408 emitting this copy operation while expanding the inner parallel
1409 directive, we need to access the CTX structure of the outer
1410 parallel directive to get the correct mapping:
1411
1412 .omp_data_o.2.i = .omp_data_i.1->i
1413
1414 Since there may be other workshare or parallel directives enclosing
1415 the parallel directive, it may be necessary to walk up the context
1416 parent chain. This is not a problem in general because nested
1417 parallelism happens only rarely. */
1418
1419 static tree
1420 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
1421 {
1422 tree t;
1423 omp_context *up;
1424
1425 gcc_assert (ctx->is_nested);
1426
1427 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
1428 t = maybe_lookup_decl (decl, up);
1429
1430 gcc_assert (t);
1431
1432 return t;
1433 }
1434
1435
1436 /* Construct the initialization value for reduction CLAUSE. */
1437
1438 tree
1439 omp_reduction_init (tree clause, tree type)
1440 {
1441 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
1442 {
1443 case PLUS_EXPR:
1444 case MINUS_EXPR:
1445 case BIT_IOR_EXPR:
1446 case BIT_XOR_EXPR:
1447 case TRUTH_OR_EXPR:
1448 case TRUTH_ORIF_EXPR:
1449 case TRUTH_XOR_EXPR:
1450 case NE_EXPR:
1451 return fold_convert (type, integer_zero_node);
1452
1453 case MULT_EXPR:
1454 case TRUTH_AND_EXPR:
1455 case TRUTH_ANDIF_EXPR:
1456 case EQ_EXPR:
1457 return fold_convert (type, integer_one_node);
1458
1459 case BIT_AND_EXPR:
1460 return fold_convert (type, integer_minus_one_node);
1461
1462 case MAX_EXPR:
1463 if (SCALAR_FLOAT_TYPE_P (type))
1464 {
1465 REAL_VALUE_TYPE max, min;
1466 if (HONOR_INFINITIES (TYPE_MODE (type)))
1467 {
1468 real_inf (&max);
1469 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
1470 }
1471 else
1472 real_maxval (&min, 1, TYPE_MODE (type));
1473 return build_real (type, min);
1474 }
1475 else
1476 {
1477 gcc_assert (INTEGRAL_TYPE_P (type));
1478 return TYPE_MIN_VALUE (type);
1479 }
1480
1481 case MIN_EXPR:
1482 if (SCALAR_FLOAT_TYPE_P (type))
1483 {
1484 REAL_VALUE_TYPE max;
1485 if (HONOR_INFINITIES (TYPE_MODE (type)))
1486 real_inf (&max);
1487 else
1488 real_maxval (&max, 0, TYPE_MODE (type));
1489 return build_real (type, max);
1490 }
1491 else
1492 {
1493 gcc_assert (INTEGRAL_TYPE_P (type));
1494 return TYPE_MAX_VALUE (type);
1495 }
1496
1497 default:
1498 gcc_unreachable ();
1499 }
1500 }
1501
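/* For example: reduction (+:x) initializes each private x to 0,
   reduction (*:x) to 1, reduction (&:x) to ~0, and reduction (max:x)
   to the minimum value of the type (or -Inf for floats honoring
   infinities), so combining the partial results with the reduction
   operator yields the correct answer.  */
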
1502 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
1503 from the receiver (aka child) side and initializers for REFERENCE_TYPE
1504 private variables. Initialization statements go in ILIST, while calls
1505 to destructors go in DLIST. */
1506
1507 static void
1508 lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
1509 omp_context *ctx)
1510 {
1511 tree_stmt_iterator diter;
1512 tree c, dtor, copyin_seq, x, args, ptr;
1513 bool copyin_by_ref = false;
1514 int pass;
1515
1516 *dlist = alloc_stmt_list ();
1517 diter = tsi_start (*dlist);
1518 copyin_seq = NULL;
1519
1520 /* Do all the fixed sized types in the first pass, and the variable sized
1521 types in the second pass. This makes sure that the scalar arguments to
1522 the variable sized types are processed before we use them in the
1523 variable sized operations. */
1524 for (pass = 0; pass < 2; ++pass)
1525 {
1526 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1527 {
1528 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
1529 tree var, new_var;
1530 bool by_ref;
1531
1532 switch (c_kind)
1533 {
1534 case OMP_CLAUSE_PRIVATE:
1535 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
1536 continue;
1537 break;
1538 case OMP_CLAUSE_SHARED:
1539 case OMP_CLAUSE_FIRSTPRIVATE:
1540 case OMP_CLAUSE_LASTPRIVATE:
1541 case OMP_CLAUSE_COPYIN:
1542 case OMP_CLAUSE_REDUCTION:
1543 break;
1544 default:
1545 continue;
1546 }
1547
1548 new_var = var = OMP_CLAUSE_DECL (c);
1549 if (c_kind != OMP_CLAUSE_COPYIN)
1550 new_var = lookup_decl (var, ctx);
1551
1552 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
1553 {
1554 if (pass != 0)
1555 continue;
1556 }
1557 else if (is_variable_sized (var))
1558 {
1559 /* For variable sized types, we need to allocate the
1560 actual storage here. Call alloca and store the
1561 result in the pointer decl that we created elsewhere. */
1562 if (pass == 0)
1563 continue;
1564
1565 ptr = DECL_VALUE_EXPR (new_var);
1566 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
1567 ptr = TREE_OPERAND (ptr, 0);
1568 gcc_assert (DECL_P (ptr));
1569
1570 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
1571 args = tree_cons (NULL, x, NULL);
1572 x = built_in_decls[BUILT_IN_ALLOCA];
1573 x = build_function_call_expr (x, args);
1574 x = fold_convert (TREE_TYPE (ptr), x);
1575 x = build2 (MODIFY_EXPR, void_type_node, ptr, x);
1576 gimplify_and_add (x, ilist);
1577 }
1578 else if (is_reference (var))
1579 {
1580 /* For references that are being privatized for Fortran,
1581 allocate new backing storage for the new pointer
1582 variable. This allows us to avoid changing all the
1583 code that expects a pointer to something that expects
1584 a direct variable. Note that this doesn't apply to
1585 C++, since reference types are disallowed in data
1586 sharing clauses there. */
1587 if (pass == 0)
1588 continue;
1589
1590 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
1591 if (TREE_CONSTANT (x))
1592 {
1593 const char *name = NULL;
1594 if (DECL_NAME (var))
1595 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
1596
1597 x = create_tmp_var (TREE_TYPE (TREE_TYPE (new_var)), name);
1598 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
1599 }
1600 else
1601 {
1602 args = tree_cons (NULL, x, NULL);
1603 x = built_in_decls[BUILT_IN_ALLOCA];
1604 x = build_function_call_expr (x, args);
1605 x = fold_convert (TREE_TYPE (new_var), x);
1606 }
1607
1608 x = build2 (MODIFY_EXPR, void_type_node, new_var, x);
1609 gimplify_and_add (x, ilist);
1610
1611 new_var = build_fold_indirect_ref (new_var);
1612 }
1613 else if (c_kind == OMP_CLAUSE_REDUCTION
1614 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1615 {
1616 if (pass == 0)
1617 continue;
1618 }
1619 else if (pass != 0)
1620 continue;
1621
1622 switch (OMP_CLAUSE_CODE (c))
1623 {
1624 case OMP_CLAUSE_SHARED:
1625 /* Set up the DECL_VALUE_EXPR for shared variables now. This
1626 needs to be delayed until after fixup_child_record_type so
1627 that we get the correct type during the dereference. */
1628 by_ref = use_pointer_for_field (var, true);
1629 x = build_receiver_ref (var, by_ref, ctx);
1630 SET_DECL_VALUE_EXPR (new_var, x);
1631 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
1632
1633 /* ??? If VAR is not passed by reference, and the variable
1634 hasn't been initialized yet, then we'll get a warning for
1635 the store into the omp_data_s structure. Ideally, we'd be
1636 able to notice this and not store anything at all, but
1637 we're generating code too early. Suppress the warning. */
1638 if (!by_ref)
1639 TREE_NO_WARNING (var) = 1;
1640 break;
1641
1642 case OMP_CLAUSE_LASTPRIVATE:
1643 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1644 break;
1645 /* FALLTHRU */
1646
1647 case OMP_CLAUSE_PRIVATE:
1648 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var);
1649 if (x)
1650 gimplify_and_add (x, ilist);
1651 /* FALLTHRU */
1652
1653 do_dtor:
1654 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
1655 if (x)
1656 {
1657 dtor = x;
1658 gimplify_stmt (&dtor);
1659 tsi_link_before (&diter, dtor, TSI_SAME_STMT);
1660 }
1661 break;
1662
1663 case OMP_CLAUSE_FIRSTPRIVATE:
1664 x = build_outer_var_ref (var, ctx);
1665 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
1666 gimplify_and_add (x, ilist);
1667 goto do_dtor;
1668 break;
1669
1670 case OMP_CLAUSE_COPYIN:
1671 by_ref = use_pointer_for_field (var, false);
1672 x = build_receiver_ref (var, by_ref, ctx);
1673 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
1674 append_to_statement_list (x, &copyin_seq);
1675 copyin_by_ref |= by_ref;
1676 break;
1677
1678 case OMP_CLAUSE_REDUCTION:
1679 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1680 {
1681 gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
1682 OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
1683 }
1684 else
1685 {
1686 x = omp_reduction_init (c, TREE_TYPE (new_var));
1687 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
1688 x = build2 (MODIFY_EXPR, void_type_node, new_var, x);
1689 gimplify_and_add (x, ilist);
1690 }
1691 break;
1692
1693 default:
1694 gcc_unreachable ();
1695 }
1696 }
1697 }
1698
1699 /* The copyin sequence is not to be executed by the main thread, since
1700 that would result in self-copies. A self-copy may be harmless for
1701 scalars, but it certainly is not for C++ operator=. */
1702 if (copyin_seq)
1703 {
1704 x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
1705 x = build_function_call_expr (x, NULL);
1706 x = build2 (NE_EXPR, boolean_type_node, x,
1707 build_int_cst (TREE_TYPE (x), 0));
1708 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
1709 gimplify_and_add (x, ilist);
1710 }
1711
1712 /* If any copyin variable is passed by reference, we must ensure the
1713 master thread doesn't modify it before it is copied over in all
1714 threads. */
1715 if (copyin_by_ref)
1716 build_omp_barrier (ilist);
1717 }
1718
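/* As an illustration (a sketch of what the above emits): for
   firstprivate (x) the receiver side performs roughly

     x.private = .omp_data_i->x;     <-- copy constructor in C++

   while the copyin sequence is guarded so the master thread does not
   copy onto itself:

     if (omp_get_thread_num () != 0)
       t = .omp_data_i->t;  */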
1719
1720 /* Generate code to implement the LASTPRIVATE clauses. This is used for
1721 both parallel and workshare constructs. PREDICATE may be NULL if it's
1722 always true. */
1723
1724 static void
1725 lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
1726 omp_context *ctx)
1727 {
1728 tree sub_list, x, c;
1729
1730 /* Early exit if there are no lastprivate clauses. */
1731 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
1732 if (clauses == NULL)
1733 {
1734 /* If this was a workshare clause, see if it had been combined
1735 with its parallel. In that case, look for the clauses on the
1736 parallel statement itself. */
1737 if (is_parallel_ctx (ctx))
1738 return;
1739
1740 ctx = ctx->outer;
1741 if (ctx == NULL || !is_parallel_ctx (ctx))
1742 return;
1743
1744 clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
1745 OMP_CLAUSE_LASTPRIVATE);
1746 if (clauses == NULL)
1747 return;
1748 }
1749
1750 sub_list = alloc_stmt_list ();
1751
1752 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1753 {
1754 tree var, new_var;
1755
1756 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE)
1757 continue;
1758
1759 var = OMP_CLAUSE_DECL (c);
1760 new_var = lookup_decl (var, ctx);
1761
1762 x = build_outer_var_ref (var, ctx);
1763 if (is_reference (var))
1764 new_var = build_fold_indirect_ref (new_var);
1765 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
1766 append_to_statement_list (x, &sub_list);
1767 }
1768
1769 if (predicate)
1770 x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL);
1771 else
1772 x = sub_list;
1773
1774 gimplify_and_add (x, stmt_list);
1775 }
1776
1777
1778 /* Generate code to implement the REDUCTION clauses. */
1779
1780 static void
1781 lower_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx)
1782 {
1783 tree sub_list = NULL, x, c;
1784 int count = 0;
1785
1786 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
1787 update in that case, otherwise use a lock. */
1788 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
1789 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1790 {
1791 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1792 {
1793 /* Never use OMP_ATOMIC for array reductions. */
1794 count = -1;
1795 break;
1796 }
1797 count++;
1798 }
1799
1800 if (count == 0)
1801 return;
1802
1803 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1804 {
1805 tree var, ref, new_var;
1806 enum tree_code code;
1807
1808 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
1809 continue;
1810
1811 var = OMP_CLAUSE_DECL (c);
1812 new_var = lookup_decl (var, ctx);
1813 if (is_reference (var))
1814 new_var = build_fold_indirect_ref (new_var);
1815 ref = build_outer_var_ref (var, ctx);
1816 code = OMP_CLAUSE_REDUCTION_CODE (c);
1817
1818 /* reduction(-:var) sums up the partial results, so it acts
1819 identically to reduction(+:var). */
1820 if (code == MINUS_EXPR)
1821 code = PLUS_EXPR;
1822
1823 if (count == 1)
1824 {
1825 tree addr = build_fold_addr_expr (ref);
1826
1827 addr = save_expr (addr);
1828 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
1829 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
1830 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
1831 gimplify_and_add (x, stmt_list);
1832 return;
1833 }
1834
1835 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1836 {
1837 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
1838
1839 if (is_reference (var))
1840 ref = build_fold_addr_expr (ref);
1841 SET_DECL_VALUE_EXPR (placeholder, ref);
1842 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
1843 gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list);
1844 OMP_CLAUSE_REDUCTION_MERGE (c) = NULL;
1845 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
1846 }
1847 else
1848 {
1849 x = build2 (code, TREE_TYPE (ref), ref, new_var);
1850 ref = build_outer_var_ref (var, ctx);
1851 x = build2 (MODIFY_EXPR, void_type_node, ref, x);
1852 append_to_statement_list (x, &sub_list);
1853 }
1854 }
1855
1856 x = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
1857 x = build_function_call_expr (x, NULL);
1858 gimplify_and_add (x, stmt_list);
1859
1860 gimplify_and_add (sub_list, stmt_list);
1861
1862 x = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
1863 x = build_function_call_expr (x, NULL);
1864 gimplify_and_add (x, stmt_list);
1865 }
1866
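/* E.g. (a sketch): a single reduction (+:s) is combined atomically,
   roughly

     #pragma omp atomic
     s_outer = s_outer + s_private;

   whereas several reductions (or an array reduction) are merged under
   one lock:

     GOMP_atomic_start ();
     s_outer = s_outer + s_private;
     ...remaining reductions...
     GOMP_atomic_end ();  */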
1867
1868 /* Generate code to implement the COPYPRIVATE clauses. */
1869
1870 static void
1871 lower_copyprivate_clauses (tree clauses, tree *slist, tree *rlist,
1872 omp_context *ctx)
1873 {
1874 tree c;
1875
1876 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1877 {
1878 tree var, ref, x;
1879 bool by_ref;
1880
1881 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
1882 continue;
1883
1884 var = OMP_CLAUSE_DECL (c);
1885 by_ref = use_pointer_for_field (var, false);
1886
1887 ref = build_sender_ref (var, ctx);
1888 x = (ctx->is_nested) ? lookup_decl_in_outer_ctx (var, ctx) : var;
1889 x = by_ref ? build_fold_addr_expr (x) : x;
1890 x = build2 (MODIFY_EXPR, void_type_node, ref, x);
1891 gimplify_and_add (x, slist);
1892
1893 ref = build_receiver_ref (var, by_ref, ctx);
1894 if (is_reference (var))
1895 {
1896 ref = build_fold_indirect_ref (ref);
1897 var = build_fold_indirect_ref (var);
1898 }
1899 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
1900 gimplify_and_add (x, rlist);
1901 }
1902 }
1903
1904
1905 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
1906 and REDUCTION from the sender (aka parent) side. */
1907
1908 static void
1909 lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
1910 {
1911 tree c;
1912
1913 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
1914 {
1915 tree val, ref, x, var;
1916 bool by_ref, do_in = false, do_out = false;
1917
1918 switch (OMP_CLAUSE_CODE (c))
1919 {
1920 case OMP_CLAUSE_FIRSTPRIVATE:
1921 case OMP_CLAUSE_COPYIN:
1922 case OMP_CLAUSE_LASTPRIVATE:
1923 case OMP_CLAUSE_REDUCTION:
1924 break;
1925 default:
1926 continue;
1927 }
1928
1929 var = val = OMP_CLAUSE_DECL (c);
1930 if (ctx->is_nested)
1931 var = lookup_decl_in_outer_ctx (val, ctx);
1932
1933 if (is_variable_sized (val))
1934 continue;
1935 by_ref = use_pointer_for_field (val, false);
1936
1937 switch (OMP_CLAUSE_CODE (c))
1938 {
1939 case OMP_CLAUSE_FIRSTPRIVATE:
1940 case OMP_CLAUSE_COPYIN:
1941 do_in = true;
1942 break;
1943
1944 case OMP_CLAUSE_LASTPRIVATE:
1945 if (by_ref || is_reference (val))
1946 {
1947 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1948 continue;
1949 do_in = true;
1950 }
1951 else
1952 do_out = true;
1953 break;
1954
1955 case OMP_CLAUSE_REDUCTION:
1956 do_in = true;
1957 do_out = !(by_ref || is_reference (val));
1958 break;
1959
1960 default:
1961 gcc_unreachable ();
1962 }
1963
1964 if (do_in)
1965 {
1966 ref = build_sender_ref (val, ctx);
1967 x = by_ref ? build_fold_addr_expr (var) : var;
1968 x = build2 (MODIFY_EXPR, void_type_node, ref, x);
1969 gimplify_and_add (x, ilist);
1970 }
1971
1972 if (do_out)
1973 {
1974 ref = build_sender_ref (val, ctx);
1975 x = build2 (MODIFY_EXPR, void_type_node, var, ref);
1976 gimplify_and_add (x, olist);
1977 }
1978 }
1979 }
1980
1981 /* Generate code to implement SHARED from the sender (aka parent) side.
1982 This is trickier, since OMP_PARALLEL_CLAUSES doesn't list the
1983 variables that were automatically shared. */
1984
1985 static void
1986 lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
1987 {
1988 tree var, ovar, nvar, f, x;
1989
1990 if (ctx->record_type == NULL)
1991 return;
1992
1993 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1994 {
1995 ovar = DECL_ABSTRACT_ORIGIN (f);
1996 nvar = maybe_lookup_decl (ovar, ctx);
1997 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
1998 continue;
1999
2000 var = ovar;
2001
2002 /* If CTX is a nested parallel directive, find the immediately
2003 enclosing parallel or workshare construct that contains a
2004 mapping for OVAR. */
2005 if (ctx->is_nested)
2006 var = lookup_decl_in_outer_ctx (ovar, ctx);
2007
2008 if (use_pointer_for_field (ovar, true))
2009 {
2010 x = build_sender_ref (ovar, ctx);
2011 var = build_fold_addr_expr (var);
2012 x = build2 (MODIFY_EXPR, void_type_node, x, var);
2013 gimplify_and_add (x, ilist);
2014 }
2015 else
2016 {
2017 x = build_sender_ref (ovar, ctx);
2018 x = build2 (MODIFY_EXPR, void_type_node, x, var);
2019 gimplify_and_add (x, ilist);
2020
2021 x = build_sender_ref (ovar, ctx);
2022 x = build2 (MODIFY_EXPR, void_type_node, var, x);
2023 gimplify_and_add (x, olist);
2024 }
2025 }
2026 }
2027
2028 /* Build the function calls to GOMP_parallel_start etc. to actually
2029 generate the parallel operation. REGION is the parallel region
2030 being expanded. BB is the block into which to insert the code.
2031 WS_ARGS will be set if this is a call to a combined
2032 parallel+workshare construct; it contains the list of additional
2033 arguments needed by the workshare construct. */
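/* In the common case the emitted sequence is, roughly:

	GOMP_parallel_start (__omp_fn, &.OMP_DATA_O, NUM_THREADS);
	__omp_fn (&.OMP_DATA_O);
	GOMP_parallel_end ();

   where NUM_THREADS is 0 unless a num_threads clause was given,
   __omp_fn is the outlined child function (name illustrative), and
   for combined parallel+workshare regions WS_ARGS is appended to the
   argument list of the start call.  */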
2034
2035 static void
2036 expand_parallel_call (struct omp_region *region, basic_block bb, tree ws_args)
2037 {
2038 tree t, args, val, cond, c, list, clauses;
2039 block_stmt_iterator si;
2040 int start_ix;
2041
2042 clauses = OMP_PARALLEL_CLAUSES (region->entry);
2043 push_gimplify_context ();
2044
2045 /* Determine what flavour of GOMP_parallel_start we will be
2046 emitting. */
2047 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2048 if (is_combined_parallel (region))
2049 {
2050 tree stmt = region->inner->entry;
2051
2052 if (TREE_CODE (stmt) == OMP_FOR)
2053 {
2054 struct omp_for_data fd;
2055 extract_omp_for_data (stmt, &fd);
2056 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START + fd.sched_kind;
2057 }
2058 else if (TREE_CODE (stmt) == OMP_SECTIONS)
2059 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2060 else
2061 gcc_unreachable ();
2062 }
2063
2064 /* By default, the value of NUM_THREADS is zero (selected at run time)
2065 and there is no conditional. */
2066 cond = NULL_TREE;
2067 val = build_int_cst (unsigned_type_node, 0);
2068
2069 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2070 if (c)
2071 cond = OMP_CLAUSE_IF_EXPR (c);
2072
2073 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2074 if (c)
2075 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2076
2077 /* Ensure 'val' is of the correct type. */
2078 val = fold_convert (unsigned_type_node, val);
2079
2080 /* If we found the clause 'if (cond)', build either
2081 (cond != 0) or (cond ? val : 1u). */
2082 if (cond)
2083 {
2084 block_stmt_iterator si;
2085
2086 cond = gimple_boolify (cond);
2087
2088 if (integer_zerop (val))
2089 val = build2 (EQ_EXPR, unsigned_type_node, cond,
2090 build_int_cst (TREE_TYPE (cond), 0));
2091 else
2092 {
2093 basic_block cond_bb, then_bb, else_bb;
2094 edge e;
2095 tree t, then_lab, else_lab, tmp;
2096
2097 tmp = create_tmp_var (TREE_TYPE (val), NULL);
2098 e = split_block (bb, NULL);
2099 cond_bb = e->src;
2100 bb = e->dest;
2101 remove_edge (e);
2102
2103 then_bb = create_empty_bb (cond_bb);
2104 else_bb = create_empty_bb (then_bb);
2105 then_lab = create_artificial_label ();
2106 else_lab = create_artificial_label ();
2107
2108 t = build3 (COND_EXPR, void_type_node,
2109 cond,
2110 build_and_jump (&then_lab),
2111 build_and_jump (&else_lab));
2112
2113 si = bsi_start (cond_bb);
2114 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2115
2116 si = bsi_start (then_bb);
2117 t = build1 (LABEL_EXPR, void_type_node, then_lab);
2118 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2119 t = build2 (MODIFY_EXPR, void_type_node, tmp, val);
2120 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2121
2122 si = bsi_start (else_bb);
2123 t = build1 (LABEL_EXPR, void_type_node, else_lab);
2124 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2125 t = build2 (MODIFY_EXPR, void_type_node, tmp,
2126 build_int_cst (unsigned_type_node, 1));
2127 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2128
2129 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2130 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2131 make_edge (then_bb, bb, EDGE_FALLTHRU);
2132 make_edge (else_bb, bb, EDGE_FALLTHRU);
2133
2134 val = tmp;
2135 }
2136
2137 list = NULL_TREE;
2138 val = get_formal_tmp_var (val, &list);
2139 si = bsi_start (bb);
2140 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2141 }
2142
2143 list = NULL_TREE;
2144 args = tree_cons (NULL, val, NULL);
2145 t = OMP_PARALLEL_DATA_ARG (region->entry);
2146 if (t == NULL)
2147 t = null_pointer_node;
2148 else
2149 t = build_fold_addr_expr (t);
2150 args = tree_cons (NULL, t, args);
2151 t = build_fold_addr_expr (OMP_PARALLEL_FN (region->entry));
2152 args = tree_cons (NULL, t, args);
2153
2154 if (ws_args)
2155 args = chainon (args, ws_args);
2156
2157 t = built_in_decls[start_ix];
2158 t = build_function_call_expr (t, args);
2159 gimplify_and_add (t, &list);
2160
2161 t = OMP_PARALLEL_DATA_ARG (region->entry);
2162 if (t == NULL)
2163 t = null_pointer_node;
2164 else
2165 t = build_fold_addr_expr (t);
2166 args = tree_cons (NULL, t, NULL);
2167 t = build_function_call_expr (OMP_PARALLEL_FN (region->entry), args);
2168 gimplify_and_add (t, &list);
2169
2170 t = built_in_decls[BUILT_IN_GOMP_PARALLEL_END];
2171 t = build_function_call_expr (t, NULL);
2172 gimplify_and_add (t, &list);
2173
2174 si = bsi_last (bb);
2175 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2176
2177 pop_gimplify_context (NULL_TREE);
2178 }
2179
2180
2181 /* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
2182 handler. This prevents programs from violating the structured
2183 block semantics with throws. */
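/* The result is roughly equivalent to

	try { *STMT_P } catch (...) { terminate/trap; }

   using the language's cleanup action when one is provided (e.g.
   std::terminate for C++) and __builtin_trap otherwise.  */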
2184
2185 static void
2186 maybe_catch_exception (tree *stmt_p)
2187 {
2188 tree f, t;
2189
2190 if (!flag_exceptions)
2191 return;
2192
2193 if (lang_protect_cleanup_actions)
2194 t = lang_protect_cleanup_actions ();
2195 else
2196 {
2197 t = built_in_decls[BUILT_IN_TRAP];
2198 t = build_function_call_expr (t, NULL);
2199 }
2200 f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
2201 EH_FILTER_MUST_NOT_THROW (f) = 1;
2202 gimplify_and_add (t, &EH_FILTER_FAILURE (f));
2203
2204 t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL);
2205 append_to_statement_list (f, &TREE_OPERAND (t, 1));
2206
2207 *stmt_p = NULL;
2208 append_to_statement_list (t, stmt_p);
2209 }
2210
2211 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
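/* E.g., a TREE_LIST of (a, b, c) produces the chain a -> b -> c, and
   the function returns a.  */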
2212
2213 static tree
2214 list2chain (tree list)
2215 {
2216 tree t;
2217
2218 for (t = list; t; t = TREE_CHAIN (t))
2219 {
2220 tree var = TREE_VALUE (t);
2221 if (TREE_CHAIN (t))
2222 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
2223 else
2224 TREE_CHAIN (var) = NULL_TREE;
2225 }
2226
2227 return list ? TREE_VALUE (list) : NULL_TREE;
2228 }
2229
2230
2231 /* Remove barriers in REGION->EXIT's block. Note that this is only
2232 valid for OMP_PARALLEL regions. Since the end of a parallel region
2233 is an implicit barrier, any barrier that a workshare inside the
2234 OMP_PARALLEL left at the end of the OMP_PARALLEL region is now
2235 redundant and can be removed. */
2236
2237 static void
2238 remove_exit_barrier (struct omp_region *region)
2239 {
2240 block_stmt_iterator si;
2241 basic_block exit_bb;
2242 tree t;
2243
2244 gcc_assert (TREE_CODE (region->entry) == OMP_PARALLEL);
2245
2246 exit_bb = bb_for_stmt (region->exit);
2247
2248 /* The barrier should be immediately before OMP_RETURN_EXPR.
2249 Otherwise, we cannot remove it. */
2250 si = bsi_last (exit_bb);
2251 t = bsi_stmt (si);
2252 gcc_assert (TREE_CODE (t) == OMP_RETURN_EXPR);
2253 bsi_prev (&si);
2254 if (bsi_end_p (si))
2255 return;
2256
2257 t = bsi_stmt (si);
2258 if (TREE_CODE (t) == CALL_EXPR
2259 && get_callee_fndecl (t) == built_in_decls[BUILT_IN_GOMP_BARRIER])
2260 bsi_remove (&si, true);
2261 }
2262
2263
2264 /* Expand the OpenMP parallel directive starting at REGION. */
2265
2266 static void
2267 expand_omp_parallel (struct omp_region *region)
2268 {
2269 basic_block entry_bb, exit_bb, new_bb;
2270 struct function *child_cfun, *saved_cfun;
2271 tree child_fn, block, t, ws_args;
2272 block_stmt_iterator si;
2273 edge e;
2274
2275 child_fn = OMP_PARALLEL_FN (region->entry);
2276 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
2277 saved_cfun = cfun;
2278
2279 entry_bb = bb_for_stmt (region->entry);
2280 exit_bb = bb_for_stmt (region->exit);
2281
2282 if (is_combined_parallel (region))
2283 {
2284 ws_args = region->ws_args;
2285
2286 /* For combined parallel+workshare calls, barriers at the end of
2287 the function are not necessary and can be removed. Since the
2288 caller will have a barrier of its own, the workshare barrier is
2289 superfluous. */
2290 remove_exit_barrier (region);
2291 }
2292 else
2293 ws_args = NULL_TREE;
2294
2295 if (DECL_STRUCT_FUNCTION (OMP_PARALLEL_FN (region->entry))->cfg)
2296 {
2297 /* Due to inlining, it may happen that we have already outlined
2298 the region, in which case all we need to do is make the
2299 sub-graph unreachable and emit the parallel call. */
2300 edge entry_succ_e, exit_succ_e;
2301 block_stmt_iterator si;
2302
2303 entry_succ_e = single_succ_edge (entry_bb);
2304 exit_succ_e = single_succ_edge (exit_bb);
2305
2306 si = bsi_last (entry_bb);
2307 gcc_assert (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL);
2308 bsi_remove (&si, true);
2309
2310 new_bb = entry_bb;
2311 remove_edge (entry_succ_e);
2312 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
2313 }
2314 else
2315 {
2316 /* If the parallel region needs data sent from the parent
2317 function, then the very first statement of the parallel body
2318 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
2319 &.OMP_DATA_O is passed as an argument to the child function,
2320 we need to replace it with the argument as seen by the child
2321 function.
2322
2323 In most cases, this will end up being the identity assignment
2324 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
2325 a function call that has been inlined, the original PARM_DECL
2326 .OMP_DATA_I may have been converted into a different local
2327 variable, in which case we need to keep the assignment. */
2328 if (OMP_PARALLEL_DATA_ARG (region->entry))
2329 {
2330 basic_block entry_succ_bb = single_succ (entry_bb);
2331 block_stmt_iterator si = bsi_start (entry_succ_bb);
2332 tree stmt;
2333
2334 gcc_assert (!bsi_end_p (si));
2335
2336 stmt = bsi_stmt (si);
2337 gcc_assert (TREE_CODE (stmt) == MODIFY_EXPR
2338 && TREE_CODE (TREE_OPERAND (stmt, 1)) == ADDR_EXPR
2339 && TREE_OPERAND (TREE_OPERAND (stmt, 1), 0)
2340 == OMP_PARALLEL_DATA_ARG (region->entry));
2341
2342 if (TREE_OPERAND (stmt, 0) == DECL_ARGUMENTS (child_fn))
2343 bsi_remove (&si, true);
2344 else
2345 TREE_OPERAND (stmt, 1) = DECL_ARGUMENTS (child_fn);
2346 }
2347
2348 /* Declare local variables needed in CHILD_CFUN. */
2349 block = DECL_INITIAL (child_fn);
2350 BLOCK_VARS (block) = list2chain (child_cfun->unexpanded_var_list);
2351 DECL_SAVED_TREE (child_fn) = single_succ (entry_bb)->stmt_list;
2352
2353 /* Reset DECL_CONTEXT on locals and function arguments. */
2354 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
2355 DECL_CONTEXT (t) = child_fn;
2356
2357 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
2358 DECL_CONTEXT (t) = child_fn;
2359
2360 /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the
2361 child function. */
2362 si = bsi_last (entry_bb);
2363 t = bsi_stmt (si);
2364 gcc_assert (t && TREE_CODE (t) == OMP_PARALLEL);
2365 bsi_remove (&si, true);
2366 e = split_block (entry_bb, t);
2367 entry_bb = e->dest;
2368 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
2369
2370 /* Move the parallel region into CHILD_CFUN. We need to reset
2371 dominance information because the expansion of the inner
2372 regions has invalidated it. */
2373 free_dominance_info (CDI_DOMINATORS);
2374 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
2375 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
2376 cgraph_add_new_function (child_fn);
2377
2378 /* Convert the OMP_RETURN_EXPR into a RETURN_EXPR. */
2379 si = bsi_last (exit_bb);
2380 gcc_assert (!bsi_end_p (si)
2381 && TREE_CODE (bsi_stmt (si)) == OMP_RETURN_EXPR);
2382 t = build1 (RETURN_EXPR, void_type_node, NULL);
2383 bsi_insert_after (&si, t, TSI_SAME_STMT);
2384 bsi_remove (&si, true);
2385 }
2386
2387 /* Emit a library call to launch the children threads. */
2388 expand_parallel_call (region, new_bb, ws_args);
2389 }
2390
2391
2392 /* A subroutine of expand_omp_for. Generate code for a parallel
2393 loop with any schedule. Given parameters:
2394
2395 for (V = N1; V cond N2; V += STEP) BODY;
2396
2397 where COND is "<" or ">", we generate pseudocode
2398
2399 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
2400 if (more) goto L0; else goto L3;
2401 L0:
2402 V = istart0;
2403 iend = iend0;
2404 L1:
2405 BODY;
2406 V += STEP;
2407 if (V cond iend) goto L1; else goto L2;
2408 L2:
2409 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
2410 L3:
2411
2412 If this is a combined omp parallel loop, instead of the call to
2413 GOMP_loop_foo_start we emit 'goto L2' and get the first chunk from GOMP_loop_foo_next. */
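/* For example, with schedule(dynamic, 4) the caller passes
   BUILT_IN_GOMP_LOOP_DYNAMIC_START/_NEXT as START_FN/NEXT_FN, so the
   start call above becomes, roughly,
   GOMP_loop_dynamic_start (N1, N2, STEP, 4, &.istart0, &.iend0).  */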
2414
2415 static basic_block
2416 expand_omp_for_generic (struct omp_region *region,
2417 struct omp_for_data *fd,
2418 enum built_in_function start_fn,
2419 enum built_in_function next_fn)
2420 {
2421 tree l0, l1, l2, l3;
2422 tree type, istart0, iend0, iend;
2423 tree t, args, list;
2424 basic_block entry_bb, exit_bb, l0_bb, l1_bb, l2_bb;
2425 edge exit_edge;
2426 block_stmt_iterator si;
2427 bool in_combined_parallel = is_combined_parallel (region);
2428
2429 type = TREE_TYPE (fd->v);
2430
2431 istart0 = create_tmp_var (long_integer_type_node, ".istart0");
2432 iend0 = create_tmp_var (long_integer_type_node, ".iend0");
2433
2434 l0 = create_artificial_label ();
2435 l1 = create_artificial_label ();
2436 l2 = create_artificial_label ();
2437 l3 = create_artificial_label ();
2438 iend = create_tmp_var (type, NULL);
2439
2440 entry_bb = bb_for_stmt (region->entry);
2441 l1_bb = single_succ (entry_bb);
2442 exit_bb = bb_for_stmt (region->exit);
2443
2444 si = bsi_last (entry_bb);
2445 gcc_assert (bsi_stmt (si) && TREE_CODE (bsi_stmt (si)) == OMP_FOR);
2446 bsi_remove (&si, true);
2447 list = alloc_stmt_list ();
2448
2449 if (!in_combined_parallel)
2450 {
2451 /* If this is not a combined parallel loop, emit a call to
2452 GOMP_loop_foo_start in ENTRY_BB. */
2453 t = build_fold_addr_expr (iend0);
2454 args = tree_cons (NULL, t, NULL);
2455 t = build_fold_addr_expr (istart0);
2456 args = tree_cons (NULL, t, args);
2457 if (fd->chunk_size)
2458 {
2459 t = fold_convert (long_integer_type_node, fd->chunk_size);
2460 args = tree_cons (NULL, t, args);
2461 }
2462 t = fold_convert (long_integer_type_node, fd->step);
2463 args = tree_cons (NULL, t, args);
2464 t = fold_convert (long_integer_type_node, fd->n2);
2465 args = tree_cons (NULL, t, args);
2466 t = fold_convert (long_integer_type_node, fd->n1);
2467 args = tree_cons (NULL, t, args);
2468 t = build_function_call_expr (built_in_decls[start_fn], args);
2469 t = get_formal_tmp_var (t, &list);
2470 t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0),
2471 build_and_jump (&l3));
2472 append_to_statement_list (t, &list);
2473 si = bsi_last (entry_bb);
2474 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2475 }
2476
2477 /* Iteration setup for sequential loop goes in L0_BB. */
2478 list = alloc_stmt_list ();
2479 t = build1 (LABEL_EXPR, void_type_node, l0);
2480 gimplify_and_add (t, &list);
2481
2482 t = fold_convert (type, istart0);
2483 t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
2484 gimplify_and_add (t, &list);
2485
2486 t = fold_convert (type, iend0);
2487 t = build2 (MODIFY_EXPR, void_type_node, iend, t);
2488 gimplify_and_add (t, &list);
2489
2490 l0_bb = create_empty_bb (entry_bb);
2491 si = bsi_start (l0_bb);
2492 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2493
2494 /* Loop body goes in L1_BB. */
2496 si = bsi_start (l1_bb);
2497 bsi_insert_before (&si, build1 (LABEL_EXPR, void_type_node, l1),
2498 BSI_CONTINUE_LINKING);
2499
2500 /* Code to control the increment and predicate for the sequential
2501 loop goes in the first half of EXIT_BB (we split EXIT_BB so
2502 that we can inherit all the edges going out of the loop
2503 body). */
2504 list = alloc_stmt_list ();
2505
2506 t = build2 (PLUS_EXPR, type, fd->v, fd->step);
2507 t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
2508 gimplify_and_add (t, &list);
2509
2510 t = build2 (fd->cond_code, boolean_type_node, fd->v, iend);
2511 t = get_formal_tmp_var (t, &list);
2512 t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1),
2513 build_and_jump (&l2));
2514 append_to_statement_list (t, &list);
2515
2516 si = bsi_last (exit_bb);
2517 t = bsi_stmt (si);
2518 gcc_assert (t && TREE_CODE (t) == OMP_RETURN_EXPR);
2519 bsi_remove (&si, true);
2520 exit_edge = split_block (exit_bb, t);
2521 exit_edge->flags = EDGE_FALSE_VALUE;
2522
2523 si = bsi_last (exit_bb);
2524 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2525
2526 /* Emit code to get the next parallel iteration in L2_BB. */
2527 list = alloc_stmt_list ();
2528 t = build1 (LABEL_EXPR, void_type_node, l2);
2529 gimplify_and_add (t, &list);
2530
2531 t = build_fold_addr_expr (iend0);
2532 args = tree_cons (NULL, t, NULL);
2533 t = build_fold_addr_expr (istart0);
2534 args = tree_cons (NULL, t, args);
2535 t = build_function_call_expr (built_in_decls[next_fn], args);
2536 t = get_formal_tmp_var (t, &list);
2537 t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l0),
2538 build_and_jump (&l3));
2539 append_to_statement_list (t, &list);
2540
2541 l2_bb = exit_edge->dest;
2542 si = bsi_start (l2_bb);
2543 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2544
2545 /* Insert exit label on EXIT_EDGE. */
2546 exit_edge = single_succ_edge (l2_bb);
2547 t = build1 (LABEL_EXPR, void_type_node, l3);
2548 bsi_insert_on_edge_immediate (exit_edge, t);
2549 exit_edge->flags = EDGE_FALSE_VALUE;
2550
2551 /* Connect the new blocks. */
2552 remove_edge (single_succ_edge (entry_bb));
2553 if (in_combined_parallel)
2554 make_edge (entry_bb, l2_bb, EDGE_FALLTHRU);
2555 else
2556 {
2557 make_edge (entry_bb, l0_bb, EDGE_TRUE_VALUE);
2558 make_edge (entry_bb, exit_edge->dest, EDGE_FALSE_VALUE);
2559 }
2560
2561 make_edge (l0_bb, l1_bb, EDGE_FALLTHRU);
2562 make_edge (exit_bb, l1_bb, EDGE_TRUE_VALUE);
2563 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
2564
2565 return exit_edge->dest;
2566 }
2567
2568
2569 /* A subroutine of expand_omp_for. Generate code for a parallel
2570 loop with static schedule and no specified chunk size. Given
2571 parameters:
2572
2573 for (V = N1; V cond N2; V += STEP) BODY;
2574
2575 where COND is "<" or ">", we generate pseudocode
2576
2577 if (cond is <)
2578 adj = STEP - 1;
2579 else
2580 adj = STEP + 1;
2581 n = (adj + N2 - N1) / STEP;
2582 q = n / nthreads;
2583 q += (q * nthreads != n);
2584 s0 = q * threadid;
2585 e0 = min(s0 + q, n);
2586 if (s0 >= e0) goto L2; else goto L0;
2587 L0:
2588 V = s0 * STEP + N1;
2589 e = e0 * STEP + N1;
2590 L1:
2591 BODY;
2592 V += STEP;
2593 if (V cond e) goto L1;
2594 L2:
2595 */
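/* A worked example (numbers chosen for exposition): for n = 10 and
   nthreads = 4, q = 10/4 = 2 is bumped to 3 because 2 * 4 != 10; the
   threads then receive [0,3), [3,6), [6,9) and [9,10), the last range
   clipped by the MIN against n.  */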
2596
2597 static basic_block
2598 expand_omp_for_static_nochunk (struct omp_region *region,
2599 struct omp_for_data *fd)
2600 {
2601 tree l0, l1, l2, n, q, s0, e0, e, t, nthreads, threadid;
2602 tree type, utype, list;
2603 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, new_exit_bb;
2604 block_stmt_iterator si;
2605 edge exit_edge;
2606
2607 l0 = create_artificial_label ();
2608 l1 = create_artificial_label ();
2609 l2 = create_artificial_label ();
2610
2611 type = TREE_TYPE (fd->v);
2612 utype = lang_hooks.types.unsigned_type (type);
2613
2614 entry_bb = bb_for_stmt (region->entry);
2615 body_bb = single_succ (entry_bb);
2616 exit_bb = bb_for_stmt (region->exit);
2617
2618 /* Iteration space partitioning goes in ENTRY_BB. */
2619 list = alloc_stmt_list ();
2620 t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
2621 t = build_function_call_expr (t, NULL);
2622 t = fold_convert (utype, t);
2623 nthreads = get_formal_tmp_var (t, &list);
2624
2625 t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
2626 t = build_function_call_expr (t, NULL);
2627 t = fold_convert (utype, t);
2628 threadid = get_formal_tmp_var (t, &list);
2629
2630 fd->n1 = fold_convert (type, fd->n1);
2631 if (!is_gimple_val (fd->n1))
2632 fd->n1 = get_formal_tmp_var (fd->n1, &list);
2633
2634 fd->n2 = fold_convert (type, fd->n2);
2635 if (!is_gimple_val (fd->n2))
2636 fd->n2 = get_formal_tmp_var (fd->n2, &list);
2637
2638 fd->step = fold_convert (type, fd->step);
2639 if (!is_gimple_val (fd->step))
2640 fd->step = get_formal_tmp_var (fd->step, &list);
2641
2642 t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
2643 t = fold_build2 (PLUS_EXPR, type, fd->step, t);
2644 t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
2645 t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
2646 t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
2647 t = fold_convert (utype, t);
2648 if (is_gimple_val (t))
2649 n = t;
2650 else
2651 n = get_formal_tmp_var (t, &list);
2652
2653 t = build2 (TRUNC_DIV_EXPR, utype, n, nthreads);
2654 q = get_formal_tmp_var (t, &list);
2655
2656 t = build2 (MULT_EXPR, utype, q, nthreads);
2657 t = build2 (NE_EXPR, utype, t, n);
2658 t = build2 (PLUS_EXPR, utype, q, t);
2659 q = get_formal_tmp_var (t, &list);
2660
2661 t = build2 (MULT_EXPR, utype, q, threadid);
2662 s0 = get_formal_tmp_var (t, &list);
2663
2664 t = build2 (PLUS_EXPR, utype, s0, q);
2665 t = build2 (MIN_EXPR, utype, t, n);
2666 e0 = get_formal_tmp_var (t, &list);
2667
2668 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
2669 t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l2),
2670 build_and_jump (&l0));
2671 append_to_statement_list (t, &list);
2672
2673 si = bsi_last (entry_bb);
2674 gcc_assert (bsi_stmt (si) && TREE_CODE (bsi_stmt (si)) == OMP_FOR);
2675 bsi_remove (&si, true);
2676 si = bsi_last (entry_bb);
2677 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2678
2679 /* Setup code for sequential iteration goes in SEQ_START_BB. */
2680 list = alloc_stmt_list ();
2681
2682 t = build1 (LABEL_EXPR, void_type_node, l0);
2683 gimplify_and_add (t, &list);
2684
2685 t = fold_convert (type, s0);
2686 t = build2 (MULT_EXPR, type, t, fd->step);
2687 t = build2 (PLUS_EXPR, type, t, fd->n1);
2688 t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
2689 gimplify_and_add (t, &list);
2690
2691 t = fold_convert (type, e0);
2692 t = build2 (MULT_EXPR, type, t, fd->step);
2693 t = build2 (PLUS_EXPR, type, t, fd->n1);
2694 e = get_formal_tmp_var (t, &list);
2695
2696 seq_start_bb = create_empty_bb (entry_bb);
2697 si = bsi_start (seq_start_bb);
2698 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2699
2700 /* Original body goes in BODY_BB. */
2701 si = bsi_start (body_bb);
2702 t = build1 (LABEL_EXPR, void_type_node, l1);
2703 bsi_insert_before (&si, t, BSI_CONTINUE_LINKING);
2704
2705 /* Split EXIT_BB at the OMP_RETURN. The code controlling the
2706 sequential loop goes in the original EXIT_BB. The exit out of
2707 the parallel loop goes in the new block (NEW_EXIT_BB). */
2708 si = bsi_last (exit_bb);
2709 t = bsi_stmt (si);
2710 bsi_remove (&si, true);
2711 gcc_assert (t && TREE_CODE (t) == OMP_RETURN_EXPR);
2712 exit_edge = split_block (exit_bb, t);
2713 new_exit_bb = exit_edge->dest;
2714 list = alloc_stmt_list ();
2715
2716 t = build2 (PLUS_EXPR, type, fd->v, fd->step);
2717 t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
2718 gimplify_and_add (t, &list);
2719
2720 t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
2721 t = get_formal_tmp_var (t, &list);
2722 t = build3 (COND_EXPR, void_type_node, t, build_and_jump (&l1),
2723 build_and_jump (&l2));
2724 append_to_statement_list (t, &list);
2725
2726 si = bsi_last (exit_bb);
2727 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2728
2729 /* Add the exit label to NEW_EXIT_BB. */
2730 si = bsi_start (new_exit_bb);
2731 t = build1 (LABEL_EXPR, void_type_node, l2);
2732 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2733 single_succ_edge (new_exit_bb)->flags = EDGE_FALLTHRU;
2734
2735 /* Connect all the blocks. */
2736 make_edge (seq_start_bb, body_bb, EDGE_FALLTHRU);
2737
2738 remove_edge (single_succ_edge (entry_bb));
2739 make_edge (entry_bb, new_exit_bb, EDGE_TRUE_VALUE);
2740 make_edge (entry_bb, seq_start_bb, EDGE_FALSE_VALUE);
2741
2742 make_edge (exit_bb, body_bb, EDGE_TRUE_VALUE);
2743 find_edge (exit_bb, new_exit_bb)->flags = EDGE_FALSE_VALUE;
2744
2745 return new_exit_bb;
2746 }
2747
2748
2749 /* A subroutine of expand_omp_for. Generate code for a parallel
2750 loop with static schedule and a specified chunk size. Given
2751 parameters:
2752
2753 for (V = N1; V cond N2; V += STEP) BODY;
2754
2755 where COND is "<" or ">", we generate pseudocode
2756
2757 if (cond is <)
2758 adj = STEP - 1;
2759 else
2760 adj = STEP + 1;
2761 n = (adj + N2 - N1) / STEP;
2762 trip = 0;
2763 L0:
2764 s0 = (trip * nthreads + threadid) * CHUNK;
2765 e0 = min(s0 + CHUNK, n);
2766 if (s0 < n) goto L1; else goto L4;
2767 L1:
2768 V = s0 * STEP + N1;
2769 e = e0 * STEP + N1;
2770 L2:
2771 BODY;
2772 V += STEP;
2773 if (V cond e) goto L2; else goto L3;
2774 L3:
2775 trip += 1;
2776 goto L0;
2777 L4:
2778 */
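/* A worked example (numbers chosen for exposition): with CHUNK = 3,
   nthreads = 2 and threadid = 1, successive trips yield s0 = 3, 9,
   15, ...; for n = 10 that thread runs [3,6) and [9,10), then exits
   once s0 = 15 >= n.  */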
2779
2780 static basic_block
2781 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
2782 {
2783 tree l0, l1, l2, l3, l4, n, s0, e0, e, t;
2784 tree trip, nthreads, threadid;
2785 tree type, utype;
2786 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
2787 basic_block trip_update_bb, new_exit_bb;
2788 edge exit_edge;
2789 tree list;
2790 block_stmt_iterator si;
2791
2792 l0 = create_artificial_label ();
2793 l1 = create_artificial_label ();
2794 l2 = create_artificial_label ();
2795 l3 = create_artificial_label ();
2796 l4 = create_artificial_label ();
2797
2798 type = TREE_TYPE (fd->v);
2799 utype = lang_hooks.types.unsigned_type (type);
2800
2801 entry_bb = bb_for_stmt (region->entry);
2802 body_bb = single_succ (entry_bb);
2803
2804 exit_bb = bb_for_stmt (region->exit);
2805
2806 /* Trip and adjustment setup goes in ENTRY_BB. */
2807 list = alloc_stmt_list ();
2808
2809 t = built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS];
2810 t = build_function_call_expr (t, NULL);
2811 t = fold_convert (utype, t);
2812 nthreads = get_formal_tmp_var (t, &list);
2813
2814 t = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
2815 t = build_function_call_expr (t, NULL);
2816 t = fold_convert (utype, t);
2817 threadid = get_formal_tmp_var (t, &list);
2818
2819 fd->n1 = fold_convert (type, fd->n1);
2820 if (!is_gimple_val (fd->n1))
2821 fd->n1 = get_formal_tmp_var (fd->n1, &list);
2822
2823 fd->n2 = fold_convert (type, fd->n2);
2824 if (!is_gimple_val (fd->n2))
2825 fd->n2 = get_formal_tmp_var (fd->n2, &list);
2826
2827 fd->step = fold_convert (type, fd->step);
2828 if (!is_gimple_val (fd->step))
2829 fd->step = get_formal_tmp_var (fd->step, &list);
2830
2831 fd->chunk_size = fold_convert (utype, fd->chunk_size);
2832 if (!is_gimple_val (fd->chunk_size))
2833 fd->chunk_size = get_formal_tmp_var (fd->chunk_size, &list);
2834
2835 t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
2836 t = fold_build2 (PLUS_EXPR, type, fd->step, t);
2837 t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
2838 t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
2839 t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
2840 t = fold_convert (utype, t);
2841 if (is_gimple_val (t))
2842 n = t;
2843 else
2844 n = get_formal_tmp_var (t, &list);
2845
2846 t = build_int_cst (utype, 0);
2847 trip = get_initialized_tmp_var (t, &list, NULL);
2848
2849 si = bsi_last (entry_bb);
2850 gcc_assert (bsi_stmt (si) && TREE_CODE (bsi_stmt (si)) == OMP_FOR);
2851 bsi_remove (&si, true);
2852 si = bsi_last (entry_bb);
2853 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2854
2855 /* Iteration space partitioning goes in ITER_PART_BB. */
2856 list = alloc_stmt_list ();
2857
2858 t = build1 (LABEL_EXPR, void_type_node, l0);
2859 gimplify_and_add (t, &list);
2860
2861 t = build2 (MULT_EXPR, utype, trip, nthreads);
2862 t = build2 (PLUS_EXPR, utype, t, threadid);
2863 t = build2 (MULT_EXPR, utype, t, fd->chunk_size);
2864 s0 = get_formal_tmp_var (t, &list);
2865
2866 t = build2 (PLUS_EXPR, utype, s0, fd->chunk_size);
2867 t = build2 (MIN_EXPR, utype, t, n);
2868 e0 = get_formal_tmp_var (t, &list);
2869
2870 t = build2 (LT_EXPR, boolean_type_node, s0, n);
2871 t = build3 (COND_EXPR, void_type_node, t,
2872 build_and_jump (&l1), build_and_jump (&l4));
2873 append_to_statement_list (t, &list);
2874
2875 iter_part_bb = create_empty_bb (entry_bb);
2876 si = bsi_start (iter_part_bb);
2877 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2878
2879 /* Setup code for sequential iteration goes in SEQ_START_BB. */
2880 list = alloc_stmt_list ();
2881
2882 t = build1 (LABEL_EXPR, void_type_node, l1);
2883 gimplify_and_add (t, &list);
2884
2885 t = fold_convert (type, s0);
2886 t = build2 (MULT_EXPR, type, t, fd->step);
2887 t = build2 (PLUS_EXPR, type, t, fd->n1);
2888 t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
2889 gimplify_and_add (t, &list);
2890
2891 t = fold_convert (type, e0);
2892 t = build2 (MULT_EXPR, type, t, fd->step);
2893 t = build2 (PLUS_EXPR, type, t, fd->n1);
2894 e = get_formal_tmp_var (t, &list);
2895
2896 seq_start_bb = create_empty_bb (iter_part_bb);
2897 si = bsi_start (seq_start_bb);
2898 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2899
2900 /* Main loop body goes in BODY_BB. */
2901 si = bsi_start (body_bb);
2902 t = build1 (LABEL_EXPR, void_type_node, l2);
2903 bsi_insert_before (&si, t, BSI_CONTINUE_LINKING);
2904
2905 /* Split EXIT_BB. The code controlling the sequential loop goes in
2906 the first half. The trip update code goes into the second half
2907 (TRIP_UPDATE_BB). */
2908 list = alloc_stmt_list ();
2909
2910 t = build2 (PLUS_EXPR, type, fd->v, fd->step);
2911 t = build2 (MODIFY_EXPR, void_type_node, fd->v, t);
2912 gimplify_and_add (t, &list);
2913
2914 t = build2 (fd->cond_code, boolean_type_node, fd->v, e);
2915 t = get_formal_tmp_var (t, &list);
2916 t = build3 (COND_EXPR, void_type_node, t,
2917 build_and_jump (&l2), build_and_jump (&l3));
2918 append_to_statement_list (t, &list);
2919
2920 si = bsi_last (exit_bb);
2921 t = bsi_stmt (si);
2922 gcc_assert (t && TREE_CODE (t) == OMP_RETURN_EXPR);
2923 bsi_remove (&si, true);
2924 exit_edge = split_block (exit_bb, t);
2925 si = bsi_last (exit_bb);
2926 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2927
2928 /* Trip update code goes into TRIP_UPDATE_BB. */
2929 trip_update_bb = exit_edge->dest;
2930 list = alloc_stmt_list ();
2931
2932 t = build1 (LABEL_EXPR, void_type_node, l3);
2933 gimplify_and_add (t, &list);
2934
2935 t = build_int_cst (utype, 1);
2936 t = build2 (PLUS_EXPR, utype, trip, t);
2937 t = build2 (MODIFY_EXPR, void_type_node, trip, t);
2938 gimplify_and_add (t, &list);
2939
2940 si = bsi_start (trip_update_bb);
2941 bsi_insert_after (&si, list, BSI_CONTINUE_LINKING);
2942 exit_edge = single_succ_edge (trip_update_bb);
2943 exit_edge->flags = EDGE_FALLTHRU;
2944 new_exit_bb = exit_edge->dest;
2945
2946 /* Insert exit label on EXIT_EDGE. */
2947 t = build1 (LABEL_EXPR, void_type_node, l4);
2948 bsi_insert_on_edge_immediate (exit_edge, t);
2949
2950 /* Connect the new blocks. */
2951 remove_edge (single_succ_edge (entry_bb));
2952 make_edge (entry_bb, iter_part_bb, EDGE_FALLTHRU);
2953
2954 make_edge (iter_part_bb, seq_start_bb, EDGE_TRUE_VALUE);
2955 make_edge (iter_part_bb, new_exit_bb, EDGE_FALSE_VALUE);
2956 remove_edge (exit_edge);
2957
2958 make_edge (seq_start_bb, body_bb, EDGE_FALLTHRU);
2959
2960 make_edge (exit_bb, body_bb, EDGE_TRUE_VALUE);
2961 find_edge (exit_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
2962
2963 make_edge (trip_update_bb, iter_part_bb, EDGE_FALLTHRU);
2964
2965 return new_exit_bb;
2966 }
2967
2968
2969 /* Expand the OpenMP loop defined by REGION. */
2970
2971 static void
2972 expand_omp_for (struct omp_region *region)
2973 {
2974 struct omp_for_data fd;
2975 basic_block last_bb = NULL;
2976
2977 push_gimplify_context ();
2978
2979 extract_omp_for_data (region->entry, &fd);
2980
2981 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC && !fd.have_ordered)
2982 {
2983 if (fd.chunk_size == NULL)
2984 last_bb = expand_omp_for_static_nochunk (region, &fd);
2985 else
2986 last_bb = expand_omp_for_static_chunk (region, &fd);
2987 }
2988 else
2989 {
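/* This arithmetic relies on the GOMP_loop_*_START and *_NEXT
   builtins being laid out consecutively in schedule order (static,
   dynamic, guided, runtime), with the 'ordered' flavors four
   entries further on.  */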
2990 int fn_index = fd.sched_kind + fd.have_ordered * 4;
2991 int start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
2992 int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
2993 last_bb = expand_omp_for_generic (region, &fd, start_ix, next_ix);
2994 }
2995
2996 pop_gimplify_context (NULL);
2997 }
2998
2999
3000 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
3001
3002 v = GOMP_sections_start (n);
3003 L0:
3004 switch (v)
3005 {
3006 case 0:
3007 goto L2;
3008 case 1:
3009 section 1;
3010 goto L1;
3011 case 2:
3012 ...
3013 case n:
3014 ...
3015 default:
3016 abort ();
3017 }
3018 L1:
3019 v = GOMP_sections_next ();
3020 goto L0;
3021 L2:
3022 reduction;
3023
3024 If this is a combined parallel sections, replace the call to
3025 GOMP_sections_start with 'goto L1'. */
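/* For instance, with two sections LEN is 2: GOMP_sections_start (2)
   (and later GOMP_sections_next ()) returns 1 or 2 to select a
   section, and 0 once all sections have been handed out.  */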
3026
3027 static void
3028 expand_omp_sections (struct omp_region *region)
3029 {
3030 tree label_vec, l0, l1, l2, t, u, v;
3031 unsigned i, len;
3032 basic_block entry_bb, exit_bb, l0_bb, l1_bb, default_bb;
3033 edge e, entry_edge, exit_edge;
3034 edge_iterator ei;
3035 block_stmt_iterator si;
3036
3037 entry_bb = bb_for_stmt (region->entry);
3038 exit_bb = bb_for_stmt (region->exit);
3039
3040 l0 = create_artificial_label ();
3041 l1 = create_artificial_label ();
3042 l2 = create_artificial_label ();
3043
3044 v = create_tmp_var (unsigned_type_node, ".section");
3045
3046 /* We will build a switch() with enough cases for all the
3047 OMP_SECTION regions, a '0' case to signal that no more work remains
3048 and a default case to abort if something goes wrong. */
3049 len = EDGE_COUNT (entry_bb->succs);
3050 label_vec = make_tree_vec (len + 2);
3051
3052 /* Split ENTRY_BB. The call to GOMP_sections_start goes in the
3053 first half. The second half contains the switch(). */
3054 si = bsi_last (entry_bb);
3055 t = bsi_stmt (si);
3056 gcc_assert (t && TREE_CODE (t) == OMP_SECTIONS);
3057 bsi_remove (&si, true);
3058 entry_edge = split_block (entry_bb, t);
3059 l0_bb = entry_edge->dest;
3060
3061 if (!is_combined_parallel (region))
3062 {
3063 /* If we are not inside a combined parallel+sections region,
3064 call GOMP_sections_start. */
3065 t = build_int_cst (unsigned_type_node, len);
3066 t = tree_cons (NULL, t, NULL);
3067 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
3068 t = build_function_call_expr (u, t);
3069 t = build2 (MODIFY_EXPR, void_type_node, v, t);
3070 si = bsi_last (entry_bb);
3071 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3072 }
3073
3074 /* The switch() statement replacing OMP_SECTIONS goes in L0_BB. */
3075 si = bsi_last (l0_bb);
3076
3077 t = build1 (LABEL_EXPR, void_type_node, l0);
3078 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3079
3080 t = build3 (SWITCH_EXPR, void_type_node, v, NULL, label_vec);
3081 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3082
3083 t = build3 (CASE_LABEL_EXPR, void_type_node,
3084 build_int_cst (unsigned_type_node, 0), NULL, l2);
3085 TREE_VEC_ELT (label_vec, 0) = t;
3086
3087 /* Convert each OMP_SECTION into a CASE_LABEL_EXPR. */
3088 i = 1;
3089 FOR_EACH_EDGE (e, ei, l0_bb->succs)
3090 {
3091 basic_block s_entry_bb, s_exit_bb;
3092
3093 e->flags = 0;
3094 s_entry_bb = e->dest;
3095 si = bsi_last (s_entry_bb);
3096 t = bsi_stmt (si);
3097 gcc_assert (t && TREE_CODE (t) == OMP_SECTION);
3098 s_exit_bb = bb_for_stmt (lookup_omp_region (t)->exit);
3099 bsi_remove (&si, true);
3100
3101 t = create_artificial_label ();
3102 u = build_int_cst (unsigned_type_node, i);
3103 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
3104 TREE_VEC_ELT (label_vec, i) = u;
3105 t = build1 (LABEL_EXPR, void_type_node, t);
3106 si = bsi_last (s_entry_bb);
3107 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3108 i++;
3109 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
3110
3111 si = bsi_last (s_exit_bb);
3112 t = bsi_stmt (si);
3113 gcc_assert (t && TREE_CODE (t) == OMP_RETURN_EXPR);
3114 bsi_remove (&si, true);
3115 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
3116 }
3117
3118 /* Error handling code goes in DEFAULT_BB. */
3119 default_bb = create_empty_bb (entry_bb);
3120 si = bsi_start (default_bb);
3121 t = create_artificial_label ();
3122 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
3123 TREE_VEC_ELT (label_vec, len + 1) = u;
3124 t = build1 (LABEL_EXPR, void_type_node, t);
3125 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3126
3127 t = built_in_decls[BUILT_IN_TRAP];
3128 t = build_function_call_expr (t, NULL);
3129 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3130
3131 make_edge (l0_bb, default_bb, 0);
3132
3133 /* Code to get the next section goes in L1_BB. */
3134 si = bsi_last (exit_bb);
3135 t = bsi_stmt (si);
3136 gcc_assert (t && TREE_CODE (t) == OMP_RETURN_EXPR);
3137 bsi_remove (&si, true);
3138 exit_edge = split_block (exit_bb, t);
3139 l1_bb = exit_edge->src;
3140 exit_bb = exit_edge->dest;
3141 si = bsi_start (l1_bb);
3142 t = build1 (LABEL_EXPR, void_type_node, l1);
3143 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3144
3145 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
3146 t = build_function_call_expr (t, NULL);
3147 t = build2 (MODIFY_EXPR, void_type_node, v, t);
3148 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3149
3150 remove_edge (single_succ_edge (l1_bb));
3151 make_edge (l1_bb, l0_bb, EDGE_FALLTHRU);
3152
3153 /* Exit label in EXIT_BB. */
3154 si = bsi_last (exit_bb);
3155 t = build1 (LABEL_EXPR, void_type_node, l2);
3156 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3157
3158 make_edge (l0_bb, exit_bb, 0);
3159 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
3160
3161 if (is_combined_parallel (region))
3162 {
3163 /* If this was a combined parallel+sections region, we did not
3164 emit a GOMP_sections_start in the entry block, so we just
3165 need to jump to L1_BB to get the next section. */
3166 remove_edge (single_succ_edge (entry_bb));
3167 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
3168 }
3169 }
3170
3171
3172 /* Generic expansion for OpenMP synchronization directives: single,
3173 master, ordered and critical. All we need to do here is remove the
3174 entry and exit markers for REGION. */
3175
3176 static void
3177 expand_omp_synch (struct omp_region *region)
3178 {
3179 basic_block entry_bb, exit_bb;
3180 block_stmt_iterator si;
3181 tree t;
3182
3183 entry_bb = bb_for_stmt (region->entry);
3184 exit_bb = bb_for_stmt (region->exit);
3185
3186 si = bsi_last (entry_bb);
3187 t = bsi_stmt (si);
3188 gcc_assert (t
3189 && (TREE_CODE (t) == OMP_SINGLE
3190 || TREE_CODE (t) == OMP_MASTER
3191 || TREE_CODE (t) == OMP_ORDERED
3192 || TREE_CODE (t) == OMP_CRITICAL));
3193 bsi_remove (&si, true);
3194 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3195
3196 si = bsi_last (exit_bb);
3197 t = bsi_stmt (si);
3198 gcc_assert (t && TREE_CODE (t) == OMP_RETURN_EXPR);
3199 bsi_remove (&si, true);
3200 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
3201 }
3202
3203
3204 /* Expand the parallel region tree rooted at REGION. Expansion
3205 proceeds in depth-first order. Innermost regions are expanded
3206 first. This way, parallel regions that require a new function to
3207 be created (e.g., OMP_PARALLEL) can be expanded without having any
3208 internal dependencies in their body. */
3209
3210 static void
3211 expand_omp (struct omp_region *region)
3212 {
3213 while (region)
3214 {
3215 enum tree_code code = TREE_CODE (region->entry);
3216
3217 if (region->inner)
3218 expand_omp (region->inner);
3219
3220 switch (code)
3221 {
3222 case OMP_PARALLEL:
3223 expand_omp_parallel (region);
3224 break;
3225
3226 case OMP_FOR:
3227 expand_omp_for (region);
3228 break;
3229
3230 case OMP_SECTIONS:
3231 expand_omp_sections (region);
3232 break;
3233
3234 case OMP_SECTION:
3235 /* Individual omp sections are handled together with their
3236 parent OMP_SECTIONS region. */
3237 break;
3238
3239 case OMP_SINGLE:
3240 case OMP_MASTER:
3241 case OMP_ORDERED:
3242 case OMP_CRITICAL:
3243 expand_omp_synch (region);
3244 break;
3245
3246 default:
3247 gcc_unreachable ();
3248 }
3249
3250 /* Expansion adds and removes basic blocks and edges, and creates
3251 and exposes unreachable regions that need to be cleaned up
3252 before proceeding. */
3253 free_dominance_info (CDI_DOMINATORS);
3254 free_dominance_info (CDI_POST_DOMINATORS);
3255 cleanup_tree_cfg ();
3256
3257 region = region->next;
3258 }
3259 }
3260
3261
3262 /* Helper for build_omp_regions. Scan the dominator tree starting at
3263 block BB. PARENT is the region that contains BB. */
3264
3265 static void
3266 build_omp_regions_1 (basic_block bb, struct omp_region *parent)
3267 {
3268 block_stmt_iterator si;
3269 tree stmt;
3270 basic_block son;
3271
3272 si = bsi_last (bb);
3273 if (!bsi_end_p (si) && OMP_DIRECTIVE_P (bsi_stmt (si)))
3274 {
3275 struct omp_region *region;
3276
3277 stmt = bsi_stmt (si);
3278
3279 if (TREE_CODE (stmt) == OMP_RETURN_EXPR)
3280 {
3281 /* STMT is the return point out of region PARENT. Mark it
3282 as the exit point and make PARENT the immediately
3283 enclosing region. */
3284 gcc_assert (parent);
3285 region = parent;
3286 region->exit = stmt;
3287 parent = parent->outer;
3288
3289 /* If REGION is a parallel region, determine whether it is
3290 a combined parallel+workshare region. */
3291 if (TREE_CODE (region->entry) == OMP_PARALLEL)
3292 determine_parallel_type (region);
3293 }
3294 else
3295 {
3296 /* Otherwise, this directive becomes the parent for a new
3297 region. */
3298 region = new_omp_region (stmt, parent);
3299 parent = region;
3300 }
3301
3302 gcc_assert (region);
3303 if (omp_regions == NULL)
3304 {
3305 omp_regions = splay_tree_new (splay_tree_compare_pointers, 0, 0);
3306 root_omp_region = region;
3307 }
3308
3309 splay_tree_insert (omp_regions, (splay_tree_key) stmt,
3310 (splay_tree_value) region);
3311 }
3312
3313 for (son = first_dom_son (CDI_DOMINATORS, bb);
3314 son;
3315 son = next_dom_son (CDI_DOMINATORS, son))
3316 build_omp_regions_1 (son, parent);
3317 }
3318
3319
3320 /* Scan the CFG and build a tree of OMP regions. Return the root of
3321 the OMP region tree. */
3322
3323 static void
3324 build_omp_regions (void)
3325 {
3326 gcc_assert (omp_regions == NULL && root_omp_region == NULL);
3327 calculate_dominance_info (CDI_DOMINATORS);
3328 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL);
3329 }
3330
3331
3332 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
3333
3334 static void
3335 execute_expand_omp (void)
3336 {
3337 build_omp_regions ();
3338
3339 if (root_omp_region)
3340 {
3341 if (dump_file)
3342 {
3343 fprintf (dump_file, "\nOMP region tree\n\n");
3344 dump_omp_region (dump_file, root_omp_region, 0);
3345 fprintf (dump_file, "\n");
3346 }
3347
3348 expand_omp (root_omp_region);
3349 splay_tree_delete (omp_regions);
3350 root_omp_region = NULL;
3351 omp_regions = NULL;
3352 }
3353 }
3354
3355 static bool
3356 gate_expand_omp (void)
3357 {
3358 return flag_openmp != 0 && errorcount == 0;
3359 }
3360
3361 struct tree_opt_pass pass_expand_omp =
3362 {
3363 "ompexp", /* name */
3364 gate_expand_omp, /* gate */
3365 execute_expand_omp, /* execute */
3366 NULL, /* sub */
3367 NULL, /* next */
3368 0, /* static_pass_number */
3369 0, /* tv_id */
3370 PROP_gimple_any, /* properties_required */
3371 PROP_gimple_lomp, /* properties_provided */
3372 0, /* properties_destroyed */
3373 0, /* todo_flags_start */
3374 TODO_dump_func, /* todo_flags_finish */
3375 0 /* letter */
3376 };
3377 \f
3378 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
3379
3380 /* Lower the OpenMP sections directive in *STMT_P. */
3381
3382 static void
3383 lower_omp_sections (tree *stmt_p, omp_context *ctx)
3384 {
3385 tree new_stmt, stmt, body, bind, block, ilist, olist, new_body;
3386 tree dlist, region_exit;
3387 tree_stmt_iterator tsi;
3388 unsigned i, len;
3389
3390 stmt = *stmt_p;
3391
3392 gcc_assert (OMP_SECTIONS_SECTIONS (stmt) == NULL_TREE);
3393
3394 push_gimplify_context ();
3395
3396 dlist = NULL;
3397 ilist = NULL;
3398 lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt), &ilist, &dlist, ctx);
3399
3400 tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
3401 for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi))
3402 continue;
3403
3404 /* There are two markers per section and one end marker for the
3405 whole construct. */
3406 OMP_SECTIONS_SECTIONS (stmt) = make_tree_vec (2 * len + 1);
3407
3408 tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
3409 body = alloc_stmt_list ();
3410 for (i = 0; i < len; i++, tsi_next (&tsi))
3411 {
3412 omp_context *sctx;
3413 tree sec_start, sec_end, sec_body;
3414
3415 sec_start = tsi_stmt (tsi);
3416 sec_body = alloc_stmt_list ();
3417 sctx = maybe_lookup_ctx (sec_start);
3418 gcc_assert (sctx);
3419
3420 lower_omp (&OMP_SECTION_BODY (sec_start), sctx);
3421 append_to_statement_list (OMP_SECTION_BODY (sec_start), &sec_body);
3422
3423 if (i == len - 1)
3424 {
3425 tree l = alloc_stmt_list ();
3426 lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt), NULL,
3427 &l, ctx);
3428 append_to_statement_list (l, &sec_body);
3429 }
3430
3431 sec_end = make_node (OMP_RETURN_EXPR);
3432
3433 OMP_SECTION_BODY (sec_start) = sec_body;
3434 append_to_statement_list (sec_start, &body);
3435 append_to_statement_list (sec_end, &body);
3436
3437 TREE_VEC_ELT (OMP_SECTIONS_SECTIONS (stmt), i * 2) = sec_start;
3438 TREE_VEC_ELT (OMP_SECTIONS_SECTIONS (stmt), i * 2 + 1) = sec_end;
3439 }
3440
3441 block = make_node (BLOCK);
3442 bind = build3 (BIND_EXPR, void_type_node, NULL, body, block);
3443 maybe_catch_exception (&BIND_EXPR_BODY (bind));
3444
3445 olist = NULL_TREE;
3446 lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt), &olist, ctx);
3447
3448 /* Unless there's a nowait clause, add a barrier afterward. */
3449 if (!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt), OMP_CLAUSE_NOWAIT))
3450 build_omp_barrier (&olist);
3451
3452 pop_gimplify_context (NULL_TREE);
3453 record_vars_into (ctx->block_vars, ctx->cb.dst_fn);
3454
3455 new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
3456 TREE_SIDE_EFFECTS (new_stmt) = 1;
3457 OMP_SECTIONS_BODY (stmt) = body;
3458
3459 region_exit = make_node (OMP_RETURN_EXPR);
3460
3461 new_body = alloc_stmt_list ();
3462 append_to_statement_list (ilist, &new_body);
3463 append_to_statement_list (stmt, &new_body);
3464 append_to_statement_list (region_exit, &new_body);
3465 append_to_statement_list (olist, &new_body);
3466 append_to_statement_list (dlist, &new_body);
3467 BIND_EXPR_BODY (new_stmt) = new_body;
3468
3469 TREE_VEC_ELT (OMP_SECTIONS_SECTIONS (stmt), 2 * len) = region_exit;
3470
3471 *stmt_p = new_stmt;
3472 }
3473
3474
3475 /* A subroutine of lower_omp_single. Expand the simple form of
3476 an OMP_SINGLE, without a copyprivate clause:
3477
3478 if (GOMP_single_start ())
3479 BODY;
3480 [ GOMP_barrier (); ] -> unless 'nowait' is present.
3481
3482 FIXME. It may be better to delay expanding the logic of this until
3483 pass_expand_omp. The expanded logic may make the job more difficult
3484 for a synchronization analysis pass. */
3485
3486 static void
3487 lower_omp_single_simple (tree single_stmt, tree *pre_p)
3488 {
3489 tree t;
3490
3491 t = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
3492 t = build_function_call_expr (t, NULL);
3493 t = build3 (COND_EXPR, void_type_node, t,
3494 OMP_SINGLE_BODY (single_stmt), NULL);
3495 gimplify_and_add (t, pre_p);
3496
3497 if (!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt), OMP_CLAUSE_NOWAIT))
3498 build_omp_barrier (pre_p);
3499 }
3500
3501
3502 /* A subroutine of lower_omp_single. Expand the form of an OMP_SINGLE
3503 that has a copyprivate clause:
3504
3505 #pragma omp single copyprivate (a, b, c)
3506
3507 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
3508
3509 {
3510 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
3511 {
3512 BODY;
3513 copyout.a = a;
3514 copyout.b = b;
3515 copyout.c = c;
3516 GOMP_single_copy_end (&copyout);
3517 }
3518 else
3519 {
3520 a = copyout_p->a;
3521 b = copyout_p->b;
3522 c = copyout_p->c;
3523 }
3524 GOMP_barrier ();
3525 }
3526
3527 FIXME. It may be better to delay expanding the logic of this until
3528 pass_expand_omp. The expanded logic may make the job more difficult
3529 for a synchronization analysis pass. */
3530
3531 static void
3532 lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
3533 {
3534 tree ptr_type, t, args, l0, l1, l2, copyin_seq;
3535
3536 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
3537
3538 ptr_type = build_pointer_type (ctx->record_type);
3539 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
3540
3541 l0 = create_artificial_label ();
3542 l1 = create_artificial_label ();
3543 l2 = create_artificial_label ();
3544
3545 t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START];
3546 t = build_function_call_expr (t, NULL);
3547 t = fold_convert (ptr_type, t);
3548 t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t);
3549 gimplify_and_add (t, pre_p);
3550
3551 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
3552 build_int_cst (ptr_type, 0));
3553 t = build3 (COND_EXPR, void_type_node, t,
3554 build_and_jump (&l0), build_and_jump (&l1));
3555 gimplify_and_add (t, pre_p);
3556
3557 t = build1 (LABEL_EXPR, void_type_node, l0);
3558 gimplify_and_add (t, pre_p);
3559
3560 append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p);
3561
3562 copyin_seq = NULL;
3563 lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p,
3564 &copyin_seq, ctx);
3565
3566 t = build_fold_addr_expr (ctx->sender_decl);
3567 args = tree_cons (NULL, t, NULL);
3568 t = built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END];
3569 t = build_function_call_expr (t, args);
3570 gimplify_and_add (t, pre_p);
3571
3572 t = build_and_jump (&l2);
3573 gimplify_and_add (t, pre_p);
3574
3575 t = build1 (LABEL_EXPR, void_type_node, l1);
3576 gimplify_and_add (t, pre_p);
3577
3578 append_to_statement_list (copyin_seq, pre_p);
3579
3580 t = build1 (LABEL_EXPR, void_type_node, l2);
3581 gimplify_and_add (t, pre_p);
3582
3583 build_omp_barrier (pre_p);
3584 }
3585
3586
3587 /* Lower code for an OpenMP single directive. */
3588
3589 static void
3590 lower_omp_single (tree *stmt_p, omp_context *ctx)
3591 {
3592 tree t, bind, block, single_stmt = *stmt_p, dlist;
3593
3594 push_gimplify_context ();
3595
3596 block = make_node (BLOCK);
3597 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
3598 TREE_SIDE_EFFECTS (bind) = 1;
3599
3600 lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt),
3601 &BIND_EXPR_BODY (bind), &dlist, ctx);
3602 lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx);
3603
3604 if (ctx->record_type)
3605 lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx);
3606 else
3607 lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind));
3608
3609 append_to_statement_list (dlist, &BIND_EXPR_BODY (bind));
3610 t = make_node (OMP_RETURN_EXPR);
3611 append_to_statement_list (t, &BIND_EXPR_BODY (bind));
3612 maybe_catch_exception (&BIND_EXPR_BODY (bind));
3613 pop_gimplify_context (bind);
3614
3615 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
3616 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
3617
3618 OMP_SINGLE_BODY (single_stmt) = alloc_stmt_list ();
3619 append_to_statement_list (bind, &OMP_SINGLE_BODY (single_stmt));
3620 }
3621
3622
3623 /* Lower code for an OpenMP master directive. */
3624
3625 static void
3626 lower_omp_master (tree *stmt_p, omp_context *ctx)
3627 {
3628 tree bind, block, stmt = *stmt_p, lab = NULL, x;
3629
3630 push_gimplify_context ();
3631
3632 block = make_node (BLOCK);
3633 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
3634 TREE_SIDE_EFFECTS (bind) = 1;
3635
3636 x = built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM];
3637 x = build_function_call_expr (x, NULL);
3638 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
3639 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
3640 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
3641
3642 lower_omp (&OMP_MASTER_BODY (stmt), ctx);
3643 append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind));
3644
3645 x = build1 (LABEL_EXPR, void_type_node, lab);
3646 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
3647 x = make_node (OMP_RETURN_EXPR);
3648 append_to_statement_list (x, &BIND_EXPR_BODY (bind));
3649 maybe_catch_exception (&BIND_EXPR_BODY (bind));
3650 pop_gimplify_context (bind);
3651
3652 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
3653 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
3654
3655 OMP_MASTER_BODY (stmt) = alloc_stmt_list ();
3656 append_to_statement_list (bind, &OMP_MASTER_BODY (stmt));
3657 }
3658
3659
3660 /* Lower code for an OpenMP ordered directive. */
3661
3662 static void
3663 lower_omp_ordered (tree *stmt_p, omp_context *ctx)
3664 {
3665 tree bind, block, stmt = *stmt_p, x;
3666
3667 push_gimplify_context ();
3668
3669 block = make_node (BLOCK);
3670 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
3671 TREE_SIDE_EFFECTS (bind) = 1;
3672
3673 x = built_in_decls[BUILT_IN_GOMP_ORDERED_START];
3674 x = build_function_call_expr (x, NULL);
3675 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
3676
3677 lower_omp (&OMP_ORDERED_BODY (stmt), ctx);
3678 append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));
3679
3680 x = built_in_decls[BUILT_IN_GOMP_ORDERED_END];
3681 x = build_function_call_expr (x, NULL);
3682 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
3683 x = make_node (OMP_RETURN_EXPR);
3684 append_to_statement_list (x, &BIND_EXPR_BODY (bind));
3685 maybe_catch_exception (&BIND_EXPR_BODY (bind));
3686 pop_gimplify_context (bind);
3687
3688 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
3689 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
3690
3691 OMP_ORDERED_BODY (stmt) = alloc_stmt_list ();
3692 append_to_statement_list (bind, &OMP_ORDERED_BODY (stmt));
3693 }
3694
3695
3696 /* Gimplify an OMP_CRITICAL statement. This is a relatively simple
3697 substitution of a couple of function calls. The NAMED case, however,
3698 requires that the languages coordinate on a symbol name. It is
3699 therefore best put here in common code. */
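/* For example, '#pragma omp critical (foo)' creates, once per name, a
   common symbol .gomp_critical_user_foo and brackets the body with
   GOMP_critical_name_start and GOMP_critical_name_end called on its
   address; the unnamed form simply uses GOMP_critical_start and
   GOMP_critical_end.  */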
3700
static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (tree *stmt_p, omp_context *ctx)
{
  tree bind, block, stmt = *stmt_p;
  tree t, lock, unlock, name;

  name = OMP_CRITICAL_NAME (stmt);
  if (name)
    {
      tree decl, args;
      splay_tree_node n;

      if (!critical_name_mutexes)
	critical_name_mutexes
	  = splay_tree_new_ggc (splay_tree_compare_pointers);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
	{
	  char *new_str;

	  decl = create_tmp_var_raw (ptr_type_node, NULL);

	  new_str = ACONCAT ((".gomp_critical_user_",
			      IDENTIFIER_POINTER (name), NULL));
	  DECL_NAME (decl) = get_identifier (new_str);
	  TREE_PUBLIC (decl) = 1;
	  TREE_STATIC (decl) = 1;
	  DECL_COMMON (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;
	  DECL_IGNORED_P (decl) = 1;
	  cgraph_varpool_finalize_decl (decl);

	  splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
			     (splay_tree_value) decl);
	}
      else
	decl = (tree) n->value;

      args = tree_cons (NULL, build_fold_addr_expr (decl), NULL);
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
      lock = build_function_call_expr (lock, args);

      args = tree_cons (NULL, build_fold_addr_expr (decl), NULL);
      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
      unlock = build_function_call_expr (unlock, args);
    }
  else
    {
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
      lock = build_function_call_expr (lock, NULL);

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_function_call_expr (unlock, NULL);
    }

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
  TREE_SIDE_EFFECTS (bind) = 1;

  gimplify_and_add (lock, &BIND_EXPR_BODY (bind));

  lower_omp (&OMP_CRITICAL_BODY (stmt), ctx);
  maybe_catch_exception (&OMP_CRITICAL_BODY (stmt));
  append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind));

  gimplify_and_add (unlock, &BIND_EXPR_BODY (bind));
  t = make_node (OMP_RETURN_EXPR);
  append_to_statement_list (t, &BIND_EXPR_BODY (bind));

  pop_gimplify_context (bind);
  BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
  BLOCK_VARS (block) = BIND_EXPR_VARS (bind);

  OMP_CRITICAL_BODY (stmt) = alloc_stmt_list ();
  append_to_statement_list (bind, &OMP_CRITICAL_BODY (stmt));
}


/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *BODY_P.  */

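/* For example, for a loop with the header (V = 0; V < N2; V++), the
   lastprivate copy-out is gated on (V >= N2); when the step is known
   to be +1 or -1, the stronger test (V == N2) is used instead.  */
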
static void
lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
			   struct omp_context *ctx)
{
  tree clauses, cond;
  enum tree_code cond_code;

  cond_code = fd->cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let
     VRP-style optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);

  clauses = OMP_FOR_CLAUSES (fd->for_stmt);
  lower_lastprivate_clauses (clauses, cond, body_p, ctx);
}


/* Lower code for an OpenMP loop directive.  */

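/* A sketch of the layout this function produces inside the new
   BIND_EXPR, in source order (the OMP_FOR itself is expanded later,
   by pass_expand_omp):

	<pre-body and input clauses: firstprivate copy-in, etc.>
	#pragma omp for (V = VAL1; V cond VAL2; V = V [+-] VAL3)
	  <loop body>  OMP_RETURN
	<reductions, then lastprivate copy-out and destructors>
	<barrier, unless NOWAIT was given>  */
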
static void
lower_omp_for (tree *stmt_p, omp_context *ctx)
{
  tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p;
  struct omp_for_data fd;

  stmt = *stmt_p;

  push_gimplify_context ();

  lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
  lower_omp (&OMP_FOR_BODY (stmt), ctx);

  /* Move the declarations of temporaries out of the loop body and
     into the enclosing function before we make the body go away.  */
  if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR)
    record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn);

  new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (new_stmt) = 1;
  body_p = &BIND_EXPR_BODY (new_stmt);

  /* The pre-body and input clauses go before the lowered OMP_FOR.  */
  ilist = NULL;
  dlist = NULL;
  append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
  lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx);

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  rhs_p = &TREE_OPERAND (OMP_FOR_INIT (stmt), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  rhs_p = &TREE_OPERAND (TREE_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
  if (!is_gimple_min_invariant (*rhs_p))
    *rhs_p = get_formal_tmp_var (*rhs_p, body_p);

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd);

  /* Region exit marker goes at the end of the loop body.  */
  t = make_node (OMP_RETURN_EXPR);
  append_to_statement_list (t, &OMP_FOR_BODY (stmt));
  maybe_catch_exception (&OMP_FOR_BODY (stmt));
  append_to_statement_list (stmt, body_p);

  /* After the loop, add exit clauses.  */
  lower_omp_for_lastprivate (&fd, &dlist, ctx);
  lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx);
  append_to_statement_list (dlist, body_p);

  /* Add a barrier unless the user specified NOWAIT.  Note that if
     this is a combined parallel+loop construct, the barrier will be
     optimized away during expansion (see expand_omp_for).  */
  if (!fd.have_nowait)
    {
      tree barrier = alloc_stmt_list ();
      build_omp_barrier (&barrier);
      append_to_statement_list (barrier, body_p);
    }

  pop_gimplify_context (NULL_TREE);
  record_vars_into (ctx->block_vars, ctx->cb.dst_fn);

  OMP_FOR_PRE_BODY (stmt) = NULL_TREE;
  *stmt_p = new_stmt;
}


/* Lower the OpenMP parallel directive in *STMT_P.  CTX holds context
   information for the directive.  */

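/* A sketch of the result.  The sender record (.omp_data_o) is set up
   around the OMP_PARALLEL in the parent, and the body is rewritten to
   receive it through the receiver pointer built by the scan phase
   (.omp_data_i in dumps):

	<send clauses: .omp_data_o.x = x; ...>
	#pragma omp parallel [clauses]
	  .omp_data_i = &.omp_data_o;
	  <copy-in, body, copy-out/reductions>  OMP_RETURN
	<send clauses epilogue: x = .omp_data_o.x; ...>  */
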
static void
lower_omp_parallel (tree *stmt_p, omp_context *ctx)
{
  tree clauses, par_bind, par_body, new_body, bind;
  tree olist, ilist, par_olist, par_ilist;
  tree stmt, child_fn, t;

  stmt = *stmt_p;

  clauses = OMP_PARALLEL_CLAUSES (stmt);
  par_bind = OMP_PARALLEL_BODY (stmt);
  par_body = BIND_EXPR_BODY (par_bind);
  child_fn = ctx->cb.dst_fn;

  push_gimplify_context ();

  par_olist = NULL_TREE;
  par_ilist = NULL_TREE;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (&par_body, ctx);
  maybe_catch_exception (&par_body);
  lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (BIND_EXPR_VARS (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o");
      OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl;
    }

  olist = NULL_TREE;
  ilist = NULL_TREE;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside OMP_PARALLEL_BODY.  */
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  append_to_statement_list (ilist, &BIND_EXPR_BODY (bind));

  new_body = alloc_stmt_list ();

  if (ctx->record_type)
    {
      t = build_fold_addr_expr (ctx->sender_decl);
      t = build2 (MODIFY_EXPR, void_type_node, ctx->receiver_decl, t);
      append_to_statement_list (t, &new_body);
    }

  append_to_statement_list (par_ilist, &new_body);
  append_to_statement_list (par_body, &new_body);
  append_to_statement_list (par_olist, &new_body);
  t = make_node (OMP_RETURN_EXPR);
  append_to_statement_list (t, &new_body);
  OMP_PARALLEL_BODY (stmt) = new_body;

  append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
  append_to_statement_list (olist, &BIND_EXPR_BODY (bind));

  *stmt_p = bind;

  pop_gimplify_context (NULL_TREE);
}


/* Pass *TP back through the gimplifier within the context determined by WI.
   This handles replacement of DECL_VALUE_EXPR, as well as adjusting the
   flags on ADDR_EXPR.  */

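/* For example, a reference to a shared variable X in the parallel body
   may carry a DECL_VALUE_EXPR such as .omp_data_i->x; gimplifying the
   reference substitutes that expression, and any statements the
   gimplifier emits are linked in before the current one.  */
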
static void
lower_regimplify (tree *tp, struct walk_stmt_info *wi)
{
  enum gimplify_status gs;
  tree pre = NULL;

  if (wi->is_lhs)
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_lvalue, fb_lvalue);
  else if (wi->val_only)
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_val, fb_rvalue);
  else
    gs = gimplify_expr (tp, &pre, NULL, is_gimple_formal_tmp_var, fb_rvalue);
  gcc_assert (gs == GS_ALL_DONE);

  if (pre)
    tsi_link_before (&wi->tsi, pre, TSI_SAME_STMT);
}


/* Callback for walk_stmts.  Lower the OpenMP directive pointed to by TP.  */

static tree
lower_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  omp_context *ctx = wi->info;
  tree t = *tp;

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (errorcount && OMP_DIRECTIVE_P (*tp))
    {
      *tp = build_empty_stmt ();
      return NULL_TREE;
    }

  *walk_subtrees = 0;
  switch (TREE_CODE (*tp))
    {
    case OMP_PARALLEL:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_parallel (tp, ctx);
      break;

    case OMP_FOR:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_for (tp, ctx);
      break;

    case OMP_SECTIONS:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_sections (tp, ctx);
      break;

    case OMP_SINGLE:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_single (tp, ctx);
      break;

    case OMP_MASTER:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_master (tp, ctx);
      break;

    case OMP_ORDERED:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_ordered (tp, ctx);
      break;

    case OMP_CRITICAL:
      ctx = maybe_lookup_ctx (t);
      gcc_assert (ctx);
      lower_omp_critical (tp, ctx);
      break;

    case VAR_DECL:
      if (ctx && DECL_HAS_VALUE_EXPR_P (t))
	lower_regimplify (tp, wi);
      break;

    case ADDR_EXPR:
      if (ctx)
	lower_regimplify (tp, wi);
      break;

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case COMPONENT_REF:
    case VIEW_CONVERT_EXPR:
      if (ctx)
	lower_regimplify (tp, wi);
      break;

    case INDIRECT_REF:
      if (ctx)
	{
	  wi->is_lhs = false;
	  wi->val_only = true;
	  lower_regimplify (&TREE_OPERAND (t, 0), wi);
	}
      break;

    default:
      if (!TYPE_P (t) && !DECL_P (t))
	*walk_subtrees = 1;
      break;
    }

  return NULL_TREE;
}

static void
lower_omp (tree *stmt_p, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.callback = lower_omp_1;
  wi.info = ctx;
  wi.val_only = true;
  wi.want_locations = true;

  walk_stmts (&wi, stmt_p);
}
\f
/* Main entry point.  */

static void
execute_lower_omp (void)
{
  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
  gcc_assert (parallel_nesting_level == 0);

  if (all_contexts->root)
    lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
}

static bool
gate_lower_omp (void)
{
  return flag_openmp != 0;
}

struct tree_opt_pass pass_lower_omp =
{
  "omplower",				/* name */
  gate_lower_omp,			/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  0,					/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func,			/* todo_flags_finish */
  0					/* letter */
};
\f
/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;

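/* For instance, given the (hypothetical) user code:

	goto l;			<-- "invalid entry" reported here
	#pragma omp critical
	  {
	l:  ...;
	    return;		<-- "invalid exit" reported here
	  }

   both branches cross a structured block boundary: pass 1 below
   records where each label lives, and pass 2 checks every branch
   against its destination.  */
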
/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
{
  bool exit_p = true;

  if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
    return false;

  /* Try to avoid confusing the user by producing an error message
     with the correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");

  *stmt_p = build_empty_stmt ();
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where in the tree each label is found.  */

static tree
diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  tree inner_context;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      /* The minimal context here is just a tree of statements.  */
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
      inner_context = tree_cons (NULL, t, context);
      wi->info = inner_context;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case LABEL_EXPR:
      splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  tree context = (tree) wi->info;
  splay_tree_node n;
  tree t = *tp;

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
      /* FALLTHRU */
    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      wi->info = t;
      walk_stmts (wi, &OMP_BODY (t));
      wi->info = context;
      break;

    case OMP_FOR:
      walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
      wi->info = t;
      walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
      walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
      walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
      walk_stmts (wi, &OMP_FOR_BODY (t));
      wi->info = context;
      break;

    case GOTO_EXPR:
      {
	tree lab = GOTO_DESTINATION (t);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE);
      }
      break;

    case SWITCH_EXPR:
      {
	tree vec = SWITCH_LABELS (t);
	int i, len = TREE_VEC_LENGTH (vec);
	for (i = 0; i < len; ++i)
	  {
	    tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
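	    /* As in the GOTO_EXPR case above, allow for a label that
	       pass 1 did not record.  */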
	    if (diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE))
	      break;
	  }
      }
      break;

    case RETURN_EXPR:
      diagnose_sb_0 (tp, context, NULL_TREE);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

void
diagnose_omp_structured_block_errors (tree fndecl)
{
  tree save_current = current_function_decl;
  struct walk_stmt_info wi;

  current_function_decl = fndecl;

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  wi.callback = diagnose_sb_1;
  walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));

  memset (&wi, 0, sizeof (wi));
  wi.callback = diagnose_sb_2;
  wi.want_locations = true;
  wi.want_return_expr = true;
  walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));

  splay_tree_delete (all_labels);
  all_labels = NULL;

  current_function_decl = save_current;
}

#include "gt-omp-low.h"