1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "tree-gimple.h"
31 #include "tree-inline.h"
32 #include "langhooks.h"
33 #include "diagnostic.h"
34 #include "tree-flow.h"
35 #include "timevar.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "toplev.h"
40 #include "tree-pass.h"
41 #include "ggc.h"
42 #include "except.h"
43 #include "splay-tree.h"
44 #include "optabs.h"
45 #include "cfgloop.h"
46
47 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
48 phases. The first phase scans the function looking for OMP statements
49 and then for variables that must be replaced to satisfy data sharing
50 clauses. The second phase expands code for the constructs, as well as
51 re-gimplifying things when variables have been replaced with complex
52 expressions.
53
54 Final code generation is done by pass_expand_omp. The flowgraph is
55 scanned for parallel regions which are then moved to a new
56 function, to be invoked by the thread library. */
57
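/* As a rough illustration (not the exact GIMPLE produced), a construct
   such as

	#pragma omp parallel shared(x)
	  x += 1;

   is outlined into a child function, foo.omp_fn.0 say, that receives a
   pointer to a .omp_data_s record; the field for x holds either a copy
   of the variable or its address, as decided by use_pointer_for_field
   below.  The parallel itself then becomes, roughly,

	.omp_data_o.x = ...;
	GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
	foo.omp_fn.0 (&.omp_data_o);
	GOMP_parallel_end ();
	... copy-out of x if needed ...  */
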
58 /* Context structure. Used to store information about each parallel
59 directive in the code. */
60
61 typedef struct omp_context
62 {
63 /* This field must be at the beginning, as we do "inheritance": Some
64 callback functions for tree-inline.c (e.g., omp_copy_decl)
65 receive a copy_body_data pointer that is up-casted to an
66 omp_context pointer. */
67 copy_body_data cb;
68
69 /* The tree of contexts corresponding to the encountered constructs. */
70 struct omp_context *outer;
71 tree stmt;
72
73 /* Map variables to fields in a structure that allows communication
74 between sending and receiving threads. */
75 splay_tree field_map;
76 tree record_type;
77 tree sender_decl;
78 tree receiver_decl;
79
80 /* These are used just by task contexts, if task firstprivate fn is
81 needed.  srecord_type is used to communicate from the thread
82 that encountered the task construct to the task firstprivate fn,
83 while record_type is allocated by GOMP_task, initialized by the task
84 firstprivate fn and passed to the task body fn.  */
85 splay_tree sfield_map;
86 tree srecord_type;
87
88 /* A chain of variables to add to the top-level block surrounding the
89 construct. In the case of a parallel, this is in the child function. */
90 tree block_vars;
91
92 /* What to do with variables with implicitly determined sharing
93 attributes. */
94 enum omp_clause_default_kind default_kind;
95
96 /* Nesting depth of this context. Used to beautify error messages re
97 invalid gotos. The outermost ctx is depth 1, with depth 0 being
98 reserved for the main body of the function. */
99 int depth;
100
101 /* True if this parallel directive is nested within another. */
102 bool is_nested;
103 } omp_context;
104
105
106 struct omp_for_data_loop
107 {
108 tree v, n1, n2, step;
109 enum tree_code cond_code;
110 };
111
112 /* A structure describing the main elements of a parallel loop. */
113
114 struct omp_for_data
115 {
116 struct omp_for_data_loop loop;
117 tree chunk_size, for_stmt;
118 tree pre, iter_type;
119 int collapse;
120 bool have_nowait, have_ordered;
121 enum omp_clause_schedule_kind sched_kind;
122 struct omp_for_data_loop *loops;
123 };
124
125
126 static splay_tree all_contexts;
127 static int taskreg_nesting_level;
128 struct omp_region *root_omp_region;
129 static bitmap task_shared_vars;
130
131 static void scan_omp (tree *, omp_context *);
132 static void lower_omp (tree *, omp_context *);
133 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
134 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
135
136 /* Find an OpenMP clause of type KIND within CLAUSES. */
137
138 tree
139 find_omp_clause (tree clauses, enum tree_code kind)
140 {
141 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
142 if (OMP_CLAUSE_CODE (clauses) == kind)
143 return clauses;
144
145 return NULL_TREE;
146 }
147
148 /* Return true if CTX is for an omp parallel. */
149
150 static inline bool
151 is_parallel_ctx (omp_context *ctx)
152 {
153 return TREE_CODE (ctx->stmt) == OMP_PARALLEL;
154 }
155
156
157 /* Return true if CTX is for an omp task. */
158
159 static inline bool
160 is_task_ctx (omp_context *ctx)
161 {
162 return TREE_CODE (ctx->stmt) == OMP_TASK;
163 }
164
165
166 /* Return true if CTX is for an omp parallel or omp task. */
167
168 static inline bool
169 is_taskreg_ctx (omp_context *ctx)
170 {
171 return TREE_CODE (ctx->stmt) == OMP_PARALLEL
172 || TREE_CODE (ctx->stmt) == OMP_TASK;
173 }
174
175
176 /* Return true if REGION is a combined parallel+workshare region. */
177
178 static inline bool
179 is_combined_parallel (struct omp_region *region)
180 {
181 return region->is_combined_parallel;
182 }
183
184
185 /* Extract the header elements of parallel loop FOR_STMT and store
186 them into *FD. */
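/* A sketch of the normalization performed below (not the exact trees
   built): a loop written as

	for (i = 0; i <= n; i++)

   has its condition rewritten to i < n + 1 (LE_EXPR becomes LT_EXPR with
   N2 incremented by one), and a descending loop using >= is likewise
   turned into > with N2 decremented by one.  A MINUS_EXPR increment is
   folded into a negated step, so every loop ends up described by V, N1,
   N2, a signed STEP and a condition that is either LT_EXPR or GT_EXPR.  */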
187
188 static void
189 extract_omp_for_data (tree for_stmt, struct omp_for_data *fd,
190 struct omp_for_data_loop *loops)
191 {
192 tree t, var, *collapse_iter, *collapse_count;
193 tree count = NULL_TREE, iter_type = long_integer_type_node;
194 struct omp_for_data_loop *loop;
195 int i;
196 struct omp_for_data_loop dummy_loop;
197
198 fd->for_stmt = for_stmt;
199 fd->pre = NULL;
200 fd->collapse = TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt));
201 if (fd->collapse > 1)
202 fd->loops = loops;
203 else
204 fd->loops = &fd->loop;
205
206 fd->have_nowait = fd->have_ordered = false;
207 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
208 fd->chunk_size = NULL_TREE;
209 collapse_iter = NULL;
210 collapse_count = NULL;
211
212 for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
213 switch (OMP_CLAUSE_CODE (t))
214 {
215 case OMP_CLAUSE_NOWAIT:
216 fd->have_nowait = true;
217 break;
218 case OMP_CLAUSE_ORDERED:
219 fd->have_ordered = true;
220 break;
221 case OMP_CLAUSE_SCHEDULE:
222 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
223 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
224 break;
225 case OMP_CLAUSE_COLLAPSE:
226 if (fd->collapse > 1)
227 {
228 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
229 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
230 }
231 default:
232 break;
233 }
234
235 /* FIXME: for now map schedule(auto) to schedule(static).
236 There should be analysis to determine whether all iterations
237 are approximately the same amount of work (then schedule(static)
238 is best) or if it varies (then schedule(dynamic,N) is better). */
239 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
240 {
241 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
242 gcc_assert (fd->chunk_size == NULL);
243 }
244 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
245 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
246 gcc_assert (fd->chunk_size == NULL);
247 else if (fd->chunk_size == NULL)
248 {
249 /* We only need to compute a default chunk size for ordered
250 static loops and dynamic loops. */
251 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
252 || fd->have_ordered
253 || fd->collapse > 1)
254 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
255 ? integer_zero_node : integer_one_node;
256 }
257
258 for (i = 0; i < fd->collapse; i++)
259 {
260 if (fd->collapse == 1)
261 loop = &fd->loop;
262 else if (loops != NULL)
263 loop = loops + i;
264 else
265 loop = &dummy_loop;
266
267 t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
268 gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
269 loop->v = GIMPLE_STMT_OPERAND (t, 0);
270 gcc_assert (SSA_VAR_P (loop->v));
271 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
272 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
273 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
274 loop->n1 = GIMPLE_STMT_OPERAND (t, 1);
275
276 t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
277 loop->cond_code = TREE_CODE (t);
278 gcc_assert (TREE_OPERAND (t, 0) == var);
279 loop->n2 = TREE_OPERAND (t, 1);
280 switch (loop->cond_code)
281 {
282 case LT_EXPR:
283 case GT_EXPR:
284 break;
285 case LE_EXPR:
286 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
287 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
288 loop->n2, size_one_node);
289 else
290 loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
291 build_int_cst (TREE_TYPE (loop->n2), 1));
292 loop->cond_code = LT_EXPR;
293 break;
294 case GE_EXPR:
295 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
296 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
297 loop->n2, size_int (-1));
298 else
299 loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
300 build_int_cst (TREE_TYPE (loop->n2), 1));
301 loop->cond_code = GT_EXPR;
302 break;
303 default:
304 gcc_unreachable ();
305 }
306
307 t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
308 gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
309 gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
310 t = GIMPLE_STMT_OPERAND (t, 1);
311 gcc_assert (TREE_OPERAND (t, 0) == var);
312 switch (TREE_CODE (t))
313 {
314 case PLUS_EXPR:
315 case POINTER_PLUS_EXPR:
316 loop->step = TREE_OPERAND (t, 1);
317 break;
318 case MINUS_EXPR:
319 loop->step = TREE_OPERAND (t, 1);
320 loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
321 loop->step);
322 break;
323 default:
324 gcc_unreachable ();
325 }
326
327 if (iter_type != long_long_unsigned_type_node)
328 {
329 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
330 iter_type = long_long_unsigned_type_node;
331 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
332 && TYPE_PRECISION (TREE_TYPE (loop->v))
333 >= TYPE_PRECISION (iter_type))
334 {
335 tree n;
336
337 if (loop->cond_code == LT_EXPR)
338 n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
339 loop->n2, loop->step);
340 else
341 n = loop->n1;
342 if (TREE_CODE (n) != INTEGER_CST
343 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
344 iter_type = long_long_unsigned_type_node;
345 }
346 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
347 > TYPE_PRECISION (iter_type))
348 {
349 tree n1, n2;
350
351 if (loop->cond_code == LT_EXPR)
352 {
353 n1 = loop->n1;
354 n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
355 loop->n2, loop->step);
356 }
357 else
358 {
359 n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
360 loop->n2, loop->step);
361 n2 = loop->n1;
362 }
363 if (TREE_CODE (n1) != INTEGER_CST
364 || TREE_CODE (n2) != INTEGER_CST
365 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
366 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
367 iter_type = long_long_unsigned_type_node;
368 }
369 }
370
371 if (collapse_count && *collapse_count == NULL)
372 {
373 if ((i == 0 || count != NULL_TREE)
374 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
375 && TREE_CONSTANT (loop->n1)
376 && TREE_CONSTANT (loop->n2)
377 && TREE_CODE (loop->step) == INTEGER_CST)
378 {
379 tree itype = TREE_TYPE (loop->v);
380
381 if (POINTER_TYPE_P (itype))
382 itype
383 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
384 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
385 t = fold_build2 (PLUS_EXPR, itype,
386 fold_convert (itype, loop->step), t);
387 t = fold_build2 (PLUS_EXPR, itype, t,
388 fold_convert (itype, loop->n2));
389 t = fold_build2 (MINUS_EXPR, itype, t,
390 fold_convert (itype, loop->n1));
391 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
392 t = fold_build2 (TRUNC_DIV_EXPR, itype,
393 fold_build1 (NEGATE_EXPR, itype, t),
394 fold_build1 (NEGATE_EXPR, itype,
395 fold_convert (itype,
396 loop->step)));
397 else
398 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
399 fold_convert (itype, loop->step));
400 t = fold_convert (long_long_unsigned_type_node, t);
401 if (count != NULL_TREE)
402 count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
403 count, t);
404 else
405 count = t;
406 if (TREE_CODE (count) != INTEGER_CST)
407 count = NULL_TREE;
408 }
409 else
410 count = NULL_TREE;
411 }
412 }
413
414 if (count)
415 {
416 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
417 iter_type = long_long_unsigned_type_node;
418 else
419 iter_type = long_integer_type_node;
420 }
421 else if (collapse_iter && *collapse_iter != NULL)
422 iter_type = TREE_TYPE (*collapse_iter);
423 fd->iter_type = iter_type;
424 if (collapse_iter && *collapse_iter == NULL)
425 *collapse_iter = create_tmp_var (iter_type, ".iter");
426 if (collapse_count && *collapse_count == NULL)
427 {
428 if (count)
429 *collapse_count = fold_convert (iter_type, count);
430 else
431 *collapse_count = create_tmp_var (iter_type, ".count");
432 }
433
434 if (fd->collapse > 1)
435 {
436 fd->loop.v = *collapse_iter;
437 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
438 fd->loop.n2 = *collapse_count;
439 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
440 fd->loop.cond_code = LT_EXPR;
441 }
442 }
443
444
445 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
446 is the immediate dominator of PAR_ENTRY_BB, return true if there
447 are no data dependencies that would prevent expanding the parallel
448 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
449
450 When expanding a combined parallel+workshare region, the call to
451 the child function may need additional arguments in the case of
452 OMP_FOR regions. In some cases, these arguments are computed out
453 of variables passed in from the parent to the child via 'struct
454 .omp_data_s'. For instance:
455
456 #pragma omp parallel for schedule (guided, i * 4)
457 for (j ...)
458
459 Is lowered into:
460
461 # BLOCK 2 (PAR_ENTRY_BB)
462 .omp_data_o.i = i;
463 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
464
465 # BLOCK 3 (WS_ENTRY_BB)
466 .omp_data_i = &.omp_data_o;
467 D.1667 = .omp_data_i->i;
468 D.1598 = D.1667 * 4;
469 #pragma omp for schedule (guided, D.1598)
470
471 When we outline the parallel region, the call to the child function
472 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
473 that value is computed *after* the call site. So, in principle we
474 cannot do the transformation.
475
476 To see whether the code in WS_ENTRY_BB blocks the combined
477 parallel+workshare call, we collect all the variables used in the
478 OMP_FOR header and check whether they appear on the LHS of any
479 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
480 call.
481
482 FIXME. If we had the SSA form built at this point, we could merely
483 hoist the code in block 3 into block 2 and be done with it. But at
484 this point we don't have dataflow information and though we could
485 hack something up here, it is really not worth the aggravation. */
486
487 static bool
488 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
489 {
490 struct omp_for_data fd;
491 tree par_stmt, ws_stmt;
492
493 par_stmt = last_stmt (par_entry_bb);
494 ws_stmt = last_stmt (ws_entry_bb);
495
496 if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
497 return true;
498
499 gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);
500
501 extract_omp_for_data (ws_stmt, &fd, NULL);
502
503 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
504 return false;
505 if (fd.iter_type != long_integer_type_node)
506 return false;
507
508 /* FIXME. We give up too easily here. If any of these arguments
509 are not constants, they will likely involve variables that have
510 been mapped into fields of .omp_data_s for sharing with the child
511 function. With appropriate data flow, it would be possible to
512 see through this. */
513 if (!is_gimple_min_invariant (fd.loop.n1)
514 || !is_gimple_min_invariant (fd.loop.n2)
515 || !is_gimple_min_invariant (fd.loop.step)
516 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
517 return false;
518
519 return true;
520 }
521
522
523 /* Collect additional arguments needed to emit a combined
524 parallel+workshare call. WS_STMT is the workshare directive being
525 expanded. */
526
527 static tree
528 get_ws_args_for (tree ws_stmt)
529 {
530 tree t;
531
532 if (TREE_CODE (ws_stmt) == OMP_FOR)
533 {
534 struct omp_for_data fd;
535 tree ws_args;
536
537 extract_omp_for_data (ws_stmt, &fd, NULL);
538
539 ws_args = NULL_TREE;
540 if (fd.chunk_size)
541 {
542 t = fold_convert (long_integer_type_node, fd.chunk_size);
543 ws_args = tree_cons (NULL, t, ws_args);
544 }
545
546 t = fold_convert (long_integer_type_node, fd.loop.step);
547 ws_args = tree_cons (NULL, t, ws_args);
548
549 t = fold_convert (long_integer_type_node, fd.loop.n2);
550 ws_args = tree_cons (NULL, t, ws_args);
551
552 t = fold_convert (long_integer_type_node, fd.loop.n1);
553 ws_args = tree_cons (NULL, t, ws_args);
554
555 return ws_args;
556 }
557 else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
558 {
559 /* Number of sections is equal to the number of edges from the
560 OMP_SECTIONS_SWITCH statement, except for the one to the exit
561 of the sections region. */
562 basic_block bb = single_succ (bb_for_stmt (ws_stmt));
563 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
564 t = tree_cons (NULL, t, NULL);
565 return t;
566 }
567
568 gcc_unreachable ();
569 }
570
571
572 /* Discover whether REGION is a combined parallel+workshare region. */
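/* A region is "combined" when the parallel and the workshare can be
   started together; in that case the loop bounds, step and chunk size
   (or the section count) collected by get_ws_args_for above are passed
   along as extra arguments when the region is expanded, instead of
   being recomputed inside the child function.  (This is only a summary;
   the actual call generation happens in pass_expand_omp.)  */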
573
574 static void
575 determine_parallel_type (struct omp_region *region)
576 {
577 basic_block par_entry_bb, par_exit_bb;
578 basic_block ws_entry_bb, ws_exit_bb;
579
580 if (region == NULL || region->inner == NULL
581 || region->exit == NULL || region->inner->exit == NULL
582 || region->inner->cont == NULL)
583 return;
584
585 /* We only support parallel+for and parallel+sections. */
586 if (region->type != OMP_PARALLEL
587 || (region->inner->type != OMP_FOR
588 && region->inner->type != OMP_SECTIONS))
589 return;
590
591 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
592 WS_EXIT_BB -> PAR_EXIT_BB. */
593 par_entry_bb = region->entry;
594 par_exit_bb = region->exit;
595 ws_entry_bb = region->inner->entry;
596 ws_exit_bb = region->inner->exit;
597
598 if (single_succ (par_entry_bb) == ws_entry_bb
599 && single_succ (ws_exit_bb) == par_exit_bb
600 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
601 && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb))
602 || (last_and_only_stmt (ws_entry_bb)
603 && last_and_only_stmt (par_exit_bb))))
604 {
605 tree ws_stmt = last_stmt (ws_entry_bb);
606
607 if (region->inner->type == OMP_FOR)
608 {
609 /* If this is a combined parallel loop, we need to determine
610 whether or not to use the combined library calls. There
611 are two cases where we do not apply the transformation:
612 static loops and any kind of ordered loop. In the first
613 case, we already open code the loop so there is no need
614 to do anything else. In the latter case, the combined
615 parallel loop call would still need extra synchronization
616 to implement ordered semantics, so there would not be any
617 gain in using the combined call. */
618 tree clauses = OMP_FOR_CLAUSES (ws_stmt);
619 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
620 if (c == NULL
621 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
622 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
623 {
624 region->is_combined_parallel = false;
625 region->inner->is_combined_parallel = false;
626 return;
627 }
628 }
629
630 region->is_combined_parallel = true;
631 region->inner->is_combined_parallel = true;
632 region->ws_args = get_ws_args_for (ws_stmt);
633 }
634 }
635
636
637 /* Return true if EXPR is variable sized. */
638
639 static inline bool
640 is_variable_sized (const_tree expr)
641 {
642 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
643 }
644
645 /* Return true if DECL is a reference type. */
646
647 static inline bool
648 is_reference (tree decl)
649 {
650 return lang_hooks.decls.omp_privatize_by_reference (decl);
651 }
652
653 /* Lookup variables in the decl or field splay trees.  The "maybe" form
654 allows the variable to not have been entered, otherwise we assert
655 that the variable must have been entered.  */
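/* Note that two different maps are involved here: CTX->cb.decl_map
   (queried by lookup_decl/maybe_lookup_decl) maps an original decl to
   the replacement decl created inside the construct, while
   CTX->field_map and CTX->sfield_map (queried by lookup_field and
   lookup_sfield) map it to the FIELD_DECL used to pass the variable
   through the .omp_data_s record.  */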
656
657 static inline tree
658 lookup_decl (tree var, omp_context *ctx)
659 {
660 tree *n;
661 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
662 return *n;
663 }
664
665 static inline tree
666 maybe_lookup_decl (const_tree var, omp_context *ctx)
667 {
668 tree *n;
669 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
670 return n ? *n : NULL_TREE;
671 }
672
673 static inline tree
674 lookup_field (tree var, omp_context *ctx)
675 {
676 splay_tree_node n;
677 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
678 return (tree) n->value;
679 }
680
681 static inline tree
682 lookup_sfield (tree var, omp_context *ctx)
683 {
684 splay_tree_node n;
685 n = splay_tree_lookup (ctx->sfield_map
686 ? ctx->sfield_map : ctx->field_map,
687 (splay_tree_key) var);
688 return (tree) n->value;
689 }
690
691 static inline tree
692 maybe_lookup_field (tree var, omp_context *ctx)
693 {
694 splay_tree_node n;
695 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
696 return n ? (tree) n->value : NULL_TREE;
697 }
698
699 /* Return true if DECL should be copied by pointer. SHARED_CTX is
700 the parallel context if DECL is to be shared. */
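/* As a rough summary of the checks below: aggregates, variables whose
   address is taken and variables visible outside the construct (statics,
   externals, decls with a DECL_VALUE_EXPR) are passed by pointer, while
   a plain automatic scalar that is not shared more widely can use
   copy-in/copy-out of its value.  Task constructs additionally avoid
   copy-out because the task may still be running when GOMP_task
   returns.  */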
701
702 static bool
703 use_pointer_for_field (tree decl, omp_context *shared_ctx)
704 {
705 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
706 return true;
707
708 /* We can only use copy-in/copy-out semantics for shared variables
709 when we know the value is not accessible from an outer scope. */
710 if (shared_ctx)
711 {
712 /* ??? Trivially accessible from anywhere. But why would we even
713 be passing an address in this case? Should we simply assert
714 this to be false, or should we have a cleanup pass that removes
715 these from the list of mappings? */
716 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
717 return true;
718
719 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
720 without analyzing the expression whether or not its location
721 is accessible to anyone else. In the case of nested parallel
722 regions it certainly may be. */
723 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
724 return true;
725
726 /* Do not use copy-in/copy-out for variables that have their
727 address taken. */
728 if (TREE_ADDRESSABLE (decl))
729 return true;
730
731 /* Disallow copy-in/out in nested parallel if
732 decl is shared in outer parallel, otherwise
733 each thread could store the shared variable
734 in its own copy-in location, making the
735 variable no longer really shared. */
736 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
737 {
738 omp_context *up;
739
740 for (up = shared_ctx->outer; up; up = up->outer)
741 if (maybe_lookup_decl (decl, up))
742 break;
743
744 if (up && is_taskreg_ctx (up))
745 {
746 tree c;
747
748 for (c = OMP_TASKREG_CLAUSES (up->stmt);
749 c; c = OMP_CLAUSE_CHAIN (c))
750 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
751 && OMP_CLAUSE_DECL (c) == decl)
752 break;
753
754 if (c)
755 return true;
756 }
757 }
758
759 /* For tasks avoid using copy-in/out, unless they are readonly
760 (in which case just copy-in is used). As tasks can be
761 deferred or executed in a different thread, when GOMP_task
762 returns, the task hasn't necessarily terminated. */
763 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
764 {
765 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
766 if (is_gimple_reg (outer))
767 {
768 /* Taking address of OUTER in lower_send_shared_vars
769 might need regimplification of everything that uses the
770 variable. */
771 if (!task_shared_vars)
772 task_shared_vars = BITMAP_ALLOC (NULL);
773 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
774 TREE_ADDRESSABLE (outer) = 1;
775 }
776 return true;
777 }
778 }
779
780 return false;
781 }
782
783 /* Create a new VAR_DECL and copy information from VAR to it. */
784
785 tree
786 copy_var_decl (tree var, tree name, tree type)
787 {
788 tree copy = build_decl (VAR_DECL, name, type);
789
790 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
791 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
792 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
793 DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
794 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
795 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
796 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
797 DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
798 TREE_USED (copy) = 1;
799 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
800
801 return copy;
802 }
803
804 /* Construct a new automatic decl similar to VAR. */
805
806 static tree
807 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
808 {
809 tree copy = copy_var_decl (var, name, type);
810
811 DECL_CONTEXT (copy) = current_function_decl;
812 TREE_CHAIN (copy) = ctx->block_vars;
813 ctx->block_vars = copy;
814
815 return copy;
816 }
817
818 static tree
819 omp_copy_decl_1 (tree var, omp_context *ctx)
820 {
821 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
822 }
823
824 /* Build tree nodes to access the field for VAR on the receiver side. */
825
826 static tree
827 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
828 {
829 tree x, field = lookup_field (var, ctx);
830
831 /* If the receiver record type was remapped in the child function,
832 remap the field into the new record type. */
833 x = maybe_lookup_field (field, ctx);
834 if (x != NULL)
835 field = x;
836
837 x = build_fold_indirect_ref (ctx->receiver_decl);
838 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
839 if (by_ref)
840 x = build_fold_indirect_ref (x);
841
842 return x;
843 }
844
845 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
846 of a parallel, this is a component reference; for workshare constructs
847 this is some variable. */
848
849 static tree
850 build_outer_var_ref (tree var, omp_context *ctx)
851 {
852 tree x;
853
854 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
855 x = var;
856 else if (is_variable_sized (var))
857 {
858 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
859 x = build_outer_var_ref (x, ctx);
860 x = build_fold_indirect_ref (x);
861 }
862 else if (is_taskreg_ctx (ctx))
863 {
864 bool by_ref = use_pointer_for_field (var, NULL);
865 x = build_receiver_ref (var, by_ref, ctx);
866 }
867 else if (ctx->outer)
868 x = lookup_decl (var, ctx->outer);
869 else if (is_reference (var))
870 /* This can happen with orphaned constructs. If var is reference, it is
871 possible it is shared and as such valid. */
872 x = var;
873 else
874 gcc_unreachable ();
875
876 if (is_reference (var))
877 x = build_fold_indirect_ref (x);
878
879 return x;
880 }
881
882 /* Build tree nodes to access the field for VAR on the sender side. */
883
884 static tree
885 build_sender_ref (tree var, omp_context *ctx)
886 {
887 tree field = lookup_sfield (var, ctx);
888 return build3 (COMPONENT_REF, TREE_TYPE (field),
889 ctx->sender_decl, field, NULL);
890 }
891
892 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
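/* MASK is a bit mask (a reading of the code below, kept here for
   reference): bit 0 set installs the field in CTX->record_type and maps
   VAR in CTX->field_map (the receiver side); bit 1 set does the same for
   CTX->srecord_type / CTX->sfield_map (the sender side used by task
   constructs); a MASK of 3 installs it on both sides.  BY_REF makes the
   field a pointer to VAR's type.  */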
893
894 static void
895 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
896 {
897 tree field, type, sfield = NULL_TREE;
898
899 gcc_assert ((mask & 1) == 0
900 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
901 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
902 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
903
904 type = TREE_TYPE (var);
905 if (by_ref)
906 type = build_pointer_type (type);
907 else if ((mask & 3) == 1 && is_reference (var))
908 type = TREE_TYPE (type);
909
910 field = build_decl (FIELD_DECL, DECL_NAME (var), type);
911
912 /* Remember what variable this field was created for. This does have a
913 side effect of making dwarf2out ignore this member, so for helpful
914 debugging we clear it later in delete_omp_context. */
915 DECL_ABSTRACT_ORIGIN (field) = var;
916 if (type == TREE_TYPE (var))
917 {
918 DECL_ALIGN (field) = DECL_ALIGN (var);
919 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
920 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
921 }
922 else
923 DECL_ALIGN (field) = TYPE_ALIGN (type);
924
925 if ((mask & 3) == 3)
926 {
927 insert_field_into_struct (ctx->record_type, field);
928 if (ctx->srecord_type)
929 {
930 sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
931 DECL_ABSTRACT_ORIGIN (sfield) = var;
932 DECL_ALIGN (sfield) = DECL_ALIGN (field);
933 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
934 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
935 insert_field_into_struct (ctx->srecord_type, sfield);
936 }
937 }
938 else
939 {
940 if (ctx->srecord_type == NULL_TREE)
941 {
942 tree t;
943
944 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
945 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
946 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
947 {
948 sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
949 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
950 insert_field_into_struct (ctx->srecord_type, sfield);
951 splay_tree_insert (ctx->sfield_map,
952 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
953 (splay_tree_value) sfield);
954 }
955 }
956 sfield = field;
957 insert_field_into_struct ((mask & 1) ? ctx->record_type
958 : ctx->srecord_type, field);
959 }
960
961 if (mask & 1)
962 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
963 (splay_tree_value) field);
964 if ((mask & 2) && ctx->sfield_map)
965 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
966 (splay_tree_value) sfield);
967 }
968
969 static tree
970 install_var_local (tree var, omp_context *ctx)
971 {
972 tree new_var = omp_copy_decl_1 (var, ctx);
973 insert_decl_map (&ctx->cb, var, new_var);
974 return new_var;
975 }
976
977 /* Adjust the replacement for DECL in CTX for the new context. This means
978 copying the DECL_VALUE_EXPR, and fixing up the type. */
979
980 static void
981 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
982 {
983 tree new_decl, size;
984
985 new_decl = lookup_decl (decl, ctx);
986
987 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
988
989 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
990 && DECL_HAS_VALUE_EXPR_P (decl))
991 {
992 tree ve = DECL_VALUE_EXPR (decl);
993 walk_tree (&ve, copy_body_r, &ctx->cb, NULL);
994 SET_DECL_VALUE_EXPR (new_decl, ve);
995 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
996 }
997
998 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
999 {
1000 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1001 if (size == error_mark_node)
1002 size = TYPE_SIZE (TREE_TYPE (new_decl));
1003 DECL_SIZE (new_decl) = size;
1004
1005 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1006 if (size == error_mark_node)
1007 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1008 DECL_SIZE_UNIT (new_decl) = size;
1009 }
1010 }
1011
1012 /* The callback for remap_decl. Search all containing contexts for a
1013 mapping of the variable; this avoids having to duplicate the splay
1014 tree ahead of time. We know a mapping doesn't already exist in the
1015 given context. Create new mappings to implement default semantics. */
1016
1017 static tree
1018 omp_copy_decl (tree var, copy_body_data *cb)
1019 {
1020 omp_context *ctx = (omp_context *) cb;
1021 tree new_var;
1022
1023 if (TREE_CODE (var) == LABEL_DECL)
1024 {
1025 new_var = create_artificial_label ();
1026 DECL_CONTEXT (new_var) = current_function_decl;
1027 insert_decl_map (&ctx->cb, var, new_var);
1028 return new_var;
1029 }
1030
1031 while (!is_taskreg_ctx (ctx))
1032 {
1033 ctx = ctx->outer;
1034 if (ctx == NULL)
1035 return var;
1036 new_var = maybe_lookup_decl (var, ctx);
1037 if (new_var)
1038 return new_var;
1039 }
1040
1041 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1042 return var;
1043
1044 return error_mark_node;
1045 }
1046
1047
1048 /* Return the parallel region associated with STMT. */
1049
1050 /* Debugging dumps for parallel regions. */
1051 void dump_omp_region (FILE *, struct omp_region *, int);
1052 void debug_omp_region (struct omp_region *);
1053 void debug_all_omp_regions (void);
1054
1055 /* Dump the parallel region tree rooted at REGION. */
1056
1057 void
1058 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1059 {
1060 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1061 tree_code_name[region->type]);
1062
1063 if (region->inner)
1064 dump_omp_region (file, region->inner, indent + 4);
1065
1066 if (region->cont)
1067 {
1068 fprintf (file, "%*sbb %d: OMP_CONTINUE\n", indent, "",
1069 region->cont->index);
1070 }
1071
1072 if (region->exit)
1073 fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "",
1074 region->exit->index);
1075 else
1076 fprintf (file, "%*s[no exit marker]\n", indent, "");
1077
1078 if (region->next)
1079 dump_omp_region (file, region->next, indent);
1080 }
1081
1082 void
1083 debug_omp_region (struct omp_region *region)
1084 {
1085 dump_omp_region (stderr, region, 0);
1086 }
1087
1088 void
1089 debug_all_omp_regions (void)
1090 {
1091 dump_omp_region (stderr, root_omp_region, 0);
1092 }
1093
1094
1095 /* Create a new parallel region starting at STMT inside region PARENT. */
1096
1097 struct omp_region *
1098 new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent)
1099 {
1100 struct omp_region *region = XCNEW (struct omp_region);
1101
1102 region->outer = parent;
1103 region->entry = bb;
1104 region->type = type;
1105
1106 if (parent)
1107 {
1108 /* This is a nested region. Add it to the list of inner
1109 regions in PARENT. */
1110 region->next = parent->inner;
1111 parent->inner = region;
1112 }
1113 else
1114 {
1115 /* This is a toplevel region. Add it to the list of toplevel
1116 regions in ROOT_OMP_REGION. */
1117 region->next = root_omp_region;
1118 root_omp_region = region;
1119 }
1120
1121 return region;
1122 }
1123
1124 /* Release the memory associated with the region tree rooted at REGION. */
1125
1126 static void
1127 free_omp_region_1 (struct omp_region *region)
1128 {
1129 struct omp_region *i, *n;
1130
1131 for (i = region->inner; i ; i = n)
1132 {
1133 n = i->next;
1134 free_omp_region_1 (i);
1135 }
1136
1137 free (region);
1138 }
1139
1140 /* Release the memory for the entire omp region tree. */
1141
1142 void
1143 free_omp_regions (void)
1144 {
1145 struct omp_region *r, *n;
1146 for (r = root_omp_region; r ; r = n)
1147 {
1148 n = r->next;
1149 free_omp_region_1 (r);
1150 }
1151 root_omp_region = NULL;
1152 }
1153
1154
1155 /* Create a new context, with OUTER_CTX being the surrounding context. */
1156
1157 static omp_context *
1158 new_omp_context (tree stmt, omp_context *outer_ctx)
1159 {
1160 omp_context *ctx = XCNEW (omp_context);
1161
1162 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1163 (splay_tree_value) ctx);
1164 ctx->stmt = stmt;
1165
1166 if (outer_ctx)
1167 {
1168 ctx->outer = outer_ctx;
1169 ctx->cb = outer_ctx->cb;
1170 ctx->cb.block = NULL;
1171 ctx->depth = outer_ctx->depth + 1;
1172 }
1173 else
1174 {
1175 ctx->cb.src_fn = current_function_decl;
1176 ctx->cb.dst_fn = current_function_decl;
1177 ctx->cb.src_node = cgraph_node (current_function_decl);
1178 ctx->cb.dst_node = ctx->cb.src_node;
1179 ctx->cb.src_cfun = cfun;
1180 ctx->cb.copy_decl = omp_copy_decl;
1181 ctx->cb.eh_region = -1;
1182 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1183 ctx->depth = 1;
1184 }
1185
1186 ctx->cb.decl_map = pointer_map_create ();
1187
1188 return ctx;
1189 }
1190
1191 static void maybe_catch_exception (tree *stmt_p);
1192
1193 /* Finalize task copyfn. */
1194
1195 static void
1196 finalize_task_copyfn (tree task_stmt)
1197 {
1198 struct function *child_cfun;
1199 tree child_fn, old_fn;
1200
1201 child_fn = OMP_TASK_COPYFN (task_stmt);
1202 if (child_fn == NULL_TREE)
1203 return;
1204
1205 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1206
1207 /* Inform the callgraph about the new function. */
1208 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1209 = cfun->curr_properties;
1210
1211 old_fn = current_function_decl;
1212 push_cfun (child_cfun);
1213 current_function_decl = child_fn;
1214 gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1215 maybe_catch_exception (&BIND_EXPR_BODY (DECL_SAVED_TREE (child_fn)));
1216 pop_cfun ();
1217 current_function_decl = old_fn;
1218
1219 cgraph_add_new_function (child_fn, false);
1220 }
1221
1222 /* Destroy an omp_context data structure. Called through the splay tree
1223 value delete callback. */
1224
1225 static void
1226 delete_omp_context (splay_tree_value value)
1227 {
1228 omp_context *ctx = (omp_context *) value;
1229
1230 pointer_map_destroy (ctx->cb.decl_map);
1231
1232 if (ctx->field_map)
1233 splay_tree_delete (ctx->field_map);
1234 if (ctx->sfield_map)
1235 splay_tree_delete (ctx->sfield_map);
1236
1237 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1238 it produces corrupt debug information. */
1239 if (ctx->record_type)
1240 {
1241 tree t;
1242 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1243 DECL_ABSTRACT_ORIGIN (t) = NULL;
1244 }
1245 if (ctx->srecord_type)
1246 {
1247 tree t;
1248 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1249 DECL_ABSTRACT_ORIGIN (t) = NULL;
1250 }
1251
1252 if (is_task_ctx (ctx))
1253 finalize_task_copyfn (ctx->stmt);
1254
1255 XDELETE (ctx);
1256 }
1257
1258 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1259 context. */
1260
1261 static void
1262 fixup_child_record_type (omp_context *ctx)
1263 {
1264 tree f, type = ctx->record_type;
1265
1266 /* ??? It isn't sufficient to just call remap_type here, because
1267 variably_modified_type_p doesn't work the way we expect for
1268 record types. Testing each field for whether it needs remapping
1269 and creating a new record by hand works, however. */
1270 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1271 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1272 break;
1273 if (f)
1274 {
1275 tree name, new_fields = NULL;
1276
1277 type = lang_hooks.types.make_type (RECORD_TYPE);
1278 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1279 name = build_decl (TYPE_DECL, name, type);
1280 TYPE_NAME (type) = name;
1281
1282 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1283 {
1284 tree new_f = copy_node (f);
1285 DECL_CONTEXT (new_f) = type;
1286 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1287 TREE_CHAIN (new_f) = new_fields;
1288 walk_tree (&DECL_SIZE (new_f), copy_body_r, &ctx->cb, NULL);
1289 walk_tree (&DECL_SIZE_UNIT (new_f), copy_body_r, &ctx->cb, NULL);
1290 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_body_r, &ctx->cb, NULL);
1291 new_fields = new_f;
1292
1293 /* Arrange to be able to look up the receiver field
1294 given the sender field. */
1295 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1296 (splay_tree_value) new_f);
1297 }
1298 TYPE_FIELDS (type) = nreverse (new_fields);
1299 layout_type (type);
1300 }
1301
1302 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1303 }
1304
1305 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1306 specified by CLAUSES. */
1307
1308 static void
1309 scan_sharing_clauses (tree clauses, omp_context *ctx)
1310 {
1311 tree c, decl;
1312 bool scan_array_reductions = false;
1313
1314 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1315 {
1316 bool by_ref;
1317
1318 switch (OMP_CLAUSE_CODE (c))
1319 {
1320 case OMP_CLAUSE_PRIVATE:
1321 decl = OMP_CLAUSE_DECL (c);
1322 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1323 goto do_private;
1324 else if (!is_variable_sized (decl))
1325 install_var_local (decl, ctx);
1326 break;
1327
1328 case OMP_CLAUSE_SHARED:
1329 gcc_assert (is_taskreg_ctx (ctx));
1330 decl = OMP_CLAUSE_DECL (c);
1331 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1332 || !is_variable_sized (decl));
1333 /* Global variables don't need to be copied,
1334 the receiver side will use them directly. */
1335 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1336 break;
1337 by_ref = use_pointer_for_field (decl, ctx);
1338 if (! TREE_READONLY (decl)
1339 || TREE_ADDRESSABLE (decl)
1340 || by_ref
1341 || is_reference (decl))
1342 {
1343 install_var_field (decl, by_ref, 3, ctx);
1344 install_var_local (decl, ctx);
1345 break;
1346 }
1347 /* We don't need to copy const scalar vars back. */
1348 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1349 goto do_private;
1350
1351 case OMP_CLAUSE_LASTPRIVATE:
1352 /* Let the corresponding firstprivate clause create
1353 the variable. */
1354 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1355 break;
1356 /* FALLTHRU */
1357
1358 case OMP_CLAUSE_FIRSTPRIVATE:
1359 case OMP_CLAUSE_REDUCTION:
1360 decl = OMP_CLAUSE_DECL (c);
1361 do_private:
1362 if (is_variable_sized (decl))
1363 {
1364 if (is_task_ctx (ctx))
1365 install_var_field (decl, false, 1, ctx);
1366 break;
1367 }
1368 else if (is_taskreg_ctx (ctx))
1369 {
1370 bool global
1371 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1372 by_ref = use_pointer_for_field (decl, NULL);
1373
1374 if (is_task_ctx (ctx)
1375 && (global || by_ref || is_reference (decl)))
1376 {
1377 install_var_field (decl, false, 1, ctx);
1378 if (!global)
1379 install_var_field (decl, by_ref, 2, ctx);
1380 }
1381 else if (!global)
1382 install_var_field (decl, by_ref, 3, ctx);
1383 }
1384 install_var_local (decl, ctx);
1385 break;
1386
1387 case OMP_CLAUSE_COPYPRIVATE:
1388 if (ctx->outer)
1389 scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer);
1390 /* FALLTHRU */
1391
1392 case OMP_CLAUSE_COPYIN:
1393 decl = OMP_CLAUSE_DECL (c);
1394 by_ref = use_pointer_for_field (decl, NULL);
1395 install_var_field (decl, by_ref, 3, ctx);
1396 break;
1397
1398 case OMP_CLAUSE_DEFAULT:
1399 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1400 break;
1401
1402 case OMP_CLAUSE_IF:
1403 case OMP_CLAUSE_NUM_THREADS:
1404 case OMP_CLAUSE_SCHEDULE:
1405 if (ctx->outer)
1406 scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1407 break;
1408
1409 case OMP_CLAUSE_NOWAIT:
1410 case OMP_CLAUSE_ORDERED:
1411 case OMP_CLAUSE_COLLAPSE:
1412 case OMP_CLAUSE_UNTIED:
1413 break;
1414
1415 default:
1416 gcc_unreachable ();
1417 }
1418 }
1419
1420 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1421 {
1422 switch (OMP_CLAUSE_CODE (c))
1423 {
1424 case OMP_CLAUSE_LASTPRIVATE:
1425 /* Let the corresponding firstprivate clause create
1426 the variable. */
1427 if (OMP_CLAUSE_LASTPRIVATE_STMT (c))
1428 scan_array_reductions = true;
1429 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1430 break;
1431 /* FALLTHRU */
1432
1433 case OMP_CLAUSE_PRIVATE:
1434 case OMP_CLAUSE_FIRSTPRIVATE:
1435 case OMP_CLAUSE_REDUCTION:
1436 decl = OMP_CLAUSE_DECL (c);
1437 if (is_variable_sized (decl))
1438 install_var_local (decl, ctx);
1439 fixup_remapped_decl (decl, ctx,
1440 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1441 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1442 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1443 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1444 scan_array_reductions = true;
1445 break;
1446
1447 case OMP_CLAUSE_SHARED:
1448 decl = OMP_CLAUSE_DECL (c);
1449 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1450 fixup_remapped_decl (decl, ctx, false);
1451 break;
1452
1453 case OMP_CLAUSE_COPYPRIVATE:
1454 case OMP_CLAUSE_COPYIN:
1455 case OMP_CLAUSE_DEFAULT:
1456 case OMP_CLAUSE_IF:
1457 case OMP_CLAUSE_NUM_THREADS:
1458 case OMP_CLAUSE_SCHEDULE:
1459 case OMP_CLAUSE_NOWAIT:
1460 case OMP_CLAUSE_ORDERED:
1461 case OMP_CLAUSE_COLLAPSE:
1462 case OMP_CLAUSE_UNTIED:
1463 break;
1464
1465 default:
1466 gcc_unreachable ();
1467 }
1468 }
1469
1470 if (scan_array_reductions)
1471 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1472 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1473 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1474 {
1475 scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx);
1476 scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx);
1477 }
1478 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1479 && OMP_CLAUSE_LASTPRIVATE_STMT (c))
1480 scan_omp (&OMP_CLAUSE_LASTPRIVATE_STMT (c), ctx);
1481 }
1482
1483 /* Create a new name for the omp child function.  Returns an identifier.  */
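/* For a containing function foo this yields names along the lines of
   foo.omp_fn.0, foo.omp_fn.1, ... (the leading '_' of the suffix becomes
   '.' or '$' unless the target allows neither), or foo.omp_cpyfn.N for a
   task copy function; the exact spelling is up to
   ASM_FORMAT_PRIVATE_NAME.  */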
1484
1485 static GTY(()) unsigned int tmp_ompfn_id_num;
1486
1487 static tree
1488 create_omp_child_function_name (bool task_copy)
1489 {
1490 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1491 size_t len = IDENTIFIER_LENGTH (name);
1492 char *tmp_name, *prefix;
1493 const char *suffix;
1494
1495 suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
1496 prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
1497 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1498 strcpy (prefix + len, suffix);
1499 #ifndef NO_DOT_IN_LABEL
1500 prefix[len] = '.';
1501 #elif !defined NO_DOLLAR_IN_LABEL
1502 prefix[len] = '$';
1503 #endif
1504 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1505 return get_identifier (tmp_name);
1506 }
1507
1508 /* Build a decl for the omp child function. It will not contain a body
1509 yet, just the bare decl. */
1510
1511 static void
1512 create_omp_child_function (omp_context *ctx, bool task_copy)
1513 {
1514 tree decl, type, name, t;
1515
1516 name = create_omp_child_function_name (task_copy);
1517 if (task_copy)
1518 type = build_function_type_list (void_type_node, ptr_type_node,
1519 ptr_type_node, NULL_TREE);
1520 else
1521 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1522
1523 decl = build_decl (FUNCTION_DECL, name, type);
1524 decl = lang_hooks.decls.pushdecl (decl);
1525
1526 if (!task_copy)
1527 ctx->cb.dst_fn = decl;
1528 else
1529 OMP_TASK_COPYFN (ctx->stmt) = decl;
1530
1531 TREE_STATIC (decl) = 1;
1532 TREE_USED (decl) = 1;
1533 DECL_ARTIFICIAL (decl) = 1;
1534 DECL_IGNORED_P (decl) = 0;
1535 TREE_PUBLIC (decl) = 0;
1536 DECL_UNINLINABLE (decl) = 1;
1537 DECL_EXTERNAL (decl) = 0;
1538 DECL_CONTEXT (decl) = NULL_TREE;
1539 DECL_INITIAL (decl) = make_node (BLOCK);
1540
1541 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1542 DECL_ARTIFICIAL (t) = 1;
1543 DECL_IGNORED_P (t) = 1;
1544 DECL_RESULT (decl) = t;
1545
1546 t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1547 DECL_ARTIFICIAL (t) = 1;
1548 DECL_ARG_TYPE (t) = ptr_type_node;
1549 DECL_CONTEXT (t) = current_function_decl;
1550 TREE_USED (t) = 1;
1551 DECL_ARGUMENTS (decl) = t;
1552 if (!task_copy)
1553 ctx->receiver_decl = t;
1554 else
1555 {
1556 t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
1557 ptr_type_node);
1558 DECL_ARTIFICIAL (t) = 1;
1559 DECL_ARG_TYPE (t) = ptr_type_node;
1560 DECL_CONTEXT (t) = current_function_decl;
1561 TREE_USED (t) = 1;
1562 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1563 DECL_ARGUMENTS (decl) = t;
1564 }
1565
1566 /* Allocate memory for the function structure. The call to
1567 allocate_struct_function clobbers CFUN, so we need to restore
1568 it afterward. */
1569 push_struct_function (decl);
1570 DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
1571 cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
1572 pop_cfun ();
1573 }
1574
1575
1576 /* Scan an OpenMP parallel directive. */
1577
1578 static void
1579 scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
1580 {
1581 omp_context *ctx;
1582 tree name;
1583
1584 /* Ignore parallel directives with empty bodies, unless there
1585 are copyin clauses. */
1586 if (optimize > 0
1587 && empty_body_p (OMP_PARALLEL_BODY (*stmt_p))
1588 && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL)
1589 {
1590 *stmt_p = build_empty_stmt ();
1591 return;
1592 }
1593
1594 ctx = new_omp_context (*stmt_p, outer_ctx);
1595 if (taskreg_nesting_level > 1)
1596 ctx->is_nested = true;
1597 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1598 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1599 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1600 name = create_tmp_var_name (".omp_data_s");
1601 name = build_decl (TYPE_DECL, name, ctx->record_type);
1602 TYPE_NAME (ctx->record_type) = name;
1603 create_omp_child_function (ctx, false);
1604 OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn;
1605
1606 scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
1607 scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx);
1608
1609 if (TYPE_FIELDS (ctx->record_type) == NULL)
1610 ctx->record_type = ctx->receiver_decl = NULL;
1611 else
1612 {
1613 layout_type (ctx->record_type);
1614 fixup_child_record_type (ctx);
1615 }
1616 }
1617
1618 /* Scan an OpenMP task directive. */
1619
1620 static void
1621 scan_omp_task (tree *stmt_p, omp_context *outer_ctx)
1622 {
1623 omp_context *ctx;
1624 tree name;
1625
1626 /* Ignore task directives with empty bodies. */
1627 if (optimize > 0
1628 && empty_body_p (OMP_TASK_BODY (*stmt_p)))
1629 {
1630 *stmt_p = build_empty_stmt ();
1631 return;
1632 }
1633
1634 ctx = new_omp_context (*stmt_p, outer_ctx);
1635 if (taskreg_nesting_level > 1)
1636 ctx->is_nested = true;
1637 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1638 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1639 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1640 name = create_tmp_var_name (".omp_data_s");
1641 name = build_decl (TYPE_DECL, name, ctx->record_type);
1642 TYPE_NAME (ctx->record_type) = name;
1643 create_omp_child_function (ctx, false);
1644 OMP_TASK_FN (*stmt_p) = ctx->cb.dst_fn;
1645
1646 scan_sharing_clauses (OMP_TASK_CLAUSES (*stmt_p), ctx);
1647
1648 if (ctx->srecord_type)
1649 {
1650 name = create_tmp_var_name (".omp_data_a");
1651 name = build_decl (TYPE_DECL, name, ctx->srecord_type);
1652 TYPE_NAME (ctx->srecord_type) = name;
1653 create_omp_child_function (ctx, true);
1654 }
1655
1656 scan_omp (&OMP_TASK_BODY (*stmt_p), ctx);
1657
1658 if (TYPE_FIELDS (ctx->record_type) == NULL)
1659 {
1660 ctx->record_type = ctx->receiver_decl = NULL;
1661 OMP_TASK_ARG_SIZE (*stmt_p)
1662 = build_int_cst (long_integer_type_node, 0);
1663 OMP_TASK_ARG_ALIGN (*stmt_p)
1664 = build_int_cst (long_integer_type_node, 1);
1665 }
1666 else
1667 {
1668 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1669 /* Move VLA fields to the end. */
1670 p = &TYPE_FIELDS (ctx->record_type);
1671 while (*p)
1672 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1673 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1674 {
1675 *q = *p;
1676 *p = TREE_CHAIN (*p);
1677 TREE_CHAIN (*q) = NULL_TREE;
1678 q = &TREE_CHAIN (*q);
1679 }
1680 else
1681 p = &TREE_CHAIN (*p);
1682 *p = vla_fields;
1683 layout_type (ctx->record_type);
1684 fixup_child_record_type (ctx);
1685 if (ctx->srecord_type)
1686 layout_type (ctx->srecord_type);
1687 OMP_TASK_ARG_SIZE (*stmt_p)
1688 = fold_convert (long_integer_type_node,
1689 TYPE_SIZE_UNIT (ctx->record_type));
1690 OMP_TASK_ARG_ALIGN (*stmt_p)
1691 = build_int_cst (long_integer_type_node,
1692 TYPE_ALIGN_UNIT (ctx->record_type));
1693 }
1694 }
1695
1696
1697 /* Scan an OpenMP loop directive. */
1698
1699 static void
1700 scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
1701 {
1702 omp_context *ctx;
1703 tree stmt;
1704 int i;
1705
1706 stmt = *stmt_p;
1707 ctx = new_omp_context (stmt, outer_ctx);
1708
1709 scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx);
1710
1711 scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
1712 for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
1713 {
1714 scan_omp (&TREE_VEC_ELT (OMP_FOR_INIT (stmt), i), ctx);
1715 scan_omp (&TREE_VEC_ELT (OMP_FOR_COND (stmt), i), ctx);
1716 scan_omp (&TREE_VEC_ELT (OMP_FOR_INCR (stmt), i), ctx);
1717 }
1718 scan_omp (&OMP_FOR_BODY (stmt), ctx);
1719 }
1720
1721 /* Scan an OpenMP sections directive. */
1722
1723 static void
1724 scan_omp_sections (tree *stmt_p, omp_context *outer_ctx)
1725 {
1726 tree stmt;
1727 omp_context *ctx;
1728
1729 stmt = *stmt_p;
1730 ctx = new_omp_context (stmt, outer_ctx);
1731 scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx);
1732 scan_omp (&OMP_SECTIONS_BODY (stmt), ctx);
1733 }
1734
1735 /* Scan an OpenMP single directive. */
1736
1737 static void
1738 scan_omp_single (tree *stmt_p, omp_context *outer_ctx)
1739 {
1740 tree stmt = *stmt_p;
1741 omp_context *ctx;
1742 tree name;
1743
1744 ctx = new_omp_context (stmt, outer_ctx);
1745 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1746 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1747 name = create_tmp_var_name (".omp_copy_s");
1748 name = build_decl (TYPE_DECL, name, ctx->record_type);
1749 TYPE_NAME (ctx->record_type) = name;
1750
1751 scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx);
1752 scan_omp (&OMP_SINGLE_BODY (stmt), ctx);
1753
1754 if (TYPE_FIELDS (ctx->record_type) == NULL)
1755 ctx->record_type = NULL;
1756 else
1757 layout_type (ctx->record_type);
1758 }
1759
1760
1761 /* Check OpenMP nesting restrictions. */
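/* For example, a work-sharing region closely nested inside another
   work-sharing region with no intervening parallel, as in

	#pragma omp single
	  {
	    #pragma omp for
	    for (i = 0; i < n; i++)
	      body;
	  }

   is diagnosed by the loops below with the "work-sharing region may not
   be closely nested inside of work-sharing, critical, ordered, master or
   explicit task region" warning.  */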
1762 static void
1763 check_omp_nesting_restrictions (tree t, omp_context *ctx)
1764 {
1765 switch (TREE_CODE (t))
1766 {
1767 case OMP_FOR:
1768 case OMP_SECTIONS:
1769 case OMP_SINGLE:
1770 case CALL_EXPR:
1771 for (; ctx != NULL; ctx = ctx->outer)
1772 switch (TREE_CODE (ctx->stmt))
1773 {
1774 case OMP_FOR:
1775 case OMP_SECTIONS:
1776 case OMP_SINGLE:
1777 case OMP_ORDERED:
1778 case OMP_MASTER:
1779 case OMP_TASK:
1780 if (TREE_CODE (t) == CALL_EXPR)
1781 {
1782 warning (0, "barrier region may not be closely nested inside "
1783 "of work-sharing, critical, ordered, master or "
1784 "explicit task region");
1785 return;
1786 }
1787 warning (0, "work-sharing region may not be closely nested inside "
1788 "of work-sharing, critical, ordered, master or explicit "
1789 "task region");
1790 return;
1791 case OMP_PARALLEL:
1792 return;
1793 default:
1794 break;
1795 }
1796 break;
1797 case OMP_MASTER:
1798 for (; ctx != NULL; ctx = ctx->outer)
1799 switch (TREE_CODE (ctx->stmt))
1800 {
1801 case OMP_FOR:
1802 case OMP_SECTIONS:
1803 case OMP_SINGLE:
1804 case OMP_TASK:
1805 warning (0, "master region may not be closely nested inside "
1806 "of work-sharing or explicit task region");
1807 return;
1808 case OMP_PARALLEL:
1809 return;
1810 default:
1811 break;
1812 }
1813 break;
1814 case OMP_ORDERED:
1815 for (; ctx != NULL; ctx = ctx->outer)
1816 switch (TREE_CODE (ctx->stmt))
1817 {
1818 case OMP_CRITICAL:
1819 case OMP_TASK:
1820 warning (0, "ordered region may not be closely nested inside "
1821 "of critical or explicit task region");
1822 return;
1823 case OMP_FOR:
1824 if (find_omp_clause (OMP_CLAUSES (ctx->stmt),
1825 OMP_CLAUSE_ORDERED) == NULL)
1826 warning (0, "ordered region must be closely nested inside "
1827 "a loop region with an ordered clause");
1828 return;
1829 case OMP_PARALLEL:
1830 return;
1831 default:
1832 break;
1833 }
1834 break;
1835 case OMP_CRITICAL:
1836 for (; ctx != NULL; ctx = ctx->outer)
1837 if (TREE_CODE (ctx->stmt) == OMP_CRITICAL
1838 && OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt))
1839 {
1840 warning (0, "critical region may not be nested inside a critical "
1841 "region with the same name");
1842 return;
1843 }
1844 break;
1845 default:
1846 break;
1847 }
1848 }
1849
1850
1851 /* Callback for walk_stmts used to scan for OpenMP directives at TP. */
1852
1853 static tree
1854 scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
1855 {
1856 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1857 omp_context *ctx = (omp_context *) wi->info;
1858 tree t = *tp;
1859
1860 if (EXPR_HAS_LOCATION (t))
1861 input_location = EXPR_LOCATION (t);
1862
1863 /* Check the OpenMP nesting restrictions. */
1864 if (ctx != NULL)
1865 {
1866 if (OMP_DIRECTIVE_P (t))
1867 check_omp_nesting_restrictions (t, ctx);
1868 else if (TREE_CODE (t) == CALL_EXPR)
1869 {
1870 tree fndecl = get_callee_fndecl (t);
1871 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1872 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1873 check_omp_nesting_restrictions (t, ctx);
1874 }
1875 }
1876
1877 *walk_subtrees = 0;
1878 switch (TREE_CODE (t))
1879 {
1880 case OMP_PARALLEL:
1881 taskreg_nesting_level++;
1882 scan_omp_parallel (tp, ctx);
1883 taskreg_nesting_level--;
1884 break;
1885
1886 case OMP_TASK:
1887 taskreg_nesting_level++;
1888 scan_omp_task (tp, ctx);
1889 taskreg_nesting_level--;
1890 break;
1891
1892 case OMP_FOR:
1893 scan_omp_for (tp, ctx);
1894 break;
1895
1896 case OMP_SECTIONS:
1897 scan_omp_sections (tp, ctx);
1898 break;
1899
1900 case OMP_SINGLE:
1901 scan_omp_single (tp, ctx);
1902 break;
1903
1904 case OMP_SECTION:
1905 case OMP_MASTER:
1906 case OMP_ORDERED:
1907 case OMP_CRITICAL:
1908 ctx = new_omp_context (*tp, ctx);
1909 scan_omp (&OMP_BODY (*tp), ctx);
1910 break;
1911
1912 case BIND_EXPR:
1913 {
1914 tree var;
1915 *walk_subtrees = 1;
1916
1917 for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var))
1918 insert_decl_map (&ctx->cb, var, var);
1919 }
1920 break;
1921
1922 case VAR_DECL:
1923 case PARM_DECL:
1924 case LABEL_DECL:
1925 case RESULT_DECL:
1926 if (ctx)
1927 *tp = remap_decl (t, &ctx->cb);
1928 break;
1929
1930 default:
1931 if (ctx && TYPE_P (t))
1932 *tp = remap_type (t, &ctx->cb);
1933 else if (!DECL_P (t))
1934 *walk_subtrees = 1;
1935 break;
1936 }
1937
1938 return NULL_TREE;
1939 }
1940
1941
1942 /* Scan all the statements starting at STMT_P. CTX contains context
1943 information about the OpenMP directives and clauses found during
1944 the scan. */
1945
1946 static void
1947 scan_omp (tree *stmt_p, omp_context *ctx)
1948 {
1949 location_t saved_location;
1950 struct walk_stmt_info wi;
1951
1952 memset (&wi, 0, sizeof (wi));
1953 wi.callback = scan_omp_1;
1954 wi.info = ctx;
1955 wi.want_bind_expr = (ctx != NULL);
1956 wi.want_locations = true;
1957
1958 saved_location = input_location;
1959 walk_stmts (&wi, stmt_p);
1960 input_location = saved_location;
1961 }
1962 \f
1963 /* Re-gimplification and code generation routines. */
1964
1965 /* Build a call to GOMP_barrier. */
1966
1967 static tree
1968 build_omp_barrier (void)
1969 {
1970 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
1971 }
1972
1973 /* If a context was created for STMT when it was scanned, return it. */
1974
1975 static omp_context *
1976 maybe_lookup_ctx (tree stmt)
1977 {
1978 splay_tree_node n;
1979 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
1980 return n ? (omp_context *) n->value : NULL;
1981 }
1982
1983
1984 /* Find the mapping for DECL in CTX or the immediately enclosing
1985 context that has a mapping for DECL.
1986
1987 If CTX is a nested parallel directive, we may have to use the decl
1988 mappings created in CTX's parent context. Suppose that we have the
1989    following parallel nesting (variable UIDs shown for clarity):
1990
1991 iD.1562 = 0;
1992 #omp parallel shared(iD.1562) -> outer parallel
1993 iD.1562 = iD.1562 + 1;
1994
1995 #omp parallel shared (iD.1562) -> inner parallel
1996 iD.1562 = iD.1562 - 1;
1997
1998 Each parallel structure will create a distinct .omp_data_s structure
1999 for copying iD.1562 in/out of the directive:
2000
2001 outer parallel .omp_data_s.1.i -> iD.1562
2002 inner parallel .omp_data_s.2.i -> iD.1562
2003
2004 A shared variable mapping will produce a copy-out operation before
2005 the parallel directive and a copy-in operation after it. So, in
2006 this case we would have:
2007
2008 iD.1562 = 0;
2009 .omp_data_o.1.i = iD.1562;
2010 #omp parallel shared(iD.1562) -> outer parallel
2011 .omp_data_i.1 = &.omp_data_o.1
2012 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2013
2014 .omp_data_o.2.i = iD.1562; -> **
2015 #omp parallel shared(iD.1562) -> inner parallel
2016 .omp_data_i.2 = &.omp_data_o.2
2017 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2018
2019
2020 ** This is a problem. The symbol iD.1562 cannot be referenced
2021 inside the body of the outer parallel region. But since we are
2022 emitting this copy operation while expanding the inner parallel
2023 directive, we need to access the CTX structure of the outer
2024 parallel directive to get the correct mapping:
2025
2026 .omp_data_o.2.i = .omp_data_i.1->i
2027
2028 Since there may be other workshare or parallel directives enclosing
2029 the parallel directive, it may be necessary to walk up the context
2030 parent chain. This is not a problem in general because nested
2031 parallelism happens only rarely. */
2032
2033 static tree
2034 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2035 {
2036 tree t;
2037 omp_context *up;
2038
2039 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2040 t = maybe_lookup_decl (decl, up);
2041
2042 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2043
2044 return t ? t : decl;
2045 }
2046
2047
2048 /* Similar to lookup_decl_in_outer_ctx, but do not assert that a mapping
2049    exists in an enclosing context; just return DECL if none is found.  */
2050
2051 static tree
2052 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2053 {
2054 tree t = NULL;
2055 omp_context *up;
2056
2057 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2058 t = maybe_lookup_decl (decl, up);
2059
2060 return t ? t : decl;
2061 }
2062
2063
2064 /* Construct the initialization value for reduction CLAUSE. */
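/* For reference, an illustrative sketch of the values chosen below
   (integral TYPE unless noted):

	+  -  |  ^  ||  !=	0
	*  &&  ==		1
	&			~0  (all bits set)
	max			minimum of TYPE  (-INF for floats when honored)
	min			maximum of TYPE  (+INF for floats when honored)  */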
2065
2066 tree
2067 omp_reduction_init (tree clause, tree type)
2068 {
2069 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2070 {
2071 case PLUS_EXPR:
2072 case MINUS_EXPR:
2073 case BIT_IOR_EXPR:
2074 case BIT_XOR_EXPR:
2075 case TRUTH_OR_EXPR:
2076 case TRUTH_ORIF_EXPR:
2077 case TRUTH_XOR_EXPR:
2078 case NE_EXPR:
2079 return fold_convert (type, integer_zero_node);
2080
2081 case MULT_EXPR:
2082 case TRUTH_AND_EXPR:
2083 case TRUTH_ANDIF_EXPR:
2084 case EQ_EXPR:
2085 return fold_convert (type, integer_one_node);
2086
2087 case BIT_AND_EXPR:
2088 return fold_convert (type, integer_minus_one_node);
2089
2090 case MAX_EXPR:
2091 if (SCALAR_FLOAT_TYPE_P (type))
2092 {
2093 REAL_VALUE_TYPE max, min;
2094 if (HONOR_INFINITIES (TYPE_MODE (type)))
2095 {
2096 real_inf (&max);
2097 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2098 }
2099 else
2100 real_maxval (&min, 1, TYPE_MODE (type));
2101 return build_real (type, min);
2102 }
2103 else
2104 {
2105 gcc_assert (INTEGRAL_TYPE_P (type));
2106 return TYPE_MIN_VALUE (type);
2107 }
2108
2109 case MIN_EXPR:
2110 if (SCALAR_FLOAT_TYPE_P (type))
2111 {
2112 REAL_VALUE_TYPE max;
2113 if (HONOR_INFINITIES (TYPE_MODE (type)))
2114 real_inf (&max);
2115 else
2116 real_maxval (&max, 0, TYPE_MODE (type));
2117 return build_real (type, max);
2118 }
2119 else
2120 {
2121 gcc_assert (INTEGRAL_TYPE_P (type));
2122 return TYPE_MAX_VALUE (type);
2123 }
2124
2125 default:
2126 gcc_unreachable ();
2127 }
2128 }
2129
2130 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2131 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2132 private variables. Initialization statements go in ILIST, while calls
2133 to destructors go in DLIST. */
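/* A rough illustration (variable names invented): for

	#pragma omp parallel firstprivate (a) copyin (tp)

   ILIST receives roughly

	a.1 = .omp_data_i->a;
	if (__builtin_omp_get_thread_num () != 0)
	  tp = .omp_data_i->tp;

   while DLIST collects whatever destructor calls the language hooks
   request for the privatized copies.  */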
2134
2135 static void
2136 lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
2137 omp_context *ctx)
2138 {
2139 tree_stmt_iterator diter;
2140 tree c, dtor, copyin_seq, x, ptr;
2141 bool copyin_by_ref = false;
2142 bool lastprivate_firstprivate = false;
2143 int pass;
2144
2145 *dlist = alloc_stmt_list ();
2146 diter = tsi_start (*dlist);
2147 copyin_seq = NULL;
2148
2149 /* Do all the fixed sized types in the first pass, and the variable sized
2150 types in the second pass. This makes sure that the scalar arguments to
2151 the variable sized types are processed before we use them in the
2152 variable sized operations. */
2153 for (pass = 0; pass < 2; ++pass)
2154 {
2155 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2156 {
2157 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2158 tree var, new_var;
2159 bool by_ref;
2160
2161 switch (c_kind)
2162 {
2163 case OMP_CLAUSE_PRIVATE:
2164 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2165 continue;
2166 break;
2167 case OMP_CLAUSE_SHARED:
2168 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2169 {
2170 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2171 continue;
2172 }
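	      /* FALLTHRU */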
2173 case OMP_CLAUSE_FIRSTPRIVATE:
2174 case OMP_CLAUSE_COPYIN:
2175 case OMP_CLAUSE_REDUCTION:
2176 break;
2177 case OMP_CLAUSE_LASTPRIVATE:
2178 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2179 {
2180 lastprivate_firstprivate = true;
2181 if (pass != 0)
2182 continue;
2183 }
2184 break;
2185 default:
2186 continue;
2187 }
2188
2189 new_var = var = OMP_CLAUSE_DECL (c);
2190 if (c_kind != OMP_CLAUSE_COPYIN)
2191 new_var = lookup_decl (var, ctx);
2192
2193 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2194 {
2195 if (pass != 0)
2196 continue;
2197 }
2198 else if (is_variable_sized (var))
2199 {
2200 /* For variable sized types, we need to allocate the
2201 actual storage here. Call alloca and store the
2202 result in the pointer decl that we created elsewhere. */
2203 if (pass == 0)
2204 continue;
2205
2206 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2207 {
2208 ptr = DECL_VALUE_EXPR (new_var);
2209 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2210 ptr = TREE_OPERAND (ptr, 0);
2211 gcc_assert (DECL_P (ptr));
2212 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2213 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2214 x = fold_convert (TREE_TYPE (ptr), x);
2215 x = build_gimple_modify_stmt (ptr, x);
2216 gimplify_and_add (x, ilist);
2217 }
2218 }
2219 else if (is_reference (var))
2220 {
2221 /* For references that are being privatized for Fortran,
2222 allocate new backing storage for the new pointer
2223 variable. This allows us to avoid changing all the
2224 code that expects a pointer to something that expects
2225 a direct variable. Note that this doesn't apply to
2226 C++, since reference types are disallowed in data
2227 sharing clauses there, except for NRV optimized
2228 return values. */
2229 if (pass == 0)
2230 continue;
2231
2232 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2233 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2234 {
2235 x = build_receiver_ref (var, false, ctx);
2236 x = build_fold_addr_expr (x);
2237 }
2238 else if (TREE_CONSTANT (x))
2239 {
2240 const char *name = NULL;
2241 if (DECL_NAME (var))
2242 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2243
2244 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2245 name);
2246 gimple_add_tmp_var (x);
2247 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
2248 }
2249 else
2250 {
2251 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2252 x = fold_convert (TREE_TYPE (new_var), x);
2253 }
2254
2255 x = build_gimple_modify_stmt (new_var, x);
2256 gimplify_and_add (x, ilist);
2257
2258 new_var = build_fold_indirect_ref (new_var);
2259 }
2260 else if (c_kind == OMP_CLAUSE_REDUCTION
2261 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2262 {
2263 if (pass == 0)
2264 continue;
2265 }
2266 else if (pass != 0)
2267 continue;
2268
2269 switch (OMP_CLAUSE_CODE (c))
2270 {
2271 case OMP_CLAUSE_SHARED:
2272 /* Shared global vars are just accessed directly. */
2273 if (is_global_var (new_var))
2274 break;
2275 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2276 needs to be delayed until after fixup_child_record_type so
2277 that we get the correct type during the dereference. */
2278 by_ref = use_pointer_for_field (var, ctx);
2279 x = build_receiver_ref (var, by_ref, ctx);
2280 SET_DECL_VALUE_EXPR (new_var, x);
2281 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2282
2283 /* ??? If VAR is not passed by reference, and the variable
2284 hasn't been initialized yet, then we'll get a warning for
2285 the store into the omp_data_s structure. Ideally, we'd be
2286 able to notice this and not store anything at all, but
2287 we're generating code too early. Suppress the warning. */
2288 if (!by_ref)
2289 TREE_NO_WARNING (var) = 1;
2290 break;
2291
2292 case OMP_CLAUSE_LASTPRIVATE:
2293 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2294 break;
2295 /* FALLTHRU */
2296
2297 case OMP_CLAUSE_PRIVATE:
2298 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2299 x = build_outer_var_ref (var, ctx);
2300 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2301 {
2302 if (is_task_ctx (ctx))
2303 x = build_receiver_ref (var, false, ctx);
2304 else
2305 x = build_outer_var_ref (var, ctx);
2306 }
2307 else
2308 x = NULL;
2309 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2310 if (x)
2311 gimplify_and_add (x, ilist);
2312 /* FALLTHRU */
2313
2314 do_dtor:
2315 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2316 if (x)
2317 {
2318 dtor = x;
2319 gimplify_stmt (&dtor);
2320 tsi_link_before (&diter, dtor, TSI_SAME_STMT);
2321 }
2322 break;
2323
2324 case OMP_CLAUSE_FIRSTPRIVATE:
2325 if (is_task_ctx (ctx))
2326 {
2327 if (is_reference (var) || is_variable_sized (var))
2328 goto do_dtor;
2329 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2330 ctx))
2331 || use_pointer_for_field (var, NULL))
2332 {
2333 x = build_receiver_ref (var, false, ctx);
2334 SET_DECL_VALUE_EXPR (new_var, x);
2335 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2336 goto do_dtor;
2337 }
2338 }
2339 x = build_outer_var_ref (var, ctx);
2340 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2341 gimplify_and_add (x, ilist);
2342 goto do_dtor;
2343 break;
2344
2345 case OMP_CLAUSE_COPYIN:
2346 by_ref = use_pointer_for_field (var, NULL);
2347 x = build_receiver_ref (var, by_ref, ctx);
2348 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2349 append_to_statement_list (x, &copyin_seq);
2350 copyin_by_ref |= by_ref;
2351 break;
2352
2353 case OMP_CLAUSE_REDUCTION:
2354 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2355 {
2356 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2357 x = build_outer_var_ref (var, ctx);
2358
2359 if (is_reference (var))
2360 x = build_fold_addr_expr (x);
2361 SET_DECL_VALUE_EXPR (placeholder, x);
2362 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2363 gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
2364 OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
2365 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2366 }
2367 else
2368 {
2369 x = omp_reduction_init (c, TREE_TYPE (new_var));
2370 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2371 x = build_gimple_modify_stmt (new_var, x);
2372 gimplify_and_add (x, ilist);
2373 }
2374 break;
2375
2376 default:
2377 gcc_unreachable ();
2378 }
2379 }
2380 }
2381
2382 /* The copyin sequence is not to be executed by the main thread, since
2383      that would result in self-copies.  For scalars the self-copy may not
2384      be observable, but for C++ operator= it certainly is.  */
2385 if (copyin_seq)
2386 {
2387 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2388 x = build2 (NE_EXPR, boolean_type_node, x,
2389 build_int_cst (TREE_TYPE (x), 0));
2390 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2391 gimplify_and_add (x, ilist);
2392 }
2393
2394 /* If any copyin variable is passed by reference, we must ensure the
2395 master thread doesn't modify it before it is copied over in all
2396 threads. Similarly for variables in both firstprivate and
2397 lastprivate clauses we need to ensure the lastprivate copying
2398 happens after firstprivate copying in all threads. */
2399 if (copyin_by_ref || lastprivate_firstprivate)
2400 gimplify_and_add (build_omp_barrier (), ilist);
2401 }
2402
2403
2404 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2405 both parallel and workshare constructs. PREDICATE may be NULL if it's
2406 always true. */
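/* An illustrative sketch: lastprivate (x) on a loop adds to STMT_LIST
   roughly

	if (PREDICATE)	<- "this thread ran the sequentially last iteration"
	  x = x.1;	<- copy the privatized value back out

   where x.1 stands for the private copy looked up in CTX.  */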
2407
2408 static void
2409 lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
2410 omp_context *ctx)
2411 {
2412 tree sub_list, x, c;
2413 bool par_clauses = false;
2414
2415 /* Early exit if there are no lastprivate clauses. */
2416 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2417 if (clauses == NULL)
2418 {
2419 /* If this was a workshare clause, see if it had been combined
2420 with its parallel. In that case, look for the clauses on the
2421 parallel statement itself. */
2422 if (is_parallel_ctx (ctx))
2423 return;
2424
2425 ctx = ctx->outer;
2426 if (ctx == NULL || !is_parallel_ctx (ctx))
2427 return;
2428
2429 clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
2430 OMP_CLAUSE_LASTPRIVATE);
2431 if (clauses == NULL)
2432 return;
2433 par_clauses = true;
2434 }
2435
2436 sub_list = alloc_stmt_list ();
2437
2438 for (c = clauses; c ;)
2439 {
2440 tree var, new_var;
2441
2442 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2443 {
2444 var = OMP_CLAUSE_DECL (c);
2445 new_var = lookup_decl (var, ctx);
2446
2447 if (OMP_CLAUSE_LASTPRIVATE_STMT (c))
2448 gimplify_and_add (OMP_CLAUSE_LASTPRIVATE_STMT (c), &sub_list);
2449 OMP_CLAUSE_LASTPRIVATE_STMT (c) = NULL;
2450
2451 x = build_outer_var_ref (var, ctx);
2452 if (is_reference (var))
2453 new_var = build_fold_indirect_ref (new_var);
2454 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2455 append_to_statement_list (x, &sub_list);
2456 }
2457 c = OMP_CLAUSE_CHAIN (c);
2458 if (c == NULL && !par_clauses)
2459 {
2460 /* If this was a workshare clause, see if it had been combined
2461 with its parallel. In that case, continue looking for the
2462 clauses also on the parallel statement itself. */
2463 if (is_parallel_ctx (ctx))
2464 break;
2465
2466 ctx = ctx->outer;
2467 if (ctx == NULL || !is_parallel_ctx (ctx))
2468 break;
2469
2470 c = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
2471 OMP_CLAUSE_LASTPRIVATE);
2472 par_clauses = true;
2473 }
2474 }
2475
2476 if (predicate)
2477 x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL);
2478 else
2479 x = sub_list;
2480
2481 gimplify_and_add (x, stmt_list);
2482 }
2483
2484
2485 /* Generate code to implement the REDUCTION clauses. */
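/* An illustrative sketch: for a single reduction (+:s) clause the
   merge is emitted as an atomic update, roughly

	#pragma omp atomic
	s = s + s.1;

   whereas multiple reductions (and any placeholder/array reduction)
   are merged inside a GOMP_atomic_start ()/GOMP_atomic_end () pair.  */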
2486
2487 static void
2488 lower_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx)
2489 {
2490 tree sub_list = NULL, x, c;
2491 int count = 0;
2492
2493 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2494 update in that case, otherwise use a lock. */
2495 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2496 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2497 {
2498 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2499 {
2500 /* Never use OMP_ATOMIC for array reductions. */
2501 count = -1;
2502 break;
2503 }
2504 count++;
2505 }
2506
2507 if (count == 0)
2508 return;
2509
2510 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2511 {
2512 tree var, ref, new_var;
2513 enum tree_code code;
2514
2515 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2516 continue;
2517
2518 var = OMP_CLAUSE_DECL (c);
2519 new_var = lookup_decl (var, ctx);
2520 if (is_reference (var))
2521 new_var = build_fold_indirect_ref (new_var);
2522 ref = build_outer_var_ref (var, ctx);
2523 code = OMP_CLAUSE_REDUCTION_CODE (c);
2524
2525 /* reduction(-:var) sums up the partial results, so it acts
2526 identically to reduction(+:var). */
2527 if (code == MINUS_EXPR)
2528 code = PLUS_EXPR;
2529
2530 if (count == 1)
2531 {
2532 tree addr = build_fold_addr_expr (ref);
2533
2534 addr = save_expr (addr);
2535 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2536 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2537 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2538 gimplify_and_add (x, stmt_list);
2539 return;
2540 }
2541
2542 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2543 {
2544 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2545
2546 if (is_reference (var))
2547 ref = build_fold_addr_expr (ref);
2548 SET_DECL_VALUE_EXPR (placeholder, ref);
2549 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2550 gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list);
2551 OMP_CLAUSE_REDUCTION_MERGE (c) = NULL;
2552 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2553 }
2554 else
2555 {
2556 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2557 ref = build_outer_var_ref (var, ctx);
2558 x = build_gimple_modify_stmt (ref, x);
2559 append_to_statement_list (x, &sub_list);
2560 }
2561 }
2562
2563 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2564 gimplify_and_add (x, stmt_list);
2565
2566 gimplify_and_add (sub_list, stmt_list);
2567
2568 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2569 gimplify_and_add (x, stmt_list);
2570 }
2571
2572
2573 /* Generate code to implement the COPYPRIVATE clauses. */
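/* An illustrative sketch for "#pragma omp single copyprivate (x)":
   SLIST, run by the thread that executed the single body, stores x
   (or &x when it is passed by reference) into the broadcast record,
   and RLIST, run by the remaining threads, copies the value back
   into their own x.  */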
2574
2575 static void
2576 lower_copyprivate_clauses (tree clauses, tree *slist, tree *rlist,
2577 omp_context *ctx)
2578 {
2579 tree c;
2580
2581 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2582 {
2583 tree var, ref, x;
2584 bool by_ref;
2585
2586 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2587 continue;
2588
2589 var = OMP_CLAUSE_DECL (c);
2590 by_ref = use_pointer_for_field (var, NULL);
2591
2592 ref = build_sender_ref (var, ctx);
2593 x = lookup_decl_in_outer_ctx (var, ctx);
2594 x = by_ref ? build_fold_addr_expr (x) : x;
2595 x = build_gimple_modify_stmt (ref, x);
2596 gimplify_and_add (x, slist);
2597
2598 ref = build_receiver_ref (var, by_ref, ctx);
2599 if (is_reference (var))
2600 {
2601 ref = build_fold_indirect_ref (ref);
2602 var = build_fold_indirect_ref (var);
2603 }
2604 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2605 gimplify_and_add (x, rlist);
2606 }
2607 }
2608
2609
2610 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2611 and REDUCTION from the sender (aka parent) side. */
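/* A rough sketch (an approximation): for
   "#pragma omp parallel firstprivate (a) lastprivate (b)", ILIST gets
   the copy into the marshalling record and OLIST the copy back out:

	.omp_data_o.a = a;	<- ILIST, before the region starts
	...
	b = .omp_data_o.b;	<- OLIST, after the region completes  */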
2612
2613 static void
2614 lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
2615 {
2616 tree c;
2617
2618 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2619 {
2620 tree val, ref, x, var;
2621 bool by_ref, do_in = false, do_out = false;
2622
2623 switch (OMP_CLAUSE_CODE (c))
2624 {
2625 case OMP_CLAUSE_PRIVATE:
2626 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2627 break;
2628 continue;
2629 case OMP_CLAUSE_FIRSTPRIVATE:
2630 case OMP_CLAUSE_COPYIN:
2631 case OMP_CLAUSE_LASTPRIVATE:
2632 case OMP_CLAUSE_REDUCTION:
2633 break;
2634 default:
2635 continue;
2636 }
2637
2638 val = OMP_CLAUSE_DECL (c);
2639 var = lookup_decl_in_outer_ctx (val, ctx);
2640
2641 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2642 && is_global_var (var))
2643 continue;
2644 if (is_variable_sized (val))
2645 continue;
2646 by_ref = use_pointer_for_field (val, NULL);
2647
2648 switch (OMP_CLAUSE_CODE (c))
2649 {
2650 case OMP_CLAUSE_PRIVATE:
2651 case OMP_CLAUSE_FIRSTPRIVATE:
2652 case OMP_CLAUSE_COPYIN:
2653 do_in = true;
2654 break;
2655
2656 case OMP_CLAUSE_LASTPRIVATE:
2657 if (by_ref || is_reference (val))
2658 {
2659 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2660 continue;
2661 do_in = true;
2662 }
2663 else
2664 {
2665 do_out = true;
2666 if (lang_hooks.decls.omp_private_outer_ref (val))
2667 do_in = true;
2668 }
2669 break;
2670
2671 case OMP_CLAUSE_REDUCTION:
2672 do_in = true;
2673 do_out = !(by_ref || is_reference (val));
2674 break;
2675
2676 default:
2677 gcc_unreachable ();
2678 }
2679
2680 if (do_in)
2681 {
2682 ref = build_sender_ref (val, ctx);
2683 x = by_ref ? build_fold_addr_expr (var) : var;
2684 x = build_gimple_modify_stmt (ref, x);
2685 gimplify_and_add (x, ilist);
2686 if (is_task_ctx (ctx))
2687 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2688 }
2689
2690 if (do_out)
2691 {
2692 ref = build_sender_ref (val, ctx);
2693 x = build_gimple_modify_stmt (var, ref);
2694 gimplify_and_add (x, olist);
2695 }
2696 }
2697 }
2698
2699 /* Generate code to implement SHARED from the sender (aka parent) side.
2700 This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that
2701 got automatically shared. */
2702
2703 static void
2704 lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
2705 {
2706 tree var, ovar, nvar, f, x, record_type;
2707
2708 if (ctx->record_type == NULL)
2709 return;
2710
2711 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2712 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2713 {
2714 ovar = DECL_ABSTRACT_ORIGIN (f);
2715 nvar = maybe_lookup_decl (ovar, ctx);
2716 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2717 continue;
2718
2719       /* If CTX is a nested parallel directive, find the immediately
2720 	 enclosing parallel or workshare construct that contains a
2721 	 mapping for OVAR.  */
2722 var = lookup_decl_in_outer_ctx (ovar, ctx);
2723
2724 if (use_pointer_for_field (ovar, ctx))
2725 {
2726 x = build_sender_ref (ovar, ctx);
2727 var = build_fold_addr_expr (var);
2728 x = build_gimple_modify_stmt (x, var);
2729 gimplify_and_add (x, ilist);
2730 }
2731 else
2732 {
2733 x = build_sender_ref (ovar, ctx);
2734 x = build_gimple_modify_stmt (x, var);
2735 gimplify_and_add (x, ilist);
2736
2737 if (!TREE_READONLY (var))
2738 {
2739 x = build_sender_ref (ovar, ctx);
2740 x = build_gimple_modify_stmt (var, x);
2741 gimplify_and_add (x, olist);
2742 }
2743 }
2744 }
2745 }
2746
2747 /* Build the function calls to GOMP_parallel_start etc. to actually
2748    generate the parallel operation.  REGION is the parallel region
2749    being expanded.  BB is the block into which the code is inserted.
2750    WS_ARGS is set if this is a call to a combined parallel+workshare
2751    construct; it contains the list of additional arguments needed by
2752    the workshare construct.  */
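/* The emitted sequence looks roughly like (names illustrative):

	GOMP_parallel_start (__child_fn, &.omp_data_o, num_threads);
	__child_fn (&.omp_data_o);
	GOMP_parallel_end ();

   with GOMP_parallel_loop_*_start or GOMP_parallel_sections_start
   used instead for a combined parallel+workshare region.  */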
2753
2754 static void
2755 expand_parallel_call (struct omp_region *region, basic_block bb,
2756 tree entry_stmt, tree ws_args)
2757 {
2758 tree t, t1, t2, val, cond, c, clauses;
2759 block_stmt_iterator si;
2760 int start_ix;
2761
2762 clauses = OMP_PARALLEL_CLAUSES (entry_stmt);
2763
2764 /* Determine what flavor of GOMP_parallel_start we will be
2765 emitting. */
2766 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2767 if (is_combined_parallel (region))
2768 {
2769 switch (region->inner->type)
2770 {
2771 case OMP_FOR:
2772 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2773 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2774 + (region->inner->sched_kind
2775 == OMP_CLAUSE_SCHEDULE_RUNTIME
2776 ? 3 : region->inner->sched_kind);
2777 break;
2778 case OMP_SECTIONS:
2779 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2780 break;
2781 default:
2782 gcc_unreachable ();
2783 }
2784 }
2785
2786 /* By default, the value of NUM_THREADS is zero (selected at run time)
2787 and there is no conditional. */
2788 cond = NULL_TREE;
2789 val = build_int_cst (unsigned_type_node, 0);
2790
2791 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2792 if (c)
2793 cond = OMP_CLAUSE_IF_EXPR (c);
2794
2795 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2796 if (c)
2797 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2798
2799 /* Ensure 'val' is of the correct type. */
2800 val = fold_convert (unsigned_type_node, val);
2801
2802 /* If we found the clause 'if (cond)', build either
2803 (cond != 0) or (cond ? val : 1u). */
2804 if (cond)
2805 {
2806 block_stmt_iterator si;
2807
2808 cond = gimple_boolify (cond);
2809
2810 if (integer_zerop (val))
2811 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2812 build_int_cst (TREE_TYPE (cond), 0));
2813 else
2814 {
2815 basic_block cond_bb, then_bb, else_bb;
2816 edge e, e_then, e_else;
2817 tree t, tmp_then, tmp_else, tmp_join, tmp_var;
2818
2819 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2820 if (gimple_in_ssa_p (cfun))
2821 {
2822 tmp_then = make_ssa_name (tmp_var, NULL_TREE);
2823 tmp_else = make_ssa_name (tmp_var, NULL_TREE);
2824 tmp_join = make_ssa_name (tmp_var, NULL_TREE);
2825 }
2826 else
2827 {
2828 tmp_then = tmp_var;
2829 tmp_else = tmp_var;
2830 tmp_join = tmp_var;
2831 }
2832
2833 e = split_block (bb, NULL);
2834 cond_bb = e->src;
2835 bb = e->dest;
2836 remove_edge (e);
2837
2838 then_bb = create_empty_bb (cond_bb);
2839 else_bb = create_empty_bb (then_bb);
2840 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2841 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2842
2843 t = build3 (COND_EXPR, void_type_node,
2844 cond, NULL_TREE, NULL_TREE);
2845
2846 si = bsi_start (cond_bb);
2847 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2848
2849 si = bsi_start (then_bb);
2850 t = build_gimple_modify_stmt (tmp_then, val);
2851 if (gimple_in_ssa_p (cfun))
2852 SSA_NAME_DEF_STMT (tmp_then) = t;
2853 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2854
2855 si = bsi_start (else_bb);
2856 t = build_gimple_modify_stmt (tmp_else,
2857 build_int_cst (unsigned_type_node, 1));
2858 if (gimple_in_ssa_p (cfun))
2859 SSA_NAME_DEF_STMT (tmp_else) = t;
2860 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2861
2862 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2863 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2864 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2865 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2866
2867 if (gimple_in_ssa_p (cfun))
2868 {
2869 tree phi = create_phi_node (tmp_join, bb);
2870 SSA_NAME_DEF_STMT (tmp_join) = phi;
2871 add_phi_arg (phi, tmp_then, e_then);
2872 add_phi_arg (phi, tmp_else, e_else);
2873 }
2874
2875 val = tmp_join;
2876 }
2877
2878 si = bsi_start (bb);
2879 val = force_gimple_operand_bsi (&si, val, true, NULL_TREE,
2880 false, BSI_CONTINUE_LINKING);
2881 }
2882
2883 si = bsi_last (bb);
2884 t = OMP_PARALLEL_DATA_ARG (entry_stmt);
2885 if (t == NULL)
2886 t1 = null_pointer_node;
2887 else
2888 t1 = build_fold_addr_expr (t);
2889 t2 = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt));
2890
2891 if (ws_args)
2892 {
2893 tree args = tree_cons (NULL, t2,
2894 tree_cons (NULL, t1,
2895 tree_cons (NULL, val, ws_args)));
2896 t = build_function_call_expr (built_in_decls[start_ix], args);
2897 }
2898 else
2899 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
2900
2901 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2902 false, BSI_CONTINUE_LINKING);
2903
2904 t = OMP_PARALLEL_DATA_ARG (entry_stmt);
2905 if (t == NULL)
2906 t = null_pointer_node;
2907 else
2908 t = build_fold_addr_expr (t);
2909 t = build_call_expr (OMP_PARALLEL_FN (entry_stmt), 1, t);
2910 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2911 false, BSI_CONTINUE_LINKING);
2912
2913 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
2914 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2915 false, BSI_CONTINUE_LINKING);
2916 }
2917
2918
2919 /* Build the function call to GOMP_task to actually generate the
2920    task operation.  BB is the block into which the code is inserted.  */
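/* The call built below has the shape (names illustrative):

	GOMP_task (__task_fn, &.omp_data_o, __task_copy_fn,
		   arg_size, arg_align, if_cond, flags);

   where the data pointer and the copy function may be null and FLAGS
   currently carries only the "untied" bit.  */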
2921
2922 static void
2923 expand_task_call (basic_block bb, tree entry_stmt)
2924 {
2925 tree t, t1, t2, t3, flags, cond, c, clauses;
2926 block_stmt_iterator si;
2927
2928 clauses = OMP_TASK_CLAUSES (entry_stmt);
2929
2930 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2931 if (c)
2932 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
2933 else
2934 cond = boolean_true_node;
2935
2936 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
2937 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
2938
2939 si = bsi_last (bb);
2940 t = OMP_TASK_DATA_ARG (entry_stmt);
2941 if (t == NULL)
2942 t2 = null_pointer_node;
2943 else
2944 t2 = build_fold_addr_expr (t);
2945 t1 = build_fold_addr_expr (OMP_TASK_FN (entry_stmt));
2946 t = OMP_TASK_COPYFN (entry_stmt);
2947 if (t == NULL)
2948 t3 = null_pointer_node;
2949 else
2950 t3 = build_fold_addr_expr (t);
2951
2952 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
2953 OMP_TASK_ARG_SIZE (entry_stmt),
2954 OMP_TASK_ARG_ALIGN (entry_stmt), cond, flags);
2955
2956 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2957 false, BSI_CONTINUE_LINKING);
2958 }
2959
2960
2961 /* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
2962 handler. This prevents programs from violating the structured
2963 block semantics with throws. */
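/* In C++ terms the result is roughly

	try { BODY; } catch (...) { terminate (); }

   except that the handler is whatever lang_protect_cleanup_actions
   returns (falling back to __builtin_trap) and the region is marked
   MUST_NOT_THROW.  */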
2964
2965 static void
2966 maybe_catch_exception (tree *stmt_p)
2967 {
2968 tree f, t;
2969
2970 if (!flag_exceptions)
2971 return;
2972
2973 if (lang_protect_cleanup_actions)
2974 t = lang_protect_cleanup_actions ();
2975 else
2976 t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
2977 f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
2978 EH_FILTER_MUST_NOT_THROW (f) = 1;
2979 gimplify_and_add (t, &EH_FILTER_FAILURE (f));
2980
2981 t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL);
2982 append_to_statement_list (f, &TREE_OPERAND (t, 1));
2983
2984 *stmt_p = NULL;
2985 append_to_statement_list (t, stmt_p);
2986 }
2987
2988 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
2989
2990 static tree
2991 list2chain (tree list)
2992 {
2993 tree t;
2994
2995 for (t = list; t; t = TREE_CHAIN (t))
2996 {
2997 tree var = TREE_VALUE (t);
2998 if (TREE_CHAIN (t))
2999 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3000 else
3001 TREE_CHAIN (var) = NULL_TREE;
3002 }
3003
3004 return list ? TREE_VALUE (list) : NULL_TREE;
3005 }
3006
3007
3008 /* Remove barriers in REGION->EXIT's block. Note that this is only
3009 valid for OMP_PARALLEL regions. Since the end of a parallel region
3010 is an implicit barrier, any workshare inside the OMP_PARALLEL that
3011 left a barrier at the end of the OMP_PARALLEL region can now be
3012 removed. */
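/* E.g. in

	#pragma omp parallel
	  {
	    #pragma omp for
	    for (...) ...
	  }

   the barrier implied by the worksharing loop is immediately followed
   by the implicit barrier at the end of the parallel region, so the
   workshare's OMP_RETURN can be marked with OMP_RETURN_NOWAIT.  */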
3013
3014 static void
3015 remove_exit_barrier (struct omp_region *region)
3016 {
3017 block_stmt_iterator si;
3018 basic_block exit_bb;
3019 edge_iterator ei;
3020 edge e;
3021 tree t;
3022
3023 exit_bb = region->exit;
3024
3025   /* If the parallel region doesn't return, we don't have a REGION->EXIT
3026      block at all.  */
3027 if (! exit_bb)
3028 return;
3029
3030 /* The last insn in the block will be the parallel's OMP_RETURN. The
3031 workshare's OMP_RETURN will be in a preceding block. The kinds of
3032 statements that can appear in between are extremely limited -- no
3033 memory operations at all. Here, we allow nothing at all, so the
3034 only thing we allow to precede this OMP_RETURN is a label. */
3035 si = bsi_last (exit_bb);
3036 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
3037 bsi_prev (&si);
3038 if (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) != LABEL_EXPR)
3039 return;
3040
3041 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3042 {
3043 si = bsi_last (e->src);
3044 if (bsi_end_p (si))
3045 continue;
3046 t = bsi_stmt (si);
3047 if (TREE_CODE (t) == OMP_RETURN)
3048 OMP_RETURN_NOWAIT (t) = 1;
3049 }
3050 }
3051
3052 static void
3053 remove_exit_barriers (struct omp_region *region)
3054 {
3055 if (region->type == OMP_PARALLEL)
3056 remove_exit_barrier (region);
3057
3058 if (region->inner)
3059 {
3060 region = region->inner;
3061 remove_exit_barriers (region);
3062 while (region->next)
3063 {
3064 region = region->next;
3065 remove_exit_barriers (region);
3066 }
3067 }
3068 }
3069
3070 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3071    calls.  These can't be declared as const functions, but
3072    within one parallel body they are constant, so they can be
3073    transformed there into __builtin_omp_get_{thread_num,num_threads} (),
3074    which are declared const.  The same holds for a task body, except
3075    that in an untied task omp_get_thread_num () can change at any
3076    task scheduling point.  */
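/* Concretely, a call such as

	tid = omp_get_thread_num ();

   inside the outlined body is redirected to
   __builtin_omp_get_thread_num (), which is declared const and can
   therefore be CSEd by later passes.  */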
3077
3078 static void
3079 optimize_omp_library_calls (tree entry_stmt)
3080 {
3081 basic_block bb;
3082 block_stmt_iterator bsi;
3083 tree thr_num_id
3084 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3085 tree num_thr_id
3086 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3087 bool untied_task = (TREE_CODE (entry_stmt) == OMP_TASK
3088 && find_omp_clause (OMP_TASK_CLAUSES (entry_stmt),
3089 OMP_CLAUSE_UNTIED) != NULL);
3090
3091 FOR_EACH_BB (bb)
3092 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
3093 {
3094 tree stmt = bsi_stmt (bsi);
3095 tree call = get_call_expr_in (stmt);
3096 tree decl;
3097
3098 if (call
3099 && (decl = get_callee_fndecl (call))
3100 && DECL_EXTERNAL (decl)
3101 && TREE_PUBLIC (decl)
3102 && DECL_INITIAL (decl) == NULL)
3103 {
3104 tree built_in;
3105
3106 if (DECL_NAME (decl) == thr_num_id)
3107 {
3108 /* In #pragma omp task untied omp_get_thread_num () can change
3109 during the execution of the task region. */
3110 if (untied_task)
3111 continue;
3112 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3113 }
3114 else if (DECL_NAME (decl) == num_thr_id)
3115 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3116 else
3117 continue;
3118
3119 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3120 || call_expr_nargs (call) != 0)
3121 continue;
3122
3123 if (flag_exceptions && !TREE_NOTHROW (decl))
3124 continue;
3125
3126 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3127 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3128 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3129 continue;
3130
3131 CALL_EXPR_FN (call) = build_fold_addr_expr (built_in);
3132 }
3133 }
3134 }
3135
3136 /* Expand the OpenMP parallel or task directive starting at REGION. */
3137
3138 static void
3139 expand_omp_taskreg (struct omp_region *region)
3140 {
3141 basic_block entry_bb, exit_bb, new_bb;
3142 struct function *child_cfun;
3143 tree child_fn, block, t, ws_args;
3144 block_stmt_iterator si;
3145 tree entry_stmt;
3146 edge e;
3147
3148 entry_stmt = last_stmt (region->entry);
3149 child_fn = OMP_TASKREG_FN (entry_stmt);
3150 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3151   /* If this function has already been instrumented, make sure
3152 the child function isn't instrumented again. */
3153 child_cfun->after_tree_profile = cfun->after_tree_profile;
3154
3155 entry_bb = region->entry;
3156 exit_bb = region->exit;
3157
3158 if (is_combined_parallel (region))
3159 ws_args = region->ws_args;
3160 else
3161 ws_args = NULL_TREE;
3162
3163 if (child_cfun->cfg)
3164 {
3165 /* Due to inlining, it may happen that we have already outlined
3166 the region, in which case all we need to do is make the
3167 sub-graph unreachable and emit the parallel call. */
3168 edge entry_succ_e, exit_succ_e;
3169 block_stmt_iterator si;
3170
3171 entry_succ_e = single_succ_edge (entry_bb);
3172
3173 si = bsi_last (entry_bb);
3174 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL
3175 || TREE_CODE (bsi_stmt (si)) == OMP_TASK);
3176 bsi_remove (&si, true);
3177
3178 new_bb = entry_bb;
3179 if (exit_bb)
3180 {
3181 exit_succ_e = single_succ_edge (exit_bb);
3182 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3183 }
3184 remove_edge_and_dominated_blocks (entry_succ_e);
3185 }
3186 else
3187 {
3188 /* If the parallel region needs data sent from the parent
3189 function, then the very first statement (except possible
3190 tree profile counter updates) of the parallel body
3191 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3192 &.OMP_DATA_O is passed as an argument to the child function,
3193 we need to replace it with the argument as seen by the child
3194 function.
3195
3196 In most cases, this will end up being the identity assignment
3197 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3198 a function call that has been inlined, the original PARM_DECL
3199 	 .OMP_DATA_I may have been converted into a different local
3200 	 variable, in which case we need to keep the assignment.  */
3201 if (OMP_TASKREG_DATA_ARG (entry_stmt))
3202 {
3203 basic_block entry_succ_bb = single_succ (entry_bb);
3204 block_stmt_iterator si;
3205 tree parcopy_stmt = NULL_TREE, arg, narg;
3206
3207 for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
3208 {
3209 tree stmt, arg;
3210
3211 gcc_assert (!bsi_end_p (si));
3212 stmt = bsi_stmt (si);
3213 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
3214 continue;
3215
3216 arg = GIMPLE_STMT_OPERAND (stmt, 1);
3217 STRIP_NOPS (arg);
3218 if (TREE_CODE (arg) == ADDR_EXPR
3219 && TREE_OPERAND (arg, 0)
3220 == OMP_TASKREG_DATA_ARG (entry_stmt))
3221 {
3222 parcopy_stmt = stmt;
3223 break;
3224 }
3225 }
3226
3227 gcc_assert (parcopy_stmt != NULL_TREE);
3228 arg = DECL_ARGUMENTS (child_fn);
3229
3230 if (!gimple_in_ssa_p (cfun))
3231 {
3232 if (GIMPLE_STMT_OPERAND (parcopy_stmt, 0) == arg)
3233 bsi_remove (&si, true);
3234 else
3235 GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = arg;
3236 }
3237 else
3238 {
3239 /* If we are in ssa form, we must load the value from the default
3240 definition of the argument. That should not be defined now,
3241 since the argument is not used uninitialized. */
3242 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3243 narg = make_ssa_name (arg, build_empty_stmt ());
3244 set_default_def (arg, narg);
3245 GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = narg;
3246 update_stmt (parcopy_stmt);
3247 }
3248 }
3249
3250 /* Declare local variables needed in CHILD_CFUN. */
3251 block = DECL_INITIAL (child_fn);
3252 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3253 DECL_SAVED_TREE (child_fn) = bb_stmt_list (single_succ (entry_bb));
3254
3255 /* Reset DECL_CONTEXT on function arguments. */
3256 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3257 DECL_CONTEXT (t) = child_fn;
3258
3259 /* Split ENTRY_BB at OMP_PARALLEL or OMP_TASK, so that it can be
3260 moved to the child function. */
3261 si = bsi_last (entry_bb);
3262 t = bsi_stmt (si);
3263 gcc_assert (t && (TREE_CODE (t) == OMP_PARALLEL
3264 || TREE_CODE (t) == OMP_TASK));
3265 bsi_remove (&si, true);
3266 e = split_block (entry_bb, t);
3267 entry_bb = e->dest;
3268 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3269
3270 /* Convert OMP_RETURN into a RETURN_EXPR. */
3271 if (exit_bb)
3272 {
3273 si = bsi_last (exit_bb);
3274 gcc_assert (!bsi_end_p (si)
3275 && TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
3276 t = build1 (RETURN_EXPR, void_type_node, NULL);
3277 bsi_insert_after (&si, t, BSI_SAME_STMT);
3278 bsi_remove (&si, true);
3279 }
3280
3281 /* Move the parallel region into CHILD_CFUN. */
3282
3283 if (gimple_in_ssa_p (cfun))
3284 {
3285 push_cfun (child_cfun);
3286 init_tree_ssa (child_cfun);
3287 init_ssa_operands ();
3288 cfun->gimple_df->in_ssa_p = true;
3289 pop_cfun ();
3290 }
3291 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
3292 if (exit_bb)
3293 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3294
3295 /* Inform the callgraph about the new function. */
3296 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3297 = cfun->curr_properties;
3298 cgraph_add_new_function (child_fn, true);
3299
3300 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3301 fixed in a following pass. */
3302 push_cfun (child_cfun);
3303 if (optimize)
3304 optimize_omp_library_calls (entry_stmt);
3305 rebuild_cgraph_edges ();
3306
3307 /* Some EH regions might become dead, see PR34608. If
3308 pass_cleanup_cfg isn't the first pass to happen with the
3309 new child, these dead EH edges might cause problems.
3310 Clean them up now. */
3311 if (flag_exceptions)
3312 {
3313 basic_block bb;
3314 tree save_current = current_function_decl;
3315 bool changed = false;
3316
3317 current_function_decl = child_fn;
3318 FOR_EACH_BB (bb)
3319 changed |= tree_purge_dead_eh_edges (bb);
3320 if (changed)
3321 cleanup_tree_cfg ();
3322 current_function_decl = save_current;
3323 }
3324 pop_cfun ();
3325 }
3326
3327 /* Emit a library call to launch the children threads. */
3328 if (TREE_CODE (entry_stmt) == OMP_PARALLEL)
3329 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3330 else
3331 expand_task_call (new_bb, entry_stmt);
3332 update_ssa (TODO_update_ssa_only_virtuals);
3333 }
3334
3335
3336 /* A subroutine of expand_omp_for. Generate code for a parallel
3337 loop with any schedule. Given parameters:
3338
3339 for (V = N1; V cond N2; V += STEP) BODY;
3340
3341 where COND is "<" or ">", we generate pseudocode
3342
3343 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3344 if (more) goto L0; else goto L3;
3345 L0:
3346 V = istart0;
3347 iend = iend0;
3348 L1:
3349 BODY;
3350 V += STEP;
3351 if (V cond iend) goto L1; else goto L2;
3352 L2:
3353 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3354 L3:
3355
3356 If this is a combined omp parallel loop, instead of the call to
3357 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3358
3359 For collapsed loops, given parameters:
3360 collapse(3)
3361 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3362 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3363 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3364 BODY;
3365
3366 we generate pseudocode
3367
3368 if (cond3 is <)
3369 adj = STEP3 - 1;
3370 else
3371 adj = STEP3 + 1;
3372 count3 = (adj + N32 - N31) / STEP3;
3373 if (cond2 is <)
3374 adj = STEP2 - 1;
3375 else
3376 adj = STEP2 + 1;
3377 count2 = (adj + N22 - N21) / STEP2;
3378 if (cond1 is <)
3379 adj = STEP1 - 1;
3380 else
3381 adj = STEP1 + 1;
3382 count1 = (adj + N12 - N11) / STEP1;
3383 count = count1 * count2 * count3;
3384 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3385 if (more) goto L0; else goto L3;
3386 L0:
3387 V = istart0;
3388 T = V;
3389 V3 = N31 + (T % count3) * STEP3;
3390 T = T / count3;
3391 V2 = N21 + (T % count2) * STEP2;
3392 T = T / count2;
3393 V1 = N11 + T * STEP1;
3394 iend = iend0;
3395 L1:
3396 BODY;
3397 V += 1;
3398 if (V < iend) goto L10; else goto L2;
3399 L10:
3400 V3 += STEP3;
3401 if (V3 cond3 N32) goto L1; else goto L11;
3402 L11:
3403 V3 = N31;
3404 V2 += STEP2;
3405 if (V2 cond2 N22) goto L1; else goto L12;
3406 L12:
3407 V2 = N21;
3408 V1 += STEP1;
3409 goto L1;
3410 L2:
3411 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3412 L3:
3413
3414 */
3415
3416 static void
3417 expand_omp_for_generic (struct omp_region *region,
3418 struct omp_for_data *fd,
3419 enum built_in_function start_fn,
3420 enum built_in_function next_fn)
3421 {
3422 tree type, istart0, iend0, iend, phi;
3423 tree t, vmain, vback, bias = NULL_TREE;
3424 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3425 basic_block l2_bb = NULL, l3_bb = NULL;
3426 block_stmt_iterator si;
3427 bool in_combined_parallel = is_combined_parallel (region);
3428 bool broken_loop = region->cont == NULL;
3429 edge e, ne;
3430 tree *counts = NULL;
3431 int i;
3432
3433 gcc_assert (!broken_loop || !in_combined_parallel);
3434 gcc_assert (fd->iter_type == long_integer_type_node
3435 || !in_combined_parallel);
3436
3437 type = TREE_TYPE (fd->loop.v);
3438 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3439 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3440 TREE_ADDRESSABLE (istart0) = 1;
3441 TREE_ADDRESSABLE (iend0) = 1;
3442 if (gimple_in_ssa_p (cfun))
3443 {
3444 add_referenced_var (istart0);
3445 add_referenced_var (iend0);
3446 }
3447
3448 /* See if we need to bias by LLONG_MIN. */
3449 if (fd->iter_type == long_long_unsigned_type_node
3450 && TREE_CODE (type) == INTEGER_TYPE
3451 && !TYPE_UNSIGNED (type))
3452 {
3453 tree n1, n2;
3454
3455 if (fd->loop.cond_code == LT_EXPR)
3456 {
3457 n1 = fd->loop.n1;
3458 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3459 }
3460 else
3461 {
3462 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3463 n2 = fd->loop.n1;
3464 }
3465 if (TREE_CODE (n1) != INTEGER_CST
3466 || TREE_CODE (n2) != INTEGER_CST
3467 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3468 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3469 }
3470
3471 entry_bb = region->entry;
3472 cont_bb = region->cont;
3473 collapse_bb = NULL;
3474 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3475 gcc_assert (broken_loop
3476 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3477 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3478 l1_bb = single_succ (l0_bb);
3479 if (!broken_loop)
3480 {
3481 l2_bb = create_empty_bb (cont_bb);
3482 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3483 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3484 }
3485 else
3486 l2_bb = NULL;
3487 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3488 exit_bb = region->exit;
3489
3490 si = bsi_last (entry_bb);
3491
3492 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
3493 if (fd->collapse > 1)
3494 {
3495       /* Collapsed loops need work for expansion in SSA form.  */
3496 gcc_assert (!gimple_in_ssa_p (cfun));
3497 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3498 for (i = 0; i < fd->collapse; i++)
3499 {
3500 tree itype = TREE_TYPE (fd->loops[i].v);
3501
3502 if (POINTER_TYPE_P (itype))
3503 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3504 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3505 ? -1 : 1));
3506 t = fold_build2 (PLUS_EXPR, itype,
3507 fold_convert (itype, fd->loops[i].step), t);
3508 t = fold_build2 (PLUS_EXPR, itype, t,
3509 fold_convert (itype, fd->loops[i].n2));
3510 t = fold_build2 (MINUS_EXPR, itype, t,
3511 fold_convert (itype, fd->loops[i].n1));
3512 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3513 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3514 fold_build1 (NEGATE_EXPR, itype, t),
3515 fold_build1 (NEGATE_EXPR, itype,
3516 fold_convert (itype,
3517 fd->loops[i].step)));
3518 else
3519 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3520 fold_convert (itype, fd->loops[i].step));
3521 t = fold_convert (type, t);
3522 if (TREE_CODE (t) == INTEGER_CST)
3523 counts[i] = t;
3524 else
3525 {
3526 counts[i] = create_tmp_var (type, ".count");
3527 t = build_gimple_modify_stmt (counts[i], t);
3528 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3529 true, BSI_SAME_STMT);
3530 }
3531 if (SSA_VAR_P (fd->loop.n2))
3532 {
3533 if (i == 0)
3534 t = build_gimple_modify_stmt (fd->loop.n2, counts[0]);
3535 else
3536 {
3537 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3538 t = build_gimple_modify_stmt (fd->loop.n2, t);
3539 }
3540 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3541 true, BSI_SAME_STMT);
3542 }
3543 }
3544 }
3545 if (in_combined_parallel)
3546 {
3547 /* In a combined parallel loop, emit a call to
3548 GOMP_loop_foo_next. */
3549 t = build_call_expr (built_in_decls[next_fn], 2,
3550 build_fold_addr_expr (istart0),
3551 build_fold_addr_expr (iend0));
3552 }
3553 else
3554 {
3555 tree t0, t1, t2, t3, t4;
3556 /* If this is not a combined parallel loop, emit a call to
3557 GOMP_loop_foo_start in ENTRY_BB. */
3558 t4 = build_fold_addr_expr (iend0);
3559 t3 = build_fold_addr_expr (istart0);
3560 t2 = fold_convert (fd->iter_type, fd->loop.step);
3561 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3562 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3563 if (bias)
3564 {
3565 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3566 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3567 }
3568 if (fd->iter_type == long_integer_type_node)
3569 {
3570 if (fd->chunk_size)
3571 {
3572 t = fold_convert (fd->iter_type, fd->chunk_size);
3573 t = build_call_expr (built_in_decls[start_fn], 6,
3574 t0, t1, t2, t, t3, t4);
3575 }
3576 else
3577 t = build_call_expr (built_in_decls[start_fn], 5,
3578 t0, t1, t2, t3, t4);
3579 }
3580 else
3581 {
3582 tree t5;
3583 tree c_bool_type;
3584
3585 	  /* The GOMP_loop_ull_*start functions have an additional boolean
3586 argument, true for < loops and false for > loops.
3587 In Fortran, the C bool type can be different from
3588 boolean_type_node. */
3589 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3590 t5 = build_int_cst (c_bool_type,
3591 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3592 if (fd->chunk_size)
3593 {
3594 t = fold_convert (fd->iter_type, fd->chunk_size);
3595 t = build_call_expr (built_in_decls[start_fn], 7,
3596 t5, t0, t1, t2, t, t3, t4);
3597 }
3598 else
3599 t = build_call_expr (built_in_decls[start_fn], 6,
3600 t5, t0, t1, t2, t3, t4);
3601 }
3602 }
3603 if (TREE_TYPE (t) != boolean_type_node)
3604 t = fold_build2 (NE_EXPR, boolean_type_node,
3605 t, build_int_cst (TREE_TYPE (t), 0));
3606 t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3607 true, BSI_SAME_STMT);
3608 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3609 bsi_insert_after (&si, t, BSI_SAME_STMT);
3610
3611 /* Remove the OMP_FOR statement. */
3612 bsi_remove (&si, true);
3613
3614 /* Iteration setup for sequential loop goes in L0_BB. */
3615 si = bsi_start (l0_bb);
3616 if (bias)
3617 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3618 istart0, bias));
3619 else
3620 t = fold_convert (type, istart0);
3621 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3622 false, BSI_CONTINUE_LINKING);
3623 t = build_gimple_modify_stmt (fd->loop.v, t);
3624 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3625 if (gimple_in_ssa_p (cfun))
3626 SSA_NAME_DEF_STMT (fd->loop.v) = t;
3627
3628 if (bias)
3629 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3630 iend0, bias));
3631 else
3632 t = fold_convert (type, iend0);
3633 iend = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3634 false, BSI_CONTINUE_LINKING);
3635 if (fd->collapse > 1)
3636 {
3637 tree tem = create_tmp_var (type, ".tem");
3638
3639 t = build_gimple_modify_stmt (tem, fd->loop.v);
3640 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3641 for (i = fd->collapse - 1; i >= 0; i--)
3642 {
3643 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3644 itype = vtype;
3645 if (POINTER_TYPE_P (vtype))
3646 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3647 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3648 t = fold_convert (itype, t);
3649 t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i].step);
3650 if (POINTER_TYPE_P (vtype))
3651 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3652 fd->loops[i].n1, fold_convert (sizetype, t));
3653 else
3654 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3655 t = build_gimple_modify_stmt (fd->loops[i].v, t);
3656 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3657 false, BSI_CONTINUE_LINKING);
3658 if (i != 0)
3659 {
3660 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3661 t = build_gimple_modify_stmt (tem, t);
3662 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3663 false, BSI_CONTINUE_LINKING);
3664 }
3665 }
3666 }
3667
3668 if (!broken_loop)
3669 {
3670 /* Code to control the increment and predicate for the sequential
3671 loop goes in the CONT_BB. */
3672 si = bsi_last (cont_bb);
3673 t = bsi_stmt (si);
3674 gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
3675 vmain = TREE_OPERAND (t, 1);
3676 vback = TREE_OPERAND (t, 0);
3677
3678 if (POINTER_TYPE_P (type))
3679 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3680 fold_convert (sizetype, fd->loop.step));
3681 else
3682 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3683 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3684 true, BSI_SAME_STMT);
3685 t = build_gimple_modify_stmt (vback, t);
3686 bsi_insert_before (&si, t, BSI_SAME_STMT);
3687 if (gimple_in_ssa_p (cfun))
3688 SSA_NAME_DEF_STMT (vback) = t;
3689
3690 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3691 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3692 bsi_insert_before (&si, t, BSI_SAME_STMT);
3693
3694 /* Remove OMP_CONTINUE. */
3695 bsi_remove (&si, true);
3696
3697 if (fd->collapse > 1)
3698 {
3699 basic_block last_bb, bb;
3700
3701 last_bb = cont_bb;
3702 for (i = fd->collapse - 1; i >= 0; i--)
3703 {
3704 tree vtype = TREE_TYPE (fd->loops[i].v);
3705
3706 bb = create_empty_bb (last_bb);
3707 si = bsi_start (bb);
3708
3709 if (i < fd->collapse - 1)
3710 {
3711 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3712 e->probability = REG_BR_PROB_BASE / 8;
3713
3714 t = build_gimple_modify_stmt (fd->loops[i + 1].v,
3715 fd->loops[i + 1].n1);
3716 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3717 false, BSI_CONTINUE_LINKING);
3718 }
3719 else
3720 collapse_bb = bb;
3721
3722 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3723
3724 if (POINTER_TYPE_P (vtype))
3725 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3726 fd->loops[i].v,
3727 fold_convert (sizetype, fd->loops[i].step));
3728 else
3729 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3730 fd->loops[i].step);
3731 t = build_gimple_modify_stmt (fd->loops[i].v, t);
3732 force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3733 false, BSI_CONTINUE_LINKING);
3734
3735 if (i > 0)
3736 {
3737 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3738 fd->loops[i].v, fd->loops[i].n2);
3739 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3740 false, BSI_CONTINUE_LINKING);
3741 t = build3 (COND_EXPR, void_type_node, t,
3742 NULL_TREE, NULL_TREE);
3743 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3744 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3745 e->probability = REG_BR_PROB_BASE * 7 / 8;
3746 }
3747 else
3748 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3749 last_bb = bb;
3750 }
3751 }
3752
3753 /* Emit code to get the next parallel iteration in L2_BB. */
3754 si = bsi_start (l2_bb);
3755
3756 t = build_call_expr (built_in_decls[next_fn], 2,
3757 build_fold_addr_expr (istart0),
3758 build_fold_addr_expr (iend0));
3759 if (TREE_TYPE (t) != boolean_type_node)
3760 t = fold_build2 (NE_EXPR, boolean_type_node,
3761 t, build_int_cst (TREE_TYPE (t), 0));
3762 t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3763 false, BSI_CONTINUE_LINKING);
3764 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3765 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3766 }
3767
3768 /* Add the loop cleanup function. */
3769 si = bsi_last (exit_bb);
3770 if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
3771 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3772 else
3773 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
3774 t = build_call_expr (t, 0);
3775 bsi_insert_after (&si, t, BSI_SAME_STMT);
3776 bsi_remove (&si, true);
3777
3778 /* Connect the new blocks. */
3779 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3780 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3781
3782 if (!broken_loop)
3783 {
3784 e = find_edge (cont_bb, l3_bb);
3785 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3786
3787 for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
3788 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3789 PHI_ARG_DEF_FROM_EDGE (phi, e));
3790 remove_edge (e);
3791
3792 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
3793 if (fd->collapse > 1)
3794 {
3795 e = find_edge (cont_bb, l1_bb);
3796 remove_edge (e);
3797 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
3798 }
3799 else
3800 {
3801 e = find_edge (cont_bb, l1_bb);
3802 e->flags = EDGE_TRUE_VALUE;
3803 }
3804 e->probability = REG_BR_PROB_BASE * 7 / 8;
3805 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
3806 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
3807
3808 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
3809 recompute_dominator (CDI_DOMINATORS, l2_bb));
3810 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
3811 recompute_dominator (CDI_DOMINATORS, l3_bb));
3812 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
3813 recompute_dominator (CDI_DOMINATORS, l0_bb));
3814 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
3815 recompute_dominator (CDI_DOMINATORS, l1_bb));
3816 }
3817 }
3818
3819
3820 /* A subroutine of expand_omp_for. Generate code for a parallel
3821 loop with static schedule and no specified chunk size. Given
3822 parameters:
3823
3824 for (V = N1; V cond N2; V += STEP) BODY;
3825
3826 where COND is "<" or ">", we generate pseudocode
3827
3828 if (cond is <)
3829 adj = STEP - 1;
3830 else
3831 adj = STEP + 1;
3832 if ((__typeof (V)) -1 > 0 && cond is >)
3833 n = -(adj + N2 - N1) / -STEP;
3834 else
3835 n = (adj + N2 - N1) / STEP;
3836 q = n / nthreads;
3837 q += (q * nthreads != n);
3838 s0 = q * threadid;
3839 e0 = min(s0 + q, n);
3840 V = s0 * STEP + N1;
3841 if (s0 >= e0) goto L2; else goto L0;
3842 L0:
3843 e = e0 * STEP + N1;
3844 L1:
3845 BODY;
3846 V += STEP;
3847 if (V cond e) goto L1;
3848 L2:
3849 */
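/* A worked example of the partitioning above (illustrative only):
   for the loop "for (V = 0; V < 10; V++)" run by 4 threads, n = 10 and
   q = 10/4 = 2, bumped to 3 because 2*4 != 10.  Thread 1 then gets
   s0 = 3, e0 = min (3 + 3, 10) = 6, i.e. iterations 3..5, while
   thread 3 gets s0 = 9, e0 = 10 and runs only iteration 9.  */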
3850
3851 static void
3852 expand_omp_for_static_nochunk (struct omp_region *region,
3853 struct omp_for_data *fd)
3854 {
3855 tree n, q, s0, e0, e, t, nthreads, threadid;
3856 tree type, itype, vmain, vback;
3857 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
3858 basic_block fin_bb;
3859 block_stmt_iterator si;
3860
3861 itype = type = TREE_TYPE (fd->loop.v);
3862 if (POINTER_TYPE_P (type))
3863 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3864
3865 entry_bb = region->entry;
3866 cont_bb = region->cont;
3867 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3868 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3869 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3870 body_bb = single_succ (seq_start_bb);
3871 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
3872 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3873 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
3874 exit_bb = region->exit;
3875
3876 /* Iteration space partitioning goes in ENTRY_BB. */
3877 si = bsi_last (entry_bb);
3878 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
3879
3880 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
3881 t = fold_convert (itype, t);
3882 nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3883 true, BSI_SAME_STMT);
3884
3885 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
3886 t = fold_convert (itype, t);
3887 threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3888 true, BSI_SAME_STMT);
3889
3890 fd->loop.n1
3891 = force_gimple_operand_bsi (&si, fold_convert (type, fd->loop.n1),
3892 true, NULL_TREE, true, BSI_SAME_STMT);
3893 fd->loop.n2
3894 = force_gimple_operand_bsi (&si, fold_convert (itype, fd->loop.n2),
3895 true, NULL_TREE, true, BSI_SAME_STMT);
3896 fd->loop.step
3897 = force_gimple_operand_bsi (&si, fold_convert (itype, fd->loop.step),
3898 true, NULL_TREE, true, BSI_SAME_STMT);
3899
3900 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
3901 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
3902 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
3903 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
3904 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
3905 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3906 fold_build1 (NEGATE_EXPR, itype, t),
3907 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
3908 else
3909 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
3910 t = fold_convert (itype, t);
3911 n = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
3912
3913 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
3914 q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
3915
3916 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
3917 t = fold_build2 (NE_EXPR, itype, t, n);
3918 t = fold_build2 (PLUS_EXPR, itype, q, t);
3919 q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
3920
3921 t = build2 (MULT_EXPR, itype, q, threadid);
3922 s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
3923
3924 t = fold_build2 (PLUS_EXPR, itype, s0, q);
3925 t = fold_build2 (MIN_EXPR, itype, t, n);
3926 e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
3927
3928 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
3929 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3930 bsi_insert_before (&si, t, BSI_SAME_STMT);
3931
3932 /* Remove the OMP_FOR statement. */
3933 bsi_remove (&si, true);
3934
3935 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3936 si = bsi_start (seq_start_bb);
3937
3938 t = fold_convert (itype, s0);
3939 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
3940 if (POINTER_TYPE_P (type))
3941 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
3942 fold_convert (sizetype, t));
3943 else
3944 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
3945 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3946 false, BSI_CONTINUE_LINKING);
3947 t = build_gimple_modify_stmt (fd->loop.v, t);
3948 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3949 if (gimple_in_ssa_p (cfun))
3950 SSA_NAME_DEF_STMT (fd->loop.v) = t;
3951
3952 t = fold_convert (itype, e0);
3953 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
3954 if (POINTER_TYPE_P (type))
3955 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
3956 fold_convert (sizetype, t));
3957 else
3958 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
3959 e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3960 false, BSI_CONTINUE_LINKING);
3961
3962 /* The code controlling the sequential loop replaces the OMP_CONTINUE. */
3963 si = bsi_last (cont_bb);
3964 t = bsi_stmt (si);
3965 gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
3966 vmain = TREE_OPERAND (t, 1);
3967 vback = TREE_OPERAND (t, 0);
3968
3969 if (POINTER_TYPE_P (type))
3970 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3971 fold_convert (sizetype, fd->loop.step));
3972 else
3973 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3974 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3975 true, BSI_SAME_STMT);
3976 t = build_gimple_modify_stmt (vback, t);
3977 bsi_insert_before (&si, t, BSI_SAME_STMT);
3978 if (gimple_in_ssa_p (cfun))
3979 SSA_NAME_DEF_STMT (vback) = t;
3980
3981 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
3982 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3983 bsi_insert_before (&si, t, BSI_SAME_STMT);
3984
3985 /* Remove the OMP_CONTINUE statement. */
3986 bsi_remove (&si, true);
3987
3988 /* Replace the OMP_RETURN with a barrier, or nothing. */
3989 si = bsi_last (exit_bb);
3990 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
3991 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3992 false, BSI_SAME_STMT);
3993 bsi_remove (&si, true);
3994
3995 /* Connect all the blocks. */
3996 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
3997 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
3998
3999 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4000 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4001
4002 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4003 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4004 recompute_dominator (CDI_DOMINATORS, body_bb));
4005 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4006 recompute_dominator (CDI_DOMINATORS, fin_bb));
4007 }
4008
4009
4010 /* A subroutine of expand_omp_for. Generate code for a parallel
4011 loop with static schedule and a specified chunk size. Given
4012 parameters:
4013
4014 for (V = N1; V cond N2; V += STEP) BODY;
4015
4016 where COND is "<" or ">", we generate pseudocode
4017
4018 if (cond is <)
4019 adj = STEP - 1;
4020 else
4021 adj = STEP + 1;
4022 if ((__typeof (V)) -1 > 0 && cond is >)
4023 n = -(adj + N2 - N1) / -STEP;
4024 else
4025 n = (adj + N2 - N1) / STEP;
4026 trip = 0;
4027 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4028 here so that V is defined
4029 if the loop is not entered
4030 L0:
4031 s0 = (trip * nthreads + threadid) * CHUNK;
4032 e0 = min(s0 + CHUNK, n);
4033 if (s0 < n) goto L1; else goto L4;
4034 L1:
4035 V = s0 * STEP + N1;
4036 e = e0 * STEP + N1;
4037 L2:
4038 BODY;
4039 V += STEP;
4040 if (V cond e) goto L2; else goto L3;
4041 L3:
4042 trip += 1;
4043 goto L0;
4044 L4:
4045 */
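/* A worked example of the chunked schedule above (illustrative only):
   for "for (V = 0; V < 10; V++)" with CHUNK = 2 and 4 threads, n = 10.
   Thread 0 runs trip 0 with s0 = 0, e0 = 2 (iterations 0..1), then
   trip 1 with s0 = 8, e0 = 10 (iterations 8..9), and stops at trip 2
   because s0 = 16 >= n.  Thread 1 runs only trip 0 with s0 = 2,
   e0 = 4 (iterations 2..3), since its trip 1 gives s0 = 10 >= n.  */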
4046
4047 static void
4048 expand_omp_for_static_chunk (struct omp_region *region,
4049 struct omp_for_data *fd)
4050 {
4051 tree n, s0, e0, e, t, phi, nphi, args;
4052 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4053 tree type, itype, cont, v_main, v_back, v_extra;
4054 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4055 basic_block trip_update_bb, cont_bb, fin_bb;
4056 block_stmt_iterator si;
4057 edge se, re, ene;
4058
4059 itype = type = TREE_TYPE (fd->loop.v);
4060 if (POINTER_TYPE_P (type))
4061 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4062
4063 entry_bb = region->entry;
4064 se = split_block (entry_bb, last_stmt (entry_bb));
4065 entry_bb = se->src;
4066 iter_part_bb = se->dest;
4067 cont_bb = region->cont;
4068 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4069 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4070 == FALLTHRU_EDGE (cont_bb)->dest);
4071 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4072 body_bb = single_succ (seq_start_bb);
4073 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4074 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4075 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4076 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4077 exit_bb = region->exit;
4078
4079 /* Trip and adjustment setup goes in ENTRY_BB. */
4080 si = bsi_last (entry_bb);
4081 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
4082
4083 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4084 t = fold_convert (itype, t);
4085 nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
4086 true, BSI_SAME_STMT);
4087
4088 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4089 t = fold_convert (itype, t);
4090 threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
4091 true, BSI_SAME_STMT);
4092
4093 fd->loop.n1
4094 = force_gimple_operand_bsi (&si, fold_convert (type, fd->loop.n1),
4095 true, NULL_TREE, true, BSI_SAME_STMT);
4096 fd->loop.n2
4097 = force_gimple_operand_bsi (&si, fold_convert (itype, fd->loop.n2),
4098 true, NULL_TREE, true, BSI_SAME_STMT);
4099 fd->loop.step
4100 = force_gimple_operand_bsi (&si, fold_convert (itype, fd->loop.step),
4101 true, NULL_TREE, true, BSI_SAME_STMT);
4102 fd->chunk_size
4103 = force_gimple_operand_bsi (&si, fold_convert (itype, fd->chunk_size),
4104 true, NULL_TREE, true, BSI_SAME_STMT);
4105
4106 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4107 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4108 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4109 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4110 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4111 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4112 fold_build1 (NEGATE_EXPR, itype, t),
4113 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4114 else
4115 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4116 t = fold_convert (itype, t);
4117 n = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
4118 true, BSI_SAME_STMT);
4119
4120 trip_var = create_tmp_var (itype, ".trip");
4121 if (gimple_in_ssa_p (cfun))
4122 {
4123 add_referenced_var (trip_var);
4124 trip_init = make_ssa_name (trip_var, NULL_TREE);
4125 trip_main = make_ssa_name (trip_var, NULL_TREE);
4126 trip_back = make_ssa_name (trip_var, NULL_TREE);
4127 }
4128 else
4129 {
4130 trip_init = trip_var;
4131 trip_main = trip_var;
4132 trip_back = trip_var;
4133 }
4134
4135 t = build_gimple_modify_stmt (trip_init, build_int_cst (itype, 0));
4136 bsi_insert_before (&si, t, BSI_SAME_STMT);
4137 if (gimple_in_ssa_p (cfun))
4138 SSA_NAME_DEF_STMT (trip_init) = t;
4139
4140 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4141 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4142 if (POINTER_TYPE_P (type))
4143 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4144 fold_convert (sizetype, t));
4145 else
4146 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4147 v_extra = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
4148 true, BSI_SAME_STMT);
4149
4150 /* Remove the OMP_FOR. */
4151 bsi_remove (&si, true);
4152
4153 /* Iteration space partitioning goes in ITER_PART_BB. */
4154 si = bsi_last (iter_part_bb);
4155
4156 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4157 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4158 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4159 s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
4160 false, BSI_CONTINUE_LINKING);
4161
4162 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4163 t = fold_build2 (MIN_EXPR, itype, t, n);
4164 e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
4165 false, BSI_CONTINUE_LINKING);
4166
4167 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4168 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
4169 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
4170
4171 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4172 si = bsi_start (seq_start_bb);
4173
4174 t = fold_convert (itype, s0);
4175 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4176 if (POINTER_TYPE_P (type))
4177 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4178 fold_convert (sizetype, t));
4179 else
4180 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4181 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
4182 false, BSI_CONTINUE_LINKING);
4183 t = build_gimple_modify_stmt (fd->loop.v, t);
4184 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
4185 if (gimple_in_ssa_p (cfun))
4186 SSA_NAME_DEF_STMT (fd->loop.v) = t;
4187
4188 t = fold_convert (itype, e0);
4189 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4190 if (POINTER_TYPE_P (type))
4191 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4192 fold_convert (sizetype, t));
4193 else
4194 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4195 e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
4196 false, BSI_CONTINUE_LINKING);
4197
4198 /* The code controlling the sequential loop goes in CONT_BB,
4199 replacing the OMP_CONTINUE. */
4200 si = bsi_last (cont_bb);
4201 cont = bsi_stmt (si);
4202 gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
4203 v_main = TREE_OPERAND (cont, 1);
4204 v_back = TREE_OPERAND (cont, 0);
4205
4206 if (POINTER_TYPE_P (type))
4207 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4208 fold_convert (sizetype, fd->loop.step));
4209 else
4210 t = build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4211 t = build_gimple_modify_stmt (v_back, t);
4212 bsi_insert_before (&si, t, BSI_SAME_STMT);
4213 if (gimple_in_ssa_p (cfun))
4214 SSA_NAME_DEF_STMT (v_back) = t;
4215
4216 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4217 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
4218 bsi_insert_before (&si, t, BSI_SAME_STMT);
4219
4220 /* Remove OMP_CONTINUE. */
4221 bsi_remove (&si, true);
4222
4223 /* Trip update code goes into TRIP_UPDATE_BB. */
4224 si = bsi_start (trip_update_bb);
4225
4226 t = build_int_cst (itype, 1);
4227 t = build2 (PLUS_EXPR, itype, trip_main, t);
4228 t = build_gimple_modify_stmt (trip_back, t);
4229 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
4230 if (gimple_in_ssa_p (cfun))
4231 SSA_NAME_DEF_STMT (trip_back) = t;
4232
4233 /* Replace the OMP_RETURN with a barrier, or nothing. */
4234 si = bsi_last (exit_bb);
4235 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
4236 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
4237 false, BSI_SAME_STMT);
4238 bsi_remove (&si, true);
4239
4240 /* Connect the new blocks. */
4241 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4242 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4243
4244 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4245 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4246
4247 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4248
4249 if (gimple_in_ssa_p (cfun))
4250 {
4251 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4252 remove arguments of the phi nodes in fin_bb. We need to create
4253 appropriate phi nodes in iter_part_bb instead. */
4254 se = single_pred_edge (fin_bb);
4255 re = single_succ_edge (trip_update_bb);
4256 ene = single_succ_edge (entry_bb);
4257
4258 args = PENDING_STMT (re);
4259 PENDING_STMT (re) = NULL_TREE;
4260 for (phi = phi_nodes (fin_bb);
4261 phi && args;
4262 phi = PHI_CHAIN (phi), args = TREE_CHAIN (args))
4263 {
4264 t = PHI_RESULT (phi);
4265 gcc_assert (t == TREE_PURPOSE (args));
4266 nphi = create_phi_node (t, iter_part_bb);
4267 SSA_NAME_DEF_STMT (t) = nphi;
4268
4269 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4270 /* A special case -- fd->loop.v is not yet computed in
4271 	     iter_part_bb; we need to use v_extra instead.  */
4272 if (t == fd->loop.v)
4273 t = v_extra;
4274 add_phi_arg (nphi, t, ene);
4275 add_phi_arg (nphi, TREE_VALUE (args), re);
4276 }
4277 gcc_assert (!phi && !args);
4278 while ((phi = phi_nodes (fin_bb)) != NULL_TREE)
4279 remove_phi_node (phi, NULL_TREE, false);
4280
4281 /* Make phi node for trip. */
4282 phi = create_phi_node (trip_main, iter_part_bb);
4283 SSA_NAME_DEF_STMT (trip_main) = phi;
4284 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4285 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
4286 }
4287
4288 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4289 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4290 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4291 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4292 recompute_dominator (CDI_DOMINATORS, fin_bb));
4293 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4294 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4295 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4296 recompute_dominator (CDI_DOMINATORS, body_bb));
4297 }
4298
4299
4300 /* Expand the OpenMP loop defined by REGION. */
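/* For loops not handled by the specialized static expanders above, the
   libgomp entry point is picked by index arithmetic (illustrative
   summary): fn_index is the schedule kind (runtime mapped to 3) plus 4
   when an ordered clause is present, so e.g. schedule(dynamic) selects
   GOMP_loop_dynamic_start/next and schedule(dynamic) ordered selects the
   GOMP_loop_ordered_dynamic_* variants; loops whose iteration type is
   unsigned long long are shifted to the matching GOMP_loop_ull_*
   builtins.  */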
4301
4302 static void
4303 expand_omp_for (struct omp_region *region)
4304 {
4305 struct omp_for_data fd;
4306 struct omp_for_data_loop *loops;
4307
4308 loops
4309 = (struct omp_for_data_loop *)
4310 alloca (TREE_VEC_LENGTH (OMP_FOR_INIT (last_stmt (region->entry)))
4311 * sizeof (struct omp_for_data_loop));
4312
4313 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4314 region->sched_kind = fd.sched_kind;
4315
4316 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4317 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4318 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4319 if (region->cont)
4320 {
4321 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4322 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4323 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4324 }
4325
4326 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4327 && !fd.have_ordered
4328 && fd.collapse == 1
4329 && region->cont != NULL)
4330 {
4331 if (fd.chunk_size == NULL)
4332 expand_omp_for_static_nochunk (region, &fd);
4333 else
4334 expand_omp_for_static_chunk (region, &fd);
4335 }
4336 else
4337 {
4338 int fn_index, start_ix, next_ix;
4339
4340 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4341 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4342 ? 3 : fd.sched_kind;
4343 fn_index += fd.have_ordered * 4;
4344 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4345 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4346 if (fd.iter_type == long_long_unsigned_type_node)
4347 {
4348 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4349 - BUILT_IN_GOMP_LOOP_STATIC_START;
4350 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4351 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4352 }
4353 expand_omp_for_generic (region, &fd, start_ix, next_ix);
4354 }
4355
4356 update_ssa (TODO_update_ssa_only_virtuals);
4357 }
4358
4359
4360 /* Expand code for an OpenMP sections directive.  In pseudocode, we generate
4361
4362 v = GOMP_sections_start (n);
4363 L0:
4364 switch (v)
4365 {
4366 case 0:
4367 goto L2;
4368 case 1:
4369 section 1;
4370 goto L1;
4371 case 2:
4372 ...
4373 case n:
4374 ...
4375 default:
4376 abort ();
4377 }
4378 L1:
4379 v = GOMP_sections_next ();
4380 goto L0;
4381 L2:
4382 reduction;
4383
4384 If this is a combined parallel sections, replace the call to
4385     GOMP_sections_start with a call to GOMP_sections_next.  */
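/* For example (illustrative only), a sections construct with two sections
   and a reachable exit emits "v = GOMP_sections_start (2)"; the switch
   then sends v == 1 and v == 2 to the respective section bodies, v == 0
   jumps past the construct, and after finishing a section each thread
   obtains its next v from GOMP_sections_next () and loops back to the
   switch.  */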
4386
4387 static void
4388 expand_omp_sections (struct omp_region *region)
4389 {
4390 tree label_vec, l1, l2, t, u, sections_stmt, vin, vmain, vnext, cont;
4391 unsigned i, casei, len;
4392 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4393 block_stmt_iterator si;
4394 edge_iterator ei;
4395 edge e;
4396 struct omp_region *inner;
4397 bool exit_reachable = region->cont != NULL;
4398
4399 gcc_assert (exit_reachable == (region->exit != NULL));
4400 entry_bb = region->entry;
4401 l0_bb = single_succ (entry_bb);
4402 l1_bb = region->cont;
4403 l2_bb = region->exit;
4404 if (exit_reachable)
4405 {
4406 if (single_pred (l2_bb) == l0_bb)
4407 l2 = tree_block_label (l2_bb);
4408 else
4409 {
4410 /* This can happen if there are reductions. */
4411 len = EDGE_COUNT (l0_bb->succs);
4412 gcc_assert (len > 0);
4413 e = EDGE_SUCC (l0_bb, len - 1);
4414 si = bsi_last (e->dest);
4415 l2 = NULL_TREE;
4416 if (bsi_end_p (si) || TREE_CODE (bsi_stmt (si)) != OMP_SECTION)
4417 l2 = tree_block_label (e->dest);
4418 else
4419 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4420 {
4421 si = bsi_last (e->dest);
4422 if (bsi_end_p (si) || TREE_CODE (bsi_stmt (si)) != OMP_SECTION)
4423 {
4424 l2 = tree_block_label (e->dest);
4425 break;
4426 }
4427 }
4428 }
4429 default_bb = create_empty_bb (l1_bb->prev_bb);
4430 l1 = tree_block_label (l1_bb);
4431 }
4432 else
4433 {
4434 default_bb = create_empty_bb (l0_bb);
4435 l1 = NULL_TREE;
4436 l2 = tree_block_label (default_bb);
4437 }
4438
4439 /* We will build a switch() with enough cases for all the
4440      OMP_SECTION regions, a '0' case to handle the case where no more
4441      work remains, and a default case to abort if something goes wrong.  */
4442 len = EDGE_COUNT (l0_bb->succs);
4443 label_vec = make_tree_vec (len + 1);
4444
4445 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4446 OMP_SECTIONS statement. */
4447 si = bsi_last (entry_bb);
4448 sections_stmt = bsi_stmt (si);
4449 gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS);
4450 vin = OMP_SECTIONS_CONTROL (sections_stmt);
4451 if (!is_combined_parallel (region))
4452 {
4453 /* If we are not inside a combined parallel+sections region,
4454 call GOMP_sections_start. */
4455 t = build_int_cst (unsigned_type_node,
4456 exit_reachable ? len - 1 : len);
4457 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4458 t = build_call_expr (u, 1, t);
4459 }
4460 else
4461 {
4462 /* Otherwise, call GOMP_sections_next. */
4463 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4464 t = build_call_expr (u, 0);
4465 }
4466 t = build_gimple_modify_stmt (vin, t);
4467 bsi_insert_after (&si, t, BSI_SAME_STMT);
4468 if (gimple_in_ssa_p (cfun))
4469 SSA_NAME_DEF_STMT (vin) = t;
4470 bsi_remove (&si, true);
4471
4472 /* The switch() statement replacing OMP_SECTIONS_SWITCH goes in L0_BB. */
4473 si = bsi_last (l0_bb);
4474 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTIONS_SWITCH);
4475 if (exit_reachable)
4476 {
4477 cont = last_stmt (l1_bb);
4478 gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
4479 vmain = TREE_OPERAND (cont, 1);
4480 vnext = TREE_OPERAND (cont, 0);
4481 }
4482 else
4483 {
4484 vmain = vin;
4485 vnext = NULL_TREE;
4486 }
4487
4488 t = build3 (SWITCH_EXPR, void_type_node, vmain, NULL, label_vec);
4489 bsi_insert_after (&si, t, BSI_SAME_STMT);
4490 bsi_remove (&si, true);
4491
4492 i = 0;
4493 if (exit_reachable)
4494 {
4495 t = build3 (CASE_LABEL_EXPR, void_type_node,
4496 build_int_cst (unsigned_type_node, 0), NULL, l2);
4497 TREE_VEC_ELT (label_vec, 0) = t;
4498 i++;
4499 }
4500
4501 /* Convert each OMP_SECTION into a CASE_LABEL_EXPR. */
4502 for (inner = region->inner, casei = 1;
4503 inner;
4504 inner = inner->next, i++, casei++)
4505 {
4506 basic_block s_entry_bb, s_exit_bb;
4507
4508 /* Skip optional reduction region. */
4509 if (inner->type == OMP_ATOMIC_LOAD)
4510 {
4511 --i;
4512 --casei;
4513 continue;
4514 }
4515
4516 s_entry_bb = inner->entry;
4517 s_exit_bb = inner->exit;
4518
4519 t = tree_block_label (s_entry_bb);
4520 u = build_int_cst (unsigned_type_node, casei);
4521 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4522 TREE_VEC_ELT (label_vec, i) = u;
4523
4524 si = bsi_last (s_entry_bb);
4525 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTION);
4526 gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si)));
4527 bsi_remove (&si, true);
4528 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4529
4530 if (s_exit_bb == NULL)
4531 continue;
4532
4533 si = bsi_last (s_exit_bb);
4534 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
4535 bsi_remove (&si, true);
4536
4537 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4538 }
4539
4540 /* Error handling code goes in DEFAULT_BB. */
4541 t = tree_block_label (default_bb);
4542 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4543 TREE_VEC_ELT (label_vec, len) = u;
4544 make_edge (l0_bb, default_bb, 0);
4545
4546 si = bsi_start (default_bb);
4547 t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
4548 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
4549
4550 if (exit_reachable)
4551 {
4552 /* Code to get the next section goes in L1_BB. */
4553 si = bsi_last (l1_bb);
4554 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
4555
4556 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4557 t = build_gimple_modify_stmt (vnext, t);
4558 bsi_insert_after (&si, t, BSI_SAME_STMT);
4559 if (gimple_in_ssa_p (cfun))
4560 SSA_NAME_DEF_STMT (vnext) = t;
4561 bsi_remove (&si, true);
4562
4563 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4564
4565 /* Cleanup function replaces OMP_RETURN in EXIT_BB. */
4566 si = bsi_last (l2_bb);
4567 if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
4568 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4569 else
4570 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4571 t = build_call_expr (t, 0);
4572 bsi_insert_after (&si, t, BSI_SAME_STMT);
4573 bsi_remove (&si, true);
4574 }
4575
4576 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4577 }
4578
4579
4580 /* Expand code for an OpenMP single directive. We've already expanded
4581    much of the code; here we simply place the GOMP_barrier call.  */
4582
4583 static void
4584 expand_omp_single (struct omp_region *region)
4585 {
4586 basic_block entry_bb, exit_bb;
4587 block_stmt_iterator si;
4588 bool need_barrier = false;
4589
4590 entry_bb = region->entry;
4591 exit_bb = region->exit;
4592
4593 si = bsi_last (entry_bb);
4594 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4595 be removed. We need to ensure that the thread that entered the single
4596 does not exit before the data is copied out by the other threads. */
4597 if (find_omp_clause (OMP_SINGLE_CLAUSES (bsi_stmt (si)),
4598 OMP_CLAUSE_COPYPRIVATE))
4599 need_barrier = true;
4600 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE);
4601 bsi_remove (&si, true);
4602 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4603
4604 si = bsi_last (exit_bb);
4605 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier)
4606 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
4607 false, BSI_SAME_STMT);
4608 bsi_remove (&si, true);
4609 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4610 }
4611
4612
4613 /* Generic expansion for OpenMP synchronization directives: master,
4614 ordered and critical. All we need to do here is remove the entry
4615 and exit markers for REGION. */
4616
4617 static void
4618 expand_omp_synch (struct omp_region *region)
4619 {
4620 basic_block entry_bb, exit_bb;
4621 block_stmt_iterator si;
4622
4623 entry_bb = region->entry;
4624 exit_bb = region->exit;
4625
4626 si = bsi_last (entry_bb);
4627 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE
4628 || TREE_CODE (bsi_stmt (si)) == OMP_MASTER
4629 || TREE_CODE (bsi_stmt (si)) == OMP_ORDERED
4630 || TREE_CODE (bsi_stmt (si)) == OMP_CRITICAL);
4631 bsi_remove (&si, true);
4632 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4633
4634 if (exit_bb)
4635 {
4636 si = bsi_last (exit_bb);
4637 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
4638 bsi_remove (&si, true);
4639 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4640 }
4641 }
4642
4643 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4644 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4645 size of the data type, and thus usable to find the index of the builtin
4646 decl. Returns false if the expression is not of the proper form. */
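/* For example (illustrative only), "#pragma omp atomic x += 3;" where x
   is a 4-byte integer (INDEX == 2) is rewritten into a call to
   __sync_fetch_and_add_4 (&x, 3), provided the target implements the
   corresponding sync_add pattern.  */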
4647
4648 static bool
4649 expand_omp_atomic_fetch_op (basic_block load_bb,
4650 tree addr, tree loaded_val,
4651 tree stored_val, int index)
4652 {
4653 enum built_in_function base;
4654 tree decl, itype, call;
4655 enum insn_code *optab;
4656 tree rhs;
4657 basic_block store_bb = single_succ (load_bb);
4658 block_stmt_iterator bsi;
4659 tree stmt;
4660
4661 /* We expect to find the following sequences:
4662
4663 load_bb:
4664 OMP_ATOMIC_LOAD (tmp, mem)
4665
4666 store_bb:
4667 val = tmp OP something; (or: something OP tmp)
4668        OMP_ATOMIC_STORE (val)
4669
4670 ???FIXME: Allow a more flexible sequence.
4671 Perhaps use data flow to pick the statements.
4672
4673 */
4674
4675 bsi = bsi_after_labels (store_bb);
4676 stmt = bsi_stmt (bsi);
4677 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
4678 return false;
4679 bsi_next (&bsi);
4680 if (TREE_CODE (bsi_stmt (bsi)) != OMP_ATOMIC_STORE)
4681 return false;
4682
4683 if (!operand_equal_p (GIMPLE_STMT_OPERAND (stmt, 0), stored_val, 0))
4684 return false;
4685
4686 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
4687
4688 /* Check for one of the supported fetch-op operations. */
4689 switch (TREE_CODE (rhs))
4690 {
4691 case PLUS_EXPR:
4692 case POINTER_PLUS_EXPR:
4693 base = BUILT_IN_FETCH_AND_ADD_N;
4694 optab = sync_add_optab;
4695 break;
4696 case MINUS_EXPR:
4697 base = BUILT_IN_FETCH_AND_SUB_N;
4698 optab = sync_add_optab;
4699 break;
4700 case BIT_AND_EXPR:
4701 base = BUILT_IN_FETCH_AND_AND_N;
4702 optab = sync_and_optab;
4703 break;
4704 case BIT_IOR_EXPR:
4705 base = BUILT_IN_FETCH_AND_OR_N;
4706 optab = sync_ior_optab;
4707 break;
4708 case BIT_XOR_EXPR:
4709 base = BUILT_IN_FETCH_AND_XOR_N;
4710 optab = sync_xor_optab;
4711 break;
4712 default:
4713 return false;
4714 }
4715 /* Make sure the expression is of the proper form. */
4716 if (operand_equal_p (TREE_OPERAND (rhs, 0), loaded_val, 0))
4717 rhs = TREE_OPERAND (rhs, 1);
4718 else if (commutative_tree_code (TREE_CODE (rhs))
4719 && operand_equal_p (TREE_OPERAND (rhs, 1), loaded_val, 0))
4720 rhs = TREE_OPERAND (rhs, 0);
4721 else
4722 return false;
4723
4724 decl = built_in_decls[base + index + 1];
4725 itype = TREE_TYPE (TREE_TYPE (decl));
4726
4727 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4728 return false;
4729
4730 bsi = bsi_last (load_bb);
4731 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
4732 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
4733 force_gimple_operand_bsi (&bsi, call, true, NULL_TREE, true, BSI_SAME_STMT);
4734 bsi_remove (&bsi, true);
4735
4736 bsi = bsi_last (store_bb);
4737 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
4738 bsi_remove (&bsi, true);
4739 bsi = bsi_last (store_bb);
4740 bsi_remove (&bsi, true);
4741
4742 if (gimple_in_ssa_p (cfun))
4743 update_ssa (TODO_update_ssa_no_phi);
4744
4745 return true;
4746 }
4747
4748 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4749
4750 oldval = *addr;
4751 repeat:
4752 newval = rhs; // with oldval replacing *addr in rhs
4753 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
4754 if (oldval != newval)
4755 goto repeat;
4756
4757 INDEX is log2 of the size of the data type, and thus usable to find the
4758 index of the builtin decl. */
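/* This loop is used e.g. for operations with no fetch-and-op builtin,
   such as "#pragma omp atomic x *= 2;", and for floating-point types:
   a 4-byte float is view-converted to a 4-byte integer so that
   __sync_val_compare_and_swap_4 can drive the retry loop (illustrative
   examples, assuming the target provides the compare-and-swap
   pattern).  */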
4759
4760 static bool
4761 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4762 tree addr, tree loaded_val, tree stored_val,
4763 int index)
4764 {
4765 tree loadedi, storedi, initial, new_storedi, old_vali;
4766 tree type, itype, cmpxchg, iaddr;
4767 block_stmt_iterator bsi;
4768 basic_block loop_header = single_succ (load_bb);
4769 tree phi, x;
4770 edge e;
4771
4772 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
4773 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4774 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
4775
4776 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
4777 return false;
4778
4779 /* Load the initial value, replacing the OMP_ATOMIC_LOAD. */
4780 bsi = bsi_last (load_bb);
4781 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
4782 /* For floating-point values, we'll need to view-convert them to integers
4783 so that we can perform the atomic compare and swap. Simplify the
4784 following code by always setting up the "i"ntegral variables. */
4785 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
4786 {
4787 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
4788 x = build_gimple_modify_stmt (iaddr,
4789 fold_convert (TREE_TYPE (iaddr), addr));
4790 force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
4791 true, BSI_SAME_STMT);
4792 DECL_NO_TBAA_P (iaddr) = 1;
4793 DECL_POINTER_ALIAS_SET (iaddr) = 0;
4794 loadedi = create_tmp_var (itype, NULL);
4795 if (gimple_in_ssa_p (cfun))
4796 {
4797 add_referenced_var (iaddr);
4798 add_referenced_var (loadedi);
4799 loadedi = make_ssa_name (loadedi, NULL);
4800 }
4801 }
4802 else
4803 {
4804 iaddr = addr;
4805 loadedi = loaded_val;
4806 }
4807 initial = force_gimple_operand_bsi (&bsi, build_fold_indirect_ref (iaddr),
4808 true, NULL_TREE, true, BSI_SAME_STMT);
4809
4810 /* Move the value to the LOADEDI temporary. */
4811 if (gimple_in_ssa_p (cfun))
4812 {
4813 gcc_assert (phi_nodes (loop_header) == NULL_TREE);
4814 phi = create_phi_node (loadedi, loop_header);
4815 SSA_NAME_DEF_STMT (loadedi) = phi;
4816 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
4817 initial);
4818 }
4819 else
4820 bsi_insert_before (&bsi,
4821 build_gimple_modify_stmt (loadedi, initial),
4822 BSI_SAME_STMT);
4823 if (loadedi != loaded_val)
4824 {
4825 block_stmt_iterator bsi2;
4826
4827 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
4828 bsi2 = bsi_start (loop_header);
4829 if (gimple_in_ssa_p (cfun))
4830 {
4831 x = force_gimple_operand_bsi (&bsi2, x, true, NULL_TREE,
4832 true, BSI_SAME_STMT);
4833 x = build_gimple_modify_stmt (loaded_val, x);
4834 bsi_insert_before (&bsi2, x, BSI_SAME_STMT);
4835 SSA_NAME_DEF_STMT (loaded_val) = x;
4836 }
4837 else
4838 {
4839 x = build_gimple_modify_stmt (loaded_val, x);
4840 force_gimple_operand_bsi (&bsi2, x, true, NULL_TREE,
4841 true, BSI_SAME_STMT);
4842 }
4843 }
4844 bsi_remove (&bsi, true);
4845
4846 bsi = bsi_last (store_bb);
4847 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
4848
4849 if (iaddr == addr)
4850 storedi = stored_val;
4851 else
4852 storedi =
4853 force_gimple_operand_bsi (&bsi,
4854 build1 (VIEW_CONVERT_EXPR, itype,
4855 stored_val), true, NULL_TREE, true,
4856 BSI_SAME_STMT);
4857
4858 /* Build the compare&swap statement. */
4859 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
4860 new_storedi = force_gimple_operand_bsi (&bsi,
4861 fold_convert (itype, new_storedi),
4862 true, NULL_TREE,
4863 true, BSI_SAME_STMT);
4864
4865 if (gimple_in_ssa_p (cfun))
4866 old_vali = loadedi;
4867 else
4868 {
4869 old_vali = create_tmp_var (itype, NULL);
4870 if (gimple_in_ssa_p (cfun))
4871 add_referenced_var (old_vali);
4872 x = build_gimple_modify_stmt (old_vali, loadedi);
4873 force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
4874 true, BSI_SAME_STMT);
4875
4876 x = build_gimple_modify_stmt (loadedi, new_storedi);
4877 force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
4878 true, BSI_SAME_STMT);
4879 }
4880
4881 /* Note that we always perform the comparison as an integer, even for
4882 floating point. This allows the atomic operation to properly
4883 succeed even with NaNs and -0.0. */
4884 x = build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali);
4885 x = build3 (COND_EXPR, void_type_node, x, NULL_TREE, NULL_TREE);
4886 bsi_insert_before (&bsi, x, BSI_SAME_STMT);
4887
4888 /* Update cfg. */
4889 e = single_succ_edge (store_bb);
4890 e->flags &= ~EDGE_FALLTHRU;
4891 e->flags |= EDGE_FALSE_VALUE;
4892
4893 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
4894
4895 /* Copy the new value to loadedi (we already did that before the condition
4896 if we are not in SSA). */
4897 if (gimple_in_ssa_p (cfun))
4898 {
4899 phi = phi_nodes (loop_header);
4900 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
4901 }
4902
4903 /* Remove OMP_ATOMIC_STORE. */
4904 bsi_remove (&bsi, true);
4905
4906 if (gimple_in_ssa_p (cfun))
4907 update_ssa (TODO_update_ssa_no_phi);
4908
4909 return true;
4910 }
4911
4912 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4913
4914 GOMP_atomic_start ();
4915 *addr = rhs;
4916 GOMP_atomic_end ();
4917
4918 The result is not globally atomic, but works so long as all parallel
4919 references are within #pragma omp atomic directives. According to
4920    responses received from omp@openmp.org, this appears to be within
4921    spec, which makes sense, since that's how several other compilers
4922    handle this situation as well.
4923 LOADED_VAL and ADDR are the operands of OMP_ATOMIC_LOAD we're expanding.
4924 STORED_VAL is the operand of the matching OMP_ATOMIC_STORE.
4925
4926 We replace
4927 OMP_ATOMIC_LOAD (loaded_val, addr) with
4928 loaded_val = *addr;
4929
4930 and replace
4931    OMP_ATOMIC_STORE (stored_val)  with
4932 *addr = stored_val;
4933 */
4934
4935 static bool
4936 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
4937 tree addr, tree loaded_val, tree stored_val)
4938 {
4939 block_stmt_iterator bsi;
4940 tree t;
4941
4942 bsi = bsi_last (load_bb);
4943 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
4944
4945 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
4946 t = build_function_call_expr (t, 0);
4947 force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
4948
4949 t = build_gimple_modify_stmt (loaded_val, build_fold_indirect_ref (addr));
4950 if (gimple_in_ssa_p (cfun))
4951 SSA_NAME_DEF_STMT (loaded_val) = t;
4952 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
4953 bsi_remove (&bsi, true);
4954
4955 bsi = bsi_last (store_bb);
4956 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
4957
4958 t = build_gimple_modify_stmt (build_fold_indirect_ref (unshare_expr (addr)),
4959 stored_val);
4960 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
4961
4962 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
4963 t = build_function_call_expr (t, 0);
4964 force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
4965 bsi_remove (&bsi, true);
4966
4967 if (gimple_in_ssa_p (cfun))
4968 update_ssa (TODO_update_ssa_no_phi);
4969 return true;
4970 }
4971
4972 /* Expand an OMP_ATOMIC statement. We try to expand
4973 using expand_omp_atomic_fetch_op. If it failed, we try to
4974 call expand_omp_atomic_pipeline, and if it fails too, the
4975 ultimate fallback is wrapping the operation in a mutex
4976 (expand_omp_atomic_mutex). REGION is the atomic region built
4977 by build_omp_regions_1(). */
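/* Summary of the dispatch below (illustrative examples): an aligned
   4-byte integer "x += 1" uses the fetch-and-op path; a double uses the
   compare-and-swap loop because it is not an integral or pointer type;
   and a type whose size is not a supported power of two, or that is not
   sufficiently aligned, falls back to the GOMP_atomic_start/end
   mutex.  */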
4978
4979 static void
4980 expand_omp_atomic (struct omp_region *region)
4981 {
4982 basic_block load_bb = region->entry, store_bb = region->exit;
4983 tree load = last_stmt (load_bb), store = last_stmt (store_bb);
4984 tree loaded_val = TREE_OPERAND (load, 0);
4985 tree addr = TREE_OPERAND (load, 1);
4986 tree stored_val = TREE_OPERAND (store, 0);
4987 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4988 HOST_WIDE_INT index;
4989
4990 /* Make sure the type is one of the supported sizes. */
4991 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
4992 index = exact_log2 (index);
4993 if (index >= 0 && index <= 4)
4994 {
4995 unsigned int align = TYPE_ALIGN_UNIT (type);
4996
4997 /* __sync builtins require strict data alignment. */
4998 if (exact_log2 (align) >= index)
4999 {
5000 /* When possible, use specialized atomic update functions. */
5001 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5002 && store_bb == single_succ (load_bb))
5003 {
5004 if (expand_omp_atomic_fetch_op (load_bb, addr,
5005 loaded_val, stored_val, index))
5006 return;
5007 }
5008
5009 /* If we don't have specialized __sync builtins, try and implement
5010 as a compare and swap loop. */
5011 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5012 loaded_val, stored_val, index))
5013 return;
5014 }
5015 }
5016
5017 /* The ultimate fallback is wrapping the operation in a mutex. */
5018 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5019 }
5020
5021
5022 /* Expand the parallel region tree rooted at REGION. Expansion
5023 proceeds in depth-first order. Innermost regions are expanded
5024 first. This way, parallel regions that require a new function to
5025 be created (e.g., OMP_PARALLEL) can be expanded without having any
5026 internal dependencies in their body. */
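/* For example, in "#pragma omp parallel" containing "#pragma omp for",
   the inner OMP_FOR region is expanded first, so by the time the
   enclosing OMP_PARALLEL is outlined into a child function by
   expand_omp_taskreg, the loop has already been rewritten into plain
   GIMPLE and libgomp calls.  */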
5027
5028 static void
5029 expand_omp (struct omp_region *region)
5030 {
5031 while (region)
5032 {
5033 /* First, determine whether this is a combined parallel+workshare
5034 region. */
5035 if (region->type == OMP_PARALLEL)
5036 determine_parallel_type (region);
5037
5038 if (region->inner)
5039 expand_omp (region->inner);
5040
5041 switch (region->type)
5042 {
5043 case OMP_PARALLEL:
5044 expand_omp_taskreg (region);
5045 break;
5046
5047 case OMP_TASK:
5048 expand_omp_taskreg (region);
5049 break;
5050
5051 case OMP_FOR:
5052 expand_omp_for (region);
5053 break;
5054
5055 case OMP_SECTIONS:
5056 expand_omp_sections (region);
5057 break;
5058
5059 case OMP_SECTION:
5060 /* Individual omp sections are handled together with their
5061 parent OMP_SECTIONS region. */
5062 break;
5063
5064 case OMP_SINGLE:
5065 expand_omp_single (region);
5066 break;
5067
5068 case OMP_MASTER:
5069 case OMP_ORDERED:
5070 case OMP_CRITICAL:
5071 expand_omp_synch (region);
5072 break;
5073
5074 case OMP_ATOMIC_LOAD:
5075 expand_omp_atomic (region);
5076 break;
5077
5078
5079 default:
5080 gcc_unreachable ();
5081 }
5082
5083 region = region->next;
5084 }
5085 }
5086
5087
5088 /* Helper for build_omp_regions. Scan the dominator tree starting at
5089 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5090    true, the function ends once a single tree is built (otherwise, a
5091    whole forest of OMP constructs may be built).  */
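/* For example, for "#pragma omp parallel" containing "#pragma omp for",
   the OMP_PARALLEL statement opens the root region, the nested OMP_FOR
   opens an inner region, the loop's OMP_CONTINUE and OMP_RETURN set that
   inner region's cont and exit blocks, and the parallel's own OMP_RETURN
   finally records the root region's exit.  */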
5092
5093 static void
5094 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5095 bool single_tree)
5096 {
5097 block_stmt_iterator si;
5098 tree stmt;
5099 basic_block son;
5100
5101 si = bsi_last (bb);
5102 if (!bsi_end_p (si) && OMP_DIRECTIVE_P (bsi_stmt (si)))
5103 {
5104 struct omp_region *region;
5105 enum tree_code code;
5106
5107 stmt = bsi_stmt (si);
5108 code = TREE_CODE (stmt);
5109 if (code == OMP_RETURN)
5110 {
5111 /* STMT is the return point out of region PARENT. Mark it
5112 as the exit point and make PARENT the immediately
5113 enclosing region. */
5114 gcc_assert (parent);
5115 region = parent;
5116 region->exit = bb;
5117 parent = parent->outer;
5118 }
5119 else if (code == OMP_ATOMIC_STORE)
5120 {
5121 /* OMP_ATOMIC_STORE is analogous to OMP_RETURN, but matches with
5122 OMP_ATOMIC_LOAD. */
5123 gcc_assert (parent);
5124 gcc_assert (parent->type == OMP_ATOMIC_LOAD);
5125 region = parent;
5126 region->exit = bb;
5127 parent = parent->outer;
5128 }
5129
5130 else if (code == OMP_CONTINUE)
5131 {
5132 gcc_assert (parent);
5133 parent->cont = bb;
5134 }
5135 else if (code == OMP_SECTIONS_SWITCH)
5136 {
5137 /* OMP_SECTIONS_SWITCH is part of OMP_SECTIONS, and we do nothing for
5138 it. */ ;
5139 }
5140 else
5141 {
5142 /* Otherwise, this directive becomes the parent for a new
5143 region. */
5144 region = new_omp_region (bb, code, parent);
5145 parent = region;
5146 }
5147 }
5148
5149 if (single_tree && !parent)
5150 return;
5151
5152 for (son = first_dom_son (CDI_DOMINATORS, bb);
5153 son;
5154 son = next_dom_son (CDI_DOMINATORS, son))
5155 build_omp_regions_1 (son, parent, single_tree);
5156 }
5157
5158 /* Builds the tree of OMP regions rooted at ROOT, storing it in
5159 root_omp_region. */
5160
5161 static void
5162 build_omp_regions_root (basic_block root)
5163 {
5164 gcc_assert (root_omp_region == NULL);
5165 build_omp_regions_1 (root, NULL, true);
5166 gcc_assert (root_omp_region != NULL);
5167 }
5168
5169 /* Expands omp construct (and its subconstructs) starting in HEAD. */
5170
5171 void
5172 omp_expand_local (basic_block head)
5173 {
5174 build_omp_regions_root (head);
5175 if (dump_file && (dump_flags & TDF_DETAILS))
5176 {
5177 fprintf (dump_file, "\nOMP region tree\n\n");
5178 dump_omp_region (dump_file, root_omp_region, 0);
5179 fprintf (dump_file, "\n");
5180 }
5181
5182 remove_exit_barriers (root_omp_region);
5183 expand_omp (root_omp_region);
5184
5185 free_omp_regions ();
5186 }
5187
5188 /* Scan the CFG and build a tree of OMP regions, storing it in
5189    root_omp_region.  */
5190
5191 static void
5192 build_omp_regions (void)
5193 {
5194 gcc_assert (root_omp_region == NULL);
5195 calculate_dominance_info (CDI_DOMINATORS);
5196 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5197 }
5198
5199
5200 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5201
5202 static unsigned int
5203 execute_expand_omp (void)
5204 {
5205 build_omp_regions ();
5206
5207 if (!root_omp_region)
5208 return 0;
5209
5210 if (dump_file)
5211 {
5212 fprintf (dump_file, "\nOMP region tree\n\n");
5213 dump_omp_region (dump_file, root_omp_region, 0);
5214 fprintf (dump_file, "\n");
5215 }
5216
5217 remove_exit_barriers (root_omp_region);
5218
5219 expand_omp (root_omp_region);
5220
5221 cleanup_tree_cfg ();
5222
5223 free_omp_regions ();
5224
5225 return 0;
5226 }
5227
5228 /* OMP expansion -- the default pass, run before creation of SSA form. */
5229
5230 static bool
5231 gate_expand_omp (void)
5232 {
5233 return (flag_openmp != 0 && errorcount == 0);
5234 }
5235
5236 struct gimple_opt_pass pass_expand_omp =
5237 {
5238 {
5239 GIMPLE_PASS,
5240 "ompexp", /* name */
5241 gate_expand_omp, /* gate */
5242 execute_expand_omp, /* execute */
5243 NULL, /* sub */
5244 NULL, /* next */
5245 0, /* static_pass_number */
5246 0, /* tv_id */
5247 PROP_gimple_any, /* properties_required */
5248 PROP_gimple_lomp, /* properties_provided */
5249 0, /* properties_destroyed */
5250 0, /* todo_flags_start */
5251 TODO_dump_func /* todo_flags_finish */
5252 }
5253 };
5254 \f
5255 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5256
5257 /* Lower the OpenMP sections directive in *STMT_P. */
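/* The lowered replacement is a BIND_EXPR whose body is laid out as:
   clause setup code (ilist), the OMP_SECTIONS statement itself, an
   OMP_SECTIONS_SWITCH, a nested bind holding each OMP_SECTION body
   terminated by an OMP_RETURN, an OMP_CONTINUE carrying the section
   control variable, reduction and destructor code, and a final
   OMP_RETURN whose nowait bit reflects the nowait clause.  */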
5258
5259 static void
5260 lower_omp_sections (tree *stmt_p, omp_context *ctx)
5261 {
5262 tree new_stmt, stmt, body, bind, block, ilist, olist, new_body, control;
5263 tree t, dlist;
5264 tree_stmt_iterator tsi;
5265 unsigned i, len;
5266
5267 stmt = *stmt_p;
5268
5269 push_gimplify_context ();
5270
5271 dlist = NULL;
5272 ilist = NULL;
5273 lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt), &ilist, &dlist, ctx);
5274
5275 tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
5276 for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi))
5277 continue;
5278
5279 tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
5280 body = alloc_stmt_list ();
5281 for (i = 0; i < len; i++, tsi_next (&tsi))
5282 {
5283 omp_context *sctx;
5284 tree sec_start, sec_end;
5285
5286 sec_start = tsi_stmt (tsi);
5287 sctx = maybe_lookup_ctx (sec_start);
5288 gcc_assert (sctx);
5289
5290 append_to_statement_list (sec_start, &body);
5291
5292 lower_omp (&OMP_SECTION_BODY (sec_start), sctx);
5293 append_to_statement_list (OMP_SECTION_BODY (sec_start), &body);
5294 OMP_SECTION_BODY (sec_start) = NULL;
5295
5296 if (i == len - 1)
5297 {
5298 tree l = alloc_stmt_list ();
5299 lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt), NULL,
5300 &l, ctx);
5301 append_to_statement_list (l, &body);
5302 OMP_SECTION_LAST (sec_start) = 1;
5303 }
5304
5305 sec_end = make_node (OMP_RETURN);
5306 append_to_statement_list (sec_end, &body);
5307 }
5308
5309 block = make_node (BLOCK);
5310 bind = build3 (BIND_EXPR, void_type_node, NULL, body, block);
5311
5312 olist = NULL_TREE;
5313 lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt), &olist, ctx);
5314
5315 pop_gimplify_context (NULL_TREE);
5316 record_vars_into (ctx->block_vars, ctx->cb.dst_fn);
5317
5318 new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
5319 TREE_SIDE_EFFECTS (new_stmt) = 1;
5320
5321 new_body = alloc_stmt_list ();
5322 append_to_statement_list (ilist, &new_body);
5323 append_to_statement_list (stmt, &new_body);
5324 append_to_statement_list (make_node (OMP_SECTIONS_SWITCH), &new_body);
5325 append_to_statement_list (bind, &new_body);
5326
5327 control = create_tmp_var (unsigned_type_node, ".section");
5328 t = build2 (OMP_CONTINUE, void_type_node, control, control);
5329 OMP_SECTIONS_CONTROL (stmt) = control;
5330 append_to_statement_list (t, &new_body);
5331
5332 append_to_statement_list (olist, &new_body);
5333 append_to_statement_list (dlist, &new_body);
5334
5335 maybe_catch_exception (&new_body);
5336
5337 t = make_node (OMP_RETURN);
5338 OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt),
5339 OMP_CLAUSE_NOWAIT);
5340 append_to_statement_list (t, &new_body);
5341
5342 BIND_EXPR_BODY (new_stmt) = new_body;
5343 OMP_SECTIONS_BODY (stmt) = NULL;
5344
5345 *stmt_p = new_stmt;
5346 }
5347
5348
5349 /* A subroutine of lower_omp_single. Expand the simple form of
5350 an OMP_SINGLE, without a copyprivate clause:
5351
5352 if (GOMP_single_start ())
5353 BODY;
5354 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5355
5356 FIXME. It may be better to delay expanding the logic of this until
5357 pass_expand_omp. The expanded logic may make the job more difficult
5358    for a synchronization analysis pass.  */
5359
5360 static void
5361 lower_omp_single_simple (tree single_stmt, tree *pre_p)
5362 {
5363 tree t;
5364
5365 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_START], 0);
5366 if (TREE_TYPE (t) != boolean_type_node)
5367 t = fold_build2 (NE_EXPR, boolean_type_node,
5368 t, build_int_cst (TREE_TYPE (t), 0));
5369 t = build3 (COND_EXPR, void_type_node, t,
5370 OMP_SINGLE_BODY (single_stmt), NULL);
5371 gimplify_and_add (t, pre_p);
5372 }
5373
5374
5375 /* A subroutine of lower_omp_single.  Expand the form of
5376    an OMP_SINGLE with a copyprivate clause:
5377
5378 #pragma omp single copyprivate (a, b, c)
5379
5380 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5381
5382 {
5383 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5384 {
5385 BODY;
5386 copyout.a = a;
5387 copyout.b = b;
5388 copyout.c = c;
5389 GOMP_single_copy_end (&copyout);
5390 }
5391 else
5392 {
5393 a = copyout_p->a;
5394 b = copyout_p->b;
5395 c = copyout_p->c;
5396 }
5397 GOMP_barrier ();
5398 }
5399
5400 FIXME. It may be better to delay expanding the logic of this until
5401 pass_expand_omp. The expanded logic may make the job more difficult
5402    for a synchronization analysis pass.  */
5403
5404 static void
5405 lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
5406 {
5407 tree ptr_type, t, l0, l1, l2, copyin_seq;
5408
5409 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5410
5411 ptr_type = build_pointer_type (ctx->record_type);
5412 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5413
5414 l0 = create_artificial_label ();
5415 l1 = create_artificial_label ();
5416 l2 = create_artificial_label ();
5417
5418 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5419 t = fold_convert (ptr_type, t);
5420 t = build_gimple_modify_stmt (ctx->receiver_decl, t);
5421 gimplify_and_add (t, pre_p);
5422
5423 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5424 build_int_cst (ptr_type, 0));
5425 t = build3 (COND_EXPR, void_type_node, t,
5426 build_and_jump (&l0), build_and_jump (&l1));
5427 gimplify_and_add (t, pre_p);
5428
5429 t = build1 (LABEL_EXPR, void_type_node, l0);
5430 gimplify_and_add (t, pre_p);
5431
5432 append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p);
5433
5434 copyin_seq = NULL;
5435 lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p,
5436 &copyin_seq, ctx);
5437
5438 t = build_fold_addr_expr (ctx->sender_decl);
5439 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
5440 gimplify_and_add (t, pre_p);
5441
5442 t = build_and_jump (&l2);
5443 gimplify_and_add (t, pre_p);
5444
5445 t = build1 (LABEL_EXPR, void_type_node, l1);
5446 gimplify_and_add (t, pre_p);
5447
5448 append_to_statement_list (copyin_seq, pre_p);
5449
5450 t = build1 (LABEL_EXPR, void_type_node, l2);
5451 gimplify_and_add (t, pre_p);
5452 }
5453
5454
5455 /* Lower code for an OpenMP single directive.  */
5456
5457 static void
5458 lower_omp_single (tree *stmt_p, omp_context *ctx)
5459 {
5460 tree t, bind, block, single_stmt = *stmt_p, dlist;
5461
5462 push_gimplify_context ();
5463
5464 block = make_node (BLOCK);
5465 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
5466 TREE_SIDE_EFFECTS (bind) = 1;
5467
5468 lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt),
5469 &BIND_EXPR_BODY (bind), &dlist, ctx);
5470 lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx);
5471
5472 append_to_statement_list (single_stmt, &BIND_EXPR_BODY (bind));
5473
5474 if (ctx->record_type)
5475 lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx);
5476 else
5477 lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind));
5478
5479 OMP_SINGLE_BODY (single_stmt) = NULL;
5480
5481 append_to_statement_list (dlist, &BIND_EXPR_BODY (bind));
5482
5483 maybe_catch_exception (&BIND_EXPR_BODY (bind));
5484
5485 t = make_node (OMP_RETURN);
5486 OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt),
5487 OMP_CLAUSE_NOWAIT);
5488 append_to_statement_list (t, &BIND_EXPR_BODY (bind));
5489
5490 pop_gimplify_context (bind);
5491
5492 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
5493 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
5494 }
5495
5496
5497 /* Lower code for an OpenMP master directive.  */
5498
5499 static void
5500 lower_omp_master (tree *stmt_p, omp_context *ctx)
5501 {
5502 tree bind, block, stmt = *stmt_p, lab = NULL, x;
5503
5504 push_gimplify_context ();
5505
5506 block = make_node (BLOCK);
5507 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
5508 TREE_SIDE_EFFECTS (bind) = 1;
5509
5510 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
5511
5512 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5513 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5514 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5515 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
5516
5517 lower_omp (&OMP_MASTER_BODY (stmt), ctx);
5518 maybe_catch_exception (&OMP_MASTER_BODY (stmt));
5519 append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind));
5520 OMP_MASTER_BODY (stmt) = NULL;
5521
5522 x = build1 (LABEL_EXPR, void_type_node, lab);
5523 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
5524
5525 x = make_node (OMP_RETURN);
5526 OMP_RETURN_NOWAIT (x) = 1;
5527 append_to_statement_list (x, &BIND_EXPR_BODY (bind));
5528
5529 pop_gimplify_context (bind);
5530
5531 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
5532 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
5533 }
5534
5535
5536 /* Lower code for an OpenMP ordered directive. */
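/* Illustrative shape of what is built below (a sketch, not the exact
   trees):

	OMP_ORDERED
	GOMP_ordered_start ();
	BODY;
	GOMP_ordered_end ();
	OMP_RETURN (nowait)  */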
5537
5538 static void
5539 lower_omp_ordered (tree *stmt_p, omp_context *ctx)
5540 {
5541 tree bind, block, stmt = *stmt_p, x;
5542
5543 push_gimplify_context ();
5544
5545 block = make_node (BLOCK);
5546 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
5547 TREE_SIDE_EFFECTS (bind) = 1;
5548
5549 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
5550
5551 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5552 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
5553
5554 lower_omp (&OMP_ORDERED_BODY (stmt), ctx);
5555 maybe_catch_exception (&OMP_ORDERED_BODY (stmt));
5556 append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));
5557 OMP_ORDERED_BODY (stmt) = NULL;
5558
5559 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5560 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
5561
5562 x = make_node (OMP_RETURN);
5563 OMP_RETURN_NOWAIT (x) = 1;
5564 append_to_statement_list (x, &BIND_EXPR_BODY (bind));
5565
5566 pop_gimplify_context (bind);
5567
5568 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
5569 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
5570 }
5571
5572
5573 /* Gimplify an OMP_CRITICAL statement.  This is a relatively simple
5574    substitution of a couple of function calls.  But the NAMED case
5575    requires that languages coordinate a symbol name, so it is best
5576    handled here in common code.  */
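/* As an illustration (the mutex name follows the ACONCAT pattern used
   below; this is a sketch, not the exact trees):

	#pragma omp critical (foo)  ->  GOMP_critical_name_start (&.gomp_critical_user_foo);
					BODY;
					GOMP_critical_name_end (&.gomp_critical_user_foo);

	#pragma omp critical        ->  GOMP_critical_start ();
					BODY;
					GOMP_critical_end ();  */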
5577
5578 static GTY((param1_is (tree), param2_is (tree)))
5579 splay_tree critical_name_mutexes;
5580
5581 static void
5582 lower_omp_critical (tree *stmt_p, omp_context *ctx)
5583 {
5584 tree bind, block, stmt = *stmt_p;
5585 tree t, lock, unlock, name;
5586
5587 name = OMP_CRITICAL_NAME (stmt);
5588 if (name)
5589 {
5590 tree decl;
5591 splay_tree_node n;
5592
5593 if (!critical_name_mutexes)
5594 critical_name_mutexes
5595 = splay_tree_new_ggc (splay_tree_compare_pointers);
5596
5597 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5598 if (n == NULL)
5599 {
5600 char *new_str;
5601
5602 decl = create_tmp_var_raw (ptr_type_node, NULL);
5603
5604 new_str = ACONCAT ((".gomp_critical_user_",
5605 IDENTIFIER_POINTER (name), NULL));
5606 DECL_NAME (decl) = get_identifier (new_str);
5607 TREE_PUBLIC (decl) = 1;
5608 TREE_STATIC (decl) = 1;
5609 DECL_COMMON (decl) = 1;
5610 DECL_ARTIFICIAL (decl) = 1;
5611 DECL_IGNORED_P (decl) = 1;
5612 varpool_finalize_decl (decl);
5613
5614 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5615 (splay_tree_value) decl);
5616 }
5617 else
5618 decl = (tree) n->value;
5619
5620 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5621 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
5622
5623 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5624 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
5625 }
5626 else
5627 {
5628 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5629 lock = build_call_expr (lock, 0);
5630
5631 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5632 unlock = build_call_expr (unlock, 0);
5633 }
5634
5635 push_gimplify_context ();
5636
5637 block = make_node (BLOCK);
5638 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
5639 TREE_SIDE_EFFECTS (bind) = 1;
5640
5641 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
5642
5643 gimplify_and_add (lock, &BIND_EXPR_BODY (bind));
5644
5645 lower_omp (&OMP_CRITICAL_BODY (stmt), ctx);
5646 maybe_catch_exception (&OMP_CRITICAL_BODY (stmt));
5647 append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind));
5648 OMP_CRITICAL_BODY (stmt) = NULL;
5649
5650 gimplify_and_add (unlock, &BIND_EXPR_BODY (bind));
5651
5652 t = make_node (OMP_RETURN);
5653 OMP_RETURN_NOWAIT (t) = 1;
5654 append_to_statement_list (t, &BIND_EXPR_BODY (bind));
5655
5656 pop_gimplify_context (bind);
5657 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
5658 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
5659 }
5660
5661
5662 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5663 for a lastprivate clause. Given a loop control predicate of (V
5664 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5665 is appended to *DLIST, iterator initialization is appended to
5666 *BODY_P. */
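/* For example (illustrative), given

	#pragma omp for lastprivate (x)
	for (V = N1; V < N2; V += STEP) ...

   this emits roughly

	V = <initial value>;	     appended to *BODY_P, so threads that run
				     no iterations never satisfy the guard
	... the lowered loop ...
	if (V >= N2)		     negation of the loop condition; == is used
				     instead when STEP is +/-1 (see below)
	  x = <thread's private x>;  copy-out built by lower_lastprivate_clauses,
				     placed at the head of *DLIST  */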
5667
5668 static void
5669 lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
5670 tree *dlist, struct omp_context *ctx)
5671 {
5672 tree clauses, cond, stmts, vinit, t;
5673 enum tree_code cond_code;
5674
5675 cond_code = fd->loop.cond_code;
5676 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5677
5678 /* When possible, use a strict equality expression.  This can let
5679    VRP-style optimizations deduce the value and remove a copy.  */
5680 if (host_integerp (fd->loop.step, 0))
5681 {
5682 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5683 if (step == 1 || step == -1)
5684 cond_code = EQ_EXPR;
5685 }
5686
5687 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5688
5689 clauses = OMP_FOR_CLAUSES (fd->for_stmt);
5690 stmts = NULL;
5691 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5692 if (stmts != NULL)
5693 {
5694 append_to_statement_list (*dlist, &stmts);
5695 *dlist = stmts;
5696
5697 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
5698 vinit = fd->loop.n1;
5699 if (cond_code == EQ_EXPR
5700 && host_integerp (fd->loop.n2, 0)
5701 && ! integer_zerop (fd->loop.n2))
5702 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
5703
5704 /* Initialize the iterator variable, so that threads that don't execute
5705 any iterations don't execute the lastprivate clauses by accident. */
5706 t = build_gimple_modify_stmt (fd->loop.v, vinit);
5707 gimplify_and_add (t, body_p);
5708 }
5709 }
5710
5711
5712 /* Lower code for an OpenMP loop directive. */
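/* A rough picture of the statement sequence produced below; fragments
   in <...> come from the named helpers, and this is a sketch rather
   than the exact trees:

	<firstprivate/reduction setup from lower_rec_input_clauses>
	<OMP_FOR_PRE_BODY>
	<temporaries for non-invariant loop bounds>
	<lastprivate guard initialization>
	OMP_FOR
	<loop body>
	OMP_CONTINUE
	<reduction merge code>
	<lastprivate copy-out and destructors (DLIST)>
	OMP_RETURN [nowait]  */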
5713
5714 static void
5715 lower_omp_for (tree *stmt_p, omp_context *ctx)
5716 {
5717 tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p;
5718 struct omp_for_data fd;
5719 int i;
5720
5721 stmt = *stmt_p;
5722
5723 push_gimplify_context ();
5724
5725 lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
5726 lower_omp (&OMP_FOR_BODY (stmt), ctx);
5727
5728 /* Move the declarations of temporaries in the loop body before we make
5729    it go away.  */
5730 if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR)
5731 record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn);
5732
5733 new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
5734 TREE_SIDE_EFFECTS (new_stmt) = 1;
5735 body_p = &BIND_EXPR_BODY (new_stmt);
5736
5737 /* The pre-body and input clauses go before the lowered OMP_FOR. */
5738 ilist = NULL;
5739 dlist = NULL;
5740 lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx);
5741 append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
5742
5743 /* Lower the header expressions. At this point, we can assume that
5744 the header is of the form:
5745
5746 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
5747
5748 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
5749 using the .omp_data_s mapping, if needed. */
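  /* For instance (illustrative), if VAL2 is the expression n + m, the
     comparison is rewritten to use a fresh temporary, with ".tmp = n + m"
     emitted into the pre-OMP_FOR statements by get_formal_tmp_var.  */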
5750 for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
5751 {
5752 rhs_p = &GIMPLE_STMT_OPERAND (TREE_VEC_ELT (OMP_FOR_INIT (stmt), i), 1);
5753 if (!is_gimple_min_invariant (*rhs_p))
5754 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
5755
5756 rhs_p = &TREE_OPERAND (TREE_VEC_ELT (OMP_FOR_COND (stmt), i), 1);
5757 if (!is_gimple_min_invariant (*rhs_p))
5758 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
5759
5760 rhs_p = &TREE_OPERAND (GIMPLE_STMT_OPERAND
5761 (TREE_VEC_ELT (OMP_FOR_INCR (stmt), i), 1), 1);
5762 if (!is_gimple_min_invariant (*rhs_p))
5763 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
5764 }
5765
5766 /* Once lowered, extract the bounds and clauses. */
5767 extract_omp_for_data (stmt, &fd, NULL);
5768
5769 lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx);
5770
5771 append_to_statement_list (stmt, body_p);
5772
5773 append_to_statement_list (OMP_FOR_BODY (stmt), body_p);
5774
5775 t = build2 (OMP_CONTINUE, void_type_node, fd.loop.v, fd.loop.v);
5776 append_to_statement_list (t, body_p);
5777
5778 /* After the loop, add exit clauses. */
5779 lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx);
5780 append_to_statement_list (dlist, body_p);
5781
5782 maybe_catch_exception (body_p);
5783
5784 /* Region exit marker goes at the end of the loop body. */
5785 t = make_node (OMP_RETURN);
5786 OMP_RETURN_NOWAIT (t) = fd.have_nowait;
5787 append_to_statement_list (t, body_p);
5788
5789 pop_gimplify_context (NULL_TREE);
5790 record_vars_into (ctx->block_vars, ctx->cb.dst_fn);
5791
5792 OMP_FOR_BODY (stmt) = NULL_TREE;
5793 OMP_FOR_PRE_BODY (stmt) = NULL_TREE;
5794 *stmt_p = new_stmt;
5795 }
5796
5797 /* Callback for walk_stmts.  Check if *TP only contains OMP_FOR
5798    or OMP_SECTIONS.  */
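/* E.g. (illustrative): for

	#pragma omp parallel
	#pragma omp for
	for (...) ...

   the walk sees exactly one worksharing construct, *INFO ends up 1 and
   the caller marks the parallel OMP_PARALLEL_COMBINED; any other
   statement in the body, or a second worksharing construct, forces
   *INFO to -1 and the parallel is left uncombined.  */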
5799
5800 static tree
5801 check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
5802 {
5803 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5804 int *info = (int *) wi->info;
5805
5806 *walk_subtrees = 0;
5807 switch (TREE_CODE (*tp))
5808 {
5809 case OMP_FOR:
5810 case OMP_SECTIONS:
5811 *info = *info == 0 ? 1 : -1;
5812 break;
5813 default:
5814 *info = -1;
5815 break;
5816 }
5817 return NULL;
5818 }
5819
5820 struct omp_taskcopy_context
5821 {
5822 /* This field must be at the beginning, as we do "inheritance": Some
5823 callback functions for tree-inline.c (e.g., omp_copy_decl)
5824 receive a copy_body_data pointer that is up-casted to an
5825 omp_context pointer. */
5826 copy_body_data cb;
5827 omp_context *ctx;
5828 };
5829
5830 static tree
5831 task_copyfn_copy_decl (tree var, copy_body_data *cb)
5832 {
5833 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
5834
5835 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
5836 return create_tmp_var (TREE_TYPE (var), NULL);
5837
5838 return var;
5839 }
5840
5841 static tree
5842 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
5843 {
5844 tree name, new_fields = NULL, type, f;
5845
5846 type = lang_hooks.types.make_type (RECORD_TYPE);
5847 name = DECL_NAME (TYPE_NAME (orig_type));
5848 name = build_decl (TYPE_DECL, name, type);
5849 TYPE_NAME (type) = name;
5850
5851 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
5852 {
5853 tree new_f = copy_node (f);
5854 DECL_CONTEXT (new_f) = type;
5855 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
5856 TREE_CHAIN (new_f) = new_fields;
5857 walk_tree (&DECL_SIZE (new_f), copy_body_r, &tcctx->cb, NULL);
5858 walk_tree (&DECL_SIZE_UNIT (new_f), copy_body_r, &tcctx->cb, NULL);
5859 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_body_r, &tcctx->cb, NULL);
5860 new_fields = new_f;
5861 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
5862 }
5863 TYPE_FIELDS (type) = nreverse (new_fields);
5864 layout_type (type);
5865 return type;
5866 }
5867
5868 /* Create the task copyfn: it initializes the task's data block (allocated by GOMP_task) from the sender record. */
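/* Illustrative shape of the function built here; the argument and field
   names are placeholders for the decls created while scanning the
   clauses, not names this code necessarily emits:

	void .omp_task_copyfn (struct .omp_data_s *dst, struct .omp_data_a *src)
	{
	  <temporaries for VLA sizes/field offsets set from *src, if any>
	  dst->shared_var_p = src->shared_var_p;    shared: copy the pointer
	  dst->fp_var = src->fp_var;		    firstprivate: copy or copy-construct
	  <VLA firstprivates copied last, with the matching pointer fields
	   in *dst redirected to the fresh copies>
	  return;
	}  */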
5869
5870 static void
5871 create_task_copyfn (tree task_stmt, omp_context *ctx)
5872 {
5873 struct function *child_cfun;
5874 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
5875 tree record_type, srecord_type, bind, list;
5876 bool record_needs_remap = false, srecord_needs_remap = false;
5877 splay_tree_node n;
5878 struct omp_taskcopy_context tcctx;
5879
5880 child_fn = OMP_TASK_COPYFN (task_stmt);
5881 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
5882 gcc_assert (child_cfun->cfg == NULL);
5883 child_cfun->dont_save_pending_sizes_p = 1;
5884 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
5885
5886 /* Reset DECL_CONTEXT on function arguments. */
5887 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
5888 DECL_CONTEXT (t) = child_fn;
5889
5890 /* Populate the function. */
5891 push_gimplify_context ();
5892 current_function_decl = child_fn;
5893
5894 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
5895 TREE_SIDE_EFFECTS (bind) = 1;
5896 list = NULL;
5897 DECL_SAVED_TREE (child_fn) = bind;
5898 DECL_SOURCE_LOCATION (child_fn) = EXPR_LOCATION (task_stmt);
5899
5900 /* Remap src and dst argument types if needed. */
5901 record_type = ctx->record_type;
5902 srecord_type = ctx->srecord_type;
5903 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
5904 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
5905 {
5906 record_needs_remap = true;
5907 break;
5908 }
5909 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
5910 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
5911 {
5912 srecord_needs_remap = true;
5913 break;
5914 }
5915
5916 if (record_needs_remap || srecord_needs_remap)
5917 {
5918 memset (&tcctx, '\0', sizeof (tcctx));
5919 tcctx.cb.src_fn = ctx->cb.src_fn;
5920 tcctx.cb.dst_fn = child_fn;
5921 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
5922 tcctx.cb.dst_node = tcctx.cb.src_node;
5923 tcctx.cb.src_cfun = ctx->cb.src_cfun;
5924 tcctx.cb.copy_decl = task_copyfn_copy_decl;
5925 tcctx.cb.eh_region = -1;
5926 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
5927 tcctx.cb.decl_map = pointer_map_create ();
5928 tcctx.ctx = ctx;
5929
5930 if (record_needs_remap)
5931 record_type = task_copyfn_remap_type (&tcctx, record_type);
5932 if (srecord_needs_remap)
5933 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
5934 }
5935 else
5936 tcctx.cb.decl_map = NULL;
5937
5938 push_cfun (child_cfun);
5939
5940 arg = DECL_ARGUMENTS (child_fn);
5941 TREE_TYPE (arg) = build_pointer_type (record_type);
5942 sarg = TREE_CHAIN (arg);
5943 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
5944
5945 /* First pass: initialize temporaries used in record_type and srecord_type
5946 sizes and field offsets. */
5947 if (tcctx.cb.decl_map)
5948 for (c = OMP_TASK_CLAUSES (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
5949 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
5950 {
5951 tree *p;
5952
5953 decl = OMP_CLAUSE_DECL (c);
5954 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
5955 if (p == NULL)
5956 continue;
5957 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
5958 sf = (tree) n->value;
5959 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
5960 src = build_fold_indirect_ref (sarg);
5961 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
5962 t = build_gimple_modify_stmt (*p, src);
5963 append_to_statement_list (t, &list);
5964 }
5965
5966 /* Second pass: copy shared var pointers and copy construct non-VLA
5967 firstprivate vars. */
5968 for (c = OMP_TASK_CLAUSES (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
5969 switch (OMP_CLAUSE_CODE (c))
5970 {
5971 case OMP_CLAUSE_SHARED:
5972 decl = OMP_CLAUSE_DECL (c);
5973 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
5974 if (n == NULL)
5975 break;
5976 f = (tree) n->value;
5977 if (tcctx.cb.decl_map)
5978 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
5979 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
5980 sf = (tree) n->value;
5981 if (tcctx.cb.decl_map)
5982 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
5983 src = build_fold_indirect_ref (sarg);
5984 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
5985 dst = build_fold_indirect_ref (arg);
5986 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
5987 t = build_gimple_modify_stmt (dst, src);
5988 append_to_statement_list (t, &list);
5989 break;
5990 case OMP_CLAUSE_FIRSTPRIVATE:
5991 decl = OMP_CLAUSE_DECL (c);
5992 if (is_variable_sized (decl))
5993 break;
5994 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
5995 if (n == NULL)
5996 break;
5997 f = (tree) n->value;
5998 if (tcctx.cb.decl_map)
5999 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6000 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6001 if (n != NULL)
6002 {
6003 sf = (tree) n->value;
6004 if (tcctx.cb.decl_map)
6005 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6006 src = build_fold_indirect_ref (sarg);
6007 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6008 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6009 src = build_fold_indirect_ref (src);
6010 }
6011 else
6012 src = decl;
6013 dst = build_fold_indirect_ref (arg);
6014 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6015 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6016 append_to_statement_list (t, &list);
6017 break;
6018 case OMP_CLAUSE_PRIVATE:
6019 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6020 break;
6021 decl = OMP_CLAUSE_DECL (c);
6022 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6023 f = (tree) n->value;
6024 if (tcctx.cb.decl_map)
6025 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6026 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6027 if (n != NULL)
6028 {
6029 sf = (tree) n->value;
6030 if (tcctx.cb.decl_map)
6031 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6032 src = build_fold_indirect_ref (sarg);
6033 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6034 if (use_pointer_for_field (decl, NULL))
6035 src = build_fold_indirect_ref (src);
6036 }
6037 else
6038 src = decl;
6039 dst = build_fold_indirect_ref (arg);
6040 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6041 t = build_gimple_modify_stmt (dst, src);
6042 append_to_statement_list (t, &list);
6043 break;
6044 default:
6045 break;
6046 }
6047
6048 /* Last pass: handle VLA firstprivates. */
6049 if (tcctx.cb.decl_map)
6050 for (c = OMP_TASK_CLAUSES (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6051 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6052 {
6053 tree ind, ptr, df;
6054
6055 decl = OMP_CLAUSE_DECL (c);
6056 if (!is_variable_sized (decl))
6057 continue;
6058 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6059 if (n == NULL)
6060 continue;
6061 f = (tree) n->value;
6062 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6063 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6064 ind = DECL_VALUE_EXPR (decl);
6065 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6066 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6067 n = splay_tree_lookup (ctx->sfield_map,
6068 (splay_tree_key) TREE_OPERAND (ind, 0));
6069 sf = (tree) n->value;
6070 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6071 src = build_fold_indirect_ref (sarg);
6072 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6073 src = build_fold_indirect_ref (src);
6074 dst = build_fold_indirect_ref (arg);
6075 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6076 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6077 append_to_statement_list (t, &list);
6078 n = splay_tree_lookup (ctx->field_map,
6079 (splay_tree_key) TREE_OPERAND (ind, 0));
6080 df = (tree) n->value;
6081 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6082 ptr = build_fold_indirect_ref (arg);
6083 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6084 t = build_gimple_modify_stmt (ptr, build_fold_addr_expr (dst));
6085 append_to_statement_list (t, &list);
6086 }
6087
6088 t = build1 (RETURN_EXPR, void_type_node, NULL);
6089 append_to_statement_list (t, &list);
6090
6091 if (tcctx.cb.decl_map)
6092 pointer_map_destroy (tcctx.cb.decl_map);
6093 pop_gimplify_context (NULL);
6094 BIND_EXPR_BODY (bind) = list;
6095 pop_cfun ();
6096 current_function_decl = ctx->cb.src_fn;
6097 }
6098
6099 /* Lower the OpenMP parallel or task directive in *STMT_P. CTX holds context
6100 information for the directive. */
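/* A rough outline of the result (illustrative; fragments in <...> come
   from the named helpers, and the data decls are the ones created
   during scanning):

	<ILIST: .omp_data_o.field = var, ... from lower_send_clauses and
		lower_send_shared_vars>
	OMP_PARALLEL / OMP_TASK
	  {
	    .omp_data_i = &.omp_data_o;	   receiver setup, only when a
					   data-sharing record exists
	    <PAR_ILIST: per-thread private/firstprivate setup>
	    BODY
	    <PAR_OLIST: reduction merges, OMP_PARALLEL only>
	    OMP_RETURN
	  }
	<OLIST: copy-back code for variables sent by reference>  */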
6101
6102 static void
6103 lower_omp_taskreg (tree *stmt_p, omp_context *ctx)
6104 {
6105 tree clauses, par_bind, par_body, new_body, bind;
6106 tree olist, ilist, par_olist, par_ilist;
6107 tree stmt, child_fn, t;
6108
6109 stmt = *stmt_p;
6110
6111 clauses = OMP_TASKREG_CLAUSES (stmt);
6112 par_bind = OMP_TASKREG_BODY (stmt);
6113 par_body = BIND_EXPR_BODY (par_bind);
6114 child_fn = ctx->cb.dst_fn;
6115 if (TREE_CODE (stmt) == OMP_PARALLEL && !OMP_PARALLEL_COMBINED (stmt))
6116 {
6117 struct walk_stmt_info wi;
6118 int ws_num = 0;
6119
6120 memset (&wi, 0, sizeof (wi));
6121 wi.callback = check_combined_parallel;
6122 wi.info = &ws_num;
6123 wi.val_only = true;
6124 walk_stmts (&wi, &par_bind);
6125 if (ws_num == 1)
6126 OMP_PARALLEL_COMBINED (stmt) = 1;
6127 }
6128 if (ctx->srecord_type)
6129 create_task_copyfn (stmt, ctx);
6130
6131 push_gimplify_context ();
6132
6133 par_olist = NULL_TREE;
6134 par_ilist = NULL_TREE;
6135 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6136 lower_omp (&par_body, ctx);
6137 if (TREE_CODE (stmt) == OMP_PARALLEL)
6138 lower_reduction_clauses (clauses, &par_olist, ctx);
6139
6140 /* Declare all the variables created by mapping and the variables
6141 declared in the scope of the parallel body. */
6142 record_vars_into (ctx->block_vars, child_fn);
6143 record_vars_into (BIND_EXPR_VARS (par_bind), child_fn);
6144
6145 if (ctx->record_type)
6146 {
6147 ctx->sender_decl
6148 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6149 : ctx->record_type, ".omp_data_o");
6150 OMP_TASKREG_DATA_ARG (stmt) = ctx->sender_decl;
6151 }
6152
6153 olist = NULL_TREE;
6154 ilist = NULL_TREE;
6155 lower_send_clauses (clauses, &ilist, &olist, ctx);
6156 lower_send_shared_vars (&ilist, &olist, ctx);
6157
6158 /* Once all the expansions are done, sequence all the different
6159 fragments inside OMP_TASKREG_BODY. */
6160 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6161 append_to_statement_list (ilist, &BIND_EXPR_BODY (bind));
6162
6163 new_body = alloc_stmt_list ();
6164
6165 if (ctx->record_type)
6166 {
6167 t = build_fold_addr_expr (ctx->sender_decl);
6168 /* fixup_child_record_type might have changed receiver_decl's type. */
6169 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
6170 t = build_gimple_modify_stmt (ctx->receiver_decl, t);
6171 append_to_statement_list (t, &new_body);
6172 }
6173
6174 append_to_statement_list (par_ilist, &new_body);
6175 append_to_statement_list (par_body, &new_body);
6176 append_to_statement_list (par_olist, &new_body);
6177 maybe_catch_exception (&new_body);
6178 t = make_node (OMP_RETURN);
6179 append_to_statement_list (t, &new_body);
6180 OMP_TASKREG_BODY (stmt) = new_body;
6181
6182 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
6183 append_to_statement_list (olist, &BIND_EXPR_BODY (bind));
6184
6185 *stmt_p = bind;
6186
6187 pop_gimplify_context (NULL_TREE);
6188 }
6189
6190 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6191 regimplified. */
6192
6193 static tree
6194 lower_omp_2 (tree *tp, int *walk_subtrees, void *data)
6195 {
6196 tree t = *tp;
6197 omp_context *ctx = (omp_context *) data;
6198
6199 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6200 if (TREE_CODE (t) == VAR_DECL
6201 && ((ctx && DECL_HAS_VALUE_EXPR_P (t))
6202 || (task_shared_vars
6203 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))))
6204 return t;
6205
6206 /* If a global variable has been privatized, TREE_CONSTANT on
6207 ADDR_EXPR might be wrong. */
6208 if (ctx && TREE_CODE (t) == ADDR_EXPR)
6209 recompute_tree_invariant_for_addr_expr (t);
6210
6211 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6212 return NULL_TREE;
6213 }
6214
6215 static void
6216 lower_omp_1 (tree *tp, omp_context *ctx, tree_stmt_iterator *tsi)
6217 {
6218 tree t = *tp;
6219
6220 if (!t)
6221 return;
6222
6223 if (EXPR_HAS_LOCATION (t))
6224 input_location = EXPR_LOCATION (t);
6225
6226 /* If we have issued syntax errors, avoid doing any heavy lifting.
6227 Just replace the OpenMP directives with a NOP to avoid
6228 confusing RTL expansion. */
6229 if (errorcount && OMP_DIRECTIVE_P (t))
6230 {
6231 *tp = build_empty_stmt ();
6232 return;
6233 }
6234
6235 switch (TREE_CODE (t))
6236 {
6237 case STATEMENT_LIST:
6238 {
6239 tree_stmt_iterator i;
6240 for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i))
6241 lower_omp_1 (tsi_stmt_ptr (i), ctx, &i);
6242 }
6243 break;
6244
6245 case COND_EXPR:
6246 lower_omp_1 (&COND_EXPR_THEN (t), ctx, NULL);
6247 lower_omp_1 (&COND_EXPR_ELSE (t), ctx, NULL);
6248 if ((ctx || task_shared_vars)
6249 && walk_tree (&COND_EXPR_COND (t), lower_omp_2, ctx, NULL))
6250 {
6251 tree pre = NULL;
6252 gimplify_expr (&COND_EXPR_COND (t), &pre, NULL,
6253 is_gimple_condexpr, fb_rvalue);
6254 if (pre)
6255 {
6256 if (tsi)
6257 tsi_link_before (tsi, pre, TSI_SAME_STMT);
6258 else
6259 {
6260 append_to_statement_list (t, &pre);
6261 *tp = pre;
6262 }
6263 }
6264 }
6265 break;
6266 case CATCH_EXPR:
6267 lower_omp_1 (&CATCH_BODY (t), ctx, NULL);
6268 break;
6269 case EH_FILTER_EXPR:
6270 lower_omp_1 (&EH_FILTER_FAILURE (t), ctx, NULL);
6271 break;
6272 case TRY_CATCH_EXPR:
6273 case TRY_FINALLY_EXPR:
6274 lower_omp_1 (&TREE_OPERAND (t, 0), ctx, NULL);
6275 lower_omp_1 (&TREE_OPERAND (t, 1), ctx, NULL);
6276 break;
6277 case BIND_EXPR:
6278 lower_omp_1 (&BIND_EXPR_BODY (t), ctx, NULL);
6279 break;
6280 case RETURN_EXPR:
6281 lower_omp_1 (&TREE_OPERAND (t, 0), ctx, NULL);
6282 break;
6283
6284 case OMP_PARALLEL:
6285 case OMP_TASK:
6286 ctx = maybe_lookup_ctx (t);
6287 lower_omp_taskreg (tp, ctx);
6288 break;
6289 case OMP_FOR:
6290 ctx = maybe_lookup_ctx (t);
6291 gcc_assert (ctx);
6292 lower_omp_for (tp, ctx);
6293 break;
6294 case OMP_SECTIONS:
6295 ctx = maybe_lookup_ctx (t);
6296 gcc_assert (ctx);
6297 lower_omp_sections (tp, ctx);
6298 break;
6299 case OMP_SINGLE:
6300 ctx = maybe_lookup_ctx (t);
6301 gcc_assert (ctx);
6302 lower_omp_single (tp, ctx);
6303 break;
6304 case OMP_MASTER:
6305 ctx = maybe_lookup_ctx (t);
6306 gcc_assert (ctx);
6307 lower_omp_master (tp, ctx);
6308 break;
6309 case OMP_ORDERED:
6310 ctx = maybe_lookup_ctx (t);
6311 gcc_assert (ctx);
6312 lower_omp_ordered (tp, ctx);
6313 break;
6314 case OMP_CRITICAL:
6315 ctx = maybe_lookup_ctx (t);
6316 gcc_assert (ctx);
6317 lower_omp_critical (tp, ctx);
6318 break;
6319
6320 default:
6321 if ((ctx || task_shared_vars)
6322 && walk_tree (tp, lower_omp_2, ctx, NULL))
6323 {
6324 /* The gimplifier doesn't gimplify CALL_EXPR_STATIC_CHAIN.
6325 Handle that here. */
6326 tree call = get_call_expr_in (t);
6327 if (call
6328 && CALL_EXPR_STATIC_CHAIN (call)
6329 && walk_tree (&CALL_EXPR_STATIC_CHAIN (call), lower_omp_2,
6330 ctx, NULL))
6331 {
6332 tree pre = NULL;
6333 gimplify_expr (&CALL_EXPR_STATIC_CHAIN (call), &pre, NULL,
6334 is_gimple_val, fb_rvalue);
6335 if (pre)
6336 {
6337 if (tsi)
6338 tsi_link_before (tsi, pre, TSI_SAME_STMT);
6339 else
6340 {
6341 append_to_statement_list (t, &pre);
6342 lower_omp_1 (&pre, ctx, NULL);
6343 *tp = pre;
6344 return;
6345 }
6346 }
6347 }
6348
6349 if (tsi == NULL)
6350 gimplify_stmt (tp);
6351 else
6352 {
6353 tree pre = NULL;
6354 gimplify_expr (tp, &pre, NULL, is_gimple_stmt, fb_none);
6355 if (pre)
6356 tsi_link_before (tsi, pre, TSI_SAME_STMT);
6357 }
6358 }
6359 break;
6360 }
6361 }
6362
6363 static void
6364 lower_omp (tree *stmt_p, omp_context *ctx)
6365 {
6366 lower_omp_1 (stmt_p, ctx, NULL);
6367 }
6368 \f
6369 /* Main entry point. */
6370
6371 static unsigned int
6372 execute_lower_omp (void)
6373 {
6374 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6375 delete_omp_context);
6376
6377 scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
6378 gcc_assert (taskreg_nesting_level == 0);
6379
6380 if (all_contexts->root)
6381 {
6382 if (task_shared_vars)
6383 push_gimplify_context ();
6384 lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
6385 if (task_shared_vars)
6386 pop_gimplify_context (NULL);
6387 }
6388
6389 if (all_contexts)
6390 {
6391 splay_tree_delete (all_contexts);
6392 all_contexts = NULL;
6393 }
6394 BITMAP_FREE (task_shared_vars);
6395 return 0;
6396 }
6397
6398 static bool
6399 gate_lower_omp (void)
6400 {
6401 return flag_openmp != 0;
6402 }
6403
6404 struct gimple_opt_pass pass_lower_omp =
6405 {
6406 {
6407 GIMPLE_PASS,
6408 "omplower", /* name */
6409 gate_lower_omp, /* gate */
6410 execute_lower_omp, /* execute */
6411 NULL, /* sub */
6412 NULL, /* next */
6413 0, /* static_pass_number */
6414 0, /* tv_id */
6415 PROP_gimple_any, /* properties_required */
6416 PROP_gimple_lomp, /* properties_provided */
6417 0, /* properties_destroyed */
6418 0, /* todo_flags_start */
6419 TODO_dump_func /* todo_flags_finish */
6420 }
6421 };
6422 \f
6423 /* The following is a utility to diagnose OpenMP structured block violations.
6424 It is not part of the "omplower" pass, as that's invoked too late. It
6425 should be invoked by the respective front ends after gimplification. */
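/* For example (illustrative), both of the following draw an error from
   diagnose_sb_0 below:

	#pragma omp parallel
	{ ... goto out; ... }	"invalid exit from OpenMP structured block"
     out:;

	goto in;
	#pragma omp single
	{ in:; ... }		"invalid entry to OpenMP structured block"  */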
6426
6427 static splay_tree all_labels;
6428
6429 /* Check for mismatched contexts and generate an error if needed. Return
6430 true if an error is detected. */
6431
6432 static bool
6433 diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
6434 {
6435 bool exit_p = true;
6436
6437 if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
6438 return false;
6439
6440 /* Try to avoid confusing the user by producing an error message
6441 with correct "exit" or "enter" verbiage. We prefer "exit"
6442 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6443 if (branch_ctx == NULL)
6444 exit_p = false;
6445 else
6446 {
6447 while (label_ctx)
6448 {
6449 if (TREE_VALUE (label_ctx) == branch_ctx)
6450 {
6451 exit_p = false;
6452 break;
6453 }
6454 label_ctx = TREE_CHAIN (label_ctx);
6455 }
6456 }
6457
6458 if (exit_p)
6459 error ("invalid exit from OpenMP structured block");
6460 else
6461 error ("invalid entry to OpenMP structured block");
6462
6463 *stmt_p = build_empty_stmt ();
6464 return true;
6465 }
6466
6467 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6468 where in the tree each label is found. */
6469
6470 static tree
6471 diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
6472 {
6473 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6474 tree context = (tree) wi->info;
6475 tree inner_context;
6476 tree t = *tp;
6477 int i;
6478
6479 *walk_subtrees = 0;
6480 switch (TREE_CODE (t))
6481 {
6482 case OMP_PARALLEL:
6483 case OMP_TASK:
6484 case OMP_SECTIONS:
6485 case OMP_SINGLE:
6486 walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
6487 /* FALLTHRU */
6488 case OMP_SECTION:
6489 case OMP_MASTER:
6490 case OMP_ORDERED:
6491 case OMP_CRITICAL:
6492 /* The minimal context here is just a tree of statements. */
6493 inner_context = tree_cons (NULL, t, context);
6494 wi->info = inner_context;
6495 walk_stmts (wi, &OMP_BODY (t));
6496 wi->info = context;
6497 break;
6498
6499 case OMP_FOR:
6500 walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
6501 inner_context = tree_cons (NULL, t, context);
6502 wi->info = inner_context;
6503 for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++)
6504 {
6505 walk_tree (&TREE_VEC_ELT (OMP_FOR_INIT (t), i), diagnose_sb_1,
6506 wi, NULL);
6507 walk_tree (&TREE_VEC_ELT (OMP_FOR_COND (t), i), diagnose_sb_1,
6508 wi, NULL);
6509 walk_tree (&TREE_VEC_ELT (OMP_FOR_INCR (t), i), diagnose_sb_1,
6510 wi, NULL);
6511 }
6512 walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
6513 walk_stmts (wi, &OMP_FOR_BODY (t));
6514 wi->info = context;
6515 break;
6516
6517 case LABEL_EXPR:
6518 splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
6519 (splay_tree_value) context);
6520 break;
6521
6522 default:
6523 break;
6524 }
6525
6526 return NULL_TREE;
6527 }
6528
6529 /* Pass 2: Check each branch and see if its context differs from that of
6530 the destination label's context. */
6531
6532 static tree
6533 diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
6534 {
6535 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6536 tree context = (tree) wi->info;
6537 splay_tree_node n;
6538 tree t = *tp;
6539 int i;
6540
6541 *walk_subtrees = 0;
6542 switch (TREE_CODE (t))
6543 {
6544 case OMP_PARALLEL:
6545 case OMP_TASK:
6546 case OMP_SECTIONS:
6547 case OMP_SINGLE:
6548 walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
6549 /* FALLTHRU */
6550 case OMP_SECTION:
6551 case OMP_MASTER:
6552 case OMP_ORDERED:
6553 case OMP_CRITICAL:
6554 wi->info = t;
6555 walk_stmts (wi, &OMP_BODY (t));
6556 wi->info = context;
6557 break;
6558
6559 case OMP_FOR:
6560 walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
6561 wi->info = t;
6562 for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (t)); i++)
6563 {
6564 walk_tree (&TREE_VEC_ELT (OMP_FOR_INIT (t), i), diagnose_sb_2,
6565 wi, NULL);
6566 walk_tree (&TREE_VEC_ELT (OMP_FOR_COND (t), i), diagnose_sb_2,
6567 wi, NULL);
6568 walk_tree (&TREE_VEC_ELT (OMP_FOR_INCR (t), i), diagnose_sb_2,
6569 wi, NULL);
6570 }
6571 walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
6572 walk_stmts (wi, &OMP_FOR_BODY (t));
6573 wi->info = context;
6574 break;
6575
6576 case GOTO_EXPR:
6577 {
6578 tree lab = GOTO_DESTINATION (t);
6579 if (TREE_CODE (lab) != LABEL_DECL)
6580 break;
6581
6582 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6583 diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE);
6584 }
6585 break;
6586
6587 case SWITCH_EXPR:
6588 {
6589 tree vec = SWITCH_LABELS (t);
6590 int i, len = TREE_VEC_LENGTH (vec);
6591 for (i = 0; i < len; ++i)
6592 {
6593 tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i));
6594 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6595 if (diagnose_sb_0 (tp, context, (tree) n->value))
6596 break;
6597 }
6598 }
6599 break;
6600
6601 case RETURN_EXPR:
6602 diagnose_sb_0 (tp, context, NULL_TREE);
6603 break;
6604
6605 default:
6606 break;
6607 }
6608
6609 return NULL_TREE;
6610 }
6611
6612 void
6613 diagnose_omp_structured_block_errors (tree fndecl)
6614 {
6615 tree save_current = current_function_decl;
6616 struct walk_stmt_info wi;
6617
6618 current_function_decl = fndecl;
6619
6620 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6621
6622 memset (&wi, 0, sizeof (wi));
6623 wi.callback = diagnose_sb_1;
6624 walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));
6625
6626 memset (&wi, 0, sizeof (wi));
6627 wi.callback = diagnose_sb_2;
6628 wi.want_locations = true;
6629 wi.want_return_expr = true;
6630 walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));
6631
6632 splay_tree_delete (all_labels);
6633 all_labels = NULL;
6634
6635 current_function_decl = save_current;
6636 }
6637
6638 #include "gt-omp-low.h"