gcc/omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic.h"
35 #include "tree-flow.h"
36 #include "timevar.h"
37 #include "flags.h"
38 #include "function.h"
39 #include "expr.h"
40 #include "toplev.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
47
48
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
55
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
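
/* As a rough illustration of the end result (the names and exact calls
   here are only a sketch; the real details are decided later in this
   file), a directive such as

       #pragma omp parallel shared (i)
	 body;

   is outlined into a child function, say foo._omp_fn.0, and the
   directive itself is replaced by calls into libgomp along the lines of

       .omp_data_o.i = i;
       GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();

   where .omp_data_o is the sender record described by omp_context
   below.  */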
59
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
62
63 typedef struct omp_context
64 {
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
70
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
74
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
81
82 /* These are used just by task contexts, if a task firstprivate fn is
83 needed.  srecord_type is used to communicate from the thread
84 that encountered the task construct to the task firstprivate fn;
85 record_type is allocated by GOMP_task, initialized by the task
86 firstprivate fn and passed to the task body fn.  */
87 splay_tree sfield_map;
88 tree srecord_type;
89
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
93
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
97
98 /* Nesting depth of this context. Used to beautify error messages re
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
102
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
106
107
108 struct omp_for_data_loop
109 {
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
112 };
113
114 /* A structure describing the main elements of a parallel loop. */
115
116 struct omp_for_data
117 {
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
126 };
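
/* For illustration (a sketch, not produced output), given

       #pragma omp for schedule (dynamic, 4)
       for (i = 0; i < n; i += 2)

   extract_omp_for_data below fills in roughly:  loop.v = i, loop.n1 = 0,
   loop.n2 = n, loop.step = 2, loop.cond_code = LT_EXPR, collapse = 1,
   sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC and chunk_size = 4.  */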
127
128
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
133
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
136
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
145
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
147
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
150 {
151 struct walk_stmt_info wi;
152
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
156
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
158 }
159
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
163
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
165
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
168 {
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
172
173 return NULL_TREE;
174 }
175
176 /* Return true if CTX is for an omp parallel. */
177
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
180 {
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
182 }
183
184
185 /* Return true if CTX is for an omp task. */
186
187 static inline bool
188 is_task_ctx (omp_context *ctx)
189 {
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
191 }
192
193
194 /* Return true if CTX is for an omp parallel or omp task. */
195
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
198 {
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
201 }
202
203
204 /* Return true if REGION is a combined parallel+workshare region. */
205
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
208 {
209 return region->is_combined_parallel;
210 }
211
212
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
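
/* For a collapsed nest, e.g. (an illustrative sketch)

       #pragma omp for collapse (2)
       for (i = 0; i < 8; i++)
	 for (j = 0; j < 4; j++)

   the code below computes the total iteration count 8 * 4 = 32 when the
   bounds and steps are constant, picks an iteration type wide enough to
   hold it, and then describes the whole nest in FD->loop as a single
   loop from 0 to that count with step 1 and LT_EXPR as the condition,
   using the collapse iteration variable as its index.  */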
215
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
219 {
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225
226 fd->for_stmt = for_stmt;
227 fd->pre = NULL;
228 fd->collapse = gimple_omp_for_collapse (for_stmt);
229 if (fd->collapse > 1)
230 fd->loops = loops;
231 else
232 fd->loops = &fd->loop;
233
234 fd->have_nowait = fd->have_ordered = false;
235 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
236 fd->chunk_size = NULL_TREE;
237 collapse_iter = NULL;
238 collapse_count = NULL;
239
240 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
241 switch (OMP_CLAUSE_CODE (t))
242 {
243 case OMP_CLAUSE_NOWAIT:
244 fd->have_nowait = true;
245 break;
246 case OMP_CLAUSE_ORDERED:
247 fd->have_ordered = true;
248 break;
249 case OMP_CLAUSE_SCHEDULE:
250 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
251 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
252 break;
253 case OMP_CLAUSE_COLLAPSE:
254 if (fd->collapse > 1)
255 {
256 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
257 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
258 }
259 default:
260 break;
261 }
262
263 /* FIXME: for now map schedule(auto) to schedule(static).
264 There should be analysis to determine whether all iterations
265 are approximately the same amount of work (then schedule(static)
266 is best) or if it varies (then schedule(dynamic,N) is better). */
267 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
268 {
269 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
270 gcc_assert (fd->chunk_size == NULL);
271 }
272 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
273 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
274 gcc_assert (fd->chunk_size == NULL);
275 else if (fd->chunk_size == NULL)
276 {
277 /* We only need to compute a default chunk size for ordered
278 static loops and dynamic loops. */
279 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
280 || fd->have_ordered
281 || fd->collapse > 1)
282 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
283 ? integer_zero_node : integer_one_node;
284 }
285
286 for (i = 0; i < fd->collapse; i++)
287 {
288 if (fd->collapse == 1)
289 loop = &fd->loop;
290 else if (loops != NULL)
291 loop = loops + i;
292 else
293 loop = &dummy_loop;
294
295
296 loop->v = gimple_omp_for_index (for_stmt, i);
297 gcc_assert (SSA_VAR_P (loop->v));
298 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
299 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
300 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
301 loop->n1 = gimple_omp_for_initial (for_stmt, i);
302
303 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
304 loop->n2 = gimple_omp_for_final (for_stmt, i);
305 switch (loop->cond_code)
306 {
307 case LT_EXPR:
308 case GT_EXPR:
309 break;
310 case LE_EXPR:
311 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
312 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
313 loop->n2, size_one_node);
314 else
315 loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
316 build_int_cst (TREE_TYPE (loop->n2), 1));
317 loop->cond_code = LT_EXPR;
318 break;
319 case GE_EXPR:
320 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
321 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
322 loop->n2, size_int (-1));
323 else
324 loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
325 build_int_cst (TREE_TYPE (loop->n2), 1));
326 loop->cond_code = GT_EXPR;
327 break;
328 default:
329 gcc_unreachable ();
330 }
331
332 t = gimple_omp_for_incr (for_stmt, i);
333 gcc_assert (TREE_OPERAND (t, 0) == var);
334 switch (TREE_CODE (t))
335 {
336 case PLUS_EXPR:
337 case POINTER_PLUS_EXPR:
338 loop->step = TREE_OPERAND (t, 1);
339 break;
340 case MINUS_EXPR:
341 loop->step = TREE_OPERAND (t, 1);
342 loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
343 loop->step);
344 break;
345 default:
346 gcc_unreachable ();
347 }
348
349 if (iter_type != long_long_unsigned_type_node)
350 {
351 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
352 iter_type = long_long_unsigned_type_node;
353 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
354 && TYPE_PRECISION (TREE_TYPE (loop->v))
355 >= TYPE_PRECISION (iter_type))
356 {
357 tree n;
358
359 if (loop->cond_code == LT_EXPR)
360 n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
361 loop->n2, loop->step);
362 else
363 n = loop->n1;
364 if (TREE_CODE (n) != INTEGER_CST
365 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
366 iter_type = long_long_unsigned_type_node;
367 }
368 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
369 > TYPE_PRECISION (iter_type))
370 {
371 tree n1, n2;
372
373 if (loop->cond_code == LT_EXPR)
374 {
375 n1 = loop->n1;
376 n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
377 loop->n2, loop->step);
378 }
379 else
380 {
381 n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
383 n2 = loop->n1;
384 }
385 if (TREE_CODE (n1) != INTEGER_CST
386 || TREE_CODE (n2) != INTEGER_CST
387 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
388 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
389 iter_type = long_long_unsigned_type_node;
390 }
391 }
392
393 if (collapse_count && *collapse_count == NULL)
394 {
395 if ((i == 0 || count != NULL_TREE)
396 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
397 && TREE_CONSTANT (loop->n1)
398 && TREE_CONSTANT (loop->n2)
399 && TREE_CODE (loop->step) == INTEGER_CST)
400 {
401 tree itype = TREE_TYPE (loop->v);
402
403 if (POINTER_TYPE_P (itype))
404 itype
405 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
406 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
407 t = fold_build2 (PLUS_EXPR, itype,
408 fold_convert (itype, loop->step), t);
409 t = fold_build2 (PLUS_EXPR, itype, t,
410 fold_convert (itype, loop->n2));
411 t = fold_build2 (MINUS_EXPR, itype, t,
412 fold_convert (itype, loop->n1));
413 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
414 t = fold_build2 (TRUNC_DIV_EXPR, itype,
415 fold_build1 (NEGATE_EXPR, itype, t),
416 fold_build1 (NEGATE_EXPR, itype,
417 fold_convert (itype,
418 loop->step)));
419 else
420 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
421 fold_convert (itype, loop->step));
422 t = fold_convert (long_long_unsigned_type_node, t);
423 if (count != NULL_TREE)
424 count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
425 count, t);
426 else
427 count = t;
428 if (TREE_CODE (count) != INTEGER_CST)
429 count = NULL_TREE;
430 }
431 else
432 count = NULL_TREE;
433 }
434 }
435
436 if (count)
437 {
438 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
439 iter_type = long_long_unsigned_type_node;
440 else
441 iter_type = long_integer_type_node;
442 }
443 else if (collapse_iter && *collapse_iter != NULL)
444 iter_type = TREE_TYPE (*collapse_iter);
445 fd->iter_type = iter_type;
446 if (collapse_iter && *collapse_iter == NULL)
447 *collapse_iter = create_tmp_var (iter_type, ".iter");
448 if (collapse_count && *collapse_count == NULL)
449 {
450 if (count)
451 *collapse_count = fold_convert (iter_type, count);
452 else
453 *collapse_count = create_tmp_var (iter_type, ".count");
454 }
455
456 if (fd->collapse > 1)
457 {
458 fd->loop.v = *collapse_iter;
459 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
460 fd->loop.n2 = *collapse_count;
461 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
462 fd->loop.cond_code = LT_EXPR;
463 }
464 }
465
466
467 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
468 is the immediate dominator of PAR_ENTRY_BB, return true if there
469 are no data dependencies that would prevent expanding the parallel
470 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
471
472 When expanding a combined parallel+workshare region, the call to
473 the child function may need additional arguments in the case of
474 GIMPLE_OMP_FOR regions. In some cases, these arguments are
475 computed out of variables passed in from the parent to the child
476 via 'struct .omp_data_s'. For instance:
477
478 #pragma omp parallel for schedule (guided, i * 4)
479 for (j ...)
480
481 Is lowered into:
482
483 # BLOCK 2 (PAR_ENTRY_BB)
484 .omp_data_o.i = i;
485 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
486
487 # BLOCK 3 (WS_ENTRY_BB)
488 .omp_data_i = &.omp_data_o;
489 D.1667 = .omp_data_i->i;
490 D.1598 = D.1667 * 4;
491 #pragma omp for schedule (guided, D.1598)
492
493 When we outline the parallel region, the call to the child function
494 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
495 that value is computed *after* the call site. So, in principle we
496 cannot do the transformation.
497
498 To see whether the code in WS_ENTRY_BB blocks the combined
499 parallel+workshare call, we collect all the variables used in the
500 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
501 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
502 call.
503
504 FIXME. If we had the SSA form built at this point, we could merely
505 hoist the code in block 3 into block 2 and be done with it. But at
506 this point we don't have dataflow information and though we could
507 hack something up here, it is really not worth the aggravation. */
508
509 static bool
510 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
511 {
512 struct omp_for_data fd;
513 gimple par_stmt, ws_stmt;
514
515 par_stmt = last_stmt (par_entry_bb);
516 ws_stmt = last_stmt (ws_entry_bb);
517
518 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
519 return true;
520
521 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
522
523 extract_omp_for_data (ws_stmt, &fd, NULL);
524
525 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
526 return false;
527 if (fd.iter_type != long_integer_type_node)
528 return false;
529
530 /* FIXME. We give up too easily here. If any of these arguments
531 are not constants, they will likely involve variables that have
532 been mapped into fields of .omp_data_s for sharing with the child
533 function. With appropriate data flow, it would be possible to
534 see through this. */
535 if (!is_gimple_min_invariant (fd.loop.n1)
536 || !is_gimple_min_invariant (fd.loop.n2)
537 || !is_gimple_min_invariant (fd.loop.step)
538 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
539 return false;
540
541 return true;
542 }
543
544
545 /* Collect additional arguments needed to emit a combined
546 parallel+workshare call. WS_STMT is the workshare directive being
547 expanded. */
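
/* Note on ordering: the list below is built by consing each value onto
   the front, so the final argument order is n1, n2, step and then, when
   present, chunk_size; these are appended to the combined library call
   (e.g. the GOMP_parallel_loop_* entry points) that is built later in
   this file.  */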
548
549 static tree
550 get_ws_args_for (gimple ws_stmt)
551 {
552 tree t;
553
554 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
555 {
556 struct omp_for_data fd;
557 tree ws_args;
558
559 extract_omp_for_data (ws_stmt, &fd, NULL);
560
561 ws_args = NULL_TREE;
562 if (fd.chunk_size)
563 {
564 t = fold_convert (long_integer_type_node, fd.chunk_size);
565 ws_args = tree_cons (NULL, t, ws_args);
566 }
567
568 t = fold_convert (long_integer_type_node, fd.loop.step);
569 ws_args = tree_cons (NULL, t, ws_args);
570
571 t = fold_convert (long_integer_type_node, fd.loop.n2);
572 ws_args = tree_cons (NULL, t, ws_args);
573
574 t = fold_convert (long_integer_type_node, fd.loop.n1);
575 ws_args = tree_cons (NULL, t, ws_args);
576
577 return ws_args;
578 }
579 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
580 {
581 /* Number of sections is equal to the number of edges from the
582 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
583 the exit of the sections region. */
584 basic_block bb = single_succ (gimple_bb (ws_stmt));
585 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
586 t = tree_cons (NULL, t, NULL);
587 return t;
588 }
589
590 gcc_unreachable ();
591 }
592
593
594 /* Discover whether REGION is a combined parallel+workshare region. */
595
596 static void
597 determine_parallel_type (struct omp_region *region)
598 {
599 basic_block par_entry_bb, par_exit_bb;
600 basic_block ws_entry_bb, ws_exit_bb;
601
602 if (region == NULL || region->inner == NULL
603 || region->exit == NULL || region->inner->exit == NULL
604 || region->inner->cont == NULL)
605 return;
606
607 /* We only support parallel+for and parallel+sections. */
608 if (region->type != GIMPLE_OMP_PARALLEL
609 || (region->inner->type != GIMPLE_OMP_FOR
610 && region->inner->type != GIMPLE_OMP_SECTIONS))
611 return;
612
613 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
614 WS_EXIT_BB -> PAR_EXIT_BB. */
615 par_entry_bb = region->entry;
616 par_exit_bb = region->exit;
617 ws_entry_bb = region->inner->entry;
618 ws_exit_bb = region->inner->exit;
619
620 if (single_succ (par_entry_bb) == ws_entry_bb
621 && single_succ (ws_exit_bb) == par_exit_bb
622 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
623 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
624 || (last_and_only_stmt (ws_entry_bb)
625 && last_and_only_stmt (par_exit_bb))))
626 {
627 gimple ws_stmt = last_stmt (ws_entry_bb);
628
629 if (region->inner->type == GIMPLE_OMP_FOR)
630 {
631 /* If this is a combined parallel loop, we need to determine
632 whether or not to use the combined library calls. There
633 are two cases where we do not apply the transformation:
634 static loops and any kind of ordered loop. In the first
635 case, we already open code the loop so there is no need
636 to do anything else. In the latter case, the combined
637 parallel loop call would still need extra synchronization
638 to implement ordered semantics, so there would not be any
639 gain in using the combined call. */
640 tree clauses = gimple_omp_for_clauses (ws_stmt);
641 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
642 if (c == NULL
643 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
644 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
645 {
646 region->is_combined_parallel = false;
647 region->inner->is_combined_parallel = false;
648 return;
649 }
650 }
651
652 region->is_combined_parallel = true;
653 region->inner->is_combined_parallel = true;
654 region->ws_args = get_ws_args_for (ws_stmt);
655 }
656 }
657
658
659 /* Return true if EXPR is variable sized. */
660
661 static inline bool
662 is_variable_sized (const_tree expr)
663 {
664 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
665 }
666
667 /* Return true if DECL is a reference type. */
668
669 static inline bool
670 is_reference (tree decl)
671 {
672 return lang_hooks.decls.omp_privatize_by_reference (decl);
673 }
674
675 /* Look up variables in the decl or field splay trees.  The "maybe" form
676 allows the variable to not have been entered; otherwise we assert
677 that the variable must have been entered.  */
678
679 static inline tree
680 lookup_decl (tree var, omp_context *ctx)
681 {
682 tree *n;
683 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
684 return *n;
685 }
686
687 static inline tree
688 maybe_lookup_decl (const_tree var, omp_context *ctx)
689 {
690 tree *n;
691 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
692 return n ? *n : NULL_TREE;
693 }
694
695 static inline tree
696 lookup_field (tree var, omp_context *ctx)
697 {
698 splay_tree_node n;
699 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
700 return (tree) n->value;
701 }
702
703 static inline tree
704 lookup_sfield (tree var, omp_context *ctx)
705 {
706 splay_tree_node n;
707 n = splay_tree_lookup (ctx->sfield_map
708 ? ctx->sfield_map : ctx->field_map,
709 (splay_tree_key) var);
710 return (tree) n->value;
711 }
712
713 static inline tree
714 maybe_lookup_field (tree var, omp_context *ctx)
715 {
716 splay_tree_node n;
717 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
718 return n ? (tree) n->value : NULL_TREE;
719 }
720
721 /* Return true if DECL should be copied by pointer. SHARED_CTX is
722 the parallel context if DECL is to be shared. */
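
/* For illustration: an aggregate, a variable whose address is taken, or
   a global is communicated through a pointer field, whereas a plain
   local scalar shared with a parallel can normally use by-value
   copy-in/copy-out, subject to the nested-parallel and task
   restrictions checked below.  */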
723
724 static bool
725 use_pointer_for_field (tree decl, omp_context *shared_ctx)
726 {
727 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
728 return true;
729
730 /* We can only use copy-in/copy-out semantics for shared variables
731 when we know the value is not accessible from an outer scope. */
732 if (shared_ctx)
733 {
734 /* ??? Trivially accessible from anywhere. But why would we even
735 be passing an address in this case? Should we simply assert
736 this to be false, or should we have a cleanup pass that removes
737 these from the list of mappings? */
738 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
739 return true;
740
741 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
742 without analyzing the expression whether or not its location
743 is accessible to anyone else. In the case of nested parallel
744 regions it certainly may be. */
745 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
746 return true;
747
748 /* Do not use copy-in/copy-out for variables that have their
749 address taken. */
750 if (TREE_ADDRESSABLE (decl))
751 return true;
752
753 /* Disallow copy-in/out in nested parallel if
754 decl is shared in outer parallel; otherwise
755 each thread could store the shared variable
756 in its own copy-in location, making the
757 variable no longer really shared. */
758 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
759 {
760 omp_context *up;
761
762 for (up = shared_ctx->outer; up; up = up->outer)
763 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
764 break;
765
766 if (up)
767 {
768 tree c;
769
770 for (c = gimple_omp_taskreg_clauses (up->stmt);
771 c; c = OMP_CLAUSE_CHAIN (c))
772 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
773 && OMP_CLAUSE_DECL (c) == decl)
774 break;
775
776 if (c)
777 return true;
778 }
779 }
780
781 /* For tasks avoid using copy-in/out, unless they are readonly
782 (in which case just copy-in is used). As tasks can be
783 deferred or executed in a different thread, when GOMP_task
784 returns, the task hasn't necessarily terminated. */
785 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
786 {
787 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
788 if (is_gimple_reg (outer))
789 {
790 /* Taking address of OUTER in lower_send_shared_vars
791 might need regimplification of everything that uses the
792 variable. */
793 if (!task_shared_vars)
794 task_shared_vars = BITMAP_ALLOC (NULL);
795 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
796 TREE_ADDRESSABLE (outer) = 1;
797 }
798 return true;
799 }
800 }
801
802 return false;
803 }
804
805 /* Create a new VAR_DECL and copy information from VAR to it. */
806
807 tree
808 copy_var_decl (tree var, tree name, tree type)
809 {
810 tree copy = build_decl (VAR_DECL, name, type);
811
812 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
813 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
814 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
815 DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
816 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
817 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
818 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
819 DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
820 TREE_USED (copy) = 1;
821 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
822
823 return copy;
824 }
825
826 /* Construct a new automatic decl similar to VAR. */
827
828 static tree
829 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
830 {
831 tree copy = copy_var_decl (var, name, type);
832
833 DECL_CONTEXT (copy) = current_function_decl;
834 TREE_CHAIN (copy) = ctx->block_vars;
835 ctx->block_vars = copy;
836
837 return copy;
838 }
839
840 static tree
841 omp_copy_decl_1 (tree var, omp_context *ctx)
842 {
843 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
844 }
845
846 /* Build tree nodes to access the field for VAR on the receiver side. */
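
/* For a by-value field this yields CTX->RECEIVER_DECL->FIELD, i.e.
   something like the .omp_data_i->i reference in the example comment
   further up; when BY_REF is set an extra indirection through the
   stored pointer is added.  */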
847
848 static tree
849 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
850 {
851 tree x, field = lookup_field (var, ctx);
852
853 /* If the receiver record type was remapped in the child function,
854 remap the field into the new record type. */
855 x = maybe_lookup_field (field, ctx);
856 if (x != NULL)
857 field = x;
858
859 x = build_fold_indirect_ref (ctx->receiver_decl);
860 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
861 if (by_ref)
862 x = build_fold_indirect_ref (x);
863
864 return x;
865 }
866
867 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
868 of a parallel, this is a component reference; for workshare constructs
869 this is some variable. */
870
871 static tree
872 build_outer_var_ref (tree var, omp_context *ctx)
873 {
874 tree x;
875
876 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
877 x = var;
878 else if (is_variable_sized (var))
879 {
880 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
881 x = build_outer_var_ref (x, ctx);
882 x = build_fold_indirect_ref (x);
883 }
884 else if (is_taskreg_ctx (ctx))
885 {
886 bool by_ref = use_pointer_for_field (var, NULL);
887 x = build_receiver_ref (var, by_ref, ctx);
888 }
889 else if (ctx->outer)
890 x = lookup_decl (var, ctx->outer);
891 else if (is_reference (var))
892 /* This can happen with orphaned constructs. If var is reference, it is
893 possible it is shared and as such valid. */
894 x = var;
895 else
896 gcc_unreachable ();
897
898 if (is_reference (var))
899 x = build_fold_indirect_ref (x);
900
901 return x;
902 }
903
904 /* Build tree nodes to access the field for VAR on the sender side. */
905
906 static tree
907 build_sender_ref (tree var, omp_context *ctx)
908 {
909 tree field = lookup_sfield (var, ctx);
910 return build3 (COMPONENT_REF, TREE_TYPE (field),
911 ctx->sender_decl, field, NULL);
912 }
913
914 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
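
/* As used throughout this file, the low bit of MASK (value 1) requests
   a field in CTX->RECORD_TYPE (retrievable via lookup_field) and the
   next bit (value 2) a field in CTX->SRECORD_TYPE (retrievable via
   lookup_sfield); BY_REF makes the field a pointer to VAR's type rather
   than the type itself.  */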
915
916 static void
917 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
918 {
919 tree field, type, sfield = NULL_TREE;
920
921 gcc_assert ((mask & 1) == 0
922 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
923 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
924 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
925
926 type = TREE_TYPE (var);
927 if (by_ref)
928 type = build_pointer_type (type);
929 else if ((mask & 3) == 1 && is_reference (var))
930 type = TREE_TYPE (type);
931
932 field = build_decl (FIELD_DECL, DECL_NAME (var), type);
933
934 /* Remember what variable this field was created for. This does have a
935 side effect of making dwarf2out ignore this member, so for helpful
936 debugging we clear it later in delete_omp_context. */
937 DECL_ABSTRACT_ORIGIN (field) = var;
938 if (type == TREE_TYPE (var))
939 {
940 DECL_ALIGN (field) = DECL_ALIGN (var);
941 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
942 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
943 }
944 else
945 DECL_ALIGN (field) = TYPE_ALIGN (type);
946
947 if ((mask & 3) == 3)
948 {
949 insert_field_into_struct (ctx->record_type, field);
950 if (ctx->srecord_type)
951 {
952 sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
953 DECL_ABSTRACT_ORIGIN (sfield) = var;
954 DECL_ALIGN (sfield) = DECL_ALIGN (field);
955 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
956 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
957 insert_field_into_struct (ctx->srecord_type, sfield);
958 }
959 }
960 else
961 {
962 if (ctx->srecord_type == NULL_TREE)
963 {
964 tree t;
965
966 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
967 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
968 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
969 {
970 sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
971 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
972 insert_field_into_struct (ctx->srecord_type, sfield);
973 splay_tree_insert (ctx->sfield_map,
974 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
975 (splay_tree_value) sfield);
976 }
977 }
978 sfield = field;
979 insert_field_into_struct ((mask & 1) ? ctx->record_type
980 : ctx->srecord_type, field);
981 }
982
983 if (mask & 1)
984 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
985 (splay_tree_value) field);
986 if ((mask & 2) && ctx->sfield_map)
987 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
988 (splay_tree_value) sfield);
989 }
990
991 static tree
992 install_var_local (tree var, omp_context *ctx)
993 {
994 tree new_var = omp_copy_decl_1 (var, ctx);
995 insert_decl_map (&ctx->cb, var, new_var);
996 return new_var;
997 }
998
999 /* Adjust the replacement for DECL in CTX for the new context. This means
1000 copying the DECL_VALUE_EXPR, and fixing up the type. */
1001
1002 static void
1003 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1004 {
1005 tree new_decl, size;
1006
1007 new_decl = lookup_decl (decl, ctx);
1008
1009 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1010
1011 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1012 && DECL_HAS_VALUE_EXPR_P (decl))
1013 {
1014 tree ve = DECL_VALUE_EXPR (decl);
1015 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1016 SET_DECL_VALUE_EXPR (new_decl, ve);
1017 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1018 }
1019
1020 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1021 {
1022 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1023 if (size == error_mark_node)
1024 size = TYPE_SIZE (TREE_TYPE (new_decl));
1025 DECL_SIZE (new_decl) = size;
1026
1027 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1028 if (size == error_mark_node)
1029 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1030 DECL_SIZE_UNIT (new_decl) = size;
1031 }
1032 }
1033
1034 /* The callback for remap_decl. Search all containing contexts for a
1035 mapping of the variable; this avoids having to duplicate the splay
1036 tree ahead of time. We know a mapping doesn't already exist in the
1037 given context. Create new mappings to implement default semantics. */
1038
1039 static tree
1040 omp_copy_decl (tree var, copy_body_data *cb)
1041 {
1042 omp_context *ctx = (omp_context *) cb;
1043 tree new_var;
1044
1045 if (TREE_CODE (var) == LABEL_DECL)
1046 {
1047 new_var = create_artificial_label ();
1048 DECL_CONTEXT (new_var) = current_function_decl;
1049 insert_decl_map (&ctx->cb, var, new_var);
1050 return new_var;
1051 }
1052
1053 while (!is_taskreg_ctx (ctx))
1054 {
1055 ctx = ctx->outer;
1056 if (ctx == NULL)
1057 return var;
1058 new_var = maybe_lookup_decl (var, ctx);
1059 if (new_var)
1060 return new_var;
1061 }
1062
1063 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1064 return var;
1065
1066 return error_mark_node;
1067 }
1068
1069
1070 /* Return the parallel region associated with STMT. */
1071
1072 /* Debugging dumps for parallel regions. */
1073 void dump_omp_region (FILE *, struct omp_region *, int);
1074 void debug_omp_region (struct omp_region *);
1075 void debug_all_omp_regions (void);
1076
1077 /* Dump the parallel region tree rooted at REGION. */
1078
1079 void
1080 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1081 {
1082 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1083 gimple_code_name[region->type]);
1084
1085 if (region->inner)
1086 dump_omp_region (file, region->inner, indent + 4);
1087
1088 if (region->cont)
1089 {
1090 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1091 region->cont->index);
1092 }
1093
1094 if (region->exit)
1095 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1096 region->exit->index);
1097 else
1098 fprintf (file, "%*s[no exit marker]\n", indent, "");
1099
1100 if (region->next)
1101 dump_omp_region (file, region->next, indent);
1102 }
1103
1104 void
1105 debug_omp_region (struct omp_region *region)
1106 {
1107 dump_omp_region (stderr, region, 0);
1108 }
1109
1110 void
1111 debug_all_omp_regions (void)
1112 {
1113 dump_omp_region (stderr, root_omp_region, 0);
1114 }
1115
1116
1117 /* Create a new parallel region starting at STMT inside region PARENT. */
1118
1119 struct omp_region *
1120 new_omp_region (basic_block bb, enum gimple_code type,
1121 struct omp_region *parent)
1122 {
1123 struct omp_region *region = XCNEW (struct omp_region);
1124
1125 region->outer = parent;
1126 region->entry = bb;
1127 region->type = type;
1128
1129 if (parent)
1130 {
1131 /* This is a nested region. Add it to the list of inner
1132 regions in PARENT. */
1133 region->next = parent->inner;
1134 parent->inner = region;
1135 }
1136 else
1137 {
1138 /* This is a toplevel region. Add it to the list of toplevel
1139 regions in ROOT_OMP_REGION. */
1140 region->next = root_omp_region;
1141 root_omp_region = region;
1142 }
1143
1144 return region;
1145 }
1146
1147 /* Release the memory associated with the region tree rooted at REGION. */
1148
1149 static void
1150 free_omp_region_1 (struct omp_region *region)
1151 {
1152 struct omp_region *i, *n;
1153
1154 for (i = region->inner; i ; i = n)
1155 {
1156 n = i->next;
1157 free_omp_region_1 (i);
1158 }
1159
1160 free (region);
1161 }
1162
1163 /* Release the memory for the entire omp region tree. */
1164
1165 void
1166 free_omp_regions (void)
1167 {
1168 struct omp_region *r, *n;
1169 for (r = root_omp_region; r ; r = n)
1170 {
1171 n = r->next;
1172 free_omp_region_1 (r);
1173 }
1174 root_omp_region = NULL;
1175 }
1176
1177
1178 /* Create a new context, with OUTER_CTX being the surrounding context. */
1179
1180 static omp_context *
1181 new_omp_context (gimple stmt, omp_context *outer_ctx)
1182 {
1183 omp_context *ctx = XCNEW (omp_context);
1184
1185 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1186 (splay_tree_value) ctx);
1187 ctx->stmt = stmt;
1188
1189 if (outer_ctx)
1190 {
1191 ctx->outer = outer_ctx;
1192 ctx->cb = outer_ctx->cb;
1193 ctx->cb.block = NULL;
1194 ctx->depth = outer_ctx->depth + 1;
1195 }
1196 else
1197 {
1198 ctx->cb.src_fn = current_function_decl;
1199 ctx->cb.dst_fn = current_function_decl;
1200 ctx->cb.src_node = cgraph_node (current_function_decl);
1201 ctx->cb.dst_node = ctx->cb.src_node;
1202 ctx->cb.src_cfun = cfun;
1203 ctx->cb.copy_decl = omp_copy_decl;
1204 ctx->cb.eh_region = -1;
1205 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1206 ctx->depth = 1;
1207 }
1208
1209 ctx->cb.decl_map = pointer_map_create ();
1210
1211 return ctx;
1212 }
1213
1214 static gimple_seq maybe_catch_exception (gimple_seq);
1215
1216 /* Finalize task copyfn. */
1217
1218 static void
1219 finalize_task_copyfn (gimple task_stmt)
1220 {
1221 struct function *child_cfun;
1222 tree child_fn, old_fn;
1223 gimple_seq seq, new_seq;
1224 gimple bind;
1225
1226 child_fn = gimple_omp_task_copy_fn (task_stmt);
1227 if (child_fn == NULL_TREE)
1228 return;
1229
1230 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1231
1232 /* Inform the callgraph about the new function. */
1233 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1234 = cfun->curr_properties;
1235
1236 old_fn = current_function_decl;
1237 push_cfun (child_cfun);
1238 current_function_decl = child_fn;
1239 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1240 seq = gimple_seq_alloc ();
1241 gimple_seq_add_stmt (&seq, bind);
1242 new_seq = maybe_catch_exception (seq);
1243 if (new_seq != seq)
1244 {
1245 bind = gimple_build_bind (NULL, new_seq, NULL);
1246 seq = gimple_seq_alloc ();
1247 gimple_seq_add_stmt (&seq, bind);
1248 }
1249 gimple_set_body (child_fn, seq);
1250 pop_cfun ();
1251 current_function_decl = old_fn;
1252
1253 cgraph_add_new_function (child_fn, false);
1254 }
1255
1256 /* Destroy an omp_context data structure.  Called through the splay tree
1257 value delete callback. */
1258
1259 static void
1260 delete_omp_context (splay_tree_value value)
1261 {
1262 omp_context *ctx = (omp_context *) value;
1263
1264 pointer_map_destroy (ctx->cb.decl_map);
1265
1266 if (ctx->field_map)
1267 splay_tree_delete (ctx->field_map);
1268 if (ctx->sfield_map)
1269 splay_tree_delete (ctx->sfield_map);
1270
1271 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1272 it produces corrupt debug information. */
1273 if (ctx->record_type)
1274 {
1275 tree t;
1276 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1277 DECL_ABSTRACT_ORIGIN (t) = NULL;
1278 }
1279 if (ctx->srecord_type)
1280 {
1281 tree t;
1282 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1283 DECL_ABSTRACT_ORIGIN (t) = NULL;
1284 }
1285
1286 if (is_task_ctx (ctx))
1287 finalize_task_copyfn (ctx->stmt);
1288
1289 XDELETE (ctx);
1290 }
1291
1292 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1293 context. */
1294
1295 static void
1296 fixup_child_record_type (omp_context *ctx)
1297 {
1298 tree f, type = ctx->record_type;
1299
1300 /* ??? It isn't sufficient to just call remap_type here, because
1301 variably_modified_type_p doesn't work the way we expect for
1302 record types. Testing each field for whether it needs remapping
1303 and creating a new record by hand works, however. */
1304 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1305 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1306 break;
1307 if (f)
1308 {
1309 tree name, new_fields = NULL;
1310
1311 type = lang_hooks.types.make_type (RECORD_TYPE);
1312 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1313 name = build_decl (TYPE_DECL, name, type);
1314 TYPE_NAME (type) = name;
1315
1316 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1317 {
1318 tree new_f = copy_node (f);
1319 DECL_CONTEXT (new_f) = type;
1320 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1321 TREE_CHAIN (new_f) = new_fields;
1322 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1323 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1324 &ctx->cb, NULL);
1325 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1326 &ctx->cb, NULL);
1327 new_fields = new_f;
1328
1329 /* Arrange to be able to look up the receiver field
1330 given the sender field. */
1331 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1332 (splay_tree_value) new_f);
1333 }
1334 TYPE_FIELDS (type) = nreverse (new_fields);
1335 layout_type (type);
1336 }
1337
1338 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1339 }
1340
1341 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1342 specified by CLAUSES. */
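
/* This works in two passes over CLAUSES: the first pass creates the
   record fields and local replacement decls, and the second fixes up
   the remapped decls (types, sizes, DECL_VALUE_EXPRs) once every
   replacement exists; reduction and lastprivate GIMPLE sequences are
   scanned at the end.  */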
1343
1344 static void
1345 scan_sharing_clauses (tree clauses, omp_context *ctx)
1346 {
1347 tree c, decl;
1348 bool scan_array_reductions = false;
1349
1350 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1351 {
1352 bool by_ref;
1353
1354 switch (OMP_CLAUSE_CODE (c))
1355 {
1356 case OMP_CLAUSE_PRIVATE:
1357 decl = OMP_CLAUSE_DECL (c);
1358 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1359 goto do_private;
1360 else if (!is_variable_sized (decl))
1361 install_var_local (decl, ctx);
1362 break;
1363
1364 case OMP_CLAUSE_SHARED:
1365 gcc_assert (is_taskreg_ctx (ctx));
1366 decl = OMP_CLAUSE_DECL (c);
1367 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1368 || !is_variable_sized (decl));
1369 /* Global variables don't need to be copied,
1370 the receiver side will use them directly. */
1371 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1372 break;
1373 by_ref = use_pointer_for_field (decl, ctx);
1374 if (! TREE_READONLY (decl)
1375 || TREE_ADDRESSABLE (decl)
1376 || by_ref
1377 || is_reference (decl))
1378 {
1379 install_var_field (decl, by_ref, 3, ctx);
1380 install_var_local (decl, ctx);
1381 break;
1382 }
1383 /* We don't need to copy const scalar vars back. */
1384 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1385 goto do_private;
1386
1387 case OMP_CLAUSE_LASTPRIVATE:
1388 /* Let the corresponding firstprivate clause create
1389 the variable. */
1390 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1391 break;
1392 /* FALLTHRU */
1393
1394 case OMP_CLAUSE_FIRSTPRIVATE:
1395 case OMP_CLAUSE_REDUCTION:
1396 decl = OMP_CLAUSE_DECL (c);
1397 do_private:
1398 if (is_variable_sized (decl))
1399 {
1400 if (is_task_ctx (ctx))
1401 install_var_field (decl, false, 1, ctx);
1402 break;
1403 }
1404 else if (is_taskreg_ctx (ctx))
1405 {
1406 bool global
1407 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1408 by_ref = use_pointer_for_field (decl, NULL);
1409
1410 if (is_task_ctx (ctx)
1411 && (global || by_ref || is_reference (decl)))
1412 {
1413 install_var_field (decl, false, 1, ctx);
1414 if (!global)
1415 install_var_field (decl, by_ref, 2, ctx);
1416 }
1417 else if (!global)
1418 install_var_field (decl, by_ref, 3, ctx);
1419 }
1420 install_var_local (decl, ctx);
1421 break;
1422
1423 case OMP_CLAUSE_COPYPRIVATE:
1424 if (ctx->outer)
1425 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1426 /* FALLTHRU */
1427
1428 case OMP_CLAUSE_COPYIN:
1429 decl = OMP_CLAUSE_DECL (c);
1430 by_ref = use_pointer_for_field (decl, NULL);
1431 install_var_field (decl, by_ref, 3, ctx);
1432 break;
1433
1434 case OMP_CLAUSE_DEFAULT:
1435 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1436 break;
1437
1438 case OMP_CLAUSE_IF:
1439 case OMP_CLAUSE_NUM_THREADS:
1440 case OMP_CLAUSE_SCHEDULE:
1441 if (ctx->outer)
1442 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1443 break;
1444
1445 case OMP_CLAUSE_NOWAIT:
1446 case OMP_CLAUSE_ORDERED:
1447 case OMP_CLAUSE_COLLAPSE:
1448 case OMP_CLAUSE_UNTIED:
1449 break;
1450
1451 default:
1452 gcc_unreachable ();
1453 }
1454 }
1455
1456 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1457 {
1458 switch (OMP_CLAUSE_CODE (c))
1459 {
1460 case OMP_CLAUSE_LASTPRIVATE:
1461 /* Let the corresponding firstprivate clause create
1462 the variable. */
1463 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1464 scan_array_reductions = true;
1465 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1466 break;
1467 /* FALLTHRU */
1468
1469 case OMP_CLAUSE_PRIVATE:
1470 case OMP_CLAUSE_FIRSTPRIVATE:
1471 case OMP_CLAUSE_REDUCTION:
1472 decl = OMP_CLAUSE_DECL (c);
1473 if (is_variable_sized (decl))
1474 install_var_local (decl, ctx);
1475 fixup_remapped_decl (decl, ctx,
1476 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1477 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1478 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1479 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1480 scan_array_reductions = true;
1481 break;
1482
1483 case OMP_CLAUSE_SHARED:
1484 decl = OMP_CLAUSE_DECL (c);
1485 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1486 fixup_remapped_decl (decl, ctx, false);
1487 break;
1488
1489 case OMP_CLAUSE_COPYPRIVATE:
1490 case OMP_CLAUSE_COPYIN:
1491 case OMP_CLAUSE_DEFAULT:
1492 case OMP_CLAUSE_IF:
1493 case OMP_CLAUSE_NUM_THREADS:
1494 case OMP_CLAUSE_SCHEDULE:
1495 case OMP_CLAUSE_NOWAIT:
1496 case OMP_CLAUSE_ORDERED:
1497 case OMP_CLAUSE_COLLAPSE:
1498 case OMP_CLAUSE_UNTIED:
1499 break;
1500
1501 default:
1502 gcc_unreachable ();
1503 }
1504 }
1505
1506 if (scan_array_reductions)
1507 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1508 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1509 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1510 {
1511 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1512 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1513 }
1514 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1515 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1516 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1517 }
1518
1519 /* Create a new name for omp child function. Returns an identifier. */
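
/* For a parent function bar this produces identifiers along the lines
   of bar.omp_fn.0 (matching the child function names used in the
   comments above), with '$' or '_' substituted for the dot on targets
   that do not allow it in labels.  */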
1520
1521 static GTY(()) unsigned int tmp_ompfn_id_num;
1522
1523 static tree
1524 create_omp_child_function_name (bool task_copy)
1525 {
1526 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1527 size_t len = IDENTIFIER_LENGTH (name);
1528 char *tmp_name, *prefix;
1529 const char *suffix;
1530
1531 suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
1532 prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
1533 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1534 strcpy (prefix + len, suffix);
1535 #ifndef NO_DOT_IN_LABEL
1536 prefix[len] = '.';
1537 #elif !defined NO_DOLLAR_IN_LABEL
1538 prefix[len] = '$';
1539 #endif
1540 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1541 return get_identifier (tmp_name);
1542 }
1543
1544 /* Build a decl for the omp child function. It'll not contain a body
1545 yet, just the bare decl. */
1546
1547 static void
1548 create_omp_child_function (omp_context *ctx, bool task_copy)
1549 {
1550 tree decl, type, name, t;
1551
1552 name = create_omp_child_function_name (task_copy);
1553 if (task_copy)
1554 type = build_function_type_list (void_type_node, ptr_type_node,
1555 ptr_type_node, NULL_TREE);
1556 else
1557 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1558
1559 decl = build_decl (FUNCTION_DECL, name, type);
1560 decl = lang_hooks.decls.pushdecl (decl);
1561
1562 if (!task_copy)
1563 ctx->cb.dst_fn = decl;
1564 else
1565 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1566
1567 TREE_STATIC (decl) = 1;
1568 TREE_USED (decl) = 1;
1569 DECL_ARTIFICIAL (decl) = 1;
1570 DECL_IGNORED_P (decl) = 0;
1571 TREE_PUBLIC (decl) = 0;
1572 DECL_UNINLINABLE (decl) = 1;
1573 DECL_EXTERNAL (decl) = 0;
1574 DECL_CONTEXT (decl) = NULL_TREE;
1575 DECL_INITIAL (decl) = make_node (BLOCK);
1576
1577 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1578 DECL_ARTIFICIAL (t) = 1;
1579 DECL_IGNORED_P (t) = 1;
1580 DECL_RESULT (decl) = t;
1581
1582 t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1583 DECL_ARTIFICIAL (t) = 1;
1584 DECL_ARG_TYPE (t) = ptr_type_node;
1585 DECL_CONTEXT (t) = current_function_decl;
1586 TREE_USED (t) = 1;
1587 DECL_ARGUMENTS (decl) = t;
1588 if (!task_copy)
1589 ctx->receiver_decl = t;
1590 else
1591 {
1592 t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
1593 ptr_type_node);
1594 DECL_ARTIFICIAL (t) = 1;
1595 DECL_ARG_TYPE (t) = ptr_type_node;
1596 DECL_CONTEXT (t) = current_function_decl;
1597 TREE_USED (t) = 1;
1598 TREE_ADDRESSABLE (t) = 1;
1599 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1600 DECL_ARGUMENTS (decl) = t;
1601 }
1602
1603 /* Allocate memory for the function structure. The call to
1604 allocate_struct_function clobbers CFUN, so we need to restore
1605 it afterward. */
1606 push_struct_function (decl);
1607 DECL_SOURCE_LOCATION (decl) = gimple_location (ctx->stmt);
1608 cfun->function_end_locus = gimple_location (ctx->stmt);
1609 pop_cfun ();
1610 }
1611
1612
1613 /* Scan an OpenMP parallel directive. */
1614
1615 static void
1616 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1617 {
1618 omp_context *ctx;
1619 tree name;
1620 gimple stmt = gsi_stmt (*gsi);
1621
1622 /* Ignore parallel directives with empty bodies, unless there
1623 are copyin clauses. */
1624 if (optimize > 0
1625 && empty_body_p (gimple_omp_body (stmt))
1626 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1627 OMP_CLAUSE_COPYIN) == NULL)
1628 {
1629 gsi_replace (gsi, gimple_build_nop (), false);
1630 return;
1631 }
1632
1633 ctx = new_omp_context (stmt, outer_ctx);
1634 if (taskreg_nesting_level > 1)
1635 ctx->is_nested = true;
1636 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1637 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1638 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1639 name = create_tmp_var_name (".omp_data_s");
1640 name = build_decl (TYPE_DECL, name, ctx->record_type);
1641 TYPE_NAME (ctx->record_type) = name;
1642 create_omp_child_function (ctx, false);
1643 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1644
1645 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1646 scan_omp (gimple_omp_body (stmt), ctx);
1647
1648 if (TYPE_FIELDS (ctx->record_type) == NULL)
1649 ctx->record_type = ctx->receiver_decl = NULL;
1650 else
1651 {
1652 layout_type (ctx->record_type);
1653 fixup_child_record_type (ctx);
1654 }
1655 }
1656
1657 /* Scan an OpenMP task directive. */
1658
1659 static void
1660 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1661 {
1662 omp_context *ctx;
1663 tree name, t;
1664 gimple stmt = gsi_stmt (*gsi);
1665
1666 /* Ignore task directives with empty bodies. */
1667 if (optimize > 0
1668 && empty_body_p (gimple_omp_body (stmt)))
1669 {
1670 gsi_replace (gsi, gimple_build_nop (), false);
1671 return;
1672 }
1673
1674 ctx = new_omp_context (stmt, outer_ctx);
1675 if (taskreg_nesting_level > 1)
1676 ctx->is_nested = true;
1677 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1678 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1679 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1680 name = create_tmp_var_name (".omp_data_s");
1681 name = build_decl (TYPE_DECL, name, ctx->record_type);
1682 TYPE_NAME (ctx->record_type) = name;
1683 create_omp_child_function (ctx, false);
1684 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1685
1686 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1687
1688 if (ctx->srecord_type)
1689 {
1690 name = create_tmp_var_name (".omp_data_a");
1691 name = build_decl (TYPE_DECL, name, ctx->srecord_type);
1692 TYPE_NAME (ctx->srecord_type) = name;
1693 create_omp_child_function (ctx, true);
1694 }
1695
1696 scan_omp (gimple_omp_body (stmt), ctx);
1697
1698 if (TYPE_FIELDS (ctx->record_type) == NULL)
1699 {
1700 ctx->record_type = ctx->receiver_decl = NULL;
1701 t = build_int_cst (long_integer_type_node, 0);
1702 gimple_omp_task_set_arg_size (stmt, t);
1703 t = build_int_cst (long_integer_type_node, 1);
1704 gimple_omp_task_set_arg_align (stmt, t);
1705 }
1706 else
1707 {
1708 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1709 /* Move VLA fields to the end. */
1710 p = &TYPE_FIELDS (ctx->record_type);
1711 while (*p)
1712 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1713 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1714 {
1715 *q = *p;
1716 *p = TREE_CHAIN (*p);
1717 TREE_CHAIN (*q) = NULL_TREE;
1718 q = &TREE_CHAIN (*q);
1719 }
1720 else
1721 p = &TREE_CHAIN (*p);
1722 *p = vla_fields;
1723 layout_type (ctx->record_type);
1724 fixup_child_record_type (ctx);
1725 if (ctx->srecord_type)
1726 layout_type (ctx->srecord_type);
1727 t = fold_convert (long_integer_type_node,
1728 TYPE_SIZE_UNIT (ctx->record_type));
1729 gimple_omp_task_set_arg_size (stmt, t);
1730 t = build_int_cst (long_integer_type_node,
1731 TYPE_ALIGN_UNIT (ctx->record_type));
1732 gimple_omp_task_set_arg_align (stmt, t);
1733 }
1734 }
1735
1736
1737 /* Scan an OpenMP loop directive. */
1738
1739 static void
1740 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1741 {
1742 omp_context *ctx;
1743 size_t i;
1744
1745 ctx = new_omp_context (stmt, outer_ctx);
1746
1747 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1748
1749 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1750 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1751 {
1752 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1753 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1754 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1755 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1756 }
1757 scan_omp (gimple_omp_body (stmt), ctx);
1758 }
1759
1760 /* Scan an OpenMP sections directive. */
1761
1762 static void
1763 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1764 {
1765 omp_context *ctx;
1766
1767 ctx = new_omp_context (stmt, outer_ctx);
1768 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1769 scan_omp (gimple_omp_body (stmt), ctx);
1770 }
1771
1772 /* Scan an OpenMP single directive. */
1773
1774 static void
1775 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1776 {
1777 omp_context *ctx;
1778 tree name;
1779
1780 ctx = new_omp_context (stmt, outer_ctx);
1781 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1782 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1783 name = create_tmp_var_name (".omp_copy_s");
1784 name = build_decl (TYPE_DECL, name, ctx->record_type);
1785 TYPE_NAME (ctx->record_type) = name;
1786
1787 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1788 scan_omp (gimple_omp_body (stmt), ctx);
1789
1790 if (TYPE_FIELDS (ctx->record_type) == NULL)
1791 ctx->record_type = NULL;
1792 else
1793 layout_type (ctx->record_type);
1794 }
1795
1796
1797 /* Check OpenMP nesting restrictions. */
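/* For example (an illustrative sketch), the single directive below is
   diagnosed by this function, since a work-sharing region may not be
   closely nested inside another work-sharing region bound to the same
   parallel:

       #pragma omp parallel
       #pragma omp for
       for (i = 0; i < n; i++)
	 {
	   #pragma omp single		<-- warned about
	   f (i);
	 }
   */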
1798 static void
1799 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1800 {
1801 switch (gimple_code (stmt))
1802 {
1803 case GIMPLE_OMP_FOR:
1804 case GIMPLE_OMP_SECTIONS:
1805 case GIMPLE_OMP_SINGLE:
1806 case GIMPLE_CALL:
1807 for (; ctx != NULL; ctx = ctx->outer)
1808 switch (gimple_code (ctx->stmt))
1809 {
1810 case GIMPLE_OMP_FOR:
1811 case GIMPLE_OMP_SECTIONS:
1812 case GIMPLE_OMP_SINGLE:
1813 case GIMPLE_OMP_ORDERED:
1814 case GIMPLE_OMP_MASTER:
1815 case GIMPLE_OMP_TASK:
1816 if (is_gimple_call (stmt))
1817 {
1818 warning (0, "barrier region may not be closely nested inside "
1819 "of work-sharing, critical, ordered, master or "
1820 "explicit task region");
1821 return;
1822 }
1823 warning (0, "work-sharing region may not be closely nested inside "
1824 "of work-sharing, critical, ordered, master or explicit "
1825 "task region");
1826 return;
1827 case GIMPLE_OMP_PARALLEL:
1828 return;
1829 default:
1830 break;
1831 }
1832 break;
1833 case GIMPLE_OMP_MASTER:
1834 for (; ctx != NULL; ctx = ctx->outer)
1835 switch (gimple_code (ctx->stmt))
1836 {
1837 case GIMPLE_OMP_FOR:
1838 case GIMPLE_OMP_SECTIONS:
1839 case GIMPLE_OMP_SINGLE:
1840 case GIMPLE_OMP_TASK:
1841 warning (0, "master region may not be closely nested inside "
1842 "of work-sharing or explicit task region");
1843 return;
1844 case GIMPLE_OMP_PARALLEL:
1845 return;
1846 default:
1847 break;
1848 }
1849 break;
1850 case GIMPLE_OMP_ORDERED:
1851 for (; ctx != NULL; ctx = ctx->outer)
1852 switch (gimple_code (ctx->stmt))
1853 {
1854 case GIMPLE_OMP_CRITICAL:
1855 case GIMPLE_OMP_TASK:
1856 warning (0, "ordered region may not be closely nested inside "
1857 "of critical or explicit task region");
1858 return;
1859 case GIMPLE_OMP_FOR:
1860 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1861 OMP_CLAUSE_ORDERED) == NULL)
1862 warning (0, "ordered region must be closely nested inside "
1863 "a loop region with an ordered clause");
1864 return;
1865 case GIMPLE_OMP_PARALLEL:
1866 return;
1867 default:
1868 break;
1869 }
1870 break;
1871 case GIMPLE_OMP_CRITICAL:
1872 for (; ctx != NULL; ctx = ctx->outer)
1873 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1874 && (gimple_omp_critical_name (stmt)
1875 == gimple_omp_critical_name (ctx->stmt)))
1876 {
1877 warning (0, "critical region may not be nested inside a critical "
1878 "region with the same name");
1879 return;
1880 }
1881 break;
1882 default:
1883 break;
1884 }
1885 }
1886
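/* Illustrative sketch (editor's note, not part of the original sources;
   the variable names are made up): the kind of nesting the check above
   diagnoses.  The inner worksharing loop is closely nested inside
   another worksharing loop with no intervening parallel region:

     #pragma omp parallel
     #pragma omp for
     for (i = 0; i < n; i++)
       {
         #pragma omp for            <-- diagnosed by the walk above
         for (j = 0; j < n; j++)
           body (i, j);
       }

   A "#pragma omp barrier" in the same position reaches this check as a
   call to the GOMP_barrier builtin and gets the corresponding barrier
   warning instead.  */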
1887
1888 /* Helper function for scan_omp.
1889
1890 Callback for walk_tree or operators in walk_gimple_stmt used to
1891 scan for OpenMP directives in TP. */
1892
1893 static tree
1894 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1895 {
1896 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1897 omp_context *ctx = (omp_context *) wi->info;
1898 tree t = *tp;
1899
1900 switch (TREE_CODE (t))
1901 {
1902 case VAR_DECL:
1903 case PARM_DECL:
1904 case LABEL_DECL:
1905 case RESULT_DECL:
1906 if (ctx)
1907 *tp = remap_decl (t, &ctx->cb);
1908 break;
1909
1910 default:
1911 if (ctx && TYPE_P (t))
1912 *tp = remap_type (t, &ctx->cb);
1913 else if (!DECL_P (t))
1914 *walk_subtrees = 1;
1915 break;
1916 }
1917
1918 return NULL_TREE;
1919 }
1920
1921
1922 /* Helper function for scan_omp.
1923
1924 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1925 the current statement in GSI. */
1926
1927 static tree
1928 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1929 struct walk_stmt_info *wi)
1930 {
1931 gimple stmt = gsi_stmt (*gsi);
1932 omp_context *ctx = (omp_context *) wi->info;
1933
1934 if (gimple_has_location (stmt))
1935 input_location = gimple_location (stmt);
1936
1937 /* Check the OpenMP nesting restrictions. */
1938 if (ctx != NULL)
1939 {
1940 if (is_gimple_omp (stmt))
1941 check_omp_nesting_restrictions (stmt, ctx);
1942 else if (is_gimple_call (stmt))
1943 {
1944 tree fndecl = gimple_call_fndecl (stmt);
1945 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1946 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1947 check_omp_nesting_restrictions (stmt, ctx);
1948 }
1949 }
1950
1951 *handled_ops_p = true;
1952
1953 switch (gimple_code (stmt))
1954 {
1955 case GIMPLE_OMP_PARALLEL:
1956 taskreg_nesting_level++;
1957 scan_omp_parallel (gsi, ctx);
1958 taskreg_nesting_level--;
1959 break;
1960
1961 case GIMPLE_OMP_TASK:
1962 taskreg_nesting_level++;
1963 scan_omp_task (gsi, ctx);
1964 taskreg_nesting_level--;
1965 break;
1966
1967 case GIMPLE_OMP_FOR:
1968 scan_omp_for (stmt, ctx);
1969 break;
1970
1971 case GIMPLE_OMP_SECTIONS:
1972 scan_omp_sections (stmt, ctx);
1973 break;
1974
1975 case GIMPLE_OMP_SINGLE:
1976 scan_omp_single (stmt, ctx);
1977 break;
1978
1979 case GIMPLE_OMP_SECTION:
1980 case GIMPLE_OMP_MASTER:
1981 case GIMPLE_OMP_ORDERED:
1982 case GIMPLE_OMP_CRITICAL:
1983 ctx = new_omp_context (stmt, ctx);
1984 scan_omp (gimple_omp_body (stmt), ctx);
1985 break;
1986
1987 case GIMPLE_BIND:
1988 {
1989 tree var;
1990
1991 *handled_ops_p = false;
1992 if (ctx)
1993 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
1994 insert_decl_map (&ctx->cb, var, var);
1995 }
1996 break;
1997 default:
1998 *handled_ops_p = false;
1999 break;
2000 }
2001
2002 return NULL_TREE;
2003 }
2004
2005
2006 /* Scan all the statements starting at the current statement. CTX
2007 contains context information about the OpenMP directives and
2008 clauses found during the scan. */
2009
2010 static void
2011 scan_omp (gimple_seq body, omp_context *ctx)
2012 {
2013 location_t saved_location;
2014 struct walk_stmt_info wi;
2015
2016 memset (&wi, 0, sizeof (wi));
2017 wi.info = ctx;
2018 wi.want_locations = true;
2019
2020 saved_location = input_location;
2021 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2022 input_location = saved_location;
2023 }
2024 \f
2025 /* Re-gimplification and code generation routines. */
2026
2027 /* Build a call to GOMP_barrier. */
2028
2029 static tree
2030 build_omp_barrier (void)
2031 {
2032 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2033 }
2034
2035 /* If a context was created for STMT when it was scanned, return it. */
2036
2037 static omp_context *
2038 maybe_lookup_ctx (gimple stmt)
2039 {
2040 splay_tree_node n;
2041 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2042 return n ? (omp_context *) n->value : NULL;
2043 }
2044
2045
2046 /* Find the mapping for DECL in CTX or the immediately enclosing
2047 context that has a mapping for DECL.
2048
2049 If CTX is a nested parallel directive, we may have to use the decl
2050 mappings created in CTX's parent context. Suppose that we have the
2051    following parallel nesting (variable UIDs shown for clarity):
2052
2053 iD.1562 = 0;
2054 #omp parallel shared(iD.1562) -> outer parallel
2055 iD.1562 = iD.1562 + 1;
2056
2057 #omp parallel shared (iD.1562) -> inner parallel
2058 iD.1562 = iD.1562 - 1;
2059
2060 Each parallel structure will create a distinct .omp_data_s structure
2061 for copying iD.1562 in/out of the directive:
2062
2063 outer parallel .omp_data_s.1.i -> iD.1562
2064 inner parallel .omp_data_s.2.i -> iD.1562
2065
2066 A shared variable mapping will produce a copy-out operation before
2067 the parallel directive and a copy-in operation after it. So, in
2068 this case we would have:
2069
2070 iD.1562 = 0;
2071 .omp_data_o.1.i = iD.1562;
2072 #omp parallel shared(iD.1562) -> outer parallel
2073 .omp_data_i.1 = &.omp_data_o.1
2074 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2075
2076 .omp_data_o.2.i = iD.1562; -> **
2077 #omp parallel shared(iD.1562) -> inner parallel
2078 .omp_data_i.2 = &.omp_data_o.2
2079 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2080
2081
2082 ** This is a problem. The symbol iD.1562 cannot be referenced
2083 inside the body of the outer parallel region. But since we are
2084 emitting this copy operation while expanding the inner parallel
2085 directive, we need to access the CTX structure of the outer
2086 parallel directive to get the correct mapping:
2087
2088 .omp_data_o.2.i = .omp_data_i.1->i
2089
2090 Since there may be other workshare or parallel directives enclosing
2091 the parallel directive, it may be necessary to walk up the context
2092 parent chain. This is not a problem in general because nested
2093 parallelism happens only rarely. */
2094
2095 static tree
2096 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2097 {
2098 tree t;
2099 omp_context *up;
2100
2101 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2102 t = maybe_lookup_decl (decl, up);
2103
2104 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2105
2106 return t ? t : decl;
2107 }
2108
2109
2110 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2111 in outer contexts. */
2112
2113 static tree
2114 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2115 {
2116 tree t = NULL;
2117 omp_context *up;
2118
2119 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2120 t = maybe_lookup_decl (decl, up);
2121
2122 return t ? t : decl;
2123 }
2124
2125
2126 /* Construct the initialization value for reduction CLAUSE. */
2127
2128 tree
2129 omp_reduction_init (tree clause, tree type)
2130 {
2131 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2132 {
2133 case PLUS_EXPR:
2134 case MINUS_EXPR:
2135 case BIT_IOR_EXPR:
2136 case BIT_XOR_EXPR:
2137 case TRUTH_OR_EXPR:
2138 case TRUTH_ORIF_EXPR:
2139 case TRUTH_XOR_EXPR:
2140 case NE_EXPR:
2141 return fold_convert (type, integer_zero_node);
2142
2143 case MULT_EXPR:
2144 case TRUTH_AND_EXPR:
2145 case TRUTH_ANDIF_EXPR:
2146 case EQ_EXPR:
2147 return fold_convert (type, integer_one_node);
2148
2149 case BIT_AND_EXPR:
2150 return fold_convert (type, integer_minus_one_node);
2151
2152 case MAX_EXPR:
2153 if (SCALAR_FLOAT_TYPE_P (type))
2154 {
2155 REAL_VALUE_TYPE max, min;
2156 if (HONOR_INFINITIES (TYPE_MODE (type)))
2157 {
2158 real_inf (&max);
2159 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2160 }
2161 else
2162 real_maxval (&min, 1, TYPE_MODE (type));
2163 return build_real (type, min);
2164 }
2165 else
2166 {
2167 gcc_assert (INTEGRAL_TYPE_P (type));
2168 return TYPE_MIN_VALUE (type);
2169 }
2170
2171 case MIN_EXPR:
2172 if (SCALAR_FLOAT_TYPE_P (type))
2173 {
2174 REAL_VALUE_TYPE max;
2175 if (HONOR_INFINITIES (TYPE_MODE (type)))
2176 real_inf (&max);
2177 else
2178 real_maxval (&max, 0, TYPE_MODE (type));
2179 return build_real (type, max);
2180 }
2181 else
2182 {
2183 gcc_assert (INTEGRAL_TYPE_P (type));
2184 return TYPE_MAX_VALUE (type);
2185 }
2186
2187 default:
2188 gcc_unreachable ();
2189 }
2190 }
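
/* Illustrative values (editor's note, not part of the original sources):
   the identities omp_reduction_init chooses for a few common clauses,
   assuming "int x" and "double d":

     reduction(+:x)    ->  0
     reduction(*:x)    ->  1
     reduction(&:x)    ->  ~0       (all bits set)
     reduction(max:x)  ->  INT_MIN
     reduction(min:d)  ->  +Inf     (or DBL_MAX when infinities are not
                                     honored for the mode)  */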
2191
2192 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2193 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2194 private variables. Initialization statements go in ILIST, while calls
2195 to destructors go in DLIST. */
2196
2197 static void
2198 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2199 omp_context *ctx)
2200 {
2201 gimple_stmt_iterator diter;
2202 tree c, dtor, copyin_seq, x, ptr;
2203 bool copyin_by_ref = false;
2204 bool lastprivate_firstprivate = false;
2205 int pass;
2206
2207 *dlist = gimple_seq_alloc ();
2208 diter = gsi_start (*dlist);
2209 copyin_seq = NULL;
2210
2211 /* Do all the fixed sized types in the first pass, and the variable sized
2212 types in the second pass. This makes sure that the scalar arguments to
2213 the variable sized types are processed before we use them in the
2214 variable sized operations. */
2215 for (pass = 0; pass < 2; ++pass)
2216 {
2217 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2218 {
2219 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2220 tree var, new_var;
2221 bool by_ref;
2222
2223 switch (c_kind)
2224 {
2225 case OMP_CLAUSE_PRIVATE:
2226 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2227 continue;
2228 break;
2229 case OMP_CLAUSE_SHARED:
2230 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2231 {
2232 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2233 continue;
2234 }
2235 case OMP_CLAUSE_FIRSTPRIVATE:
2236 case OMP_CLAUSE_COPYIN:
2237 case OMP_CLAUSE_REDUCTION:
2238 break;
2239 case OMP_CLAUSE_LASTPRIVATE:
2240 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2241 {
2242 lastprivate_firstprivate = true;
2243 if (pass != 0)
2244 continue;
2245 }
2246 break;
2247 default:
2248 continue;
2249 }
2250
2251 new_var = var = OMP_CLAUSE_DECL (c);
2252 if (c_kind != OMP_CLAUSE_COPYIN)
2253 new_var = lookup_decl (var, ctx);
2254
2255 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2256 {
2257 if (pass != 0)
2258 continue;
2259 }
2260 else if (is_variable_sized (var))
2261 {
2262 /* For variable sized types, we need to allocate the
2263 actual storage here. Call alloca and store the
2264 result in the pointer decl that we created elsewhere. */
2265 if (pass == 0)
2266 continue;
2267
2268 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2269 {
2270 gimple stmt;
2271 tree tmp;
2272
2273 ptr = DECL_VALUE_EXPR (new_var);
2274 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2275 ptr = TREE_OPERAND (ptr, 0);
2276 gcc_assert (DECL_P (ptr));
2277 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2278
2279 /* void *tmp = __builtin_alloca */
2280 stmt
2281 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2282 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2283 gimple_add_tmp_var (tmp);
2284 gimple_call_set_lhs (stmt, tmp);
2285
2286 gimple_seq_add_stmt (ilist, stmt);
2287
2288 x = fold_convert (TREE_TYPE (ptr), tmp);
2289 gimplify_assign (ptr, x, ilist);
2290 }
2291 }
2292 else if (is_reference (var))
2293 {
2294 /* For references that are being privatized for Fortran,
2295 allocate new backing storage for the new pointer
2296 variable. This allows us to avoid changing all the
2297 code that expects a pointer to something that expects
2298 a direct variable. Note that this doesn't apply to
2299 C++, since reference types are disallowed in data
2300 sharing clauses there, except for NRV optimized
2301 return values. */
2302 if (pass == 0)
2303 continue;
2304
2305 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2306 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2307 {
2308 x = build_receiver_ref (var, false, ctx);
2309 x = build_fold_addr_expr (x);
2310 }
2311 else if (TREE_CONSTANT (x))
2312 {
2313 const char *name = NULL;
2314 if (DECL_NAME (var))
2315 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2316
2317 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2318 name);
2319 gimple_add_tmp_var (x);
2320 TREE_ADDRESSABLE (x) = 1;
2321 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
2322 }
2323 else
2324 {
2325 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2326 x = fold_convert (TREE_TYPE (new_var), x);
2327 }
2328
2329 gimplify_assign (new_var, x, ilist);
2330
2331 new_var = build_fold_indirect_ref (new_var);
2332 }
2333 else if (c_kind == OMP_CLAUSE_REDUCTION
2334 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2335 {
2336 if (pass == 0)
2337 continue;
2338 }
2339 else if (pass != 0)
2340 continue;
2341
2342 switch (OMP_CLAUSE_CODE (c))
2343 {
2344 case OMP_CLAUSE_SHARED:
2345 /* Shared global vars are just accessed directly. */
2346 if (is_global_var (new_var))
2347 break;
2348 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2349 needs to be delayed until after fixup_child_record_type so
2350 that we get the correct type during the dereference. */
2351 by_ref = use_pointer_for_field (var, ctx);
2352 x = build_receiver_ref (var, by_ref, ctx);
2353 SET_DECL_VALUE_EXPR (new_var, x);
2354 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2355
2356 /* ??? If VAR is not passed by reference, and the variable
2357 hasn't been initialized yet, then we'll get a warning for
2358 the store into the omp_data_s structure. Ideally, we'd be
2359 able to notice this and not store anything at all, but
2360 we're generating code too early. Suppress the warning. */
2361 if (!by_ref)
2362 TREE_NO_WARNING (var) = 1;
2363 break;
2364
2365 case OMP_CLAUSE_LASTPRIVATE:
2366 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2367 break;
2368 /* FALLTHRU */
2369
2370 case OMP_CLAUSE_PRIVATE:
2371 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2372 x = build_outer_var_ref (var, ctx);
2373 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2374 {
2375 if (is_task_ctx (ctx))
2376 x = build_receiver_ref (var, false, ctx);
2377 else
2378 x = build_outer_var_ref (var, ctx);
2379 }
2380 else
2381 x = NULL;
2382 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2383 if (x)
2384 gimplify_and_add (x, ilist);
2385 /* FALLTHRU */
2386
2387 do_dtor:
2388 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2389 if (x)
2390 {
2391 gimple_seq tseq = NULL;
2392
2393 dtor = x;
2394 gimplify_stmt (&dtor, &tseq);
2395 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2396 }
2397 break;
2398
2399 case OMP_CLAUSE_FIRSTPRIVATE:
2400 if (is_task_ctx (ctx))
2401 {
2402 if (is_reference (var) || is_variable_sized (var))
2403 goto do_dtor;
2404 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2405 ctx))
2406 || use_pointer_for_field (var, NULL))
2407 {
2408 x = build_receiver_ref (var, false, ctx);
2409 SET_DECL_VALUE_EXPR (new_var, x);
2410 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2411 goto do_dtor;
2412 }
2413 }
2414 x = build_outer_var_ref (var, ctx);
2415 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2416 gimplify_and_add (x, ilist);
2417 goto do_dtor;
2418 break;
2419
2420 case OMP_CLAUSE_COPYIN:
2421 by_ref = use_pointer_for_field (var, NULL);
2422 x = build_receiver_ref (var, by_ref, ctx);
2423 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2424 append_to_statement_list (x, &copyin_seq);
2425 copyin_by_ref |= by_ref;
2426 break;
2427
2428 case OMP_CLAUSE_REDUCTION:
2429 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2430 {
2431 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2432 x = build_outer_var_ref (var, ctx);
2433
2434 if (is_reference (var))
2435 x = build_fold_addr_expr (x);
2436 SET_DECL_VALUE_EXPR (placeholder, x);
2437 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2438 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2439 gimple_seq_add_seq (ilist,
2440 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2441 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2442 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2443 }
2444 else
2445 {
2446 x = omp_reduction_init (c, TREE_TYPE (new_var));
2447 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2448 gimplify_assign (new_var, x, ilist);
2449 }
2450 break;
2451
2452 default:
2453 gcc_unreachable ();
2454 }
2455 }
2456 }
2457
2458 /* The copyin sequence is not to be executed by the main thread, since
2459      that would result in self-copies.  A self-copy may be harmless for
2460      scalars, but it certainly is not for C++ operator=.  */
2461 if (copyin_seq)
2462 {
2463 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2464 x = build2 (NE_EXPR, boolean_type_node, x,
2465 build_int_cst (TREE_TYPE (x), 0));
2466 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2467 gimplify_and_add (x, ilist);
2468 }
2469
2470 /* If any copyin variable is passed by reference, we must ensure the
2471 master thread doesn't modify it before it is copied over in all
2472 threads. Similarly for variables in both firstprivate and
2473 lastprivate clauses we need to ensure the lastprivate copying
2474 happens after firstprivate copying in all threads. */
2475 if (copyin_by_ref || lastprivate_firstprivate)
2476 gimplify_and_add (build_omp_barrier (), ilist);
2477 }
2478
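/* Illustrative sketch (editor's note, not part of the original sources;
   the temporaries x.1 and y.2 are made-up names): for a directive such
   as

     #pragma omp parallel private(y) firstprivate(x)

   the receiver-side code built above amounts to, roughly,

     y.2 = <default construction>;     (only if the type needs one)
     x.1 = <incoming value of x>;      (copy constructor / assignment)

   appended to ILIST, with the matching destructor calls for x.1 and
   y.2 placed in DLIST when the types have non-trivial destructors.  */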
2479
2480 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2481 both parallel and workshare constructs. PREDICATE may be NULL if it's
2482 always true. */
2483
2484 static void
2485 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2486 omp_context *ctx)
2487 {
2488 tree x, c, label = NULL;
2489 bool par_clauses = false;
2490
2491 /* Early exit if there are no lastprivate clauses. */
2492 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2493 if (clauses == NULL)
2494 {
2495 /* If this was a workshare clause, see if it had been combined
2496 with its parallel. In that case, look for the clauses on the
2497 parallel statement itself. */
2498 if (is_parallel_ctx (ctx))
2499 return;
2500
2501 ctx = ctx->outer;
2502 if (ctx == NULL || !is_parallel_ctx (ctx))
2503 return;
2504
2505 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2506 OMP_CLAUSE_LASTPRIVATE);
2507 if (clauses == NULL)
2508 return;
2509 par_clauses = true;
2510 }
2511
2512 if (predicate)
2513 {
2514 gimple stmt;
2515 tree label_true, arm1, arm2;
2516
2517 label = create_artificial_label ();
2518 label_true = create_artificial_label ();
2519 arm1 = TREE_OPERAND (predicate, 0);
2520 arm2 = TREE_OPERAND (predicate, 1);
2521 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2522 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2523 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2524 label_true, label);
2525 gimple_seq_add_stmt (stmt_list, stmt);
2526 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2527 }
2528
2529 for (c = clauses; c ;)
2530 {
2531 tree var, new_var;
2532
2533 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2534 {
2535 var = OMP_CLAUSE_DECL (c);
2536 new_var = lookup_decl (var, ctx);
2537
2538 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2539 {
2540 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2541 gimple_seq_add_seq (stmt_list,
2542 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2543 }
2544 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2545
2546 x = build_outer_var_ref (var, ctx);
2547 if (is_reference (var))
2548 new_var = build_fold_indirect_ref (new_var);
2549 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2550 gimplify_and_add (x, stmt_list);
2551 }
2552 c = OMP_CLAUSE_CHAIN (c);
2553 if (c == NULL && !par_clauses)
2554 {
2555 /* If this was a workshare clause, see if it had been combined
2556 with its parallel. In that case, continue looking for the
2557 clauses also on the parallel statement itself. */
2558 if (is_parallel_ctx (ctx))
2559 break;
2560
2561 ctx = ctx->outer;
2562 if (ctx == NULL || !is_parallel_ctx (ctx))
2563 break;
2564
2565 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2566 OMP_CLAUSE_LASTPRIVATE);
2567 par_clauses = true;
2568 }
2569 }
2570
2571 if (label)
2572 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2573 }
2574
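/* Illustrative sketch (editor's note, not part of the original sources;
   x.1 and the labels are made-up names): for

     #pragma omp for lastprivate(x)

   PREDICATE is a comparison that holds only in the thread that ran the
   sequentially last iteration, and the sequence built above is roughly

     if (<last-iteration predicate>) goto L.true; else goto L.done;
   L.true:
     x = x.1;                          (copy the private value back out)
   L.done:
     ...  */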
2575
2576 /* Generate code to implement the REDUCTION clauses. */
2577
2578 static void
2579 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2580 {
2581 gimple_seq sub_seq = NULL;
2582 gimple stmt;
2583 tree x, c;
2584 int count = 0;
2585
2586 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2587      update in that case; otherwise use a lock. */
2588 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2589 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2590 {
2591 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2592 {
2593 /* Never use OMP_ATOMIC for array reductions. */
2594 count = -1;
2595 break;
2596 }
2597 count++;
2598 }
2599
2600 if (count == 0)
2601 return;
2602
2603 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2604 {
2605 tree var, ref, new_var;
2606 enum tree_code code;
2607
2608 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2609 continue;
2610
2611 var = OMP_CLAUSE_DECL (c);
2612 new_var = lookup_decl (var, ctx);
2613 if (is_reference (var))
2614 new_var = build_fold_indirect_ref (new_var);
2615 ref = build_outer_var_ref (var, ctx);
2616 code = OMP_CLAUSE_REDUCTION_CODE (c);
2617
2618 /* reduction(-:var) sums up the partial results, so it acts
2619 identically to reduction(+:var). */
2620 if (code == MINUS_EXPR)
2621 code = PLUS_EXPR;
2622
2623 if (count == 1)
2624 {
2625 tree addr = build_fold_addr_expr (ref);
2626
2627 addr = save_expr (addr);
2628 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2629 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2630 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2631 gimplify_and_add (x, stmt_seqp);
2632 return;
2633 }
2634
2635 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2636 {
2637 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2638
2639 if (is_reference (var))
2640 ref = build_fold_addr_expr (ref);
2641 SET_DECL_VALUE_EXPR (placeholder, ref);
2642 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2643 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2644 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2645 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2646 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2647 }
2648 else
2649 {
2650 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2651 ref = build_outer_var_ref (var, ctx);
2652 gimplify_assign (ref, x, &sub_seq);
2653 }
2654 }
2655
2656 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2657 gimple_seq_add_stmt (stmt_seqp, stmt);
2658
2659 gimple_seq_add_seq (stmt_seqp, sub_seq);
2660
2661 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2662 gimple_seq_add_stmt (stmt_seqp, stmt);
2663 }
2664
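/* Illustrative sketch (editor's note, not part of the original sources;
   s.1 and t.2 are made-up names): with a single scalar clause such as

     #pragma omp parallel reduction(+:s)

   the merge built above is one atomic update, roughly

     #pragma omp atomic
     s = s + s.1;

   whereas with two or more reduction clauses, or a placeholder (array)
   reduction, the partial results are merged under a lock:

     GOMP_atomic_start ();
     s = s + s.1;
     t = t * t.2;
     GOMP_atomic_end ();  */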
2665
2666 /* Generate code to implement the COPYPRIVATE clauses. */
2667
2668 static void
2669 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2670 omp_context *ctx)
2671 {
2672 tree c;
2673
2674 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2675 {
2676 tree var, ref, x;
2677 bool by_ref;
2678
2679 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2680 continue;
2681
2682 var = OMP_CLAUSE_DECL (c);
2683 by_ref = use_pointer_for_field (var, NULL);
2684
2685 ref = build_sender_ref (var, ctx);
2686 x = lookup_decl_in_outer_ctx (var, ctx);
2687 x = by_ref ? build_fold_addr_expr (x) : x;
2688 gimplify_assign (ref, x, slist);
2689
2690 ref = build_receiver_ref (var, by_ref, ctx);
2691 if (is_reference (var))
2692 {
2693 ref = build_fold_indirect_ref (ref);
2694 var = build_fold_indirect_ref (var);
2695 }
2696 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2697 gimplify_and_add (x, rlist);
2698 }
2699 }
2700
2701
2702 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2703 and REDUCTION from the sender (aka parent) side. */
2704
2705 static void
2706 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2707 omp_context *ctx)
2708 {
2709 tree c;
2710
2711 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2712 {
2713 tree val, ref, x, var;
2714 bool by_ref, do_in = false, do_out = false;
2715
2716 switch (OMP_CLAUSE_CODE (c))
2717 {
2718 case OMP_CLAUSE_PRIVATE:
2719 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2720 break;
2721 continue;
2722 case OMP_CLAUSE_FIRSTPRIVATE:
2723 case OMP_CLAUSE_COPYIN:
2724 case OMP_CLAUSE_LASTPRIVATE:
2725 case OMP_CLAUSE_REDUCTION:
2726 break;
2727 default:
2728 continue;
2729 }
2730
2731 val = OMP_CLAUSE_DECL (c);
2732 var = lookup_decl_in_outer_ctx (val, ctx);
2733
2734 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2735 && is_global_var (var))
2736 continue;
2737 if (is_variable_sized (val))
2738 continue;
2739 by_ref = use_pointer_for_field (val, NULL);
2740
2741 switch (OMP_CLAUSE_CODE (c))
2742 {
2743 case OMP_CLAUSE_PRIVATE:
2744 case OMP_CLAUSE_FIRSTPRIVATE:
2745 case OMP_CLAUSE_COPYIN:
2746 do_in = true;
2747 break;
2748
2749 case OMP_CLAUSE_LASTPRIVATE:
2750 if (by_ref || is_reference (val))
2751 {
2752 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2753 continue;
2754 do_in = true;
2755 }
2756 else
2757 {
2758 do_out = true;
2759 if (lang_hooks.decls.omp_private_outer_ref (val))
2760 do_in = true;
2761 }
2762 break;
2763
2764 case OMP_CLAUSE_REDUCTION:
2765 do_in = true;
2766 do_out = !(by_ref || is_reference (val));
2767 break;
2768
2769 default:
2770 gcc_unreachable ();
2771 }
2772
2773 if (do_in)
2774 {
2775 ref = build_sender_ref (val, ctx);
2776 x = by_ref ? build_fold_addr_expr (var) : var;
2777 gimplify_assign (ref, x, ilist);
2778 if (is_task_ctx (ctx))
2779 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2780 }
2781
2782 if (do_out)
2783 {
2784 ref = build_sender_ref (val, ctx);
2785 gimplify_assign (var, ref, olist);
2786 }
2787 }
2788 }
2789
2790 /* Generate code to implement SHARED from the sender (aka parent)
2791 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2792 list things that got automatically shared. */
2793
2794 static void
2795 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2796 {
2797 tree var, ovar, nvar, f, x, record_type;
2798
2799 if (ctx->record_type == NULL)
2800 return;
2801
2802 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2803 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2804 {
2805 ovar = DECL_ABSTRACT_ORIGIN (f);
2806 nvar = maybe_lookup_decl (ovar, ctx);
2807 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2808 continue;
2809
2810       /* If CTX is a nested parallel directive, find the immediately
2811 enclosing parallel or workshare construct that contains a
2812 mapping for OVAR. */
2813 var = lookup_decl_in_outer_ctx (ovar, ctx);
2814
2815 if (use_pointer_for_field (ovar, ctx))
2816 {
2817 x = build_sender_ref (ovar, ctx);
2818 var = build_fold_addr_expr (var);
2819 gimplify_assign (x, var, ilist);
2820 }
2821 else
2822 {
2823 x = build_sender_ref (ovar, ctx);
2824 gimplify_assign (x, var, ilist);
2825
2826 if (!TREE_READONLY (var)
2827 /* We don't need to receive a new reference to a result
2828 or parm decl. In fact we may not store to it as we will
2829 invalidate any pending RSO and generate wrong gimple
2830 during inlining. */
2831 && !((TREE_CODE (var) == RESULT_DECL
2832 || TREE_CODE (var) == PARM_DECL)
2833 && DECL_BY_REFERENCE (var)))
2834 {
2835 x = build_sender_ref (ovar, ctx);
2836 gimplify_assign (var, x, olist);
2837 }
2838 }
2839 }
2840 }
2841
2842
2843 /* A convenience function to build an empty GIMPLE_COND with just the
2844 condition. */
2845
2846 static gimple
2847 gimple_build_cond_empty (tree cond)
2848 {
2849 enum tree_code pred_code;
2850 tree lhs, rhs;
2851
2852 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2853 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2854 }
2855
2856
2857 /* Build the function calls to GOMP_parallel_start etc to actually
2858 generate the parallel operation. REGION is the parallel region
2859    being expanded.  BB is the block into which to insert the code.  WS_ARGS
2860    will be set if this is a call to a combined parallel+workshare
2861    construct; it contains the list of additional arguments needed by
2862 the workshare construct. */
2863
2864 static void
2865 expand_parallel_call (struct omp_region *region, basic_block bb,
2866 gimple entry_stmt, tree ws_args)
2867 {
2868 tree t, t1, t2, val, cond, c, clauses;
2869 gimple_stmt_iterator gsi;
2870 gimple stmt;
2871 int start_ix;
2872
2873 clauses = gimple_omp_parallel_clauses (entry_stmt);
2874
2875 /* Determine what flavor of GOMP_parallel_start we will be
2876 emitting. */
2877 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2878 if (is_combined_parallel (region))
2879 {
2880 switch (region->inner->type)
2881 {
2882 case GIMPLE_OMP_FOR:
2883 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2884 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2885 + (region->inner->sched_kind
2886 == OMP_CLAUSE_SCHEDULE_RUNTIME
2887 ? 3 : region->inner->sched_kind);
2888 break;
2889 case GIMPLE_OMP_SECTIONS:
2890 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2891 break;
2892 default:
2893 gcc_unreachable ();
2894 }
2895 }
2896
2897 /* By default, the value of NUM_THREADS is zero (selected at run time)
2898 and there is no conditional. */
2899 cond = NULL_TREE;
2900 val = build_int_cst (unsigned_type_node, 0);
2901
2902 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2903 if (c)
2904 cond = OMP_CLAUSE_IF_EXPR (c);
2905
2906 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2907 if (c)
2908 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2909
2910 /* Ensure 'val' is of the correct type. */
2911 val = fold_convert (unsigned_type_node, val);
2912
2913 /* If we found the clause 'if (cond)', build either
2914      (cond == 0) or (cond ? val : 1u). */
2915 if (cond)
2916 {
2917 gimple_stmt_iterator gsi;
2918
2919 cond = gimple_boolify (cond);
2920
2921 if (integer_zerop (val))
2922 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2923 build_int_cst (TREE_TYPE (cond), 0));
2924 else
2925 {
2926 basic_block cond_bb, then_bb, else_bb;
2927 edge e, e_then, e_else;
2928 tree tmp_then, tmp_else, tmp_join, tmp_var;
2929
2930 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2931 if (gimple_in_ssa_p (cfun))
2932 {
2933 tmp_then = make_ssa_name (tmp_var, NULL);
2934 tmp_else = make_ssa_name (tmp_var, NULL);
2935 tmp_join = make_ssa_name (tmp_var, NULL);
2936 }
2937 else
2938 {
2939 tmp_then = tmp_var;
2940 tmp_else = tmp_var;
2941 tmp_join = tmp_var;
2942 }
2943
2944 e = split_block (bb, NULL);
2945 cond_bb = e->src;
2946 bb = e->dest;
2947 remove_edge (e);
2948
2949 then_bb = create_empty_bb (cond_bb);
2950 else_bb = create_empty_bb (then_bb);
2951 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2952 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2953
2954 stmt = gimple_build_cond_empty (cond);
2955 gsi = gsi_start_bb (cond_bb);
2956 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2957
2958 gsi = gsi_start_bb (then_bb);
2959 stmt = gimple_build_assign (tmp_then, val);
2960 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2961
2962 gsi = gsi_start_bb (else_bb);
2963 stmt = gimple_build_assign
2964 (tmp_else, build_int_cst (unsigned_type_node, 1));
2965 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2966
2967 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2968 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2969 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2970 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2971
2972 if (gimple_in_ssa_p (cfun))
2973 {
2974 gimple phi = create_phi_node (tmp_join, bb);
2975 SSA_NAME_DEF_STMT (tmp_join) = phi;
2976 add_phi_arg (phi, tmp_then, e_then);
2977 add_phi_arg (phi, tmp_else, e_else);
2978 }
2979
2980 val = tmp_join;
2981 }
2982
2983 gsi = gsi_start_bb (bb);
2984 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
2985 false, GSI_CONTINUE_LINKING);
2986 }
2987
2988 gsi = gsi_last_bb (bb);
2989 t = gimple_omp_parallel_data_arg (entry_stmt);
2990 if (t == NULL)
2991 t1 = null_pointer_node;
2992 else
2993 t1 = build_fold_addr_expr (t);
2994 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
2995
2996 if (ws_args)
2997 {
2998 tree args = tree_cons (NULL, t2,
2999 tree_cons (NULL, t1,
3000 tree_cons (NULL, val, ws_args)));
3001 t = build_function_call_expr (built_in_decls[start_ix], args);
3002 }
3003 else
3004 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3005
3006 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3007 false, GSI_CONTINUE_LINKING);
3008
3009 t = gimple_omp_parallel_data_arg (entry_stmt);
3010 if (t == NULL)
3011 t = null_pointer_node;
3012 else
3013 t = build_fold_addr_expr (t);
3014 t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3015 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3016 false, GSI_CONTINUE_LINKING);
3017
3018 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3019 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3020 false, GSI_CONTINUE_LINKING);
3021 }
3022
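/* Illustrative sketch (editor's note, not part of the original sources):
   for

     #pragma omp parallel num_threads (4) if (c)

   the calls emitted above amount to

     GOMP_parallel_start (child_fn, &.omp_data_o, c ? 4 : 1);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   For a combined parallel loop or sections construct the corresponding
   GOMP_parallel_*_start entry point is used instead, with the extra
   workshare arguments appended via WS_ARGS.  */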
3023
3024 /* Build the function call to GOMP_task to actually
3025    generate the task operation.  BB is the block into which to insert
3026    the code. */
3026
3027 static void
3028 expand_task_call (basic_block bb, gimple entry_stmt)
3029 {
3030 tree t, t1, t2, t3, flags, cond, c, clauses;
3031 gimple_stmt_iterator gsi;
3032
3033 clauses = gimple_omp_task_clauses (entry_stmt);
3034
3035 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3036 if (c)
3037 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3038 else
3039 cond = boolean_true_node;
3040
3041 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3042 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3043
3044 gsi = gsi_last_bb (bb);
3045 t = gimple_omp_task_data_arg (entry_stmt);
3046 if (t == NULL)
3047 t2 = null_pointer_node;
3048 else
3049 t2 = build_fold_addr_expr (t);
3050 t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
3051 t = gimple_omp_task_copy_fn (entry_stmt);
3052 if (t == NULL)
3053 t3 = null_pointer_node;
3054 else
3055 t3 = build_fold_addr_expr (t);
3056
3057 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3058 gimple_omp_task_arg_size (entry_stmt),
3059 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3060
3061 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3062 false, GSI_CONTINUE_LINKING);
3063 }
3064
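/* Illustrative sketch (editor's note, not part of the original sources):
   for

     #pragma omp task if (c) untied

   the call emitted above is roughly

     GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
                c, 1);

   where CPYFN is the task firstprivate copy function (or NULL), C
   controls whether the task may be deferred, and the final argument is
   the flags word with bit 0 set for "untied".  */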
3065
3066 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3067 catch handler and return it. This prevents programs from violating the
3068 structured block semantics with throws. */
3069
3070 static gimple_seq
3071 maybe_catch_exception (gimple_seq body)
3072 {
3073 gimple f, t;
3074
3075 if (!flag_exceptions)
3076 return body;
3077
3078 if (lang_protect_cleanup_actions)
3079 t = lang_protect_cleanup_actions ();
3080 else
3081 t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
3082
3083 f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
3084 gimple_eh_filter_set_must_not_throw (f, true);
3085
3086 t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
3087 GIMPLE_TRY_CATCH);
3088
3089 return gimple_seq_alloc_with_stmt (t);
3090 }
3091
3092 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3093
3094 static tree
3095 list2chain (tree list)
3096 {
3097 tree t;
3098
3099 for (t = list; t; t = TREE_CHAIN (t))
3100 {
3101 tree var = TREE_VALUE (t);
3102 if (TREE_CHAIN (t))
3103 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3104 else
3105 TREE_CHAIN (var) = NULL_TREE;
3106 }
3107
3108 return list ? TREE_VALUE (list) : NULL_TREE;
3109 }
3110
3111
3112 /* Remove barriers in REGION->EXIT's block. Note that this is only
3113 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3114    is an implicit barrier, any barrier that a workshare inside the
3115    GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
3116    can now be removed. */
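
/* Illustrative example (editor's note, not part of the original sources):
   in

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++)
         body (i);
     }

   the loop's implicit barrier is immediately followed by the implicit
   barrier at the end of the parallel region, so the workshare's
   GIMPLE_OMP_RETURN feeding REGION->EXIT is marked nowait below and
   only the parallel's own barrier remains.  */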
3117
3118 static void
3119 remove_exit_barrier (struct omp_region *region)
3120 {
3121 gimple_stmt_iterator gsi;
3122 basic_block exit_bb;
3123 edge_iterator ei;
3124 edge e;
3125 gimple stmt;
3126
3127 exit_bb = region->exit;
3128
3129 /* If the parallel region doesn't return, we don't have REGION->EXIT
3130 block at all. */
3131 if (! exit_bb)
3132 return;
3133
3134   /* The last statement in the block will be the parallel's GIMPLE_OMP_RETURN.  The
3135 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3136 statements that can appear in between are extremely limited -- no
3137 memory operations at all. Here, we allow nothing at all, so the
3138 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3139 gsi = gsi_last_bb (exit_bb);
3140 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3141 gsi_prev (&gsi);
3142 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3143 return;
3144
3145 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3146 {
3147 gsi = gsi_last_bb (e->src);
3148 if (gsi_end_p (gsi))
3149 continue;
3150 stmt = gsi_stmt (gsi);
3151 if (gimple_code (stmt) == GIMPLE_OMP_RETURN)
3152 gimple_omp_return_set_nowait (stmt);
3153 }
3154 }
3155
3156 static void
3157 remove_exit_barriers (struct omp_region *region)
3158 {
3159 if (region->type == GIMPLE_OMP_PARALLEL)
3160 remove_exit_barrier (region);
3161
3162 if (region->inner)
3163 {
3164 region = region->inner;
3165 remove_exit_barriers (region);
3166 while (region->next)
3167 {
3168 region = region->next;
3169 remove_exit_barriers (region);
3170 }
3171 }
3172 }
3173
3174 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3175 calls. These can't be declared as const functions, but
3176 within one parallel body they are constant, so they can be
3177 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3178    which are declared const.  Similarly for a task body, except
3179    that in an untied task omp_get_thread_num () can change at any task
3180 scheduling point. */
3181
3182 static void
3183 optimize_omp_library_calls (gimple entry_stmt)
3184 {
3185 basic_block bb;
3186 gimple_stmt_iterator gsi;
3187 tree thr_num_id
3188 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3189 tree num_thr_id
3190 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3191 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3192 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3193 OMP_CLAUSE_UNTIED) != NULL);
3194
3195 FOR_EACH_BB (bb)
3196 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3197 {
3198 gimple call = gsi_stmt (gsi);
3199 tree decl;
3200
3201 if (is_gimple_call (call)
3202 && (decl = gimple_call_fndecl (call))
3203 && DECL_EXTERNAL (decl)
3204 && TREE_PUBLIC (decl)
3205 && DECL_INITIAL (decl) == NULL)
3206 {
3207 tree built_in;
3208
3209 if (DECL_NAME (decl) == thr_num_id)
3210 {
3211 /* In #pragma omp task untied omp_get_thread_num () can change
3212 during the execution of the task region. */
3213 if (untied_task)
3214 continue;
3215 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3216 }
3217 else if (DECL_NAME (decl) == num_thr_id)
3218 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3219 else
3220 continue;
3221
3222 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3223 || gimple_call_num_args (call) != 0)
3224 continue;
3225
3226 if (flag_exceptions && !TREE_NOTHROW (decl))
3227 continue;
3228
3229 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3230 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3231 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3232 continue;
3233
3234 gimple_call_set_fndecl (call, built_in);
3235 }
3236 }
3237 }
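
/* Illustrative sketch (editor's note, not part of the original sources):
   inside a parallel body (or a tied task body), a call such as

     nthreads = omp_get_num_threads ();

   is rewritten by the pass above into

     nthreads = __builtin_omp_get_num_threads ();

   and because the builtin is declared const, later passes can CSE or
   hoist it, which the plain library call would not permit.  */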
3238
3239 /* Expand the OpenMP parallel or task directive starting at REGION. */
3240
3241 static void
3242 expand_omp_taskreg (struct omp_region *region)
3243 {
3244 basic_block entry_bb, exit_bb, new_bb;
3245 struct function *child_cfun;
3246 tree child_fn, block, t, ws_args, *tp;
3247 gimple_stmt_iterator gsi;
3248 gimple entry_stmt, stmt;
3249 edge e;
3250
3251 entry_stmt = last_stmt (region->entry);
3252 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3253 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3254   /* If this function has already been instrumented, make sure
3255 the child function isn't instrumented again. */
3256 child_cfun->after_tree_profile = cfun->after_tree_profile;
3257
3258 entry_bb = region->entry;
3259 exit_bb = region->exit;
3260
3261 if (is_combined_parallel (region))
3262 ws_args = region->ws_args;
3263 else
3264 ws_args = NULL_TREE;
3265
3266 if (child_cfun->cfg)
3267 {
3268 /* Due to inlining, it may happen that we have already outlined
3269 the region, in which case all we need to do is make the
3270 sub-graph unreachable and emit the parallel call. */
3271 edge entry_succ_e, exit_succ_e;
3272 gimple_stmt_iterator gsi;
3273
3274 entry_succ_e = single_succ_edge (entry_bb);
3275
3276 gsi = gsi_last_bb (entry_bb);
3277 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3278 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3279 gsi_remove (&gsi, true);
3280
3281 new_bb = entry_bb;
3282 if (exit_bb)
3283 {
3284 exit_succ_e = single_succ_edge (exit_bb);
3285 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3286 }
3287 remove_edge_and_dominated_blocks (entry_succ_e);
3288 }
3289 else
3290 {
3291 /* If the parallel region needs data sent from the parent
3292 function, then the very first statement (except possible
3293 tree profile counter updates) of the parallel body
3294 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3295 &.OMP_DATA_O is passed as an argument to the child function,
3296 we need to replace it with the argument as seen by the child
3297 function.
3298
3299 In most cases, this will end up being the identity assignment
3300 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3301 a function call that has been inlined, the original PARM_DECL
3302 .OMP_DATA_I may have been converted into a different local
3303      variable, in which case we need to keep the assignment. */
3304 if (gimple_omp_taskreg_data_arg (entry_stmt))
3305 {
3306 basic_block entry_succ_bb = single_succ (entry_bb);
3307 gimple_stmt_iterator gsi;
3308 tree arg, narg;
3309 gimple parcopy_stmt = NULL;
3310
3311 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3312 {
3313 gimple stmt;
3314
3315 gcc_assert (!gsi_end_p (gsi));
3316 stmt = gsi_stmt (gsi);
3317 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3318 continue;
3319
3320 if (gimple_num_ops (stmt) == 2)
3321 {
3322 tree arg = gimple_assign_rhs1 (stmt);
3323
3324              /* We're ignoring the subcode because we're
3325 effectively doing a STRIP_NOPS. */
3326
3327 if (TREE_CODE (arg) == ADDR_EXPR
3328 && TREE_OPERAND (arg, 0)
3329 == gimple_omp_taskreg_data_arg (entry_stmt))
3330 {
3331 parcopy_stmt = stmt;
3332 break;
3333 }
3334 }
3335 }
3336
3337 gcc_assert (parcopy_stmt != NULL);
3338 arg = DECL_ARGUMENTS (child_fn);
3339
3340 if (!gimple_in_ssa_p (cfun))
3341 {
3342 if (gimple_assign_lhs (parcopy_stmt) == arg)
3343 gsi_remove (&gsi, true);
3344 else
3345 {
3346 /* ?? Is setting the subcode really necessary ?? */
3347 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3348 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3349 }
3350 }
3351 else
3352 {
3353 /* If we are in ssa form, we must load the value from the default
3354 definition of the argument. That should not be defined now,
3355 since the argument is not used uninitialized. */
3356 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3357 narg = make_ssa_name (arg, gimple_build_nop ());
3358 set_default_def (arg, narg);
3359 /* ?? Is setting the subcode really necessary ?? */
3360 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3361 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3362 update_stmt (parcopy_stmt);
3363 }
3364 }
3365
3366 /* Declare local variables needed in CHILD_CFUN. */
3367 block = DECL_INITIAL (child_fn);
3368 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3369 DECL_SAVED_TREE (child_fn) = NULL;
3370 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3371 TREE_USED (block) = 1;
3372
3373 /* Reset DECL_CONTEXT on function arguments. */
3374 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3375 DECL_CONTEXT (t) = child_fn;
3376
3377 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3378 so that it can be moved to the child function. */
3379 gsi = gsi_last_bb (entry_bb);
3380 stmt = gsi_stmt (gsi);
3381 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3382 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3383 gsi_remove (&gsi, true);
3384 e = split_block (entry_bb, stmt);
3385 entry_bb = e->dest;
3386 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3387
3388 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3389 if (exit_bb)
3390 {
3391 gsi = gsi_last_bb (exit_bb);
3392 gcc_assert (!gsi_end_p (gsi)
3393 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3394 stmt = gimple_build_return (NULL);
3395 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3396 gsi_remove (&gsi, true);
3397 }
3398
3399 /* Move the parallel region into CHILD_CFUN. */
3400
3401 if (gimple_in_ssa_p (cfun))
3402 {
3403 push_cfun (child_cfun);
3404 init_tree_ssa (child_cfun);
3405 init_ssa_operands ();
3406 cfun->gimple_df->in_ssa_p = true;
3407 pop_cfun ();
3408 block = NULL_TREE;
3409 }
3410 else
3411 block = gimple_block (entry_stmt);
3412
3413 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3414 if (exit_bb)
3415 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3416
3417 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3418 for (tp = &child_cfun->local_decls; *tp; )
3419 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3420 tp = &TREE_CHAIN (*tp);
3421 else
3422 *tp = TREE_CHAIN (*tp);
3423
3424 /* Inform the callgraph about the new function. */
3425 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3426 = cfun->curr_properties;
3427 cgraph_add_new_function (child_fn, true);
3428
3429 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3430 fixed in a following pass. */
3431 push_cfun (child_cfun);
3432 if (optimize)
3433 optimize_omp_library_calls (entry_stmt);
3434 rebuild_cgraph_edges ();
3435
3436 /* Some EH regions might become dead, see PR34608. If
3437 pass_cleanup_cfg isn't the first pass to happen with the
3438 new child, these dead EH edges might cause problems.
3439 Clean them up now. */
3440 if (flag_exceptions)
3441 {
3442 basic_block bb;
3443 tree save_current = current_function_decl;
3444 bool changed = false;
3445
3446 current_function_decl = child_fn;
3447 FOR_EACH_BB (bb)
3448 changed |= gimple_purge_dead_eh_edges (bb);
3449 if (changed)
3450 cleanup_tree_cfg ();
3451 current_function_decl = save_current;
3452 }
3453 pop_cfun ();
3454 }
3455
3456 /* Emit a library call to launch the children threads. */
3457 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3458 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3459 else
3460 expand_task_call (new_bb, entry_stmt);
3461 update_ssa (TODO_update_ssa_only_virtuals);
3462 }
3463
3464
3465 /* A subroutine of expand_omp_for. Generate code for a parallel
3466 loop with any schedule. Given parameters:
3467
3468 for (V = N1; V cond N2; V += STEP) BODY;
3469
3470 where COND is "<" or ">", we generate pseudocode
3471
3472 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3473 if (more) goto L0; else goto L3;
3474 L0:
3475 V = istart0;
3476 iend = iend0;
3477 L1:
3478 BODY;
3479 V += STEP;
3480 if (V cond iend) goto L1; else goto L2;
3481 L2:
3482 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3483 L3:
3484
3485 If this is a combined omp parallel loop, instead of the call to
3486 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3487
3488 For collapsed loops, given parameters:
3489 collapse(3)
3490 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3491 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3492 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3493 BODY;
3494
3495 we generate pseudocode
3496
3497 if (cond3 is <)
3498 adj = STEP3 - 1;
3499 else
3500 adj = STEP3 + 1;
3501 count3 = (adj + N32 - N31) / STEP3;
3502 if (cond2 is <)
3503 adj = STEP2 - 1;
3504 else
3505 adj = STEP2 + 1;
3506 count2 = (adj + N22 - N21) / STEP2;
3507 if (cond1 is <)
3508 adj = STEP1 - 1;
3509 else
3510 adj = STEP1 + 1;
3511 count1 = (adj + N12 - N11) / STEP1;
3512 count = count1 * count2 * count3;
3513 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3514 if (more) goto L0; else goto L3;
3515 L0:
3516 V = istart0;
3517 T = V;
3518 V3 = N31 + (T % count3) * STEP3;
3519 T = T / count3;
3520 V2 = N21 + (T % count2) * STEP2;
3521 T = T / count2;
3522 V1 = N11 + T * STEP1;
3523 iend = iend0;
3524 L1:
3525 BODY;
3526 V += 1;
3527 if (V < iend) goto L10; else goto L2;
3528 L10:
3529 V3 += STEP3;
3530 if (V3 cond3 N32) goto L1; else goto L11;
3531 L11:
3532 V3 = N31;
3533 V2 += STEP2;
3534 if (V2 cond2 N22) goto L1; else goto L12;
3535 L12:
3536 V2 = N21;
3537 V1 += STEP1;
3538 goto L1;
3539 L2:
3540 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3541 L3:
3542
3543 */
3544
3545 static void
3546 expand_omp_for_generic (struct omp_region *region,
3547 struct omp_for_data *fd,
3548 enum built_in_function start_fn,
3549 enum built_in_function next_fn)
3550 {
3551 tree type, istart0, iend0, iend;
3552 tree t, vmain, vback, bias = NULL_TREE;
3553 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3554 basic_block l2_bb = NULL, l3_bb = NULL;
3555 gimple_stmt_iterator gsi;
3556 gimple stmt;
3557 bool in_combined_parallel = is_combined_parallel (region);
3558 bool broken_loop = region->cont == NULL;
3559 edge e, ne;
3560 tree *counts = NULL;
3561 int i;
3562
3563 gcc_assert (!broken_loop || !in_combined_parallel);
3564 gcc_assert (fd->iter_type == long_integer_type_node
3565 || !in_combined_parallel);
3566
3567 type = TREE_TYPE (fd->loop.v);
3568 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3569 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3570 TREE_ADDRESSABLE (istart0) = 1;
3571 TREE_ADDRESSABLE (iend0) = 1;
3572 if (gimple_in_ssa_p (cfun))
3573 {
3574 add_referenced_var (istart0);
3575 add_referenced_var (iend0);
3576 }
3577
3578 /* See if we need to bias by LLONG_MIN. */
3579 if (fd->iter_type == long_long_unsigned_type_node
3580 && TREE_CODE (type) == INTEGER_TYPE
3581 && !TYPE_UNSIGNED (type))
3582 {
3583 tree n1, n2;
3584
3585 if (fd->loop.cond_code == LT_EXPR)
3586 {
3587 n1 = fd->loop.n1;
3588 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3589 }
3590 else
3591 {
3592 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3593 n2 = fd->loop.n1;
3594 }
3595 if (TREE_CODE (n1) != INTEGER_CST
3596 || TREE_CODE (n2) != INTEGER_CST
3597 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3598 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3599 }
3600
3601 entry_bb = region->entry;
3602 cont_bb = region->cont;
3603 collapse_bb = NULL;
3604 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3605 gcc_assert (broken_loop
3606 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3607 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3608 l1_bb = single_succ (l0_bb);
3609 if (!broken_loop)
3610 {
3611 l2_bb = create_empty_bb (cont_bb);
3612 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3613 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3614 }
3615 else
3616 l2_bb = NULL;
3617 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3618 exit_bb = region->exit;
3619
3620 gsi = gsi_last_bb (entry_bb);
3621
3622 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3623 if (fd->collapse > 1)
3624 {
3625      /* Collapsed loops need work for expansion in SSA form. */
3626 gcc_assert (!gimple_in_ssa_p (cfun));
3627 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3628 for (i = 0; i < fd->collapse; i++)
3629 {
3630 tree itype = TREE_TYPE (fd->loops[i].v);
3631
3632 if (POINTER_TYPE_P (itype))
3633 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3634 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3635 ? -1 : 1));
3636 t = fold_build2 (PLUS_EXPR, itype,
3637 fold_convert (itype, fd->loops[i].step), t);
3638 t = fold_build2 (PLUS_EXPR, itype, t,
3639 fold_convert (itype, fd->loops[i].n2));
3640 t = fold_build2 (MINUS_EXPR, itype, t,
3641 fold_convert (itype, fd->loops[i].n1));
3642 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3643 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3644 fold_build1 (NEGATE_EXPR, itype, t),
3645 fold_build1 (NEGATE_EXPR, itype,
3646 fold_convert (itype,
3647 fd->loops[i].step)));
3648 else
3649 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3650 fold_convert (itype, fd->loops[i].step));
3651 t = fold_convert (type, t);
3652 if (TREE_CODE (t) == INTEGER_CST)
3653 counts[i] = t;
3654 else
3655 {
3656 counts[i] = create_tmp_var (type, ".count");
3657 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3658 true, GSI_SAME_STMT);
3659 stmt = gimple_build_assign (counts[i], t);
3660 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3661 }
3662 if (SSA_VAR_P (fd->loop.n2))
3663 {
3664 if (i == 0)
3665 t = counts[0];
3666 else
3667 {
3668 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3669 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3670 true, GSI_SAME_STMT);
3671 }
3672 stmt = gimple_build_assign (fd->loop.n2, t);
3673 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3674 }
3675 }
3676 }
3677 if (in_combined_parallel)
3678 {
3679 /* In a combined parallel loop, emit a call to
3680 GOMP_loop_foo_next. */
3681 t = build_call_expr (built_in_decls[next_fn], 2,
3682 build_fold_addr_expr (istart0),
3683 build_fold_addr_expr (iend0));
3684 }
3685 else
3686 {
3687 tree t0, t1, t2, t3, t4;
3688 /* If this is not a combined parallel loop, emit a call to
3689 GOMP_loop_foo_start in ENTRY_BB. */
3690 t4 = build_fold_addr_expr (iend0);
3691 t3 = build_fold_addr_expr (istart0);
3692 t2 = fold_convert (fd->iter_type, fd->loop.step);
3693 if (POINTER_TYPE_P (type)
3694 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3695 {
3696          /* Avoid casting pointers to an integer of a different size. */
3697 tree itype
3698 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3699 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3700 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3701 }
3702 else
3703 {
3704 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3705 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3706 }
3707 if (bias)
3708 {
3709 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3710 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3711 }
3712 if (fd->iter_type == long_integer_type_node)
3713 {
3714 if (fd->chunk_size)
3715 {
3716 t = fold_convert (fd->iter_type, fd->chunk_size);
3717 t = build_call_expr (built_in_decls[start_fn], 6,
3718 t0, t1, t2, t, t3, t4);
3719 }
3720 else
3721 t = build_call_expr (built_in_decls[start_fn], 5,
3722 t0, t1, t2, t3, t4);
3723 }
3724 else
3725 {
3726 tree t5;
3727 tree c_bool_type;
3728
3729          /* The GOMP_loop_ull_*start functions have an additional boolean
3730 argument, true for < loops and false for > loops.
3731 In Fortran, the C bool type can be different from
3732 boolean_type_node. */
3733 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3734 t5 = build_int_cst (c_bool_type,
3735 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3736 if (fd->chunk_size)
3737 {
3738 t = fold_convert (fd->iter_type, fd->chunk_size);
3739 t = build_call_expr (built_in_decls[start_fn], 7,
3740 t5, t0, t1, t2, t, t3, t4);
3741 }
3742 else
3743 t = build_call_expr (built_in_decls[start_fn], 6,
3744 t5, t0, t1, t2, t3, t4);
3745 }
3746 }
3747 if (TREE_TYPE (t) != boolean_type_node)
3748 t = fold_build2 (NE_EXPR, boolean_type_node,
3749 t, build_int_cst (TREE_TYPE (t), 0));
3750 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3751 true, GSI_SAME_STMT);
3752 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3753
3754 /* Remove the GIMPLE_OMP_FOR statement. */
3755 gsi_remove (&gsi, true);
3756
3757 /* Iteration setup for sequential loop goes in L0_BB. */
3758 gsi = gsi_start_bb (l0_bb);
3759 if (bias)
3760 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3761 istart0, bias));
3762 else
3763 t = fold_convert (type, istart0);
3764 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3765 false, GSI_CONTINUE_LINKING);
3766 stmt = gimple_build_assign (fd->loop.v, t);
3767 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3768
3769 if (bias)
3770 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3771 iend0, bias));
3772 else
3773 t = fold_convert (type, iend0);
3774 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3775 false, GSI_CONTINUE_LINKING);
3776 if (fd->collapse > 1)
3777 {
3778 tree tem = create_tmp_var (type, ".tem");
3779
3780 stmt = gimple_build_assign (tem, fd->loop.v);
3781 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3782 for (i = fd->collapse - 1; i >= 0; i--)
3783 {
3784 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3785 itype = vtype;
3786 if (POINTER_TYPE_P (vtype))
3787 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3788 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3789 t = fold_convert (itype, t);
3790 t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i].step);
3791 if (POINTER_TYPE_P (vtype))
3792 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3793 fd->loops[i].n1, fold_convert (sizetype, t));
3794 else
3795 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3796 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3797 false, GSI_CONTINUE_LINKING);
3798 stmt = gimple_build_assign (fd->loops[i].v, t);
3799 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3800 if (i != 0)
3801 {
3802 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3803 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3804 false, GSI_CONTINUE_LINKING);
3805 stmt = gimple_build_assign (tem, t);
3806 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3807 }
3808 }
3809 }
3810
3811 if (!broken_loop)
3812 {
3813 /* Code to control the increment and predicate for the sequential
3814 loop goes in the CONT_BB. */
3815 gsi = gsi_last_bb (cont_bb);
3816 stmt = gsi_stmt (gsi);
3817 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3818 vmain = gimple_omp_continue_control_use (stmt);
3819 vback = gimple_omp_continue_control_def (stmt);
3820
3821 if (POINTER_TYPE_P (type))
3822 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3823 fold_convert (sizetype, fd->loop.step));
3824 else
3825 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3826 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3827 true, GSI_SAME_STMT);
3828 stmt = gimple_build_assign (vback, t);
3829 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3830
3831 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3832 stmt = gimple_build_cond_empty (t);
3833 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3834
3835 /* Remove GIMPLE_OMP_CONTINUE. */
3836 gsi_remove (&gsi, true);
3837
3838 if (fd->collapse > 1)
3839 {
3840 basic_block last_bb, bb;
3841
3842 last_bb = cont_bb;
3843 for (i = fd->collapse - 1; i >= 0; i--)
3844 {
3845 tree vtype = TREE_TYPE (fd->loops[i].v);
3846
3847 bb = create_empty_bb (last_bb);
3848 gsi = gsi_start_bb (bb);
3849
3850 if (i < fd->collapse - 1)
3851 {
3852 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3853 e->probability = REG_BR_PROB_BASE / 8;
3854
3855 t = fd->loops[i + 1].n1;
3856 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3857 false, GSI_CONTINUE_LINKING);
3858 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3859 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3860 }
3861 else
3862 collapse_bb = bb;
3863
3864 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3865
3866 if (POINTER_TYPE_P (vtype))
3867 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3868 fd->loops[i].v,
3869 fold_convert (sizetype, fd->loops[i].step));
3870 else
3871 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3872 fd->loops[i].step);
3873 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3874 false, GSI_CONTINUE_LINKING);
3875 stmt = gimple_build_assign (fd->loops[i].v, t);
3876 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3877
3878 if (i > 0)
3879 {
3880 t = fd->loops[i].n2;
3881 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3882 false, GSI_CONTINUE_LINKING);
3883 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3884 fd->loops[i].v, t);
3885 stmt = gimple_build_cond_empty (t);
3886 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3887 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3888 e->probability = REG_BR_PROB_BASE * 7 / 8;
3889 }
3890 else
3891 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3892 last_bb = bb;
3893 }
3894 }
3895
3896 /* Emit code to get the next parallel iteration in L2_BB. */
3897 gsi = gsi_start_bb (l2_bb);
3898
3899 t = build_call_expr (built_in_decls[next_fn], 2,
3900 build_fold_addr_expr (istart0),
3901 build_fold_addr_expr (iend0));
3902 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3903 false, GSI_CONTINUE_LINKING);
3904 if (TREE_TYPE (t) != boolean_type_node)
3905 t = fold_build2 (NE_EXPR, boolean_type_node,
3906 t, build_int_cst (TREE_TYPE (t), 0));
3907 stmt = gimple_build_cond_empty (t);
3908 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3909 }
3910
3911 /* Add the loop cleanup function. */
3912 gsi = gsi_last_bb (exit_bb);
3913 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3914 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3915 else
3916 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
3917 stmt = gimple_build_call (t, 0);
3918 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3919 gsi_remove (&gsi, true);
3920
3921 /* Connect the new blocks. */
3922 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3923 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3924
3925 if (!broken_loop)
3926 {
3927 gimple_seq phis;
3928
3929 e = find_edge (cont_bb, l3_bb);
3930 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3931
3932 phis = phi_nodes (l3_bb);
3933 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
3934 {
3935 gimple phi = gsi_stmt (gsi);
3936 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3937 PHI_ARG_DEF_FROM_EDGE (phi, e));
3938 }
3939 remove_edge (e);
3940
3941 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
3942 if (fd->collapse > 1)
3943 {
3944 e = find_edge (cont_bb, l1_bb);
3945 remove_edge (e);
3946 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
3947 }
3948 else
3949 {
3950 e = find_edge (cont_bb, l1_bb);
3951 e->flags = EDGE_TRUE_VALUE;
3952 }
3953 e->probability = REG_BR_PROB_BASE * 7 / 8;
3954 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
3955 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
3956
3957 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
3958 recompute_dominator (CDI_DOMINATORS, l2_bb));
3959 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
3960 recompute_dominator (CDI_DOMINATORS, l3_bb));
3961 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
3962 recompute_dominator (CDI_DOMINATORS, l0_bb));
3963 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
3964 recompute_dominator (CDI_DOMINATORS, l1_bb));
3965 }
3966 }
3967
3968
3969 /* A subroutine of expand_omp_for. Generate code for a parallel
3970 loop with static schedule and no specified chunk size. Given
3971 parameters:
3972
3973 for (V = N1; V cond N2; V += STEP) BODY;
3974
3975 where COND is "<" or ">", we generate pseudocode
3976
3977 if (cond is <)
3978 adj = STEP - 1;
3979 else
3980 adj = STEP + 1;
3981 if ((__typeof (V)) -1 > 0 && cond is >)
3982 n = -(adj + N2 - N1) / -STEP;
3983 else
3984 n = (adj + N2 - N1) / STEP;
3985 q = n / nthreads;
3986 q += (q * nthreads != n);
3987 s0 = q * threadid;
3988 e0 = min(s0 + q, n);
3989 V = s0 * STEP + N1;
3990 if (s0 >= e0) goto L2; else goto L0;
3991 L0:
3992 e = e0 * STEP + N1;
3993 L1:
3994 BODY;
3995 V += STEP;
3996 if (V cond e) goto L1;
3997 L2:
3998 */
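/* A worked example of the partitioning above (illustrative values only):
   with N1=0, N2=10, STEP=1, cond "<" and a team of nthreads=4,
   n = 10 and q = 10/4 = 2, rounded up to 3 because q*nthreads != n.
   Thread 2 therefore gets s0 = 6, e0 = min (6 + 3, 10) = 9 and runs
   iterations 6..8, while thread 3 gets s0 = 9, e0 = 10 and runs only
   iteration 9.  */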
3999
4000 static void
4001 expand_omp_for_static_nochunk (struct omp_region *region,
4002 struct omp_for_data *fd)
4003 {
4004 tree n, q, s0, e0, e, t, nthreads, threadid;
4005 tree type, itype, vmain, vback;
4006 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4007 basic_block fin_bb;
4008 gimple_stmt_iterator gsi;
4009 gimple stmt;
4010
4011 itype = type = TREE_TYPE (fd->loop.v);
4012 if (POINTER_TYPE_P (type))
4013 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4014
4015 entry_bb = region->entry;
4016 cont_bb = region->cont;
4017 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4018 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4019 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4020 body_bb = single_succ (seq_start_bb);
4021 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4022 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4023 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4024 exit_bb = region->exit;
4025
4026 /* Iteration space partitioning goes in ENTRY_BB. */
4027 gsi = gsi_last_bb (entry_bb);
4028 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4029
4030 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4031 t = fold_convert (itype, t);
4032 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4033 true, GSI_SAME_STMT);
4034
4035 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4036 t = fold_convert (itype, t);
4037 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4038 true, GSI_SAME_STMT);
4039
4040 fd->loop.n1
4041 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4042 true, NULL_TREE, true, GSI_SAME_STMT);
4043 fd->loop.n2
4044 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4045 true, NULL_TREE, true, GSI_SAME_STMT);
4046 fd->loop.step
4047 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4048 true, NULL_TREE, true, GSI_SAME_STMT);
4049
4050 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4051 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4052 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4053 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4054 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4055 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4056 fold_build1 (NEGATE_EXPR, itype, t),
4057 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4058 else
4059 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4060 t = fold_convert (itype, t);
4061 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4062
4063 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4064 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4065
4066 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4067 t = fold_build2 (NE_EXPR, itype, t, n);
4068 t = fold_build2 (PLUS_EXPR, itype, q, t);
4069 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4070
4071 t = build2 (MULT_EXPR, itype, q, threadid);
4072 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4073
4074 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4075 t = fold_build2 (MIN_EXPR, itype, t, n);
4076 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4077
4078 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4079 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4080
4081 /* Remove the GIMPLE_OMP_FOR statement. */
4082 gsi_remove (&gsi, true);
4083
4084 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4085 gsi = gsi_start_bb (seq_start_bb);
4086
4087 t = fold_convert (itype, s0);
4088 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4089 if (POINTER_TYPE_P (type))
4090 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4091 fold_convert (sizetype, t));
4092 else
4093 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4094 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4095 false, GSI_CONTINUE_LINKING);
4096 stmt = gimple_build_assign (fd->loop.v, t);
4097 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4098
4099 t = fold_convert (itype, e0);
4100 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4101 if (POINTER_TYPE_P (type))
4102 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4103 fold_convert (sizetype, t));
4104 else
4105 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4106 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4107 false, GSI_CONTINUE_LINKING);
4108
4109 /* The code controlling the sequential loop replaces the
4110 GIMPLE_OMP_CONTINUE. */
4111 gsi = gsi_last_bb (cont_bb);
4112 stmt = gsi_stmt (gsi);
4113 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4114 vmain = gimple_omp_continue_control_use (stmt);
4115 vback = gimple_omp_continue_control_def (stmt);
4116
4117 if (POINTER_TYPE_P (type))
4118 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4119 fold_convert (sizetype, fd->loop.step));
4120 else
4121 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4122 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4123 true, GSI_SAME_STMT);
4124 stmt = gimple_build_assign (vback, t);
4125 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4126
4127 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4128 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4129
4130 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4131 gsi_remove (&gsi, true);
4132
4133 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4134 gsi = gsi_last_bb (exit_bb);
4135 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4136 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4137 false, GSI_SAME_STMT);
4138 gsi_remove (&gsi, true);
4139
4140 /* Connect all the blocks. */
4141 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4142 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4143
4144 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4145 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4146
4147 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4148 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4149 recompute_dominator (CDI_DOMINATORS, body_bb));
4150 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4151 recompute_dominator (CDI_DOMINATORS, fin_bb));
4152 }
4153
4154
4155 /* A subroutine of expand_omp_for. Generate code for a parallel
4156 loop with static schedule and a specified chunk size. Given
4157 parameters:
4158
4159 for (V = N1; V cond N2; V += STEP) BODY;
4160
4161 where COND is "<" or ">", we generate pseudocode
4162
4163 if (cond is <)
4164 adj = STEP - 1;
4165 else
4166 adj = STEP + 1;
4167 if ((__typeof (V)) -1 > 0 && cond is >)
4168 n = -(adj + N2 - N1) / -STEP;
4169 else
4170 n = (adj + N2 - N1) / STEP;
4171 trip = 0;
4172 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4173 here so that V is defined
4174 if the loop is not entered
4175 L0:
4176 s0 = (trip * nthreads + threadid) * CHUNK;
4177 e0 = min(s0 + CHUNK, n);
4178 if (s0 < n) goto L1; else goto L4;
4179 L1:
4180 V = s0 * STEP + N1;
4181 e = e0 * STEP + N1;
4182 L2:
4183 BODY;
4184 V += STEP;
4185 if (V cond e) goto L2; else goto L3;
4186 L3:
4187 trip += 1;
4188 goto L0;
4189 L4:
4190 */
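/* A worked example of the chunked partitioning above (illustrative
   values only): with N1=0, N2=10, STEP=1, cond "<", CHUNK=2 and
   nthreads=2, trip 0 gives thread 0 iterations 0..1 (s0=0, e0=2) and
   thread 1 iterations 2..3; trip 1 gives 4..5 and 6..7; on trip 2
   thread 0 runs 8..9 while thread 1 computes s0 = 10, fails the
   s0 < n test and leaves the loop.  */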
4191
4192 static void
4193 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4194 {
4195 tree n, s0, e0, e, t;
4196 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4197 tree type, itype, v_main, v_back, v_extra;
4198 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4199 basic_block trip_update_bb, cont_bb, fin_bb;
4200 gimple_stmt_iterator si;
4201 gimple stmt;
4202 edge se;
4203
4204 itype = type = TREE_TYPE (fd->loop.v);
4205 if (POINTER_TYPE_P (type))
4206 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4207
4208 entry_bb = region->entry;
4209 se = split_block (entry_bb, last_stmt (entry_bb));
4210 entry_bb = se->src;
4211 iter_part_bb = se->dest;
4212 cont_bb = region->cont;
4213 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4214 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4215 == FALLTHRU_EDGE (cont_bb)->dest);
4216 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4217 body_bb = single_succ (seq_start_bb);
4218 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4219 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4220 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4221 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4222 exit_bb = region->exit;
4223
4224 /* Trip and adjustment setup goes in ENTRY_BB. */
4225 si = gsi_last_bb (entry_bb);
4226 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4227
4228 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4229 t = fold_convert (itype, t);
4230 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4231 true, GSI_SAME_STMT);
4232
4233 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4234 t = fold_convert (itype, t);
4235 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4236 true, GSI_SAME_STMT);
4237
4238 fd->loop.n1
4239 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4240 true, NULL_TREE, true, GSI_SAME_STMT);
4241 fd->loop.n2
4242 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4243 true, NULL_TREE, true, GSI_SAME_STMT);
4244 fd->loop.step
4245 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4246 true, NULL_TREE, true, GSI_SAME_STMT);
4247 fd->chunk_size
4248 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4249 true, NULL_TREE, true, GSI_SAME_STMT);
4250
4251 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4252 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4253 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4254 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4255 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4256 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4257 fold_build1 (NEGATE_EXPR, itype, t),
4258 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4259 else
4260 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4261 t = fold_convert (itype, t);
4262 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4263 true, GSI_SAME_STMT);
4264
4265 trip_var = create_tmp_var (itype, ".trip");
4266 if (gimple_in_ssa_p (cfun))
4267 {
4268 add_referenced_var (trip_var);
4269 trip_init = make_ssa_name (trip_var, NULL);
4270 trip_main = make_ssa_name (trip_var, NULL);
4271 trip_back = make_ssa_name (trip_var, NULL);
4272 }
4273 else
4274 {
4275 trip_init = trip_var;
4276 trip_main = trip_var;
4277 trip_back = trip_var;
4278 }
4279
4280 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4281 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4282
4283 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4284 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4285 if (POINTER_TYPE_P (type))
4286 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4287 fold_convert (sizetype, t));
4288 else
4289 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4290 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4291 true, GSI_SAME_STMT);
4292
4293 /* Remove the GIMPLE_OMP_FOR. */
4294 gsi_remove (&si, true);
4295
4296 /* Iteration space partitioning goes in ITER_PART_BB. */
4297 si = gsi_last_bb (iter_part_bb);
4298
4299 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4300 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4301 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4302 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4303 false, GSI_CONTINUE_LINKING);
4304
4305 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4306 t = fold_build2 (MIN_EXPR, itype, t, n);
4307 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4308 false, GSI_CONTINUE_LINKING);
4309
4310 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4311 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4312
4313 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4314 si = gsi_start_bb (seq_start_bb);
4315
4316 t = fold_convert (itype, s0);
4317 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4318 if (POINTER_TYPE_P (type))
4319 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4320 fold_convert (sizetype, t));
4321 else
4322 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4323 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4324 false, GSI_CONTINUE_LINKING);
4325 stmt = gimple_build_assign (fd->loop.v, t);
4326 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4327
4328 t = fold_convert (itype, e0);
4329 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4330 if (POINTER_TYPE_P (type))
4331 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4332 fold_convert (sizetype, t));
4333 else
4334 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4335 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4336 false, GSI_CONTINUE_LINKING);
4337
4338 /* The code controlling the sequential loop goes in CONT_BB,
4339 replacing the GIMPLE_OMP_CONTINUE. */
4340 si = gsi_last_bb (cont_bb);
4341 stmt = gsi_stmt (si);
4342 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4343 v_main = gimple_omp_continue_control_use (stmt);
4344 v_back = gimple_omp_continue_control_def (stmt);
4345
4346 if (POINTER_TYPE_P (type))
4347 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4348 fold_convert (sizetype, fd->loop.step));
4349 else
4350 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4351 stmt = gimple_build_assign (v_back, t);
4352 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4353
4354 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4355 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4356
4357 /* Remove GIMPLE_OMP_CONTINUE. */
4358 gsi_remove (&si, true);
4359
4360 /* Trip update code goes into TRIP_UPDATE_BB. */
4361 si = gsi_start_bb (trip_update_bb);
4362
4363 t = build_int_cst (itype, 1);
4364 t = build2 (PLUS_EXPR, itype, trip_main, t);
4365 stmt = gimple_build_assign (trip_back, t);
4366 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4367
4368 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4369 si = gsi_last_bb (exit_bb);
4370 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4371 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4372 false, GSI_SAME_STMT);
4373 gsi_remove (&si, true);
4374
4375 /* Connect the new blocks. */
4376 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4377 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4378
4379 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4380 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4381
4382 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4383
4384 if (gimple_in_ssa_p (cfun))
4385 {
4386 gimple_stmt_iterator psi;
4387 gimple phi;
4388 edge re, ene;
4389 edge_var_map_vector head;
4390 edge_var_map *vm;
4391 size_t i;
4392
4393 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4394 remove arguments of the phi nodes in fin_bb. We need to create
4395 appropriate phi nodes in iter_part_bb instead. */
4396 se = single_pred_edge (fin_bb);
4397 re = single_succ_edge (trip_update_bb);
4398 head = redirect_edge_var_map_vector (re);
4399 ene = single_succ_edge (entry_bb);
4400
4401 psi = gsi_start_phis (fin_bb);
4402 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4403 gsi_next (&psi), ++i)
4404 {
4405 gimple nphi;
4406
4407 phi = gsi_stmt (psi);
4408 t = gimple_phi_result (phi);
4409 gcc_assert (t == redirect_edge_var_map_result (vm));
4410 nphi = create_phi_node (t, iter_part_bb);
4411 SSA_NAME_DEF_STMT (t) = nphi;
4412
4413 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4414 /* A special case -- fd->loop.v is not yet computed in
4415 iter_part_bb; we need to use v_extra instead. */
4416 if (t == fd->loop.v)
4417 t = v_extra;
4418 add_phi_arg (nphi, t, ene);
4419 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
4420 }
4421 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4422 redirect_edge_var_map_clear (re);
4423 while (1)
4424 {
4425 psi = gsi_start_phis (fin_bb);
4426 if (gsi_end_p (psi))
4427 break;
4428 remove_phi_node (&psi, false);
4429 }
4430
4431 /* Make phi node for trip. */
4432 phi = create_phi_node (trip_main, iter_part_bb);
4433 SSA_NAME_DEF_STMT (trip_main) = phi;
4434 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4435 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
4436 }
4437
4438 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4439 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4440 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4441 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4442 recompute_dominator (CDI_DOMINATORS, fin_bb));
4443 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4444 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4445 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4446 recompute_dominator (CDI_DOMINATORS, body_bb));
4447 }
4448
4449
4450 /* Expand the OpenMP loop defined by REGION. */
4451
4452 static void
4453 expand_omp_for (struct omp_region *region)
4454 {
4455 struct omp_for_data fd;
4456 struct omp_for_data_loop *loops;
4457
4458 loops
4459 = (struct omp_for_data_loop *)
4460 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4461 * sizeof (struct omp_for_data_loop));
4462 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4463 region->sched_kind = fd.sched_kind;
4464
4465 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4466 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4467 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4468 if (region->cont)
4469 {
4470 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4471 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4472 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4473 }
4474
4475 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4476 && !fd.have_ordered
4477 && fd.collapse == 1
4478 && region->cont != NULL)
4479 {
4480 if (fd.chunk_size == NULL)
4481 expand_omp_for_static_nochunk (region, &fd);
4482 else
4483 expand_omp_for_static_chunk (region, &fd);
4484 }
4485 else
4486 {
4487 int fn_index, start_ix, next_ix;
4488
4489 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4490 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4491 ? 3 : fd.sched_kind;
4492 fn_index += fd.have_ordered * 4;
4493 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4494 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4495 if (fd.iter_type == long_long_unsigned_type_node)
4496 {
4497 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4498 - BUILT_IN_GOMP_LOOP_STATIC_START;
4499 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4500 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4501 }
4502 expand_omp_for_generic (region, &fd, start_ix, next_ix);
4503 }
4504
4505 update_ssa (TODO_update_ssa_only_virtuals);
4506 }
4507
4508
4509 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4510
4511 v = GOMP_sections_start (n);
4512 L0:
4513 switch (v)
4514 {
4515 case 0:
4516 goto L2;
4517 case 1:
4518 section 1;
4519 goto L1;
4520 case 2:
4521 ...
4522 case n:
4523 ...
4524 default:
4525 abort ();
4526 }
4527 L1:
4528 v = GOMP_sections_next ();
4529 goto L0;
4530 L2:
4531 reduction;
4532
4533 If this is a combined parallel sections construct, replace the call to
4534 GOMP_sections_start with a call to GOMP_sections_next. */
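/* For instance (illustrative source; foo and bar are placeholders),

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   becomes a loop around the switch above in which case 1 runs foo,
   case 2 runs bar, case 0 leaves the construct once GOMP_sections_next
   returns 0, and the default case traps.  */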
4535
4536 static void
4537 expand_omp_sections (struct omp_region *region)
4538 {
4539 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4540 VEC (tree,heap) *label_vec;
4541 unsigned len;
4542 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4543 gimple_stmt_iterator si, switch_si;
4544 gimple sections_stmt, stmt, cont;
4545 edge_iterator ei;
4546 edge e;
4547 struct omp_region *inner;
4548 unsigned i, casei;
4549 bool exit_reachable = region->cont != NULL;
4550
4551 gcc_assert (exit_reachable == (region->exit != NULL));
4552 entry_bb = region->entry;
4553 l0_bb = single_succ (entry_bb);
4554 l1_bb = region->cont;
4555 l2_bb = region->exit;
4556 if (exit_reachable)
4557 {
4558 if (single_pred (l2_bb) == l0_bb)
4559 l2 = gimple_block_label (l2_bb);
4560 else
4561 {
4562 /* This can happen if there are reductions. */
4563 len = EDGE_COUNT (l0_bb->succs);
4564 gcc_assert (len > 0);
4565 e = EDGE_SUCC (l0_bb, len - 1);
4566 si = gsi_last_bb (e->dest);
4567 l2 = NULL_TREE;
4568 if (gsi_end_p (si)
4569 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4570 l2 = gimple_block_label (e->dest);
4571 else
4572 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4573 {
4574 si = gsi_last_bb (e->dest);
4575 if (gsi_end_p (si)
4576 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4577 {
4578 l2 = gimple_block_label (e->dest);
4579 break;
4580 }
4581 }
4582 }
4583 default_bb = create_empty_bb (l1_bb->prev_bb);
4584 l1 = gimple_block_label (l1_bb);
4585 }
4586 else
4587 {
4588 default_bb = create_empty_bb (l0_bb);
4589 l1 = NULL_TREE;
4590 l2 = gimple_block_label (default_bb);
4591 }
4592
4593 /* We will build a switch() with enough cases for all the
4594 GIMPLE_OMP_SECTION regions, a '0' case to signal that there is no more work,
4595 and a default case to abort if something goes wrong. */
4596 len = EDGE_COUNT (l0_bb->succs);
4597
4598 /* Use VEC_quick_push on label_vec throughout, since we know the size
4599 in advance. */
4600 label_vec = VEC_alloc (tree, heap, len);
4601
4602 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4603 GIMPLE_OMP_SECTIONS statement. */
4604 si = gsi_last_bb (entry_bb);
4605 sections_stmt = gsi_stmt (si);
4606 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4607 vin = gimple_omp_sections_control (sections_stmt);
4608 if (!is_combined_parallel (region))
4609 {
4610 /* If we are not inside a combined parallel+sections region,
4611 call GOMP_sections_start. */
4612 t = build_int_cst (unsigned_type_node,
4613 exit_reachable ? len - 1 : len);
4614 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4615 stmt = gimple_build_call (u, 1, t);
4616 }
4617 else
4618 {
4619 /* Otherwise, call GOMP_sections_next. */
4620 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4621 stmt = gimple_build_call (u, 0);
4622 }
4623 gimple_call_set_lhs (stmt, vin);
4624 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4625 gsi_remove (&si, true);
4626
4627 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4628 L0_BB. */
4629 switch_si = gsi_last_bb (l0_bb);
4630 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4631 if (exit_reachable)
4632 {
4633 cont = last_stmt (l1_bb);
4634 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4635 vmain = gimple_omp_continue_control_use (cont);
4636 vnext = gimple_omp_continue_control_def (cont);
4637 }
4638 else
4639 {
4640 vmain = vin;
4641 vnext = NULL_TREE;
4642 }
4643
4644 i = 0;
4645 if (exit_reachable)
4646 {
4647 t = build3 (CASE_LABEL_EXPR, void_type_node,
4648 build_int_cst (unsigned_type_node, 0), NULL, l2);
4649 VEC_quick_push (tree, label_vec, t);
4650 i++;
4651 }
4652
4653 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4654 for (inner = region->inner, casei = 1;
4655 inner;
4656 inner = inner->next, i++, casei++)
4657 {
4658 basic_block s_entry_bb, s_exit_bb;
4659
4660 /* Skip optional reduction region. */
4661 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4662 {
4663 --i;
4664 --casei;
4665 continue;
4666 }
4667
4668 s_entry_bb = inner->entry;
4669 s_exit_bb = inner->exit;
4670
4671 t = gimple_block_label (s_entry_bb);
4672 u = build_int_cst (unsigned_type_node, casei);
4673 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4674 VEC_quick_push (tree, label_vec, u);
4675
4676 si = gsi_last_bb (s_entry_bb);
4677 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4678 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4679 gsi_remove (&si, true);
4680 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4681
4682 if (s_exit_bb == NULL)
4683 continue;
4684
4685 si = gsi_last_bb (s_exit_bb);
4686 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4687 gsi_remove (&si, true);
4688
4689 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4690 }
4691
4692 /* Error handling code goes in DEFAULT_BB. */
4693 t = gimple_block_label (default_bb);
4694 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4695 make_edge (l0_bb, default_bb, 0);
4696
4697 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4698 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4699 gsi_remove (&switch_si, true);
4700 VEC_free (tree, heap, label_vec);
4701
4702 si = gsi_start_bb (default_bb);
4703 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4704 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4705
4706 if (exit_reachable)
4707 {
4708 /* Code to get the next section goes in L1_BB. */
4709 si = gsi_last_bb (l1_bb);
4710 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4711
4712 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4713 gimple_call_set_lhs (stmt, vnext);
4714 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4715 gsi_remove (&si, true);
4716
4717 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4718
4719 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4720 si = gsi_last_bb (l2_bb);
4721 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4722 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4723 else
4724 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4725 stmt = gimple_build_call (t, 0);
4726 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4727 gsi_remove (&si, true);
4728 }
4729
4730 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4731 }
4732
4733
4734 /* Expand code for an OpenMP single directive. We've already expanded
4735 much of the code; here we simply place the GOMP_barrier call. */
4736
4737 static void
4738 expand_omp_single (struct omp_region *region)
4739 {
4740 basic_block entry_bb, exit_bb;
4741 gimple_stmt_iterator si;
4742 bool need_barrier = false;
4743
4744 entry_bb = region->entry;
4745 exit_bb = region->exit;
4746
4747 si = gsi_last_bb (entry_bb);
4748 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4749 be removed. We need to ensure that the thread that entered the single
4750 does not exit before the data is copied out by the other threads. */
4751 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4752 OMP_CLAUSE_COPYPRIVATE))
4753 need_barrier = true;
4754 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4755 gsi_remove (&si, true);
4756 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4757
4758 si = gsi_last_bb (exit_bb);
4759 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4760 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4761 false, GSI_SAME_STMT);
4762 gsi_remove (&si, true);
4763 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4764 }
4765
4766
4767 /* Generic expansion for OpenMP synchronization directives: master,
4768 ordered and critical. All we need to do here is remove the entry
4769 and exit markers for REGION. */
4770
4771 static void
4772 expand_omp_synch (struct omp_region *region)
4773 {
4774 basic_block entry_bb, exit_bb;
4775 gimple_stmt_iterator si;
4776
4777 entry_bb = region->entry;
4778 exit_bb = region->exit;
4779
4780 si = gsi_last_bb (entry_bb);
4781 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4782 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4783 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4784 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4785 gsi_remove (&si, true);
4786 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4787
4788 if (exit_bb)
4789 {
4790 si = gsi_last_bb (exit_bb);
4791 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4792 gsi_remove (&si, true);
4793 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4794 }
4795 }
4796
4797 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4798 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4799 size of the data type, and thus usable to find the index of the builtin
4800 decl. Returns false if the expression is not of the proper form. */
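/* For example (illustrative), "#pragma omp atomic" followed by
   "x += 1" on a 4-byte int (INDEX == 2) matches this form and is
   emitted as a single call to __sync_fetch_and_add_4 (&x, 1).  */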
4801
4802 static bool
4803 expand_omp_atomic_fetch_op (basic_block load_bb,
4804 tree addr, tree loaded_val,
4805 tree stored_val, int index)
4806 {
4807 enum built_in_function base;
4808 tree decl, itype, call;
4809 enum insn_code *optab;
4810 tree rhs;
4811 basic_block store_bb = single_succ (load_bb);
4812 gimple_stmt_iterator gsi;
4813 gimple stmt;
4814
4815 /* We expect to find the following sequences:
4816
4817 load_bb:
4818 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4819
4820 store_bb:
4821 val = tmp OP something; (or: something OP tmp)
4822 GIMPLE_OMP_STORE (val)
4823
4824 ???FIXME: Allow a more flexible sequence.
4825 Perhaps use data flow to pick the statements.
4826
4827 */
4828
4829 gsi = gsi_after_labels (store_bb);
4830 stmt = gsi_stmt (gsi);
4831 if (!is_gimple_assign (stmt))
4832 return false;
4833 gsi_next (&gsi);
4834 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4835 return false;
4836
4837 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4838 return false;
4839
4840 /* Check for one of the supported fetch-op operations. */
4841 switch (gimple_assign_rhs_code (stmt))
4842 {
4843 case PLUS_EXPR:
4844 case POINTER_PLUS_EXPR:
4845 base = BUILT_IN_FETCH_AND_ADD_N;
4846 optab = sync_add_optab;
4847 break;
4848 case MINUS_EXPR:
4849 base = BUILT_IN_FETCH_AND_SUB_N;
4850 optab = sync_add_optab;
4851 break;
4852 case BIT_AND_EXPR:
4853 base = BUILT_IN_FETCH_AND_AND_N;
4854 optab = sync_and_optab;
4855 break;
4856 case BIT_IOR_EXPR:
4857 base = BUILT_IN_FETCH_AND_OR_N;
4858 optab = sync_ior_optab;
4859 break;
4860 case BIT_XOR_EXPR:
4861 base = BUILT_IN_FETCH_AND_XOR_N;
4862 optab = sync_xor_optab;
4863 break;
4864 default:
4865 return false;
4866 }
4867 /* Make sure the expression is of the proper form. */
4868 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4869 rhs = gimple_assign_rhs2 (stmt);
4870 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4871 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4872 rhs = gimple_assign_rhs1 (stmt);
4873 else
4874 return false;
4875
4876 decl = built_in_decls[base + index + 1];
4877 itype = TREE_TYPE (TREE_TYPE (decl));
4878
4879 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4880 return false;
4881
4882 gsi = gsi_last_bb (load_bb);
4883 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4884 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
4885 call = fold_convert (void_type_node, call);
4886 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4887 gsi_remove (&gsi, true);
4888
4889 gsi = gsi_last_bb (store_bb);
4890 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4891 gsi_remove (&gsi, true);
4892 gsi = gsi_last_bb (store_bb);
4893 gsi_remove (&gsi, true);
4894
4895 if (gimple_in_ssa_p (cfun))
4896 update_ssa (TODO_update_ssa_no_phi);
4897
4898 return true;
4899 }
4900
4901 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4902
4903 oldval = *addr;
4904 repeat:
4905 newval = rhs; // with oldval replacing *addr in rhs
4906 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
4907 if (oldval != newval)
4908 goto repeat;
4909
4910 INDEX is log2 of the size of the data type, and thus usable to find the
4911 index of the builtin decl. */
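/* As an illustrative example, an atomic update of a 4-byte float is
   handled by view-converting the value to a 4-byte integer and
   retrying __sync_val_compare_and_swap_4 until the value it returns
   matches the value the update was computed from.  */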
4912
4913 static bool
4914 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4915 tree addr, tree loaded_val, tree stored_val,
4916 int index)
4917 {
4918 tree loadedi, storedi, initial, new_storedi, old_vali;
4919 tree type, itype, cmpxchg, iaddr;
4920 gimple_stmt_iterator si;
4921 basic_block loop_header = single_succ (load_bb);
4922 gimple phi, stmt;
4923 edge e;
4924
4925 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
4926 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4927 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
4928
4929 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
4930 return false;
4931
4932 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
4933 si = gsi_last_bb (load_bb);
4934 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
4935
4936 /* For floating-point values, we'll need to view-convert them to integers
4937 so that we can perform the atomic compare and swap. Simplify the
4938 following code by always setting up the "i"ntegral variables. */
4939 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
4940 {
4941 tree iaddr_val;
4942
4943 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
4944 iaddr_val
4945 = force_gimple_operand_gsi (&si,
4946 fold_convert (TREE_TYPE (iaddr), addr),
4947 false, NULL_TREE, true, GSI_SAME_STMT);
4948 stmt = gimple_build_assign (iaddr, iaddr_val);
4949 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4950 DECL_NO_TBAA_P (iaddr) = 1;
4951 DECL_POINTER_ALIAS_SET (iaddr) = 0;
4952 loadedi = create_tmp_var (itype, NULL);
4953 if (gimple_in_ssa_p (cfun))
4954 {
4955 add_referenced_var (iaddr);
4956 add_referenced_var (loadedi);
4957 loadedi = make_ssa_name (loadedi, NULL);
4958 }
4959 }
4960 else
4961 {
4962 iaddr = addr;
4963 loadedi = loaded_val;
4964 }
4965
4966 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
4967 true, NULL_TREE, true, GSI_SAME_STMT);
4968
4969 /* Move the value to the LOADEDI temporary. */
4970 if (gimple_in_ssa_p (cfun))
4971 {
4972 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
4973 phi = create_phi_node (loadedi, loop_header);
4974 SSA_NAME_DEF_STMT (loadedi) = phi;
4975 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
4976 initial);
4977 }
4978 else
4979 gsi_insert_before (&si,
4980 gimple_build_assign (loadedi, initial),
4981 GSI_SAME_STMT);
4982 if (loadedi != loaded_val)
4983 {
4984 gimple_stmt_iterator gsi2;
4985 tree x;
4986
4987 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
4988 gsi2 = gsi_start_bb (loop_header);
4989 if (gimple_in_ssa_p (cfun))
4990 {
4991 gimple stmt;
4992 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
4993 true, GSI_SAME_STMT);
4994 stmt = gimple_build_assign (loaded_val, x);
4995 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
4996 }
4997 else
4998 {
4999 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5000 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5001 true, GSI_SAME_STMT);
5002 }
5003 }
5004 gsi_remove (&si, true);
5005
5006 si = gsi_last_bb (store_bb);
5007 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5008
5009 if (iaddr == addr)
5010 storedi = stored_val;
5011 else
5012 storedi =
5013 force_gimple_operand_gsi (&si,
5014 build1 (VIEW_CONVERT_EXPR, itype,
5015 stored_val), true, NULL_TREE, true,
5016 GSI_SAME_STMT);
5017
5018 /* Build the compare&swap statement. */
5019 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5020 new_storedi = force_gimple_operand_gsi (&si,
5021 fold_convert (itype, new_storedi),
5022 true, NULL_TREE,
5023 true, GSI_SAME_STMT);
5024
5025 if (gimple_in_ssa_p (cfun))
5026 old_vali = loadedi;
5027 else
5028 {
5029 old_vali = create_tmp_var (itype, NULL);
5030 if (gimple_in_ssa_p (cfun))
5031 add_referenced_var (old_vali);
5032 stmt = gimple_build_assign (old_vali, loadedi);
5033 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5034
5035 stmt = gimple_build_assign (loadedi, new_storedi);
5036 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5037 }
5038
5039 /* Note that we always perform the comparison as an integer, even for
5040 floating point. This allows the atomic operation to properly
5041 succeed even with NaNs and -0.0. */
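/* For instance, if the location holds a NaN, a floating-point
   comparison between the value returned by the compare-and-swap and
   the value the update was based on would never report equality, so
   the loop would retry forever even after the swap succeeded;
   comparing the integer views avoids that.  */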
5042 stmt = gimple_build_cond_empty
5043 (build2 (NE_EXPR, boolean_type_node,
5044 new_storedi, old_vali));
5045 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5046
5047 /* Update cfg. */
5048 e = single_succ_edge (store_bb);
5049 e->flags &= ~EDGE_FALLTHRU;
5050 e->flags |= EDGE_FALSE_VALUE;
5051
5052 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5053
5054 /* Copy the new value to loadedi (we already did that before the condition
5055 if we are not in SSA). */
5056 if (gimple_in_ssa_p (cfun))
5057 {
5058 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5059 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5060 }
5061
5062 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5063 gsi_remove (&si, true);
5064
5065 if (gimple_in_ssa_p (cfun))
5066 update_ssa (TODO_update_ssa_no_phi);
5067
5068 return true;
5069 }
5070
5071 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5072
5073 GOMP_atomic_start ();
5074 *addr = rhs;
5075 GOMP_atomic_end ();
5076
5077 The result is not globally atomic, but works so long as all parallel
5078 references are within #pragma omp atomic directives. According to
5079 responses received from omp@openmp.org, this appears to be within spec,
5080 which makes sense, since that's how several other compilers handle
5081 this situation as well.
5082 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5083 expanding. STORED_VAL is the operand of the matching
5084 GIMPLE_OMP_ATOMIC_STORE.
5085
5086 We replace
5087 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5088 loaded_val = *addr;
5089
5090 and replace
5091 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5092 *addr = stored_val;
5093 */
5094
5095 static bool
5096 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5097 tree addr, tree loaded_val, tree stored_val)
5098 {
5099 gimple_stmt_iterator si;
5100 gimple stmt;
5101 tree t;
5102
5103 si = gsi_last_bb (load_bb);
5104 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5105
5106 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5107 t = build_function_call_expr (t, 0);
5108 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5109
5110 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5111 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5112 gsi_remove (&si, true);
5113
5114 si = gsi_last_bb (store_bb);
5115 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5116
5117 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
5118 stored_val);
5119 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5120
5121 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5122 t = build_function_call_expr (t, 0);
5123 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5124 gsi_remove (&si, true);
5125
5126 if (gimple_in_ssa_p (cfun))
5127 update_ssa (TODO_update_ssa_no_phi);
5128 return true;
5129 }
5130
5131 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
5132 using expand_omp_atomic_fetch_op. If that fails, we try to
5133 call expand_omp_atomic_pipeline, and if that fails too, the
5134 ultimate fallback is wrapping the operation in a mutex
5135 (expand_omp_atomic_mutex). REGION is the atomic region built
5136 by build_omp_regions_1(). */
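/* For example (illustrative), an atomic update of a naturally aligned
   8-byte double has index == 3; expand_omp_atomic_fetch_op does not
   apply because the type is neither integral nor a pointer, so the
   compare-and-swap loop is tried next, and the mutex fallback is used
   only if the target lacks __sync_val_compare_and_swap_8.  */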
5137
5138 static void
5139 expand_omp_atomic (struct omp_region *region)
5140 {
5141 basic_block load_bb = region->entry, store_bb = region->exit;
5142 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5143 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5144 tree addr = gimple_omp_atomic_load_rhs (load);
5145 tree stored_val = gimple_omp_atomic_store_val (store);
5146 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5147 HOST_WIDE_INT index;
5148
5149 /* Make sure the type is one of the supported sizes. */
5150 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5151 index = exact_log2 (index);
5152 if (index >= 0 && index <= 4)
5153 {
5154 unsigned int align = TYPE_ALIGN_UNIT (type);
5155
5156 /* __sync builtins require strict data alignment. */
5157 if (exact_log2 (align) >= index)
5158 {
5159 /* When possible, use specialized atomic update functions. */
5160 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5161 && store_bb == single_succ (load_bb))
5162 {
5163 if (expand_omp_atomic_fetch_op (load_bb, addr,
5164 loaded_val, stored_val, index))
5165 return;
5166 }
5167
5168 /* If we don't have specialized __sync builtins, try to implement it
5169 as a compare and swap loop. */
5170 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5171 loaded_val, stored_val, index))
5172 return;
5173 }
5174 }
5175
5176 /* The ultimate fallback is wrapping the operation in a mutex. */
5177 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5178 }
5179
5180
5181 /* Expand the parallel region tree rooted at REGION. Expansion
5182 proceeds in depth-first order. Innermost regions are expanded
5183 first. This way, parallel regions that require a new function to
5184 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5185 internal dependencies in their body. */
5186
5187 static void
5188 expand_omp (struct omp_region *region)
5189 {
5190 while (region)
5191 {
5192 location_t saved_location;
5193
5194 /* First, determine whether this is a combined parallel+workshare
5195 region. */
5196 if (region->type == GIMPLE_OMP_PARALLEL)
5197 determine_parallel_type (region);
5198
5199 if (region->inner)
5200 expand_omp (region->inner);
5201
5202 saved_location = input_location;
5203 if (gimple_has_location (last_stmt (region->entry)))
5204 input_location = gimple_location (last_stmt (region->entry));
5205
5206 switch (region->type)
5207 {
5208 case GIMPLE_OMP_PARALLEL:
5209 case GIMPLE_OMP_TASK:
5210 expand_omp_taskreg (region);
5211 break;
5212
5213 case GIMPLE_OMP_FOR:
5214 expand_omp_for (region);
5215 break;
5216
5217 case GIMPLE_OMP_SECTIONS:
5218 expand_omp_sections (region);
5219 break;
5220
5221 case GIMPLE_OMP_SECTION:
5222 /* Individual omp sections are handled together with their
5223 parent GIMPLE_OMP_SECTIONS region. */
5224 break;
5225
5226 case GIMPLE_OMP_SINGLE:
5227 expand_omp_single (region);
5228 break;
5229
5230 case GIMPLE_OMP_MASTER:
5231 case GIMPLE_OMP_ORDERED:
5232 case GIMPLE_OMP_CRITICAL:
5233 expand_omp_synch (region);
5234 break;
5235
5236 case GIMPLE_OMP_ATOMIC_LOAD:
5237 expand_omp_atomic (region);
5238 break;
5239
5240 default:
5241 gcc_unreachable ();
5242 }
5243
5244 input_location = saved_location;
5245 region = region->next;
5246 }
5247 }
5248
5249
5250 /* Helper for build_omp_regions. Scan the dominator tree starting at
5251 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5252 true, the function ends once a single tree is built (otherwise, a whole
5253 forest of OMP constructs may be built). */
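/* For example (illustrative), a GIMPLE_OMP_FOR nested directly inside
   a GIMPLE_OMP_PARALLEL yields a parallel region whose ->inner is the
   loop region; each region's entry and exit (and, for the loop, cont)
   blocks are filled in from the corresponding GIMPLE_OMP_* statements
   found during the scan.  */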
5254
5255 static void
5256 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5257 bool single_tree)
5258 {
5259 gimple_stmt_iterator gsi;
5260 gimple stmt;
5261 basic_block son;
5262
5263 gsi = gsi_last_bb (bb);
5264 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5265 {
5266 struct omp_region *region;
5267 enum gimple_code code;
5268
5269 stmt = gsi_stmt (gsi);
5270 code = gimple_code (stmt);
5271 if (code == GIMPLE_OMP_RETURN)
5272 {
5273 /* STMT is the return point out of region PARENT. Mark it
5274 as the exit point and make PARENT the immediately
5275 enclosing region. */
5276 gcc_assert (parent);
5277 region = parent;
5278 region->exit = bb;
5279 parent = parent->outer;
5280 }
5281 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5282 {
5283 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5284 GIMPLE_OMP_RETURN, but matches with
5285 GIMPLE_OMP_ATOMIC_LOAD. */
5286 gcc_assert (parent);
5287 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5288 region = parent;
5289 region->exit = bb;
5290 parent = parent->outer;
5291 }
5292
5293 else if (code == GIMPLE_OMP_CONTINUE)
5294 {
5295 gcc_assert (parent);
5296 parent->cont = bb;
5297 }
5298 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5299 {
5300 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5301 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5302 ;
5303 }
5304 else
5305 {
5306 /* Otherwise, this directive becomes the parent for a new
5307 region. */
5308 region = new_omp_region (bb, code, parent);
5309 parent = region;
5310 }
5311 }
5312
5313 if (single_tree && !parent)
5314 return;
5315
5316 for (son = first_dom_son (CDI_DOMINATORS, bb);
5317 son;
5318 son = next_dom_son (CDI_DOMINATORS, son))
5319 build_omp_regions_1 (son, parent, single_tree);
5320 }
5321
5322 /* Builds the tree of OMP regions rooted at ROOT, storing it in
5323 root_omp_region. */
5324
5325 static void
5326 build_omp_regions_root (basic_block root)
5327 {
5328 gcc_assert (root_omp_region == NULL);
5329 build_omp_regions_1 (root, NULL, true);
5330 gcc_assert (root_omp_region != NULL);
5331 }
5332
5333 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
5334
5335 void
5336 omp_expand_local (basic_block head)
5337 {
5338 build_omp_regions_root (head);
5339 if (dump_file && (dump_flags & TDF_DETAILS))
5340 {
5341 fprintf (dump_file, "\nOMP region tree\n\n");
5342 dump_omp_region (dump_file, root_omp_region, 0);
5343 fprintf (dump_file, "\n");
5344 }
5345
5346 remove_exit_barriers (root_omp_region);
5347 expand_omp (root_omp_region);
5348
5349 free_omp_regions ();
5350 }
5351
5352 /* Scan the CFG and build a tree of OMP regions, storing the root of
5353 the OMP region tree in root_omp_region. */
5354
5355 static void
5356 build_omp_regions (void)
5357 {
5358 gcc_assert (root_omp_region == NULL);
5359 calculate_dominance_info (CDI_DOMINATORS);
5360 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5361 }
5362
5363 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5364
5365 static unsigned int
5366 execute_expand_omp (void)
5367 {
5368 build_omp_regions ();
5369
5370 if (!root_omp_region)
5371 return 0;
5372
5373 if (dump_file)
5374 {
5375 fprintf (dump_file, "\nOMP region tree\n\n");
5376 dump_omp_region (dump_file, root_omp_region, 0);
5377 fprintf (dump_file, "\n");
5378 }
5379
5380 remove_exit_barriers (root_omp_region);
5381
5382 expand_omp (root_omp_region);
5383
5384 cleanup_tree_cfg ();
5385
5386 free_omp_regions ();
5387
5388 return 0;
5389 }
5390
5391 /* OMP expansion -- the default pass, run before creation of SSA form. */
5392
5393 static bool
5394 gate_expand_omp (void)
5395 {
5396 return (flag_openmp != 0 && errorcount == 0);
5397 }
5398
5399 struct gimple_opt_pass pass_expand_omp =
5400 {
5401 {
5402 GIMPLE_PASS,
5403 "ompexp", /* name */
5404 gate_expand_omp, /* gate */
5405 execute_expand_omp, /* execute */
5406 NULL, /* sub */
5407 NULL, /* next */
5408 0, /* static_pass_number */
5409 0, /* tv_id */
5410 PROP_gimple_any, /* properties_required */
5411 PROP_gimple_lomp, /* properties_provided */
5412 0, /* properties_destroyed */
5413 0, /* todo_flags_start */
5414 TODO_dump_func /* todo_flags_finish */
5415 }
5416 };
5417 \f
5418 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5419
5420 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5421 CTX is the enclosing OMP context for the current statement. */
5422
5423 static void
5424 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5425 {
5426 tree block, control;
5427 gimple_stmt_iterator tgsi;
5428 unsigned i, len;
5429 gimple stmt, new_stmt, bind, t;
5430 gimple_seq ilist, dlist, olist, new_body, body;
5431 struct gimplify_ctx gctx;
5432
5433 stmt = gsi_stmt (*gsi_p);
5434
5435 push_gimplify_context (&gctx);
5436
5437 dlist = NULL;
5438 ilist = NULL;
5439 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5440 &ilist, &dlist, ctx);
5441
5442 tgsi = gsi_start (gimple_omp_body (stmt));
5443 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5444 continue;
5445
5446 tgsi = gsi_start (gimple_omp_body (stmt));
5447 body = NULL;
5448 for (i = 0; i < len; i++, gsi_next (&tgsi))
5449 {
5450 omp_context *sctx;
5451 gimple sec_start;
5452
5453 sec_start = gsi_stmt (tgsi);
5454 sctx = maybe_lookup_ctx (sec_start);
5455 gcc_assert (sctx);
5456
5457 gimple_seq_add_stmt (&body, sec_start);
5458
5459 lower_omp (gimple_omp_body (sec_start), sctx);
5460 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5461 gimple_omp_set_body (sec_start, NULL);
5462
5463 if (i == len - 1)
5464 {
5465 gimple_seq l = NULL;
5466 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5467 &l, ctx);
5468 gimple_seq_add_seq (&body, l);
5469 gimple_omp_section_set_last (sec_start);
5470 }
5471
5472 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5473 }
5474
5475 block = make_node (BLOCK);
5476 bind = gimple_build_bind (NULL, body, block);
5477
5478 olist = NULL;
5479 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5480
5481 block = make_node (BLOCK);
5482 new_stmt = gimple_build_bind (NULL, NULL, block);
5483
5484 pop_gimplify_context (new_stmt);
5485 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5486 BLOCK_VARS (block) = gimple_bind_vars (bind);
5487 if (BLOCK_VARS (block))
5488 TREE_USED (block) = 1;
5489
5490 new_body = NULL;
5491 gimple_seq_add_seq (&new_body, ilist);
5492 gimple_seq_add_stmt (&new_body, stmt);
5493 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5494 gimple_seq_add_stmt (&new_body, bind);
5495
5496 control = create_tmp_var (unsigned_type_node, ".section");
5497 t = gimple_build_omp_continue (control, control);
5498 gimple_omp_sections_set_control (stmt, control);
5499 gimple_seq_add_stmt (&new_body, t);
5500
5501 gimple_seq_add_seq (&new_body, olist);
5502 gimple_seq_add_seq (&new_body, dlist);
5503
5504 new_body = maybe_catch_exception (new_body);
5505
5506 t = gimple_build_omp_return
5507 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5508 OMP_CLAUSE_NOWAIT));
5509 gimple_seq_add_stmt (&new_body, t);
5510
5511 gimple_bind_set_body (new_stmt, new_body);
5512 gimple_omp_set_body (stmt, NULL);
5513
5514 gsi_replace (gsi_p, new_stmt, true);
5515 }
5516
5517
5518 /* A subroutine of lower_omp_single. Expand the simple form of
5519 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5520
5521 if (GOMP_single_start ())
5522 BODY;
5523 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5524
5525 FIXME. It may be better to delay expanding the logic of this until
5526 pass_expand_omp. The expanded logic may make the job more difficult
5527 for a synchronization analysis pass. */
5528
5529 static void
5530 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5531 {
5532 tree tlabel = create_artificial_label ();
5533 tree flabel = create_artificial_label ();
5534 gimple call, cond;
5535 tree lhs, decl;
5536
5537 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5538 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5539 call = gimple_build_call (decl, 0);
5540 gimple_call_set_lhs (call, lhs);
5541 gimple_seq_add_stmt (pre_p, call);
5542
5543 cond = gimple_build_cond (EQ_EXPR, lhs,
5544 fold_convert (TREE_TYPE (lhs), boolean_true_node),
5545 tlabel, flabel);
5546 gimple_seq_add_stmt (pre_p, cond);
5547 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5548 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5549 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5550 }
5551
5552
5553 /* A subroutine of lower_omp_single. Expand the simple form of
5554 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5555
5556 #pragma omp single copyprivate (a, b, c)
5557
5558 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5559
5560 {
5561 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5562 {
5563 BODY;
5564 copyout.a = a;
5565 copyout.b = b;
5566 copyout.c = c;
5567 GOMP_single_copy_end (&copyout);
5568 }
5569 else
5570 {
5571 a = copyout_p->a;
5572 b = copyout_p->b;
5573 c = copyout_p->c;
5574 }
5575 GOMP_barrier ();
5576 }
5577
5578 FIXME. It may be better to delay expanding the logic of this until
5579 pass_expand_omp. The expanded logic may make the job more difficult
5580 for a synchronization analysis pass. */
5581
5582 static void
5583 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5584 {
5585 tree ptr_type, t, l0, l1, l2;
5586 gimple_seq copyin_seq;
5587
5588 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5589
5590 ptr_type = build_pointer_type (ctx->record_type);
5591 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5592
5593 l0 = create_artificial_label ();
5594 l1 = create_artificial_label ();
5595 l2 = create_artificial_label ();
5596
5597 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5598 t = fold_convert (ptr_type, t);
5599 gimplify_assign (ctx->receiver_decl, t, pre_p);
5600
5601 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5602 build_int_cst (ptr_type, 0));
5603 t = build3 (COND_EXPR, void_type_node, t,
5604 build_and_jump (&l0), build_and_jump (&l1));
5605 gimplify_and_add (t, pre_p);
5606
5607 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5608
5609 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5610
5611 copyin_seq = NULL;
5612 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5613 &copyin_seq, ctx);
5614
5615 t = build_fold_addr_expr (ctx->sender_decl);
5616 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
5617 gimplify_and_add (t, pre_p);
5618
5619 t = build_and_jump (&l2);
5620 gimplify_and_add (t, pre_p);
5621
5622 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5623
5624 gimple_seq_add_seq (pre_p, copyin_seq);
5625
5626 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5627 }
5628
5629
5630 /* Expand code for an OpenMP single directive. */
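
/* Schematically, the lowered form built below is (illustrative sketch only):

	<input clause setup>
	GIMPLE_OMP_SINGLE <clauses>
	<simple or copyprivate expansion of BODY, see the subroutines above>
	<dlist: destructors>
	GIMPLE_OMP_RETURN <nowait if present>

   all wrapped in a GIMPLE_BIND.  */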
5631
5632 static void
5633 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5634 {
5635 tree block;
5636 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5637 gimple_seq bind_body, dlist;
5638 struct gimplify_ctx gctx;
5639
5640 push_gimplify_context (&gctx);
5641
5642 bind_body = NULL;
5643 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5644 &bind_body, &dlist, ctx);
5645 lower_omp (gimple_omp_body (single_stmt), ctx);
5646
5647 gimple_seq_add_stmt (&bind_body, single_stmt);
5648
5649 if (ctx->record_type)
5650 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5651 else
5652 lower_omp_single_simple (single_stmt, &bind_body);
5653
5654 gimple_omp_set_body (single_stmt, NULL);
5655
5656 gimple_seq_add_seq (&bind_body, dlist);
5657
5658 bind_body = maybe_catch_exception (bind_body);
5659
5660 t = gimple_build_omp_return
5661 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5662 OMP_CLAUSE_NOWAIT));
5663 gimple_seq_add_stmt (&bind_body, t);
5664
5665 block = make_node (BLOCK);
5666 bind = gimple_build_bind (NULL, bind_body, block);
5667
5668 pop_gimplify_context (bind);
5669
5670 gimple_bind_append_vars (bind, ctx->block_vars);
5671 BLOCK_VARS (block) = ctx->block_vars;
5672 gsi_replace (gsi_p, bind, true);
5673 if (BLOCK_VARS (block))
5674 TREE_USED (block) = 1;
5675 }
5676
5677
5678 /* Expand code for an OpenMP master directive. */
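
/* The master construct needs no runtime entry point; it is lowered to an
   explicit thread-number test, roughly (sketch only):

	if (omp_get_thread_num () == 0)
	  BODY;

   with no implied barrier (the trailing GIMPLE_OMP_RETURN has the nowait
   flag set).  */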
5679
5680 static void
5681 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5682 {
5683 tree block, lab = NULL, x;
5684 gimple stmt = gsi_stmt (*gsi_p), bind;
5685 gimple_seq tseq;
5686 struct gimplify_ctx gctx;
5687
5688 push_gimplify_context (&gctx);
5689
5690 block = make_node (BLOCK);
5691 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5692 block);
5693
5694 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5695 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5696 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5697 tseq = NULL;
5698 gimplify_and_add (x, &tseq);
5699 gimple_bind_add_seq (bind, tseq);
5700
5701 lower_omp (gimple_omp_body (stmt), ctx);
5702 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5703 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5704 gimple_omp_set_body (stmt, NULL);
5705
5706 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5707
5708 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5709
5710 pop_gimplify_context (bind);
5711
5712 gimple_bind_append_vars (bind, ctx->block_vars);
5713 BLOCK_VARS (block) = ctx->block_vars;
5714 gsi_replace (gsi_p, bind, true);
5715 }
5716
5717
5718 /* Expand code for an OpenMP ordered directive. */
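
/* The ordered construct is lowered to a pair of library calls bracketing
   the body, roughly (sketch only):

	GOMP_ordered_start ();
	BODY;
	GOMP_ordered_end ();

   followed by a GIMPLE_OMP_RETURN with the nowait flag set.  */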
5719
5720 static void
5721 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5722 {
5723 tree block;
5724 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5725 struct gimplify_ctx gctx;
5726
5727 push_gimplify_context (&gctx);
5728
5729 block = make_node (BLOCK);
5730 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5731 block);
5732
5733 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5734 gimple_bind_add_stmt (bind, x);
5735
5736 lower_omp (gimple_omp_body (stmt), ctx);
5737 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5738 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5739 gimple_omp_set_body (stmt, NULL);
5740
5741 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5742 gimple_bind_add_stmt (bind, x);
5743
5744 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5745
5746 pop_gimplify_context (bind);
5747
5748 gimple_bind_append_vars (bind, ctx->block_vars);
5749 BLOCK_VARS (block) = gimple_bind_vars (bind);
5750 gsi_replace (gsi_p, bind, true);
5751 }
5752
5753
5754 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5755 substitution of a couple of function calls. The NAMED case, however,
5756 requires that languages coordinate a symbol name; it is therefore
5757 best handled here in common code. */
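
/* The lowered form is, roughly (illustrative sketch only):

	GOMP_critical_start ();
	BODY;
	GOMP_critical_end ();

   for the unnamed case, and for "#pragma omp critical (foo)":

	GOMP_critical_name_start (&.gomp_critical_user_foo);
	BODY;
	GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is a common, TREE_PUBLIC pointer-sized
   variable, so that every translation unit using the same name shares
   one mutex.  */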
5758
5759 static GTY((param1_is (tree), param2_is (tree)))
5760 splay_tree critical_name_mutexes;
5761
5762 static void
5763 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5764 {
5765 tree block;
5766 tree name, lock, unlock;
5767 gimple stmt = gsi_stmt (*gsi_p), bind;
5768 gimple_seq tbody;
5769 struct gimplify_ctx gctx;
5770
5771 name = gimple_omp_critical_name (stmt);
5772 if (name)
5773 {
5774 tree decl;
5775 splay_tree_node n;
5776
5777 if (!critical_name_mutexes)
5778 critical_name_mutexes
5779 = splay_tree_new_ggc (splay_tree_compare_pointers);
5780
5781 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5782 if (n == NULL)
5783 {
5784 char *new_str;
5785
5786 decl = create_tmp_var_raw (ptr_type_node, NULL);
5787
5788 new_str = ACONCAT ((".gomp_critical_user_",
5789 IDENTIFIER_POINTER (name), NULL));
5790 DECL_NAME (decl) = get_identifier (new_str);
5791 TREE_PUBLIC (decl) = 1;
5792 TREE_STATIC (decl) = 1;
5793 DECL_COMMON (decl) = 1;
5794 DECL_ARTIFICIAL (decl) = 1;
5795 DECL_IGNORED_P (decl) = 1;
5796 varpool_finalize_decl (decl);
5797
5798 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5799 (splay_tree_value) decl);
5800 }
5801 else
5802 decl = (tree) n->value;
5803
5804 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5805 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
5806
5807 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5808 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
5809 }
5810 else
5811 {
5812 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5813 lock = build_call_expr (lock, 0);
5814
5815 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5816 unlock = build_call_expr (unlock, 0);
5817 }
5818
5819 push_gimplify_context (&gctx);
5820
5821 block = make_node (BLOCK);
5822 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5823
5824 tbody = gimple_bind_body (bind);
5825 gimplify_and_add (lock, &tbody);
5826 gimple_bind_set_body (bind, tbody);
5827
5828 lower_omp (gimple_omp_body (stmt), ctx);
5829 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5830 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5831 gimple_omp_set_body (stmt, NULL);
5832
5833 tbody = gimple_bind_body (bind);
5834 gimplify_and_add (unlock, &tbody);
5835 gimple_bind_set_body (bind, tbody);
5836
5837 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5838
5839 pop_gimplify_context (bind);
5840 gimple_bind_append_vars (bind, ctx->block_vars);
5841 BLOCK_VARS (block) = gimple_bind_vars (bind);
5842 gsi_replace (gsi_p, bind, true);
5843 }
5844
5845
5846 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5847 for a lastprivate clause. Given a loop control predicate of (V
5848 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5849 is appended to *DLIST; the iterator initialization is appended to
5850 *BODY_P. */
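
/* For example, for "for (V = 0; V < N; V++)" the step is 1, so the gate
   becomes the strict test (V == N), and "V = 0" is emitted into *BODY_P so
   that a thread that executes no iterations cannot satisfy the predicate
   by accident.  Illustrative only.  */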
5851
5852 static void
5853 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5854 gimple_seq *dlist, struct omp_context *ctx)
5855 {
5856 tree clauses, cond, vinit;
5857 enum tree_code cond_code;
5858 gimple_seq stmts;
5859
5860 cond_code = fd->loop.cond_code;
5861 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5862
5863 /* When possible, use a strict equality expression. This can let
5864 VRP-style optimizations deduce the value and remove a copy. */
5865 if (host_integerp (fd->loop.step, 0))
5866 {
5867 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5868 if (step == 1 || step == -1)
5869 cond_code = EQ_EXPR;
5870 }
5871
5872 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5873
5874 clauses = gimple_omp_for_clauses (fd->for_stmt);
5875 stmts = NULL;
5876 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5877 if (!gimple_seq_empty_p (stmts))
5878 {
5879 gimple_seq_add_seq (&stmts, *dlist);
5880 *dlist = stmts;
5881
5882 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
5883 vinit = fd->loop.n1;
5884 if (cond_code == EQ_EXPR
5885 && host_integerp (fd->loop.n2, 0)
5886 && ! integer_zerop (fd->loop.n2))
5887 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
5888
5889 /* Initialize the iterator variable, so that threads that don't execute
5890 any iterations don't execute the lastprivate clauses by accident. */
5891 gimplify_assign (fd->loop.v, vinit, body_p);
5892 }
5893 }
5894
5895
5896 /* Lower code for an OpenMP loop directive. */
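
/* Lowering does not build the actual loop; schematically the result is
   (illustrative sketch only):

	<input clause setup and pre-body>
	<header operands replaced by temporaries where needed>
	GIMPLE_OMP_FOR <clauses, header>
	  BODY
	GIMPLE_OMP_CONTINUE (V, V)
	<reductions>
	<dlist: destructors>
	GIMPLE_OMP_RETURN <nowait if present>

   all wrapped in a GIMPLE_BIND; the loop structure itself is built later
   by pass_expand_omp.  */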
5897
5898 static void
5899 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5900 {
5901 tree *rhs_p, block;
5902 struct omp_for_data fd;
5903 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
5904 gimple_seq omp_for_body, body, dlist, ilist;
5905 size_t i;
5906 struct gimplify_ctx gctx;
5907
5908 push_gimplify_context (&gctx);
5909
5910 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
5911 lower_omp (gimple_omp_body (stmt), ctx);
5912
5913 block = make_node (BLOCK);
5914 new_stmt = gimple_build_bind (NULL, NULL, block);
5915
5916 /* Move the declarations of any temporaries in the loop body into the
5917 enclosing bind before the loop body goes away. */
5918 omp_for_body = gimple_omp_body (stmt);
5919 if (!gimple_seq_empty_p (omp_for_body)
5920 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
5921 {
5922 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
5923 gimple_bind_append_vars (new_stmt, vars);
5924 }
5925
5926 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
5927 ilist = NULL;
5928 dlist = NULL;
5929 body = NULL;
5930 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
5931 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
5932
5933 /* Lower the header expressions. At this point, we can assume that
5934 the header is of the form:
5935
5936 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
5937
5938 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
5939 using the .omp_data_s mapping, if needed. */
5940 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
5941 {
5942 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
5943 if (!is_gimple_min_invariant (*rhs_p))
5944 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
5945
5946 rhs_p = gimple_omp_for_final_ptr (stmt, i);
5947 if (!is_gimple_min_invariant (*rhs_p))
5948 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
5949
5950 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
5951 if (!is_gimple_min_invariant (*rhs_p))
5952 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
5953 }
5954
5955 /* Once lowered, extract the bounds and clauses. */
5956 extract_omp_for_data (stmt, &fd, NULL);
5957
5958 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
5959
5960 gimple_seq_add_stmt (&body, stmt);
5961 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
5962
5963 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
5964 fd.loop.v));
5965
5966 /* After the loop, add exit clauses. */
5967 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
5968 gimple_seq_add_seq (&body, dlist);
5969
5970 body = maybe_catch_exception (body);
5971
5972 /* Region exit marker goes at the end of the loop body. */
5973 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
5974
5975 pop_gimplify_context (new_stmt);
5976
5977 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5978 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
5979 if (BLOCK_VARS (block))
5980 TREE_USED (block) = 1;
5981
5982 gimple_bind_set_body (new_stmt, body);
5983 gimple_omp_set_body (stmt, NULL);
5984 gimple_omp_for_set_pre_body (stmt, NULL);
5985 gsi_replace (gsi_p, new_stmt, true);
5986 }
5987
5988 /* Callback for walk_stmts. Check whether the parallel body consists
5989 of a single GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS statement. */
5990
5991 static tree
5992 check_combined_parallel (gimple_stmt_iterator *gsi_p,
5993 bool *handled_ops_p,
5994 struct walk_stmt_info *wi)
5995 {
5996 int *info = (int *) wi->info;
5997 gimple stmt = gsi_stmt (*gsi_p);
5998
5999 *handled_ops_p = true;
6000 switch (gimple_code (stmt))
6001 {
6002 WALK_SUBSTMTS;
6003
6004 case GIMPLE_OMP_FOR:
6005 case GIMPLE_OMP_SECTIONS:
6006 *info = *info == 0 ? 1 : -1;
6007 break;
6008 default:
6009 *info = -1;
6010 break;
6011 }
6012 return NULL;
6013 }
6014
6015 struct omp_taskcopy_context
6016 {
6017 /* This field must be at the beginning, as we do "inheritance": Some
6018 callback functions for tree-inline.c (e.g., omp_copy_decl)
6019 receive a copy_body_data pointer that is up-casted to an
6020 omp_context pointer. */
6021 copy_body_data cb;
6022 omp_context *ctx;
6023 };
6024
6025 static tree
6026 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6027 {
6028 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6029
6030 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6031 return create_tmp_var (TREE_TYPE (var), NULL);
6032
6033 return var;
6034 }
6035
6036 static tree
6037 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6038 {
6039 tree name, new_fields = NULL, type, f;
6040
6041 type = lang_hooks.types.make_type (RECORD_TYPE);
6042 name = DECL_NAME (TYPE_NAME (orig_type));
6043 name = build_decl (TYPE_DECL, name, type);
6044 TYPE_NAME (type) = name;
6045
6046 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6047 {
6048 tree new_f = copy_node (f);
6049 DECL_CONTEXT (new_f) = type;
6050 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6051 TREE_CHAIN (new_f) = new_fields;
6052 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6053 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6054 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6055 &tcctx->cb, NULL);
6056 new_fields = new_f;
6057 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6058 }
6059 TYPE_FIELDS (type) = nreverse (new_fields);
6060 layout_type (type);
6061 return type;
6062 }
6063
6064 /* Create task copyfn. */
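
/* The generated copy function copies data from the structure set up by the
   thread that encountered the task (SRC, the second argument) into the
   structure passed on to the task body (DST, the first argument).  A rough
   sketch, with made-up field names:

	task_copyfn (dst, src)
	{
	  dst->shared_var_ptr = src->shared_var_ptr;
	  dst->firstpriv = src->firstpriv;	(copy constructed if needed)
	  dst->vla_ptr = &dst->vla_data;	(VLA firstprivates, last pass)
	}
   */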
6065
6066 static void
6067 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6068 {
6069 struct function *child_cfun;
6070 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6071 tree record_type, srecord_type, bind, list;
6072 bool record_needs_remap = false, srecord_needs_remap = false;
6073 splay_tree_node n;
6074 struct omp_taskcopy_context tcctx;
6075 struct gimplify_ctx gctx;
6076
6077 child_fn = gimple_omp_task_copy_fn (task_stmt);
6078 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6079 gcc_assert (child_cfun->cfg == NULL);
6080 child_cfun->dont_save_pending_sizes_p = 1;
6081 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6082
6083 /* Reset DECL_CONTEXT on function arguments. */
6084 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6085 DECL_CONTEXT (t) = child_fn;
6086
6087 /* Populate the function. */
6088 push_gimplify_context (&gctx);
6089 current_function_decl = child_fn;
6090
6091 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6092 TREE_SIDE_EFFECTS (bind) = 1;
6093 list = NULL;
6094 DECL_SAVED_TREE (child_fn) = bind;
6095 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6096
6097 /* Remap src and dst argument types if needed. */
6098 record_type = ctx->record_type;
6099 srecord_type = ctx->srecord_type;
6100 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6101 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6102 {
6103 record_needs_remap = true;
6104 break;
6105 }
6106 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6107 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6108 {
6109 srecord_needs_remap = true;
6110 break;
6111 }
6112
6113 if (record_needs_remap || srecord_needs_remap)
6114 {
6115 memset (&tcctx, '\0', sizeof (tcctx));
6116 tcctx.cb.src_fn = ctx->cb.src_fn;
6117 tcctx.cb.dst_fn = child_fn;
6118 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6119 tcctx.cb.dst_node = tcctx.cb.src_node;
6120 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6121 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6122 tcctx.cb.eh_region = -1;
6123 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6124 tcctx.cb.decl_map = pointer_map_create ();
6125 tcctx.ctx = ctx;
6126
6127 if (record_needs_remap)
6128 record_type = task_copyfn_remap_type (&tcctx, record_type);
6129 if (srecord_needs_remap)
6130 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6131 }
6132 else
6133 tcctx.cb.decl_map = NULL;
6134
6135 push_cfun (child_cfun);
6136
6137 arg = DECL_ARGUMENTS (child_fn);
6138 TREE_TYPE (arg) = build_pointer_type (record_type);
6139 sarg = TREE_CHAIN (arg);
6140 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6141
6142 /* First pass: initialize temporaries used in record_type and srecord_type
6143 sizes and field offsets. */
6144 if (tcctx.cb.decl_map)
6145 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6146 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6147 {
6148 tree *p;
6149
6150 decl = OMP_CLAUSE_DECL (c);
6151 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6152 if (p == NULL)
6153 continue;
6154 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6155 sf = (tree) n->value;
6156 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6157 src = build_fold_indirect_ref (sarg);
6158 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6159 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6160 append_to_statement_list (t, &list);
6161 }
6162
6163 /* Second pass: copy shared var pointers and copy construct non-VLA
6164 firstprivate vars. */
6165 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6166 switch (OMP_CLAUSE_CODE (c))
6167 {
6168 case OMP_CLAUSE_SHARED:
6169 decl = OMP_CLAUSE_DECL (c);
6170 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6171 if (n == NULL)
6172 break;
6173 f = (tree) n->value;
6174 if (tcctx.cb.decl_map)
6175 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6176 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6177 sf = (tree) n->value;
6178 if (tcctx.cb.decl_map)
6179 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6180 src = build_fold_indirect_ref (sarg);
6181 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6182 dst = build_fold_indirect_ref (arg);
6183 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6184 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6185 append_to_statement_list (t, &list);
6186 break;
6187 case OMP_CLAUSE_FIRSTPRIVATE:
6188 decl = OMP_CLAUSE_DECL (c);
6189 if (is_variable_sized (decl))
6190 break;
6191 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6192 if (n == NULL)
6193 break;
6194 f = (tree) n->value;
6195 if (tcctx.cb.decl_map)
6196 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6197 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6198 if (n != NULL)
6199 {
6200 sf = (tree) n->value;
6201 if (tcctx.cb.decl_map)
6202 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6203 src = build_fold_indirect_ref (sarg);
6204 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6205 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6206 src = build_fold_indirect_ref (src);
6207 }
6208 else
6209 src = decl;
6210 dst = build_fold_indirect_ref (arg);
6211 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6212 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6213 append_to_statement_list (t, &list);
6214 break;
6215 case OMP_CLAUSE_PRIVATE:
6216 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6217 break;
6218 decl = OMP_CLAUSE_DECL (c);
6219 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6220 f = (tree) n->value;
6221 if (tcctx.cb.decl_map)
6222 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6223 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6224 if (n != NULL)
6225 {
6226 sf = (tree) n->value;
6227 if (tcctx.cb.decl_map)
6228 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6229 src = build_fold_indirect_ref (sarg);
6230 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6231 if (use_pointer_for_field (decl, NULL))
6232 src = build_fold_indirect_ref (src);
6233 }
6234 else
6235 src = decl;
6236 dst = build_fold_indirect_ref (arg);
6237 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6238 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6239 append_to_statement_list (t, &list);
6240 break;
6241 default:
6242 break;
6243 }
6244
6245 /* Last pass: handle VLA firstprivates. */
6246 if (tcctx.cb.decl_map)
6247 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6248 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6249 {
6250 tree ind, ptr, df;
6251
6252 decl = OMP_CLAUSE_DECL (c);
6253 if (!is_variable_sized (decl))
6254 continue;
6255 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6256 if (n == NULL)
6257 continue;
6258 f = (tree) n->value;
6259 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6260 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6261 ind = DECL_VALUE_EXPR (decl);
6262 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6263 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6264 n = splay_tree_lookup (ctx->sfield_map,
6265 (splay_tree_key) TREE_OPERAND (ind, 0));
6266 sf = (tree) n->value;
6267 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6268 src = build_fold_indirect_ref (sarg);
6269 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6270 src = build_fold_indirect_ref (src);
6271 dst = build_fold_indirect_ref (arg);
6272 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6273 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6274 append_to_statement_list (t, &list);
6275 n = splay_tree_lookup (ctx->field_map,
6276 (splay_tree_key) TREE_OPERAND (ind, 0));
6277 df = (tree) n->value;
6278 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6279 ptr = build_fold_indirect_ref (arg);
6280 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6281 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6282 build_fold_addr_expr (dst));
6283 append_to_statement_list (t, &list);
6284 }
6285
6286 t = build1 (RETURN_EXPR, void_type_node, NULL);
6287 append_to_statement_list (t, &list);
6288
6289 if (tcctx.cb.decl_map)
6290 pointer_map_destroy (tcctx.cb.decl_map);
6291 pop_gimplify_context (NULL);
6292 BIND_EXPR_BODY (bind) = list;
6293 pop_cfun ();
6294 current_function_decl = ctx->cb.src_fn;
6295 }
6296
6297 /* Lower the OpenMP parallel or task directive in the current statement
6298 in GSI_P. CTX holds context information for the directive. */
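
/* Roughly, a parallel such as

	#pragma omp parallel shared(a)
	  BODY;

   is lowered into (illustrative sketch; the field name in the sender
   record .omp_data_o is invented for the example, and fields are sent by
   value or by reference as appropriate):

	.omp_data_o.a = &a;			<send clauses>
	GIMPLE_OMP_PARALLEL <child fn, data arg .omp_data_o>
	  .omp_data_i = &.omp_data_o;
	  <input clause setup>
	  BODY
	  <reductions>
	  GIMPLE_OMP_RETURN
	<copy-back clauses, if any>

   The body is outlined into the child function later, by pass_expand_omp.  */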
6299
6300 static void
6301 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6302 {
6303 tree clauses;
6304 tree child_fn, t;
6305 gimple stmt = gsi_stmt (*gsi_p);
6306 gimple par_bind, bind;
6307 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6308 struct gimplify_ctx gctx;
6309
6310 clauses = gimple_omp_taskreg_clauses (stmt);
6311 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6312 par_body = gimple_bind_body (par_bind);
6313 child_fn = ctx->cb.dst_fn;
6314 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6315 && !gimple_omp_parallel_combined_p (stmt))
6316 {
6317 struct walk_stmt_info wi;
6318 int ws_num = 0;
6319
6320 memset (&wi, 0, sizeof (wi));
6321 wi.info = &ws_num;
6322 wi.val_only = true;
6323 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6324 if (ws_num == 1)
6325 gimple_omp_parallel_set_combined_p (stmt, true);
6326 }
6327 if (ctx->srecord_type)
6328 create_task_copyfn (stmt, ctx);
6329
6330 push_gimplify_context (&gctx);
6331
6332 par_olist = NULL;
6333 par_ilist = NULL;
6334 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6335 lower_omp (par_body, ctx);
6336 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6337 lower_reduction_clauses (clauses, &par_olist, ctx);
6338
6339 /* Declare all the variables created by mapping and the variables
6340 declared in the scope of the parallel body. */
6341 record_vars_into (ctx->block_vars, child_fn);
6342 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6343
6344 if (ctx->record_type)
6345 {
6346 ctx->sender_decl
6347 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6348 : ctx->record_type, ".omp_data_o");
6349 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6350 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6351 }
6352
6353 olist = NULL;
6354 ilist = NULL;
6355 lower_send_clauses (clauses, &ilist, &olist, ctx);
6356 lower_send_shared_vars (&ilist, &olist, ctx);
6357
6358 /* Once all the expansions are done, sequence all the different
6359 fragments inside gimple_omp_body. */
6360
6361 new_body = NULL;
6362
6363 if (ctx->record_type)
6364 {
6365 t = build_fold_addr_expr (ctx->sender_decl);
6366 /* fixup_child_record_type might have changed receiver_decl's type. */
6367 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
6368 gimple_seq_add_stmt (&new_body,
6369 gimple_build_assign (ctx->receiver_decl, t));
6370 }
6371
6372 gimple_seq_add_seq (&new_body, par_ilist);
6373 gimple_seq_add_seq (&new_body, par_body);
6374 gimple_seq_add_seq (&new_body, par_olist);
6375 new_body = maybe_catch_exception (new_body);
6376 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6377 gimple_omp_set_body (stmt, new_body);
6378
6379 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6380 gimple_bind_add_stmt (bind, stmt);
6381 if (ilist || olist)
6382 {
6383 gimple_seq_add_stmt (&ilist, bind);
6384 gimple_seq_add_seq (&ilist, olist);
6385 bind = gimple_build_bind (NULL, ilist, NULL);
6386 }
6387
6388 gsi_replace (gsi_p, bind, true);
6389
6390 pop_gimplify_context (NULL);
6391 }
6392
6393 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6394 regimplified. If DATA is non-NULL, lower_omp_1 is outside
6395 of any OpenMP context, but with task_shared_vars set. */
6396
6397 static tree
6398 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6399 void *data)
6400 {
6401 tree t = *tp;
6402
6403 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6404 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6405 return t;
6406
6407 if (task_shared_vars
6408 && DECL_P (t)
6409 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6410 return t;
6411
6412 /* If a global variable has been privatized, TREE_CONSTANT on
6413 ADDR_EXPR might be wrong. */
6414 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6415 recompute_tree_invariant_for_addr_expr (t);
6416
6417 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6418 return NULL_TREE;
6419 }
6420
6421 static void
6422 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6423 {
6424 gimple stmt = gsi_stmt (*gsi_p);
6425 struct walk_stmt_info wi;
6426
6427 if (gimple_has_location (stmt))
6428 input_location = gimple_location (stmt);
6429
6430 if (task_shared_vars)
6431 memset (&wi, '\0', sizeof (wi));
6432
6433 /* If we have issued syntax errors, avoid doing any heavy lifting.
6434 Just replace the OpenMP directives with a NOP to avoid
6435 confusing RTL expansion. */
6436 if (errorcount && is_gimple_omp (stmt))
6437 {
6438 gsi_replace (gsi_p, gimple_build_nop (), true);
6439 return;
6440 }
6441
6442 switch (gimple_code (stmt))
6443 {
6444 case GIMPLE_COND:
6445 if ((ctx || task_shared_vars)
6446 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6447 ctx ? NULL : &wi, NULL)
6448 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6449 ctx ? NULL : &wi, NULL)))
6450 gimple_regimplify_operands (stmt, gsi_p);
6451 break;
6452 case GIMPLE_CATCH:
6453 lower_omp (gimple_catch_handler (stmt), ctx);
6454 break;
6455 case GIMPLE_EH_FILTER:
6456 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6457 break;
6458 case GIMPLE_TRY:
6459 lower_omp (gimple_try_eval (stmt), ctx);
6460 lower_omp (gimple_try_cleanup (stmt), ctx);
6461 break;
6462 case GIMPLE_BIND:
6463 lower_omp (gimple_bind_body (stmt), ctx);
6464 break;
6465 case GIMPLE_OMP_PARALLEL:
6466 case GIMPLE_OMP_TASK:
6467 ctx = maybe_lookup_ctx (stmt);
6468 lower_omp_taskreg (gsi_p, ctx);
6469 break;
6470 case GIMPLE_OMP_FOR:
6471 ctx = maybe_lookup_ctx (stmt);
6472 gcc_assert (ctx);
6473 lower_omp_for (gsi_p, ctx);
6474 break;
6475 case GIMPLE_OMP_SECTIONS:
6476 ctx = maybe_lookup_ctx (stmt);
6477 gcc_assert (ctx);
6478 lower_omp_sections (gsi_p, ctx);
6479 break;
6480 case GIMPLE_OMP_SINGLE:
6481 ctx = maybe_lookup_ctx (stmt);
6482 gcc_assert (ctx);
6483 lower_omp_single (gsi_p, ctx);
6484 break;
6485 case GIMPLE_OMP_MASTER:
6486 ctx = maybe_lookup_ctx (stmt);
6487 gcc_assert (ctx);
6488 lower_omp_master (gsi_p, ctx);
6489 break;
6490 case GIMPLE_OMP_ORDERED:
6491 ctx = maybe_lookup_ctx (stmt);
6492 gcc_assert (ctx);
6493 lower_omp_ordered (gsi_p, ctx);
6494 break;
6495 case GIMPLE_OMP_CRITICAL:
6496 ctx = maybe_lookup_ctx (stmt);
6497 gcc_assert (ctx);
6498 lower_omp_critical (gsi_p, ctx);
6499 break;
6500 case GIMPLE_OMP_ATOMIC_LOAD:
6501 if ((ctx || task_shared_vars)
6502 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6503 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6504 gimple_regimplify_operands (stmt, gsi_p);
6505 break;
6506 default:
6507 if ((ctx || task_shared_vars)
6508 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6509 ctx ? NULL : &wi))
6510 gimple_regimplify_operands (stmt, gsi_p);
6511 break;
6512 }
6513 }
6514
6515 static void
6516 lower_omp (gimple_seq body, omp_context *ctx)
6517 {
6518 location_t saved_location = input_location;
6519 gimple_stmt_iterator gsi;
6520 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6521 lower_omp_1 (&gsi, ctx);
6522 input_location = saved_location;
6523 }
6524 \f
6525 /* Main entry point. */
6526
6527 static unsigned int
6528 execute_lower_omp (void)
6529 {
6530 gimple_seq body;
6531
6532 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6533 delete_omp_context);
6534
6535 body = gimple_body (current_function_decl);
6536 scan_omp (body, NULL);
6537 gcc_assert (taskreg_nesting_level == 0);
6538
6539 if (all_contexts->root)
6540 {
6541 struct gimplify_ctx gctx;
6542
6543 if (task_shared_vars)
6544 push_gimplify_context (&gctx);
6545 lower_omp (body, NULL);
6546 if (task_shared_vars)
6547 pop_gimplify_context (NULL);
6548 }
6549
6550 if (all_contexts)
6551 {
6552 splay_tree_delete (all_contexts);
6553 all_contexts = NULL;
6554 }
6555 BITMAP_FREE (task_shared_vars);
6556 return 0;
6557 }
6558
6559 static bool
6560 gate_lower_omp (void)
6561 {
6562 return flag_openmp != 0;
6563 }
6564
6565 struct gimple_opt_pass pass_lower_omp =
6566 {
6567 {
6568 GIMPLE_PASS,
6569 "omplower", /* name */
6570 gate_lower_omp, /* gate */
6571 execute_lower_omp, /* execute */
6572 NULL, /* sub */
6573 NULL, /* next */
6574 0, /* static_pass_number */
6575 0, /* tv_id */
6576 PROP_gimple_any, /* properties_required */
6577 PROP_gimple_lomp, /* properties_provided */
6578 0, /* properties_destroyed */
6579 0, /* todo_flags_start */
6580 TODO_dump_func /* todo_flags_finish */
6581 }
6582 };
6583 \f
6584 /* The following is a utility to diagnose OpenMP structured block violations.
6585 It is not part of the "omplower" pass, as that's invoked too late. It
6586 should be invoked by the respective front ends after gimplification. */
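
/* For example, a jump out of a structured block such as

	#pragma omp parallel
	  {
	    goto fail;
	  }
	fail:;

   is diagnosed here with "invalid branch to/from an OpenMP structured
   block", while a jump into a block from outside any OpenMP context is
   reported as "invalid entry to OpenMP structured block".  Illustrative
   example only.  */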
6587
6588 static splay_tree all_labels;
6589
6590 /* Check for mismatched contexts and generate an error if needed. Return
6591 true if an error is detected. */
6592
6593 static bool
6594 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6595 gimple branch_ctx, gimple label_ctx)
6596 {
6597 if (label_ctx == branch_ctx)
6598 return false;
6599
6600
6601 /*
6602 Previously we kept track of the label's entire context in diagnose_sb_[12]
6603 so we could traverse it and issue a correct "exit" or "enter" error
6604 message upon a structured block violation.
6605
6606 We built the context by tree_cons'ing the enclosing constructs into a
6607 list, but there is no easy counterpart in gimple tuples. It seems like
6608 far too much work for issuing exit/enter error messages. If someone
6609 really misses the distinct error messages... patches welcome.
6610 */
6611
6612 #if 0
6613 /* Try to avoid confusing the user by producing an error message
6614 with correct "exit" or "enter" verbiage. We prefer "exit"
6615 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6616 if (branch_ctx == NULL)
6617 exit_p = false;
6618 else
6619 {
6620 while (label_ctx)
6621 {
6622 if (TREE_VALUE (label_ctx) == branch_ctx)
6623 {
6624 exit_p = false;
6625 break;
6626 }
6627 label_ctx = TREE_CHAIN (label_ctx);
6628 }
6629 }
6630
6631 if (exit_p)
6632 error ("invalid exit from OpenMP structured block");
6633 else
6634 error ("invalid entry to OpenMP structured block");
6635 #endif
6636
6637 /* If it's obvious we have an invalid entry, be specific about the error. */
6638 if (branch_ctx == NULL)
6639 error ("invalid entry to OpenMP structured block");
6640 else
6641 /* Otherwise, be vague and lazy, but efficient. */
6642 error ("invalid branch to/from an OpenMP structured block");
6643
6644 gsi_replace (gsi_p, gimple_build_nop (), false);
6645 return true;
6646 }
6647
6648 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6649 where each label is found. */
6650
6651 static tree
6652 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6653 struct walk_stmt_info *wi)
6654 {
6655 gimple context = (gimple) wi->info;
6656 gimple inner_context;
6657 gimple stmt = gsi_stmt (*gsi_p);
6658
6659 *handled_ops_p = true;
6660
6661 switch (gimple_code (stmt))
6662 {
6663 WALK_SUBSTMTS;
6664
6665 case GIMPLE_OMP_PARALLEL:
6666 case GIMPLE_OMP_TASK:
6667 case GIMPLE_OMP_SECTIONS:
6668 case GIMPLE_OMP_SINGLE:
6669 case GIMPLE_OMP_SECTION:
6670 case GIMPLE_OMP_MASTER:
6671 case GIMPLE_OMP_ORDERED:
6672 case GIMPLE_OMP_CRITICAL:
6673 /* The minimal context here is just the current OMP construct. */
6674 inner_context = stmt;
6675 wi->info = inner_context;
6676 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6677 wi->info = context;
6678 break;
6679
6680 case GIMPLE_OMP_FOR:
6681 inner_context = stmt;
6682 wi->info = inner_context;
6683 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6684 walk them. */
6685 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6686 diagnose_sb_1, NULL, wi);
6687 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6688 wi->info = context;
6689 break;
6690
6691 case GIMPLE_LABEL:
6692 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6693 (splay_tree_value) context);
6694 break;
6695
6696 default:
6697 break;
6698 }
6699
6700 return NULL_TREE;
6701 }
6702
6703 /* Pass 2: Check each branch and see if its context differs from that of
6704 the destination label's context. */
6705
6706 static tree
6707 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6708 struct walk_stmt_info *wi)
6709 {
6710 gimple context = (gimple) wi->info;
6711 splay_tree_node n;
6712 gimple stmt = gsi_stmt (*gsi_p);
6713
6714 *handled_ops_p = true;
6715
6716 switch (gimple_code (stmt))
6717 {
6718 WALK_SUBSTMTS;
6719
6720 case GIMPLE_OMP_PARALLEL:
6721 case GIMPLE_OMP_TASK:
6722 case GIMPLE_OMP_SECTIONS:
6723 case GIMPLE_OMP_SINGLE:
6724 case GIMPLE_OMP_SECTION:
6725 case GIMPLE_OMP_MASTER:
6726 case GIMPLE_OMP_ORDERED:
6727 case GIMPLE_OMP_CRITICAL:
6728 wi->info = stmt;
6729 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6730 wi->info = context;
6731 break;
6732
6733 case GIMPLE_OMP_FOR:
6734 wi->info = stmt;
6735 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6736 walk them. */
6737 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6738 diagnose_sb_2, NULL, wi);
6739 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6740 wi->info = context;
6741 break;
6742
6743 case GIMPLE_GOTO:
6744 {
6745 tree lab = gimple_goto_dest (stmt);
6746 if (TREE_CODE (lab) != LABEL_DECL)
6747 break;
6748
6749 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6750 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6751 }
6752 break;
6753
6754 case GIMPLE_SWITCH:
6755 {
6756 unsigned int i;
6757 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6758 {
6759 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6760 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6761 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6762 break;
6763 }
6764 }
6765 break;
6766
6767 case GIMPLE_RETURN:
6768 diagnose_sb_0 (gsi_p, context, NULL);
6769 break;
6770
6771 default:
6772 break;
6773 }
6774
6775 return NULL_TREE;
6776 }
6777
6778 void
6779 diagnose_omp_structured_block_errors (tree fndecl)
6780 {
6781 tree save_current = current_function_decl;
6782 struct walk_stmt_info wi;
6783 struct function *old_cfun = cfun;
6784 gimple_seq body = gimple_body (fndecl);
6785
6786 current_function_decl = fndecl;
6787 set_cfun (DECL_STRUCT_FUNCTION (fndecl));
6788
6789 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6790
6791 memset (&wi, 0, sizeof (wi));
6792 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6793
6794 memset (&wi, 0, sizeof (wi));
6795 wi.want_locations = true;
6796 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6797
6798 splay_tree_delete (all_labels);
6799 all_labels = NULL;
6800
6801 set_cfun (old_cfun);
6802 current_function_decl = save_current;
6803 }
6804
6805 #include "gt-omp-low.h"