omp-low.c (create_omp_child_function): Set DECL_CONTEXT for DECL.
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic.h"
35 #include "tree-flow.h"
36 #include "timevar.h"
37 #include "flags.h"
38 #include "function.h"
39 #include "expr.h"
40 #include "toplev.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
47
48
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
55
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
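/* As an illustrative sketch (the exact generated names and argument
   lists are produced later by this pass and by pass_expand_omp, so
   they are only an approximation), a construct such as

	#pragma omp parallel shared (a)
	  a++;

   is outlined into a child function taking a pointer to a data-sharing
   record, and the directive itself becomes calls into libgomp:

	static void foo._omp_fn.0 (struct .omp_data_s *.omp_data_i)
	{
	  .omp_data_i->a = .omp_data_i->a + 1;
	}

	.omp_data_o.a = a;
	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();
	a = .omp_data_o.a;  */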
59
60 /* Context structure. Used to store information about each OpenMP
61 directive (parallel, task, workshare, etc.) encountered in the code. */
62
63 typedef struct omp_context
64 {
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
70
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
74
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
81
82 /* These are used just by task contexts, if a task firstprivate fn is
83 needed. srecord_type is used to communicate from the thread
84 that encountered the task construct to the task firstprivate fn;
85 record_type is allocated by GOMP_task, initialized by the task
86 firstprivate fn and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
89
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
93
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
97
98 /* Nesting depth of this context. Used to beautify error messages for
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
102
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
106
107
108 struct omp_for_data_loop
109 {
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
112 };
113
114 /* A structure describing the main elements of a parallel loop. */
115
116 struct omp_for_data
117 {
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
126 };
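/* For example (a sketch; the fields are filled in by
   extract_omp_for_data below), a single non-collapsed loop

	#pragma omp for
	for (i = 0; i < n; i += 4)
	  ...

   is described by fd.loop.v == i, fd.loop.n1 == 0, fd.loop.n2 == n,
   fd.loop.step == 4, fd.loop.cond_code == LT_EXPR and fd.collapse == 1,
   with iter_type, sched_kind and chunk_size taken from the clauses.  */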
127
128
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
133
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
136
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
145
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
147
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
150 {
151 struct walk_stmt_info wi;
152
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
156
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
158 }
159
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
163
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
165
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
168 {
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
172
173 return NULL_TREE;
174 }
175
176 /* Return true if CTX is for an omp parallel. */
177
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
180 {
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
182 }
183
184
185 /* Return true if CTX is for an omp task. */
186
187 static inline bool
188 is_task_ctx (omp_context *ctx)
189 {
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
191 }
192
193
194 /* Return true if CTX is for an omp parallel or omp task. */
195
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
198 {
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
201 }
202
203
204 /* Return true if REGION is a combined parallel+workshare region. */
205
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
208 {
209 return region->is_combined_parallel;
210 }
211
212
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
215
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
219 {
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225
226 fd->for_stmt = for_stmt;
227 fd->pre = NULL;
228 fd->collapse = gimple_omp_for_collapse (for_stmt);
229 if (fd->collapse > 1)
230 fd->loops = loops;
231 else
232 fd->loops = &fd->loop;
233
234 fd->have_nowait = fd->have_ordered = false;
235 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
236 fd->chunk_size = NULL_TREE;
237 collapse_iter = NULL;
238 collapse_count = NULL;
239
240 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
241 switch (OMP_CLAUSE_CODE (t))
242 {
243 case OMP_CLAUSE_NOWAIT:
244 fd->have_nowait = true;
245 break;
246 case OMP_CLAUSE_ORDERED:
247 fd->have_ordered = true;
248 break;
249 case OMP_CLAUSE_SCHEDULE:
250 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
251 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
252 break;
253 case OMP_CLAUSE_COLLAPSE:
254 if (fd->collapse > 1)
255 {
256 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
257 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
258 }
259 default:
260 break;
261 }
262
263 /* FIXME: for now map schedule(auto) to schedule(static).
264 There should be analysis to determine whether all iterations
265 are approximately the same amount of work (then schedule(static)
266 is best) or if it varies (then schedule(dynamic,N) is better). */
267 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
268 {
269 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
270 gcc_assert (fd->chunk_size == NULL);
271 }
272 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
273 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
274 gcc_assert (fd->chunk_size == NULL);
275 else if (fd->chunk_size == NULL)
276 {
277 /* We only need to compute a default chunk size for ordered
278 static loops and dynamic loops. */
279 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
280 || fd->have_ordered
281 || fd->collapse > 1)
282 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
283 ? integer_zero_node : integer_one_node;
284 }
285
286 for (i = 0; i < fd->collapse; i++)
287 {
288 if (fd->collapse == 1)
289 loop = &fd->loop;
290 else if (loops != NULL)
291 loop = loops + i;
292 else
293 loop = &dummy_loop;
294
295
296 loop->v = gimple_omp_for_index (for_stmt, i);
297 gcc_assert (SSA_VAR_P (loop->v));
298 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
299 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
300 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
301 loop->n1 = gimple_omp_for_initial (for_stmt, i);
302
303 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
304 loop->n2 = gimple_omp_for_final (for_stmt, i);
305 switch (loop->cond_code)
306 {
307 case LT_EXPR:
308 case GT_EXPR:
309 break;
310 case LE_EXPR:
311 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
312 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
313 loop->n2, size_one_node);
314 else
315 loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
316 build_int_cst (TREE_TYPE (loop->n2), 1));
317 loop->cond_code = LT_EXPR;
318 break;
319 case GE_EXPR:
320 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
321 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
322 loop->n2, size_int (-1));
323 else
324 loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
325 build_int_cst (TREE_TYPE (loop->n2), 1));
326 loop->cond_code = GT_EXPR;
327 break;
328 default:
329 gcc_unreachable ();
330 }
331
332 t = gimple_omp_for_incr (for_stmt, i);
333 gcc_assert (TREE_OPERAND (t, 0) == var);
334 switch (TREE_CODE (t))
335 {
336 case PLUS_EXPR:
337 case POINTER_PLUS_EXPR:
338 loop->step = TREE_OPERAND (t, 1);
339 break;
340 case MINUS_EXPR:
341 loop->step = TREE_OPERAND (t, 1);
342 loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
343 loop->step);
344 break;
345 default:
346 gcc_unreachable ();
347 }
348
349 if (iter_type != long_long_unsigned_type_node)
350 {
351 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
352 iter_type = long_long_unsigned_type_node;
353 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
354 && TYPE_PRECISION (TREE_TYPE (loop->v))
355 >= TYPE_PRECISION (iter_type))
356 {
357 tree n;
358
359 if (loop->cond_code == LT_EXPR)
360 n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
361 loop->n2, loop->step);
362 else
363 n = loop->n1;
364 if (TREE_CODE (n) != INTEGER_CST
365 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
366 iter_type = long_long_unsigned_type_node;
367 }
368 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
369 > TYPE_PRECISION (iter_type))
370 {
371 tree n1, n2;
372
373 if (loop->cond_code == LT_EXPR)
374 {
375 n1 = loop->n1;
376 n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
377 loop->n2, loop->step);
378 }
379 else
380 {
381 n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
383 n2 = loop->n1;
384 }
385 if (TREE_CODE (n1) != INTEGER_CST
386 || TREE_CODE (n2) != INTEGER_CST
387 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
388 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
389 iter_type = long_long_unsigned_type_node;
390 }
391 }
392
393 if (collapse_count && *collapse_count == NULL)
394 {
395 if ((i == 0 || count != NULL_TREE)
396 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
397 && TREE_CONSTANT (loop->n1)
398 && TREE_CONSTANT (loop->n2)
399 && TREE_CODE (loop->step) == INTEGER_CST)
400 {
401 tree itype = TREE_TYPE (loop->v);
402
403 if (POINTER_TYPE_P (itype))
404 itype
405 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
406 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
407 t = fold_build2 (PLUS_EXPR, itype,
408 fold_convert (itype, loop->step), t);
409 t = fold_build2 (PLUS_EXPR, itype, t,
410 fold_convert (itype, loop->n2));
411 t = fold_build2 (MINUS_EXPR, itype, t,
412 fold_convert (itype, loop->n1));
413 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
414 t = fold_build2 (TRUNC_DIV_EXPR, itype,
415 fold_build1 (NEGATE_EXPR, itype, t),
416 fold_build1 (NEGATE_EXPR, itype,
417 fold_convert (itype,
418 loop->step)));
419 else
420 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
421 fold_convert (itype, loop->step));
422 t = fold_convert (long_long_unsigned_type_node, t);
423 if (count != NULL_TREE)
424 count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
425 count, t);
426 else
427 count = t;
428 if (TREE_CODE (count) != INTEGER_CST)
429 count = NULL_TREE;
430 }
431 else
432 count = NULL_TREE;
433 }
434 }
435
436 if (count)
437 {
438 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
439 iter_type = long_long_unsigned_type_node;
440 else
441 iter_type = long_integer_type_node;
442 }
443 else if (collapse_iter && *collapse_iter != NULL)
444 iter_type = TREE_TYPE (*collapse_iter);
445 fd->iter_type = iter_type;
446 if (collapse_iter && *collapse_iter == NULL)
447 *collapse_iter = create_tmp_var (iter_type, ".iter");
448 if (collapse_count && *collapse_count == NULL)
449 {
450 if (count)
451 *collapse_count = fold_convert (iter_type, count);
452 else
453 *collapse_count = create_tmp_var (iter_type, ".count");
454 }
455
456 if (fd->collapse > 1)
457 {
458 fd->loop.v = *collapse_iter;
459 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
460 fd->loop.n2 = *collapse_count;
461 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
462 fd->loop.cond_code = LT_EXPR;
463 }
464 }
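/* Note that extract_omp_for_data canonicalizes the loop: a <= bound
   becomes < bound + 1, a >= bound becomes > bound - 1, and a MINUS_EXPR
   increment is folded into a negated step.  So, as a sketch,

	for (i = 0; i <= N; i += 2)

   is recorded as n1 == 0, n2 == N + 1, step == 2, cond_code == LT_EXPR.  */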
465
466
467 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
468 is the immediate dominator of PAR_ENTRY_BB, return true if there
469 are no data dependencies that would prevent expanding the parallel
470 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
471
472 When expanding a combined parallel+workshare region, the call to
473 the child function may need additional arguments in the case of
474 GIMPLE_OMP_FOR regions. In some cases, these arguments are
475 computed out of variables passed in from the parent to the child
476 via 'struct .omp_data_s'. For instance:
477
478 #pragma omp parallel for schedule (guided, i * 4)
479 for (j ...)
480
481 Is lowered into:
482
483 # BLOCK 2 (PAR_ENTRY_BB)
484 .omp_data_o.i = i;
485 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
486
487 # BLOCK 3 (WS_ENTRY_BB)
488 .omp_data_i = &.omp_data_o;
489 D.1667 = .omp_data_i->i;
490 D.1598 = D.1667 * 4;
491 #pragma omp for schedule (guided, D.1598)
492
493 When we outline the parallel region, the call to the child function
494 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
495 that value is computed *after* the call site. So, in principle we
496 cannot do the transformation.
497
498 To see whether the code in WS_ENTRY_BB blocks the combined
499 parallel+workshare call, we collect all the variables used in the
500 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
501 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
502 call.
503
504 FIXME. If we had the SSA form built at this point, we could merely
505 hoist the code in block 3 into block 2 and be done with it. But at
506 this point we don't have dataflow information and though we could
507 hack something up here, it is really not worth the aggravation. */
508
509 static bool
510 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
511 {
512 struct omp_for_data fd;
513 gimple par_stmt, ws_stmt;
514
515 par_stmt = last_stmt (par_entry_bb);
516 ws_stmt = last_stmt (ws_entry_bb);
517
518 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
519 return true;
520
521 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
522
523 extract_omp_for_data (ws_stmt, &fd, NULL);
524
525 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
526 return false;
527 if (fd.iter_type != long_integer_type_node)
528 return false;
529
530 /* FIXME. We give up too easily here. If any of these arguments
531 are not constants, they will likely involve variables that have
532 been mapped into fields of .omp_data_s for sharing with the child
533 function. With appropriate data flow, it would be possible to
534 see through this. */
535 if (!is_gimple_min_invariant (fd.loop.n1)
536 || !is_gimple_min_invariant (fd.loop.n2)
537 || !is_gimple_min_invariant (fd.loop.step)
538 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
539 return false;
540
541 return true;
542 }
543
544
545 /* Collect additional arguments needed to emit a combined
546 parallel+workshare call. WS_STMT is the workshare directive being
547 expanded. */
548
549 static tree
550 get_ws_args_for (gimple ws_stmt)
551 {
552 tree t;
553
554 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
555 {
556 struct omp_for_data fd;
557 tree ws_args;
558
559 extract_omp_for_data (ws_stmt, &fd, NULL);
560
561 ws_args = NULL_TREE;
562 if (fd.chunk_size)
563 {
564 t = fold_convert (long_integer_type_node, fd.chunk_size);
565 ws_args = tree_cons (NULL, t, ws_args);
566 }
567
568 t = fold_convert (long_integer_type_node, fd.loop.step);
569 ws_args = tree_cons (NULL, t, ws_args);
570
571 t = fold_convert (long_integer_type_node, fd.loop.n2);
572 ws_args = tree_cons (NULL, t, ws_args);
573
574 t = fold_convert (long_integer_type_node, fd.loop.n1);
575 ws_args = tree_cons (NULL, t, ws_args);
576
577 return ws_args;
578 }
579 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
580 {
581 /* Number of sections is equal to the number of edges from the
582 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
583 the exit of the sections region. */
584 basic_block bb = single_succ (gimple_bb (ws_stmt));
585 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
586 t = tree_cons (NULL, t, NULL);
587 return t;
588 }
589
590 gcc_unreachable ();
591 }
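/* Because tree_cons prepends, the ws_args list built above for a
   GIMPLE_OMP_FOR ends up in the order n1, n2, step [, chunk_size],
   which is (a sketch of) the trailing argument order expected by the
   combined GOMP_parallel_loop_*_start entry points; for
   GIMPLE_OMP_SECTIONS the single argument is the section count.  */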
592
593
594 /* Discover whether REGION is a combined parallel+workshare region. */
595
596 static void
597 determine_parallel_type (struct omp_region *region)
598 {
599 basic_block par_entry_bb, par_exit_bb;
600 basic_block ws_entry_bb, ws_exit_bb;
601
602 if (region == NULL || region->inner == NULL
603 || region->exit == NULL || region->inner->exit == NULL
604 || region->inner->cont == NULL)
605 return;
606
607 /* We only support parallel+for and parallel+sections. */
608 if (region->type != GIMPLE_OMP_PARALLEL
609 || (region->inner->type != GIMPLE_OMP_FOR
610 && region->inner->type != GIMPLE_OMP_SECTIONS))
611 return;
612
613 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
614 WS_EXIT_BB -> PAR_EXIT_BB. */
615 par_entry_bb = region->entry;
616 par_exit_bb = region->exit;
617 ws_entry_bb = region->inner->entry;
618 ws_exit_bb = region->inner->exit;
619
620 if (single_succ (par_entry_bb) == ws_entry_bb
621 && single_succ (ws_exit_bb) == par_exit_bb
622 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
623 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
624 || (last_and_only_stmt (ws_entry_bb)
625 && last_and_only_stmt (par_exit_bb))))
626 {
627 gimple ws_stmt = last_stmt (ws_entry_bb);
628
629 if (region->inner->type == GIMPLE_OMP_FOR)
630 {
631 /* If this is a combined parallel loop, we need to determine
632 whether or not to use the combined library calls. There
633 are two cases where we do not apply the transformation:
634 static loops and any kind of ordered loop. In the first
635 case, we already open code the loop so there is no need
636 to do anything else. In the latter case, the combined
637 parallel loop call would still need extra synchronization
638 to implement ordered semantics, so there would not be any
639 gain in using the combined call. */
640 tree clauses = gimple_omp_for_clauses (ws_stmt);
641 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
642 if (c == NULL
643 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
644 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
645 {
646 region->is_combined_parallel = false;
647 region->inner->is_combined_parallel = false;
648 return;
649 }
650 }
651
652 region->is_combined_parallel = true;
653 region->inner->is_combined_parallel = true;
654 region->ws_args = get_ws_args_for (ws_stmt);
655 }
656 }
657
658
659 /* Return true if EXPR is variable sized. */
660
661 static inline bool
662 is_variable_sized (const_tree expr)
663 {
664 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
665 }
666
667 /* Return true if DECL is a reference type. */
668
669 static inline bool
670 is_reference (tree decl)
671 {
672 return lang_hooks.decls.omp_privatize_by_reference (decl);
673 }
674
675 /* Lookup variables in the decl or field splay trees. The "maybe" form
676 allows the variable to not have been entered; otherwise we
677 assert that the variable must have been entered. */
678
679 static inline tree
680 lookup_decl (tree var, omp_context *ctx)
681 {
682 tree *n;
683 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
684 return *n;
685 }
686
687 static inline tree
688 maybe_lookup_decl (const_tree var, omp_context *ctx)
689 {
690 tree *n;
691 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
692 return n ? *n : NULL_TREE;
693 }
694
695 static inline tree
696 lookup_field (tree var, omp_context *ctx)
697 {
698 splay_tree_node n;
699 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
700 return (tree) n->value;
701 }
702
703 static inline tree
704 lookup_sfield (tree var, omp_context *ctx)
705 {
706 splay_tree_node n;
707 n = splay_tree_lookup (ctx->sfield_map
708 ? ctx->sfield_map : ctx->field_map,
709 (splay_tree_key) var);
710 return (tree) n->value;
711 }
712
713 static inline tree
714 maybe_lookup_field (tree var, omp_context *ctx)
715 {
716 splay_tree_node n;
717 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
718 return n ? (tree) n->value : NULL_TREE;
719 }
720
721 /* Return true if DECL should be copied by pointer. SHARED_CTX is
722 the parallel context if DECL is to be shared. */
723
724 static bool
725 use_pointer_for_field (tree decl, omp_context *shared_ctx)
726 {
727 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
728 return true;
729
730 /* We can only use copy-in/copy-out semantics for shared variables
731 when we know the value is not accessible from an outer scope. */
732 if (shared_ctx)
733 {
734 /* ??? Trivially accessible from anywhere. But why would we even
735 be passing an address in this case? Should we simply assert
736 this to be false, or should we have a cleanup pass that removes
737 these from the list of mappings? */
738 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
739 return true;
740
741 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
742 without analyzing the expression whether or not its location
743 is accessible to anyone else. In the case of nested parallel
744 regions it certainly may be. */
745 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
746 return true;
747
748 /* Do not use copy-in/copy-out for variables that have their
749 address taken. */
750 if (TREE_ADDRESSABLE (decl))
751 return true;
752
753 /* Disallow copy-in/out in nested parallel if
754 decl is shared in outer parallel, otherwise
755 each thread could store the shared variable
756 in its own copy-in location, making the
757 variable no longer really shared. */
758 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
759 {
760 omp_context *up;
761
762 for (up = shared_ctx->outer; up; up = up->outer)
763 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
764 break;
765
766 if (up)
767 {
768 tree c;
769
770 for (c = gimple_omp_taskreg_clauses (up->stmt);
771 c; c = OMP_CLAUSE_CHAIN (c))
772 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
773 && OMP_CLAUSE_DECL (c) == decl)
774 break;
775
776 if (c)
777 return true;
778 }
779 }
780
781 /* For tasks avoid using copy-in/out, unless they are readonly
782 (in which case just copy-in is used). As tasks can be
783 deferred or executed in a different thread, the task need not
784 have terminated by the time GOMP_task returns. */
785 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
786 {
787 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
788 if (is_gimple_reg (outer))
789 {
790 /* Taking address of OUTER in lower_send_shared_vars
791 might need regimplification of everything that uses the
792 variable. */
793 if (!task_shared_vars)
794 task_shared_vars = BITMAP_ALLOC (NULL);
795 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
796 TREE_ADDRESSABLE (outer) = 1;
797 }
798 return true;
799 }
800 }
801
802 return false;
803 }
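/* In short, a summary of the checks above: aggregates, static or
   external variables, addressable variables, variables with value
   exprs, variables already shared by an enclosing parallel, and
   non-readonly variables shared by a task are passed by pointer;
   everything else can use copy-in/copy-out through the .omp_data_s
   record.  */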
804
805 /* Create a new VAR_DECL and copy information from VAR to it. */
806
807 tree
808 copy_var_decl (tree var, tree name, tree type)
809 {
810 tree copy = build_decl (VAR_DECL, name, type);
811
812 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
813 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
814 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
815 DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
816 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
817 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
818 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
819 DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
820 TREE_USED (copy) = 1;
821 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
822
823 return copy;
824 }
825
826 /* Construct a new automatic decl similar to VAR. */
827
828 static tree
829 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
830 {
831 tree copy = copy_var_decl (var, name, type);
832
833 DECL_CONTEXT (copy) = current_function_decl;
834 TREE_CHAIN (copy) = ctx->block_vars;
835 ctx->block_vars = copy;
836
837 return copy;
838 }
839
840 static tree
841 omp_copy_decl_1 (tree var, omp_context *ctx)
842 {
843 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
844 }
845
846 /* Build tree nodes to access the field for VAR on the receiver side. */
847
848 static tree
849 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
850 {
851 tree x, field = lookup_field (var, ctx);
852
853 /* If the receiver record type was remapped in the child function,
854 remap the field into the new record type. */
855 x = maybe_lookup_field (field, ctx);
856 if (x != NULL)
857 field = x;
858
859 x = build_fold_indirect_ref (ctx->receiver_decl);
860 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
861 if (by_ref)
862 x = build_fold_indirect_ref (x);
863
864 return x;
865 }
866
867 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
868 of a parallel, this is a component reference; for workshare constructs
869 this is some variable. */
870
871 static tree
872 build_outer_var_ref (tree var, omp_context *ctx)
873 {
874 tree x;
875
876 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
877 x = var;
878 else if (is_variable_sized (var))
879 {
880 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
881 x = build_outer_var_ref (x, ctx);
882 x = build_fold_indirect_ref (x);
883 }
884 else if (is_taskreg_ctx (ctx))
885 {
886 bool by_ref = use_pointer_for_field (var, NULL);
887 x = build_receiver_ref (var, by_ref, ctx);
888 }
889 else if (ctx->outer)
890 x = lookup_decl (var, ctx->outer);
891 else if (is_reference (var))
892 /* This can happen with orphaned constructs. If var is reference, it is
893 possible it is shared and as such valid. */
894 x = var;
895 else
896 gcc_unreachable ();
897
898 if (is_reference (var))
899 x = build_fold_indirect_ref (x);
900
901 return x;
902 }
903
904 /* Build tree nodes to access the field for VAR on the sender side. */
905
906 static tree
907 build_sender_ref (tree var, omp_context *ctx)
908 {
909 tree field = lookup_sfield (var, ctx);
910 return build3 (COMPONENT_REF, TREE_TYPE (field),
911 ctx->sender_decl, field, NULL);
912 }
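/* So, roughly, for a variable x mapped into the communication record,
   build_receiver_ref yields something like (*.omp_data_i).x (with an
   extra dereference when the field holds a pointer), while
   build_sender_ref yields .omp_data_o.x on the sending side.  */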
913
914 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
915
916 static void
917 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
918 {
919 tree field, type, sfield = NULL_TREE;
920
921 gcc_assert ((mask & 1) == 0
922 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
923 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
924 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
925
926 type = TREE_TYPE (var);
927 if (by_ref)
928 type = build_pointer_type (type);
929 else if ((mask & 3) == 1 && is_reference (var))
930 type = TREE_TYPE (type);
931
932 field = build_decl (FIELD_DECL, DECL_NAME (var), type);
933
934 /* Remember what variable this field was created for. This has the
935 side effect of making dwarf2out ignore this member, so to keep the
936 debug information useful we clear it later in delete_omp_context. */
937 DECL_ABSTRACT_ORIGIN (field) = var;
938 if (type == TREE_TYPE (var))
939 {
940 DECL_ALIGN (field) = DECL_ALIGN (var);
941 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
942 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
943 }
944 else
945 DECL_ALIGN (field) = TYPE_ALIGN (type);
946
947 if ((mask & 3) == 3)
948 {
949 insert_field_into_struct (ctx->record_type, field);
950 if (ctx->srecord_type)
951 {
952 sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
953 DECL_ABSTRACT_ORIGIN (sfield) = var;
954 DECL_ALIGN (sfield) = DECL_ALIGN (field);
955 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
956 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
957 insert_field_into_struct (ctx->srecord_type, sfield);
958 }
959 }
960 else
961 {
962 if (ctx->srecord_type == NULL_TREE)
963 {
964 tree t;
965
966 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
967 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
968 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
969 {
970 sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
971 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
972 insert_field_into_struct (ctx->srecord_type, sfield);
973 splay_tree_insert (ctx->sfield_map,
974 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
975 (splay_tree_value) sfield);
976 }
977 }
978 sfield = field;
979 insert_field_into_struct ((mask & 1) ? ctx->record_type
980 : ctx->srecord_type, field);
981 }
982
983 if (mask & 1)
984 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
985 (splay_tree_value) field);
986 if ((mask & 2) && ctx->sfield_map)
987 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
988 (splay_tree_value) sfield);
989 }
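/* A note on the MASK argument above: bit 0 requests a field in
   ctx->record_type (what the receiving/child side reads), bit 1
   requests a field in ctx->srecord_type (what the task sender side
   writes), and 3 installs the field in both; keeping the two record
   types separate is only needed for task constructs that use a
   separate firstprivate copy function.  */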
990
991 static tree
992 install_var_local (tree var, omp_context *ctx)
993 {
994 tree new_var = omp_copy_decl_1 (var, ctx);
995 insert_decl_map (&ctx->cb, var, new_var);
996 return new_var;
997 }
998
999 /* Adjust the replacement for DECL in CTX for the new context. This means
1000 copying the DECL_VALUE_EXPR, and fixing up the type. */
1001
1002 static void
1003 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1004 {
1005 tree new_decl, size;
1006
1007 new_decl = lookup_decl (decl, ctx);
1008
1009 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1010
1011 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1012 && DECL_HAS_VALUE_EXPR_P (decl))
1013 {
1014 tree ve = DECL_VALUE_EXPR (decl);
1015 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1016 SET_DECL_VALUE_EXPR (new_decl, ve);
1017 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1018 }
1019
1020 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1021 {
1022 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1023 if (size == error_mark_node)
1024 size = TYPE_SIZE (TREE_TYPE (new_decl));
1025 DECL_SIZE (new_decl) = size;
1026
1027 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1028 if (size == error_mark_node)
1029 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1030 DECL_SIZE_UNIT (new_decl) = size;
1031 }
1032 }
1033
1034 /* The callback for remap_decl. Search all containing contexts for a
1035 mapping of the variable; this avoids having to duplicate the splay
1036 tree ahead of time. We know a mapping doesn't already exist in the
1037 given context. Create new mappings to implement default semantics. */
1038
1039 static tree
1040 omp_copy_decl (tree var, copy_body_data *cb)
1041 {
1042 omp_context *ctx = (omp_context *) cb;
1043 tree new_var;
1044
1045 if (TREE_CODE (var) == LABEL_DECL)
1046 {
1047 new_var = create_artificial_label ();
1048 DECL_CONTEXT (new_var) = current_function_decl;
1049 insert_decl_map (&ctx->cb, var, new_var);
1050 return new_var;
1051 }
1052
1053 while (!is_taskreg_ctx (ctx))
1054 {
1055 ctx = ctx->outer;
1056 if (ctx == NULL)
1057 return var;
1058 new_var = maybe_lookup_decl (var, ctx);
1059 if (new_var)
1060 return new_var;
1061 }
1062
1063 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1064 return var;
1065
1066 return error_mark_node;
1067 }
1068
1069
1070 /* Return the parallel region associated with STMT. */
1071
1072 /* Debugging dumps for parallel regions. */
1073 void dump_omp_region (FILE *, struct omp_region *, int);
1074 void debug_omp_region (struct omp_region *);
1075 void debug_all_omp_regions (void);
1076
1077 /* Dump the parallel region tree rooted at REGION. */
1078
1079 void
1080 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1081 {
1082 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1083 gimple_code_name[region->type]);
1084
1085 if (region->inner)
1086 dump_omp_region (file, region->inner, indent + 4);
1087
1088 if (region->cont)
1089 {
1090 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1091 region->cont->index);
1092 }
1093
1094 if (region->exit)
1095 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1096 region->exit->index);
1097 else
1098 fprintf (file, "%*s[no exit marker]\n", indent, "");
1099
1100 if (region->next)
1101 dump_omp_region (file, region->next, indent);
1102 }
1103
1104 void
1105 debug_omp_region (struct omp_region *region)
1106 {
1107 dump_omp_region (stderr, region, 0);
1108 }
1109
1110 void
1111 debug_all_omp_regions (void)
1112 {
1113 dump_omp_region (stderr, root_omp_region, 0);
1114 }
1115
1116
1117 /* Create a new parallel region starting at STMT inside region PARENT. */
1118
1119 struct omp_region *
1120 new_omp_region (basic_block bb, enum gimple_code type,
1121 struct omp_region *parent)
1122 {
1123 struct omp_region *region = XCNEW (struct omp_region);
1124
1125 region->outer = parent;
1126 region->entry = bb;
1127 region->type = type;
1128
1129 if (parent)
1130 {
1131 /* This is a nested region. Add it to the list of inner
1132 regions in PARENT. */
1133 region->next = parent->inner;
1134 parent->inner = region;
1135 }
1136 else
1137 {
1138 /* This is a toplevel region. Add it to the list of toplevel
1139 regions in ROOT_OMP_REGION. */
1140 region->next = root_omp_region;
1141 root_omp_region = region;
1142 }
1143
1144 return region;
1145 }
1146
1147 /* Release the memory associated with the region tree rooted at REGION. */
1148
1149 static void
1150 free_omp_region_1 (struct omp_region *region)
1151 {
1152 struct omp_region *i, *n;
1153
1154 for (i = region->inner; i ; i = n)
1155 {
1156 n = i->next;
1157 free_omp_region_1 (i);
1158 }
1159
1160 free (region);
1161 }
1162
1163 /* Release the memory for the entire omp region tree. */
1164
1165 void
1166 free_omp_regions (void)
1167 {
1168 struct omp_region *r, *n;
1169 for (r = root_omp_region; r ; r = n)
1170 {
1171 n = r->next;
1172 free_omp_region_1 (r);
1173 }
1174 root_omp_region = NULL;
1175 }
1176
1177
1178 /* Create a new context, with OUTER_CTX being the surrounding context. */
1179
1180 static omp_context *
1181 new_omp_context (gimple stmt, omp_context *outer_ctx)
1182 {
1183 omp_context *ctx = XCNEW (omp_context);
1184
1185 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1186 (splay_tree_value) ctx);
1187 ctx->stmt = stmt;
1188
1189 if (outer_ctx)
1190 {
1191 ctx->outer = outer_ctx;
1192 ctx->cb = outer_ctx->cb;
1193 ctx->cb.block = NULL;
1194 ctx->depth = outer_ctx->depth + 1;
1195 }
1196 else
1197 {
1198 ctx->cb.src_fn = current_function_decl;
1199 ctx->cb.dst_fn = current_function_decl;
1200 ctx->cb.src_node = cgraph_node (current_function_decl);
1201 ctx->cb.dst_node = ctx->cb.src_node;
1202 ctx->cb.src_cfun = cfun;
1203 ctx->cb.copy_decl = omp_copy_decl;
1204 ctx->cb.eh_region = -1;
1205 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1206 ctx->depth = 1;
1207 }
1208
1209 ctx->cb.decl_map = pointer_map_create ();
1210
1211 return ctx;
1212 }
1213
1214 static gimple_seq maybe_catch_exception (gimple_seq);
1215
1216 /* Finalize task copyfn. */
1217
1218 static void
1219 finalize_task_copyfn (gimple task_stmt)
1220 {
1221 struct function *child_cfun;
1222 tree child_fn, old_fn;
1223 gimple_seq seq, new_seq;
1224 gimple bind;
1225
1226 child_fn = gimple_omp_task_copy_fn (task_stmt);
1227 if (child_fn == NULL_TREE)
1228 return;
1229
1230 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1231
1232 /* Inform the callgraph about the new function. */
1233 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1234 = cfun->curr_properties;
1235
1236 old_fn = current_function_decl;
1237 push_cfun (child_cfun);
1238 current_function_decl = child_fn;
1239 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1240 seq = gimple_seq_alloc ();
1241 gimple_seq_add_stmt (&seq, bind);
1242 new_seq = maybe_catch_exception (seq);
1243 if (new_seq != seq)
1244 {
1245 bind = gimple_build_bind (NULL, new_seq, NULL);
1246 seq = gimple_seq_alloc ();
1247 gimple_seq_add_stmt (&seq, bind);
1248 }
1249 gimple_set_body (child_fn, seq);
1250 pop_cfun ();
1251 current_function_decl = old_fn;
1252
1253 cgraph_add_new_function (child_fn, false);
1254 }
1255
1256 /* Destroy an omp_context data structure. Called through the splay tree
1257 value delete callback. */
1258
1259 static void
1260 delete_omp_context (splay_tree_value value)
1261 {
1262 omp_context *ctx = (omp_context *) value;
1263
1264 pointer_map_destroy (ctx->cb.decl_map);
1265
1266 if (ctx->field_map)
1267 splay_tree_delete (ctx->field_map);
1268 if (ctx->sfield_map)
1269 splay_tree_delete (ctx->sfield_map);
1270
1271 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1272 it produces corrupt debug information. */
1273 if (ctx->record_type)
1274 {
1275 tree t;
1276 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1277 DECL_ABSTRACT_ORIGIN (t) = NULL;
1278 }
1279 if (ctx->srecord_type)
1280 {
1281 tree t;
1282 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1283 DECL_ABSTRACT_ORIGIN (t) = NULL;
1284 }
1285
1286 if (is_task_ctx (ctx))
1287 finalize_task_copyfn (ctx->stmt);
1288
1289 XDELETE (ctx);
1290 }
1291
1292 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1293 context. */
1294
1295 static void
1296 fixup_child_record_type (omp_context *ctx)
1297 {
1298 tree f, type = ctx->record_type;
1299
1300 /* ??? It isn't sufficient to just call remap_type here, because
1301 variably_modified_type_p doesn't work the way we expect for
1302 record types. Testing each field for whether it needs remapping
1303 and creating a new record by hand works, however. */
1304 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1305 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1306 break;
1307 if (f)
1308 {
1309 tree name, new_fields = NULL;
1310
1311 type = lang_hooks.types.make_type (RECORD_TYPE);
1312 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1313 name = build_decl (TYPE_DECL, name, type);
1314 TYPE_NAME (type) = name;
1315
1316 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1317 {
1318 tree new_f = copy_node (f);
1319 DECL_CONTEXT (new_f) = type;
1320 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1321 TREE_CHAIN (new_f) = new_fields;
1322 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1323 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1324 &ctx->cb, NULL);
1325 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1326 &ctx->cb, NULL);
1327 new_fields = new_f;
1328
1329 /* Arrange to be able to look up the receiver field
1330 given the sender field. */
1331 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1332 (splay_tree_value) new_f);
1333 }
1334 TYPE_FIELDS (type) = nreverse (new_fields);
1335 layout_type (type);
1336 }
1337
1338 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1339 }
1340
1341 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1342 specified by CLAUSES. */
1343
1344 static void
1345 scan_sharing_clauses (tree clauses, omp_context *ctx)
1346 {
1347 tree c, decl;
1348 bool scan_array_reductions = false;
1349
1350 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1351 {
1352 bool by_ref;
1353
1354 switch (OMP_CLAUSE_CODE (c))
1355 {
1356 case OMP_CLAUSE_PRIVATE:
1357 decl = OMP_CLAUSE_DECL (c);
1358 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1359 goto do_private;
1360 else if (!is_variable_sized (decl))
1361 install_var_local (decl, ctx);
1362 break;
1363
1364 case OMP_CLAUSE_SHARED:
1365 gcc_assert (is_taskreg_ctx (ctx));
1366 decl = OMP_CLAUSE_DECL (c);
1367 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1368 || !is_variable_sized (decl));
1369 /* Global variables don't need to be copied,
1370 the receiver side will use them directly. */
1371 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1372 break;
1373 by_ref = use_pointer_for_field (decl, ctx);
1374 if (! TREE_READONLY (decl)
1375 || TREE_ADDRESSABLE (decl)
1376 || by_ref
1377 || is_reference (decl))
1378 {
1379 install_var_field (decl, by_ref, 3, ctx);
1380 install_var_local (decl, ctx);
1381 break;
1382 }
1383 /* We don't need to copy const scalar vars back. */
1384 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1385 goto do_private;
1386
1387 case OMP_CLAUSE_LASTPRIVATE:
1388 /* Let the corresponding firstprivate clause create
1389 the variable. */
1390 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1391 break;
1392 /* FALLTHRU */
1393
1394 case OMP_CLAUSE_FIRSTPRIVATE:
1395 case OMP_CLAUSE_REDUCTION:
1396 decl = OMP_CLAUSE_DECL (c);
1397 do_private:
1398 if (is_variable_sized (decl))
1399 {
1400 if (is_task_ctx (ctx))
1401 install_var_field (decl, false, 1, ctx);
1402 break;
1403 }
1404 else if (is_taskreg_ctx (ctx))
1405 {
1406 bool global
1407 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1408 by_ref = use_pointer_for_field (decl, NULL);
1409
1410 if (is_task_ctx (ctx)
1411 && (global || by_ref || is_reference (decl)))
1412 {
1413 install_var_field (decl, false, 1, ctx);
1414 if (!global)
1415 install_var_field (decl, by_ref, 2, ctx);
1416 }
1417 else if (!global)
1418 install_var_field (decl, by_ref, 3, ctx);
1419 }
1420 install_var_local (decl, ctx);
1421 break;
1422
1423 case OMP_CLAUSE_COPYPRIVATE:
1424 if (ctx->outer)
1425 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1426 /* FALLTHRU */
1427
1428 case OMP_CLAUSE_COPYIN:
1429 decl = OMP_CLAUSE_DECL (c);
1430 by_ref = use_pointer_for_field (decl, NULL);
1431 install_var_field (decl, by_ref, 3, ctx);
1432 break;
1433
1434 case OMP_CLAUSE_DEFAULT:
1435 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1436 break;
1437
1438 case OMP_CLAUSE_IF:
1439 case OMP_CLAUSE_NUM_THREADS:
1440 case OMP_CLAUSE_SCHEDULE:
1441 if (ctx->outer)
1442 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1443 break;
1444
1445 case OMP_CLAUSE_NOWAIT:
1446 case OMP_CLAUSE_ORDERED:
1447 case OMP_CLAUSE_COLLAPSE:
1448 case OMP_CLAUSE_UNTIED:
1449 break;
1450
1451 default:
1452 gcc_unreachable ();
1453 }
1454 }
1455
1456 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1457 {
1458 switch (OMP_CLAUSE_CODE (c))
1459 {
1460 case OMP_CLAUSE_LASTPRIVATE:
1461 /* Let the corresponding firstprivate clause create
1462 the variable. */
1463 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1464 scan_array_reductions = true;
1465 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1466 break;
1467 /* FALLTHRU */
1468
1469 case OMP_CLAUSE_PRIVATE:
1470 case OMP_CLAUSE_FIRSTPRIVATE:
1471 case OMP_CLAUSE_REDUCTION:
1472 decl = OMP_CLAUSE_DECL (c);
1473 if (is_variable_sized (decl))
1474 install_var_local (decl, ctx);
1475 fixup_remapped_decl (decl, ctx,
1476 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1477 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1478 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1479 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1480 scan_array_reductions = true;
1481 break;
1482
1483 case OMP_CLAUSE_SHARED:
1484 decl = OMP_CLAUSE_DECL (c);
1485 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1486 fixup_remapped_decl (decl, ctx, false);
1487 break;
1488
1489 case OMP_CLAUSE_COPYPRIVATE:
1490 case OMP_CLAUSE_COPYIN:
1491 case OMP_CLAUSE_DEFAULT:
1492 case OMP_CLAUSE_IF:
1493 case OMP_CLAUSE_NUM_THREADS:
1494 case OMP_CLAUSE_SCHEDULE:
1495 case OMP_CLAUSE_NOWAIT:
1496 case OMP_CLAUSE_ORDERED:
1497 case OMP_CLAUSE_COLLAPSE:
1498 case OMP_CLAUSE_UNTIED:
1499 break;
1500
1501 default:
1502 gcc_unreachable ();
1503 }
1504 }
1505
1506 if (scan_array_reductions)
1507 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1508 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1509 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1510 {
1511 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1512 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1513 }
1514 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1515 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1516 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1517 }
1518
1519 /* Create a new name for the omp child function. Returns an identifier. */
1520
1521 static GTY(()) unsigned int tmp_ompfn_id_num;
1522
1523 static tree
1524 create_omp_child_function_name (bool task_copy)
1525 {
1526 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1527 size_t len = IDENTIFIER_LENGTH (name);
1528 char *tmp_name, *prefix;
1529 const char *suffix;
1530
1531 suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
1532 prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
1533 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1534 strcpy (prefix + len, suffix);
1535 #ifndef NO_DOT_IN_LABEL
1536 prefix[len] = '.';
1537 #elif !defined NO_DOLLAR_IN_LABEL
1538 prefix[len] = '$';
1539 #endif
1540 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1541 return get_identifier (tmp_name);
1542 }
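/* For example, while lowering a function foo this generates names such
   as foo._omp_fn.0, foo._omp_fn.1, ... (or foo._omp_cpyfn.N for task
   copy functions); the separator character actually used depends on
   what the target allows in labels, as handled by the #ifdef above.  */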
1543
1544 /* Build a decl for the omp child function. It'll not contain a body
1545 yet, just the bare decl. */
1546
1547 static void
1548 create_omp_child_function (omp_context *ctx, bool task_copy)
1549 {
1550 tree decl, type, name, t;
1551
1552 name = create_omp_child_function_name (task_copy);
1553 if (task_copy)
1554 type = build_function_type_list (void_type_node, ptr_type_node,
1555 ptr_type_node, NULL_TREE);
1556 else
1557 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1558
1559 decl = build_decl (FUNCTION_DECL, name, type);
1560 decl = lang_hooks.decls.pushdecl (decl);
1561
1562 if (!task_copy)
1563 ctx->cb.dst_fn = decl;
1564 else
1565 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1566
1567 TREE_STATIC (decl) = 1;
1568 TREE_USED (decl) = 1;
1569 DECL_ARTIFICIAL (decl) = 1;
1570 DECL_IGNORED_P (decl) = 0;
1571 TREE_PUBLIC (decl) = 0;
1572 DECL_UNINLINABLE (decl) = 1;
1573 DECL_EXTERNAL (decl) = 0;
1574 DECL_CONTEXT (decl) = NULL_TREE;
1575 DECL_INITIAL (decl) = make_node (BLOCK);
1576
1577 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1578 DECL_ARTIFICIAL (t) = 1;
1579 DECL_IGNORED_P (t) = 1;
1580 DECL_CONTEXT (t) = decl;
1581 DECL_RESULT (decl) = t;
1582
1583 t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1584 DECL_ARTIFICIAL (t) = 1;
1585 DECL_ARG_TYPE (t) = ptr_type_node;
1586 DECL_CONTEXT (t) = current_function_decl;
1587 TREE_USED (t) = 1;
1588 DECL_ARGUMENTS (decl) = t;
1589 if (!task_copy)
1590 ctx->receiver_decl = t;
1591 else
1592 {
1593 t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
1594 ptr_type_node);
1595 DECL_ARTIFICIAL (t) = 1;
1596 DECL_ARG_TYPE (t) = ptr_type_node;
1597 DECL_CONTEXT (t) = current_function_decl;
1598 TREE_USED (t) = 1;
1599 TREE_ADDRESSABLE (t) = 1;
1600 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1601 DECL_ARGUMENTS (decl) = t;
1602 }
1603
1604 /* Allocate memory for the function structure. The call to
1605 allocate_struct_function clobbers CFUN, so we need to restore
1606 it afterward. */
1607 push_struct_function (decl);
1608 DECL_SOURCE_LOCATION (decl) = gimple_location (ctx->stmt);
1609 cfun->function_end_locus = gimple_location (ctx->stmt);
1610 pop_cfun ();
1611 }
1612
1613
1614 /* Scan an OpenMP parallel directive. */
1615
1616 static void
1617 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1618 {
1619 omp_context *ctx;
1620 tree name;
1621 gimple stmt = gsi_stmt (*gsi);
1622
1623 /* Ignore parallel directives with empty bodies, unless there
1624 are copyin clauses. */
1625 if (optimize > 0
1626 && empty_body_p (gimple_omp_body (stmt))
1627 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1628 OMP_CLAUSE_COPYIN) == NULL)
1629 {
1630 gsi_replace (gsi, gimple_build_nop (), false);
1631 return;
1632 }
1633
1634 ctx = new_omp_context (stmt, outer_ctx);
1635 if (taskreg_nesting_level > 1)
1636 ctx->is_nested = true;
1637 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1638 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1639 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1640 name = create_tmp_var_name (".omp_data_s");
1641 name = build_decl (TYPE_DECL, name, ctx->record_type);
1642 TYPE_NAME (ctx->record_type) = name;
1643 create_omp_child_function (ctx, false);
1644 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1645
1646 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1647 scan_omp (gimple_omp_body (stmt), ctx);
1648
1649 if (TYPE_FIELDS (ctx->record_type) == NULL)
1650 ctx->record_type = ctx->receiver_decl = NULL;
1651 else
1652 {
1653 layout_type (ctx->record_type);
1654 fixup_child_record_type (ctx);
1655 }
1656 }
1657
1658 /* Scan an OpenMP task directive. */
1659
1660 static void
1661 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1662 {
1663 omp_context *ctx;
1664 tree name, t;
1665 gimple stmt = gsi_stmt (*gsi);
1666
1667 /* Ignore task directives with empty bodies. */
1668 if (optimize > 0
1669 && empty_body_p (gimple_omp_body (stmt)))
1670 {
1671 gsi_replace (gsi, gimple_build_nop (), false);
1672 return;
1673 }
1674
1675 ctx = new_omp_context (stmt, outer_ctx);
1676 if (taskreg_nesting_level > 1)
1677 ctx->is_nested = true;
1678 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1679 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1680 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1681 name = create_tmp_var_name (".omp_data_s");
1682 name = build_decl (TYPE_DECL, name, ctx->record_type);
1683 TYPE_NAME (ctx->record_type) = name;
1684 create_omp_child_function (ctx, false);
1685 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1686
1687 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1688
1689 if (ctx->srecord_type)
1690 {
1691 name = create_tmp_var_name (".omp_data_a");
1692 name = build_decl (TYPE_DECL, name, ctx->srecord_type);
1693 TYPE_NAME (ctx->srecord_type) = name;
1694 create_omp_child_function (ctx, true);
1695 }
1696
1697 scan_omp (gimple_omp_body (stmt), ctx);
1698
1699 if (TYPE_FIELDS (ctx->record_type) == NULL)
1700 {
1701 ctx->record_type = ctx->receiver_decl = NULL;
1702 t = build_int_cst (long_integer_type_node, 0);
1703 gimple_omp_task_set_arg_size (stmt, t);
1704 t = build_int_cst (long_integer_type_node, 1);
1705 gimple_omp_task_set_arg_align (stmt, t);
1706 }
1707 else
1708 {
1709 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1710 /* Move VLA fields to the end. */
1711 p = &TYPE_FIELDS (ctx->record_type);
1712 while (*p)
1713 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1714 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1715 {
1716 *q = *p;
1717 *p = TREE_CHAIN (*p);
1718 TREE_CHAIN (*q) = NULL_TREE;
1719 q = &TREE_CHAIN (*q);
1720 }
1721 else
1722 p = &TREE_CHAIN (*p);
1723 *p = vla_fields;
1724 layout_type (ctx->record_type);
1725 fixup_child_record_type (ctx);
1726 if (ctx->srecord_type)
1727 layout_type (ctx->srecord_type);
1728 t = fold_convert (long_integer_type_node,
1729 TYPE_SIZE_UNIT (ctx->record_type));
1730 gimple_omp_task_set_arg_size (stmt, t);
1731 t = build_int_cst (long_integer_type_node,
1732 TYPE_ALIGN_UNIT (ctx->record_type));
1733 gimple_omp_task_set_arg_align (stmt, t);
1734 }
1735 }
1736
1737
1738 /* Scan an OpenMP loop directive. */
1739
1740 static void
1741 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1742 {
1743 omp_context *ctx;
1744 size_t i;
1745
1746 ctx = new_omp_context (stmt, outer_ctx);
1747
1748 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1749
1750 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1751 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1752 {
1753 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1754 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1755 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1756 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1757 }
1758 scan_omp (gimple_omp_body (stmt), ctx);
1759 }
1760
1761 /* Scan an OpenMP sections directive. */
1762
1763 static void
1764 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1765 {
1766 omp_context *ctx;
1767
1768 ctx = new_omp_context (stmt, outer_ctx);
1769 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1770 scan_omp (gimple_omp_body (stmt), ctx);
1771 }
1772
1773 /* Scan an OpenMP single directive. */
1774
1775 static void
1776 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1777 {
1778 omp_context *ctx;
1779 tree name;
1780
1781 ctx = new_omp_context (stmt, outer_ctx);
1782 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1783 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1784 name = create_tmp_var_name (".omp_copy_s");
1785 name = build_decl (TYPE_DECL, name, ctx->record_type);
1786 TYPE_NAME (ctx->record_type) = name;
1787
1788 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1789 scan_omp (gimple_omp_body (stmt), ctx);
1790
1791 if (TYPE_FIELDS (ctx->record_type) == NULL)
1792 ctx->record_type = NULL;
1793 else
1794 layout_type (ctx->record_type);
1795 }
1796
1797
1798 /* Check OpenMP nesting restrictions. */
1799 static void
1800 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1801 {
1802 switch (gimple_code (stmt))
1803 {
1804 case GIMPLE_OMP_FOR:
1805 case GIMPLE_OMP_SECTIONS:
1806 case GIMPLE_OMP_SINGLE:
1807 case GIMPLE_CALL:
1808 for (; ctx != NULL; ctx = ctx->outer)
1809 switch (gimple_code (ctx->stmt))
1810 {
1811 case GIMPLE_OMP_FOR:
1812 case GIMPLE_OMP_SECTIONS:
1813 case GIMPLE_OMP_SINGLE:
1814 case GIMPLE_OMP_ORDERED:
1815 case GIMPLE_OMP_MASTER:
1816 case GIMPLE_OMP_TASK:
1817 if (is_gimple_call (stmt))
1818 {
1819 warning (0, "barrier region may not be closely nested inside "
1820 "of work-sharing, critical, ordered, master or "
1821 "explicit task region");
1822 return;
1823 }
1824 warning (0, "work-sharing region may not be closely nested inside "
1825 "of work-sharing, critical, ordered, master or explicit "
1826 "task region");
1827 return;
1828 case GIMPLE_OMP_PARALLEL:
1829 return;
1830 default:
1831 break;
1832 }
1833 break;
1834 case GIMPLE_OMP_MASTER:
1835 for (; ctx != NULL; ctx = ctx->outer)
1836 switch (gimple_code (ctx->stmt))
1837 {
1838 case GIMPLE_OMP_FOR:
1839 case GIMPLE_OMP_SECTIONS:
1840 case GIMPLE_OMP_SINGLE:
1841 case GIMPLE_OMP_TASK:
1842 warning (0, "master region may not be closely nested inside "
1843 "of work-sharing or explicit task region");
1844 return;
1845 case GIMPLE_OMP_PARALLEL:
1846 return;
1847 default:
1848 break;
1849 }
1850 break;
1851 case GIMPLE_OMP_ORDERED:
1852 for (; ctx != NULL; ctx = ctx->outer)
1853 switch (gimple_code (ctx->stmt))
1854 {
1855 case GIMPLE_OMP_CRITICAL:
1856 case GIMPLE_OMP_TASK:
1857 warning (0, "ordered region may not be closely nested inside "
1858 "of critical or explicit task region");
1859 return;
1860 case GIMPLE_OMP_FOR:
1861 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1862 OMP_CLAUSE_ORDERED) == NULL)
1863 warning (0, "ordered region must be closely nested inside "
1864 "a loop region with an ordered clause");
1865 return;
1866 case GIMPLE_OMP_PARALLEL:
1867 return;
1868 default:
1869 break;
1870 }
1871 break;
1872 case GIMPLE_OMP_CRITICAL:
1873 for (; ctx != NULL; ctx = ctx->outer)
1874 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1875 && (gimple_omp_critical_name (stmt)
1876 == gimple_omp_critical_name (ctx->stmt)))
1877 {
1878 warning (0, "critical region may not be nested inside a critical "
1879 "region with the same name");
1880 return;
1881 }
1882 break;
1883 default:
1884 break;
1885 }
1886 }
1887
1888
1889 /* Helper function for scan_omp.
1890
1891 Callback for walk_tree, and for the operand walk in walk_gimple_stmt,
1892 used to scan for OpenMP directives in TP. */
1893
1894 static tree
1895 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1896 {
1897 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1898 omp_context *ctx = (omp_context *) wi->info;
1899 tree t = *tp;
1900
1901 switch (TREE_CODE (t))
1902 {
1903 case VAR_DECL:
1904 case PARM_DECL:
1905 case LABEL_DECL:
1906 case RESULT_DECL:
1907 if (ctx)
1908 *tp = remap_decl (t, &ctx->cb);
1909 break;
1910
1911 default:
1912 if (ctx && TYPE_P (t))
1913 *tp = remap_type (t, &ctx->cb);
1914 else if (!DECL_P (t))
1915 *walk_subtrees = 1;
1916 break;
1917 }
1918
1919 return NULL_TREE;
1920 }
1921
1922
1923 /* Helper function for scan_omp.
1924
1925 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1926 the current statement in GSI. */
1927
1928 static tree
1929 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1930 struct walk_stmt_info *wi)
1931 {
1932 gimple stmt = gsi_stmt (*gsi);
1933 omp_context *ctx = (omp_context *) wi->info;
1934
1935 if (gimple_has_location (stmt))
1936 input_location = gimple_location (stmt);
1937
1938 /* Check the OpenMP nesting restrictions. */
1939 if (ctx != NULL)
1940 {
1941 if (is_gimple_omp (stmt))
1942 check_omp_nesting_restrictions (stmt, ctx);
1943 else if (is_gimple_call (stmt))
1944 {
1945 tree fndecl = gimple_call_fndecl (stmt);
1946 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1947 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1948 check_omp_nesting_restrictions (stmt, ctx);
1949 }
1950 }
1951
1952 *handled_ops_p = true;
1953
1954 switch (gimple_code (stmt))
1955 {
1956 case GIMPLE_OMP_PARALLEL:
1957 taskreg_nesting_level++;
1958 scan_omp_parallel (gsi, ctx);
1959 taskreg_nesting_level--;
1960 break;
1961
1962 case GIMPLE_OMP_TASK:
1963 taskreg_nesting_level++;
1964 scan_omp_task (gsi, ctx);
1965 taskreg_nesting_level--;
1966 break;
1967
1968 case GIMPLE_OMP_FOR:
1969 scan_omp_for (stmt, ctx);
1970 break;
1971
1972 case GIMPLE_OMP_SECTIONS:
1973 scan_omp_sections (stmt, ctx);
1974 break;
1975
1976 case GIMPLE_OMP_SINGLE:
1977 scan_omp_single (stmt, ctx);
1978 break;
1979
1980 case GIMPLE_OMP_SECTION:
1981 case GIMPLE_OMP_MASTER:
1982 case GIMPLE_OMP_ORDERED:
1983 case GIMPLE_OMP_CRITICAL:
1984 ctx = new_omp_context (stmt, ctx);
1985 scan_omp (gimple_omp_body (stmt), ctx);
1986 break;
1987
1988 case GIMPLE_BIND:
1989 {
1990 tree var;
1991
1992 *handled_ops_p = false;
1993 if (ctx)
1994 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
1995 insert_decl_map (&ctx->cb, var, var);
1996 }
1997 break;
1998 default:
1999 *handled_ops_p = false;
2000 break;
2001 }
2002
2003 return NULL_TREE;
2004 }
2005
2006
2007 /* Scan all the statements starting at the current statement. CTX
2008 contains context information about the OpenMP directives and
2009 clauses found during the scan. */
2010
2011 static void
2012 scan_omp (gimple_seq body, omp_context *ctx)
2013 {
2014 location_t saved_location;
2015 struct walk_stmt_info wi;
2016
2017 memset (&wi, 0, sizeof (wi));
2018 wi.info = ctx;
2019 wi.want_locations = true;
2020
2021 saved_location = input_location;
2022 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2023 input_location = saved_location;
2024 }
2025 \f
2026 /* Re-gimplification and code generation routines. */
2027
2028 /* Build a call to GOMP_barrier. */
2029
2030 static tree
2031 build_omp_barrier (void)
2032 {
2033 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2034 }
2035
2036 /* If a context was created for STMT when it was scanned, return it. */
2037
2038 static omp_context *
2039 maybe_lookup_ctx (gimple stmt)
2040 {
2041 splay_tree_node n;
2042 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2043 return n ? (omp_context *) n->value : NULL;
2044 }
2045
2046
2047 /* Find the mapping for DECL in CTX or the immediately enclosing
2048 context that has a mapping for DECL.
2049
2050 If CTX is a nested parallel directive, we may have to use the decl
2051 mappings created in CTX's parent context. Suppose that we have the
2052 following parallel nesting (variable UIDs shown for clarity):
2053
2054 iD.1562 = 0;
2055 #omp parallel shared(iD.1562) -> outer parallel
2056 iD.1562 = iD.1562 + 1;
2057
2058 #omp parallel shared (iD.1562) -> inner parallel
2059 iD.1562 = iD.1562 - 1;
2060
2061 Each parallel structure will create a distinct .omp_data_s structure
2062 for copying iD.1562 in/out of the directive:
2063
2064 outer parallel .omp_data_s.1.i -> iD.1562
2065 inner parallel .omp_data_s.2.i -> iD.1562
2066
2067 A shared variable mapping will produce a copy-out operation before
2068 the parallel directive and a copy-in operation after it. So, in
2069 this case we would have:
2070
2071 iD.1562 = 0;
2072 .omp_data_o.1.i = iD.1562;
2073 #omp parallel shared(iD.1562) -> outer parallel
2074 .omp_data_i.1 = &.omp_data_o.1
2075 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2076
2077 .omp_data_o.2.i = iD.1562; -> **
2078 #omp parallel shared(iD.1562) -> inner parallel
2079 .omp_data_i.2 = &.omp_data_o.2
2080 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2081
2082
2083 ** This is a problem. The symbol iD.1562 cannot be referenced
2084 inside the body of the outer parallel region. But since we are
2085 emitting this copy operation while expanding the inner parallel
2086 directive, we need to access the CTX structure of the outer
2087 parallel directive to get the correct mapping:
2088
2089 .omp_data_o.2.i = .omp_data_i.1->i
2090
2091 Since there may be other workshare or parallel directives enclosing
2092 the parallel directive, it may be necessary to walk up the context
2093 parent chain. This is not a problem in general because nested
2094 parallelism happens only rarely. */
2095
2096 static tree
2097 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2098 {
2099 tree t;
2100 omp_context *up;
2101
2102 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2103 t = maybe_lookup_decl (decl, up);
2104
2105 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2106
2107 return t ? t : decl;
2108 }
2109
2110
2111 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2112 in outer contexts. */
2113
2114 static tree
2115 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2116 {
2117 tree t = NULL;
2118 omp_context *up;
2119
2120 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2121 t = maybe_lookup_decl (decl, up);
2122
2123 return t ? t : decl;
2124 }
2125
2126
2127 /* Construct the initialization value for reduction CLAUSE. */
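/* A sketch of the identity values chosen below, assuming TYPE is int or
   double (this table is illustrative, not exhaustive):

	reduction(+:x), (-:x), (|:x), (^:x)  ->  0
	reduction(*:x), (&&:x)               ->  1
	reduction(&:x)                       ->  -1 (all bits set)
	reduction(max:d)  ->  -INF, or -DBL_MAX if infinities are not honored
	reduction(min:d)  ->  +INF, or +DBL_MAX likewise  */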
2128
2129 tree
2130 omp_reduction_init (tree clause, tree type)
2131 {
2132 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2133 {
2134 case PLUS_EXPR:
2135 case MINUS_EXPR:
2136 case BIT_IOR_EXPR:
2137 case BIT_XOR_EXPR:
2138 case TRUTH_OR_EXPR:
2139 case TRUTH_ORIF_EXPR:
2140 case TRUTH_XOR_EXPR:
2141 case NE_EXPR:
2142 return fold_convert (type, integer_zero_node);
2143
2144 case MULT_EXPR:
2145 case TRUTH_AND_EXPR:
2146 case TRUTH_ANDIF_EXPR:
2147 case EQ_EXPR:
2148 return fold_convert (type, integer_one_node);
2149
2150 case BIT_AND_EXPR:
2151 return fold_convert (type, integer_minus_one_node);
2152
2153 case MAX_EXPR:
2154 if (SCALAR_FLOAT_TYPE_P (type))
2155 {
2156 REAL_VALUE_TYPE max, min;
2157 if (HONOR_INFINITIES (TYPE_MODE (type)))
2158 {
2159 real_inf (&max);
2160 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2161 }
2162 else
2163 real_maxval (&min, 1, TYPE_MODE (type));
2164 return build_real (type, min);
2165 }
2166 else
2167 {
2168 gcc_assert (INTEGRAL_TYPE_P (type));
2169 return TYPE_MIN_VALUE (type);
2170 }
2171
2172 case MIN_EXPR:
2173 if (SCALAR_FLOAT_TYPE_P (type))
2174 {
2175 REAL_VALUE_TYPE max;
2176 if (HONOR_INFINITIES (TYPE_MODE (type)))
2177 real_inf (&max);
2178 else
2179 real_maxval (&max, 0, TYPE_MODE (type));
2180 return build_real (type, max);
2181 }
2182 else
2183 {
2184 gcc_assert (INTEGRAL_TYPE_P (type));
2185 return TYPE_MAX_VALUE (type);
2186 }
2187
2188 default:
2189 gcc_unreachable ();
2190 }
2191 }
2192
2193 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2194 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2195 private variables. Initialization statements go in ILIST, while calls
2196 to destructors go in DLIST. */
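/* For example (an illustrative sketch; field and variable names are
   invented), for

	#pragma omp parallel firstprivate (a)

   the receiver side emits roughly

	a.1 = .omp_data_i->a;	   copy-construct the private copy

   into ILIST, while a non-trivial destructor for a.1 would be queued
   in DLIST.  */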
2197
2198 static void
2199 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2200 omp_context *ctx)
2201 {
2202 gimple_stmt_iterator diter;
2203 tree c, dtor, copyin_seq, x, ptr;
2204 bool copyin_by_ref = false;
2205 bool lastprivate_firstprivate = false;
2206 int pass;
2207
2208 *dlist = gimple_seq_alloc ();
2209 diter = gsi_start (*dlist);
2210 copyin_seq = NULL;
2211
2212 /* Do all the fixed sized types in the first pass, and the variable sized
2213 types in the second pass. This makes sure that the scalar arguments to
2214 the variable sized types are processed before we use them in the
2215 variable sized operations. */
2216 for (pass = 0; pass < 2; ++pass)
2217 {
2218 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2219 {
2220 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2221 tree var, new_var;
2222 bool by_ref;
2223
2224 switch (c_kind)
2225 {
2226 case OMP_CLAUSE_PRIVATE:
2227 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2228 continue;
2229 break;
2230 case OMP_CLAUSE_SHARED:
2231 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2232 {
2233 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2234 continue;
2235 }
2236 case OMP_CLAUSE_FIRSTPRIVATE:
2237 case OMP_CLAUSE_COPYIN:
2238 case OMP_CLAUSE_REDUCTION:
2239 break;
2240 case OMP_CLAUSE_LASTPRIVATE:
2241 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2242 {
2243 lastprivate_firstprivate = true;
2244 if (pass != 0)
2245 continue;
2246 }
2247 break;
2248 default:
2249 continue;
2250 }
2251
2252 new_var = var = OMP_CLAUSE_DECL (c);
2253 if (c_kind != OMP_CLAUSE_COPYIN)
2254 new_var = lookup_decl (var, ctx);
2255
2256 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2257 {
2258 if (pass != 0)
2259 continue;
2260 }
2261 else if (is_variable_sized (var))
2262 {
2263 /* For variable sized types, we need to allocate the
2264 actual storage here. Call alloca and store the
2265 result in the pointer decl that we created elsewhere. */
2266 if (pass == 0)
2267 continue;
2268
2269 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2270 {
2271 gimple stmt;
2272 tree tmp;
2273
2274 ptr = DECL_VALUE_EXPR (new_var);
2275 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2276 ptr = TREE_OPERAND (ptr, 0);
2277 gcc_assert (DECL_P (ptr));
2278 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2279
2280 /* void *tmp = __builtin_alloca (x); */
2281 stmt
2282 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2283 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2284 gimple_add_tmp_var (tmp);
2285 gimple_call_set_lhs (stmt, tmp);
2286
2287 gimple_seq_add_stmt (ilist, stmt);
2288
2289 x = fold_convert (TREE_TYPE (ptr), tmp);
2290 gimplify_assign (ptr, x, ilist);
2291 }
2292 }
2293 else if (is_reference (var))
2294 {
2295 /* For references that are being privatized for Fortran,
2296 allocate new backing storage for the new pointer
2297 variable. This allows us to avoid changing all the
2298 code that expects a pointer to something that expects
2299 a direct variable. Note that this doesn't apply to
2300 C++, since reference types are disallowed in data
2301 sharing clauses there, except for NRV optimized
2302 return values. */
2303 if (pass == 0)
2304 continue;
2305
2306 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2307 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2308 {
2309 x = build_receiver_ref (var, false, ctx);
2310 x = build_fold_addr_expr (x);
2311 }
2312 else if (TREE_CONSTANT (x))
2313 {
2314 const char *name = NULL;
2315 if (DECL_NAME (var))
2316 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2317
2318 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2319 name);
2320 gimple_add_tmp_var (x);
2321 TREE_ADDRESSABLE (x) = 1;
2322 x = build_fold_addr_expr (x);
2323 }
2324 else
2325 {
2326 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2327 }
2328
2329 x = fold_convert (TREE_TYPE (new_var), x);
2330 gimplify_assign (new_var, x, ilist);
2331
2332 new_var = build_fold_indirect_ref (new_var);
2333 }
2334 else if (c_kind == OMP_CLAUSE_REDUCTION
2335 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2336 {
2337 if (pass == 0)
2338 continue;
2339 }
2340 else if (pass != 0)
2341 continue;
2342
2343 switch (OMP_CLAUSE_CODE (c))
2344 {
2345 case OMP_CLAUSE_SHARED:
2346 /* Shared global vars are just accessed directly. */
2347 if (is_global_var (new_var))
2348 break;
2349 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2350 needs to be delayed until after fixup_child_record_type so
2351 that we get the correct type during the dereference. */
2352 by_ref = use_pointer_for_field (var, ctx);
2353 x = build_receiver_ref (var, by_ref, ctx);
2354 SET_DECL_VALUE_EXPR (new_var, x);
2355 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2356
2357 /* ??? If VAR is not passed by reference, and the variable
2358 hasn't been initialized yet, then we'll get a warning for
2359 the store into the omp_data_s structure. Ideally, we'd be
2360 able to notice this and not store anything at all, but
2361 we're generating code too early. Suppress the warning. */
2362 if (!by_ref)
2363 TREE_NO_WARNING (var) = 1;
2364 break;
2365
2366 case OMP_CLAUSE_LASTPRIVATE:
2367 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2368 break;
2369 /* FALLTHRU */
2370
2371 case OMP_CLAUSE_PRIVATE:
2372 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2373 x = build_outer_var_ref (var, ctx);
2374 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2375 {
2376 if (is_task_ctx (ctx))
2377 x = build_receiver_ref (var, false, ctx);
2378 else
2379 x = build_outer_var_ref (var, ctx);
2380 }
2381 else
2382 x = NULL;
2383 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2384 if (x)
2385 gimplify_and_add (x, ilist);
2386 /* FALLTHRU */
2387
2388 do_dtor:
2389 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2390 if (x)
2391 {
2392 gimple_seq tseq = NULL;
2393
2394 dtor = x;
2395 gimplify_stmt (&dtor, &tseq);
2396 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2397 }
2398 break;
2399
2400 case OMP_CLAUSE_FIRSTPRIVATE:
2401 if (is_task_ctx (ctx))
2402 {
2403 if (is_reference (var) || is_variable_sized (var))
2404 goto do_dtor;
2405 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2406 ctx))
2407 || use_pointer_for_field (var, NULL))
2408 {
2409 x = build_receiver_ref (var, false, ctx);
2410 SET_DECL_VALUE_EXPR (new_var, x);
2411 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2412 goto do_dtor;
2413 }
2414 }
2415 x = build_outer_var_ref (var, ctx);
2416 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2417 gimplify_and_add (x, ilist);
2418 goto do_dtor;
2419 break;
2420
2421 case OMP_CLAUSE_COPYIN:
2422 by_ref = use_pointer_for_field (var, NULL);
2423 x = build_receiver_ref (var, by_ref, ctx);
2424 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2425 append_to_statement_list (x, &copyin_seq);
2426 copyin_by_ref |= by_ref;
2427 break;
2428
2429 case OMP_CLAUSE_REDUCTION:
2430 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2431 {
2432 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2433 x = build_outer_var_ref (var, ctx);
2434
2435 if (is_reference (var))
2436 x = build_fold_addr_expr (x);
2437 SET_DECL_VALUE_EXPR (placeholder, x);
2438 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2439 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2440 gimple_seq_add_seq (ilist,
2441 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2442 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2443 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2444 }
2445 else
2446 {
2447 x = omp_reduction_init (c, TREE_TYPE (new_var));
2448 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2449 gimplify_assign (new_var, x, ilist);
2450 }
2451 break;
2452
2453 default:
2454 gcc_unreachable ();
2455 }
2456 }
2457 }
2458
2459 /* The copyin sequence is not to be executed by the main thread, since
2460 that would result in self-copies. That may not matter for scalars,
2461 but it certainly does for C++ operator=. */
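/* I.e. roughly (a sketch, using the built-in spelling of the runtime
   call):

	if (__builtin_omp_get_thread_num () != 0)
	  { ... copyin assignments ... }  */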
2462 if (copyin_seq)
2463 {
2464 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2465 x = build2 (NE_EXPR, boolean_type_node, x,
2466 build_int_cst (TREE_TYPE (x), 0));
2467 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2468 gimplify_and_add (x, ilist);
2469 }
2470
2471 /* If any copyin variable is passed by reference, we must ensure the
2472 master thread doesn't modify it before it is copied over in all
2473 threads. Similarly for variables in both firstprivate and
2474 lastprivate clauses we need to ensure the lastprivate copying
2475 happens after firstprivate copying in all threads. */
2476 if (copyin_by_ref || lastprivate_firstprivate)
2477 gimplify_and_add (build_omp_barrier (), ilist);
2478 }
2479
2480
2481 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2482 both parallel and workshare constructs. PREDICATE may be NULL if it's
2483 always true. */
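/* A sketch of what is emitted when PREDICATE is, say, V == LAST
   (label names invented):

	if (V == LAST) goto label_true; else goto label;
	label_true:
	  orig_var = private_var;	   one copy-out per lastprivate
	label:  */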
2484
2485 static void
2486 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2487 omp_context *ctx)
2488 {
2489 tree x, c, label = NULL;
2490 bool par_clauses = false;
2491
2492 /* Early exit if there are no lastprivate clauses. */
2493 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2494 if (clauses == NULL)
2495 {
2496 /* If this was a workshare clause, see if it had been combined
2497 with its parallel. In that case, look for the clauses on the
2498 parallel statement itself. */
2499 if (is_parallel_ctx (ctx))
2500 return;
2501
2502 ctx = ctx->outer;
2503 if (ctx == NULL || !is_parallel_ctx (ctx))
2504 return;
2505
2506 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2507 OMP_CLAUSE_LASTPRIVATE);
2508 if (clauses == NULL)
2509 return;
2510 par_clauses = true;
2511 }
2512
2513 if (predicate)
2514 {
2515 gimple stmt;
2516 tree label_true, arm1, arm2;
2517
2518 label = create_artificial_label ();
2519 label_true = create_artificial_label ();
2520 arm1 = TREE_OPERAND (predicate, 0);
2521 arm2 = TREE_OPERAND (predicate, 1);
2522 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2523 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2524 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2525 label_true, label);
2526 gimple_seq_add_stmt (stmt_list, stmt);
2527 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2528 }
2529
2530 for (c = clauses; c ;)
2531 {
2532 tree var, new_var;
2533
2534 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2535 {
2536 var = OMP_CLAUSE_DECL (c);
2537 new_var = lookup_decl (var, ctx);
2538
2539 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2540 {
2541 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2542 gimple_seq_add_seq (stmt_list,
2543 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2544 }
2545 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2546
2547 x = build_outer_var_ref (var, ctx);
2548 if (is_reference (var))
2549 new_var = build_fold_indirect_ref (new_var);
2550 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2551 gimplify_and_add (x, stmt_list);
2552 }
2553 c = OMP_CLAUSE_CHAIN (c);
2554 if (c == NULL && !par_clauses)
2555 {
2556 /* If this was a workshare clause, see if it had been combined
2557 with its parallel. In that case, continue looking for the
2558 clauses also on the parallel statement itself. */
2559 if (is_parallel_ctx (ctx))
2560 break;
2561
2562 ctx = ctx->outer;
2563 if (ctx == NULL || !is_parallel_ctx (ctx))
2564 break;
2565
2566 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2567 OMP_CLAUSE_LASTPRIVATE);
2568 par_clauses = true;
2569 }
2570 }
2571
2572 if (label)
2573 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2574 }
2575
2576
2577 /* Generate code to implement the REDUCTION clauses. */
2578
2579 static void
2580 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2581 {
2582 gimple_seq sub_seq = NULL;
2583 gimple stmt;
2584 tree x, c;
2585 int count = 0;
2586
2587 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2588 update in that case, otherwise use a lock. */
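/* E.g. (a sketch) a lone reduction(+:s) becomes an atomic update

	#pragma omp atomic
	  s = s + s.private;

   whereas two or more reductions are bracketed by
   GOMP_atomic_start () ... GOMP_atomic_end ().  */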
2589 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2590 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2591 {
2592 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2593 {
2594 /* Never use OMP_ATOMIC for array reductions. */
2595 count = -1;
2596 break;
2597 }
2598 count++;
2599 }
2600
2601 if (count == 0)
2602 return;
2603
2604 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2605 {
2606 tree var, ref, new_var;
2607 enum tree_code code;
2608
2609 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2610 continue;
2611
2612 var = OMP_CLAUSE_DECL (c);
2613 new_var = lookup_decl (var, ctx);
2614 if (is_reference (var))
2615 new_var = build_fold_indirect_ref (new_var);
2616 ref = build_outer_var_ref (var, ctx);
2617 code = OMP_CLAUSE_REDUCTION_CODE (c);
2618
2619 /* reduction(-:var) sums up the partial results, so it acts
2620 identically to reduction(+:var). */
2621 if (code == MINUS_EXPR)
2622 code = PLUS_EXPR;
2623
2624 if (count == 1)
2625 {
2626 tree addr = build_fold_addr_expr (ref);
2627
2628 addr = save_expr (addr);
2629 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2630 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2631 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2632 gimplify_and_add (x, stmt_seqp);
2633 return;
2634 }
2635
2636 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2637 {
2638 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2639
2640 if (is_reference (var))
2641 ref = build_fold_addr_expr (ref);
2642 SET_DECL_VALUE_EXPR (placeholder, ref);
2643 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2644 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2645 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2646 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2647 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2648 }
2649 else
2650 {
2651 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2652 ref = build_outer_var_ref (var, ctx);
2653 gimplify_assign (ref, x, &sub_seq);
2654 }
2655 }
2656
2657 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2658 gimple_seq_add_stmt (stmt_seqp, stmt);
2659
2660 gimple_seq_add_seq (stmt_seqp, sub_seq);
2661
2662 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2663 gimple_seq_add_stmt (stmt_seqp, stmt);
2664 }
2665
2666
2667 /* Generate code to implement the COPYPRIVATE clauses. */
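/* A sketch (the .omp_copy_s field names are invented): for

	#pragma omp single copyprivate (x)

   the thread that executed the single region stores

	.omp_copy_s.x = x;	   or &x, when passed by reference

   via SLIST, and every other thread copies it back via RLIST:

	x = .omp_copy_s.x;  */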
2668
2669 static void
2670 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2671 omp_context *ctx)
2672 {
2673 tree c;
2674
2675 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2676 {
2677 tree var, ref, x;
2678 bool by_ref;
2679
2680 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2681 continue;
2682
2683 var = OMP_CLAUSE_DECL (c);
2684 by_ref = use_pointer_for_field (var, NULL);
2685
2686 ref = build_sender_ref (var, ctx);
2687 x = lookup_decl_in_outer_ctx (var, ctx);
2688 x = by_ref ? build_fold_addr_expr (x) : x;
2689 gimplify_assign (ref, x, slist);
2690
2691 ref = build_receiver_ref (var, by_ref, ctx);
2692 if (is_reference (var))
2693 {
2694 ref = build_fold_indirect_ref (ref);
2695 var = build_fold_indirect_ref (var);
2696 }
2697 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2698 gimplify_and_add (x, rlist);
2699 }
2700 }
2701
2702
2703 /* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE and
2704 REDUCTION clauses from the sender (aka parent) side. */
2705
2706 static void
2707 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2708 omp_context *ctx)
2709 {
2710 tree c;
2711
2712 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2713 {
2714 tree val, ref, x, var;
2715 bool by_ref, do_in = false, do_out = false;
2716
2717 switch (OMP_CLAUSE_CODE (c))
2718 {
2719 case OMP_CLAUSE_PRIVATE:
2720 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2721 break;
2722 continue;
2723 case OMP_CLAUSE_FIRSTPRIVATE:
2724 case OMP_CLAUSE_COPYIN:
2725 case OMP_CLAUSE_LASTPRIVATE:
2726 case OMP_CLAUSE_REDUCTION:
2727 break;
2728 default:
2729 continue;
2730 }
2731
2732 val = OMP_CLAUSE_DECL (c);
2733 var = lookup_decl_in_outer_ctx (val, ctx);
2734
2735 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2736 && is_global_var (var))
2737 continue;
2738 if (is_variable_sized (val))
2739 continue;
2740 by_ref = use_pointer_for_field (val, NULL);
2741
2742 switch (OMP_CLAUSE_CODE (c))
2743 {
2744 case OMP_CLAUSE_PRIVATE:
2745 case OMP_CLAUSE_FIRSTPRIVATE:
2746 case OMP_CLAUSE_COPYIN:
2747 do_in = true;
2748 break;
2749
2750 case OMP_CLAUSE_LASTPRIVATE:
2751 if (by_ref || is_reference (val))
2752 {
2753 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2754 continue;
2755 do_in = true;
2756 }
2757 else
2758 {
2759 do_out = true;
2760 if (lang_hooks.decls.omp_private_outer_ref (val))
2761 do_in = true;
2762 }
2763 break;
2764
2765 case OMP_CLAUSE_REDUCTION:
2766 do_in = true;
2767 do_out = !(by_ref || is_reference (val));
2768 break;
2769
2770 default:
2771 gcc_unreachable ();
2772 }
2773
2774 if (do_in)
2775 {
2776 ref = build_sender_ref (val, ctx);
2777 x = by_ref ? build_fold_addr_expr (var) : var;
2778 gimplify_assign (ref, x, ilist);
2779 if (is_task_ctx (ctx))
2780 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2781 }
2782
2783 if (do_out)
2784 {
2785 ref = build_sender_ref (val, ctx);
2786 gimplify_assign (var, ref, olist);
2787 }
2788 }
2789 }
2790
2791 /* Generate code to implement SHARED from the sender (aka parent)
2792 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2793 list things that got automatically shared. */
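/* A sketch (record and field names invented): for a shared scalar i
   sent by value we emit

	.omp_data_o.1.i = i;	   before the parallel call (ILIST)
	i = .omp_data_o.1.i;	   after it returns (OLIST)

   whereas an addressable variable is sent by address instead.  */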
2794
2795 static void
2796 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2797 {
2798 tree var, ovar, nvar, f, x, record_type;
2799
2800 if (ctx->record_type == NULL)
2801 return;
2802
2803 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2804 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2805 {
2806 ovar = DECL_ABSTRACT_ORIGIN (f);
2807 nvar = maybe_lookup_decl (ovar, ctx);
2808 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2809 continue;
2810
2811 /* If CTX is a nested parallel directive, find the immediately
2812 enclosing parallel or workshare construct that contains a
2813 mapping for OVAR. */
2814 var = lookup_decl_in_outer_ctx (ovar, ctx);
2815
2816 if (use_pointer_for_field (ovar, ctx))
2817 {
2818 x = build_sender_ref (ovar, ctx);
2819 var = build_fold_addr_expr (var);
2820 gimplify_assign (x, var, ilist);
2821 }
2822 else
2823 {
2824 x = build_sender_ref (ovar, ctx);
2825 gimplify_assign (x, var, ilist);
2826
2827 if (!TREE_READONLY (var)
2828 /* We don't need to receive a new reference to a result
2829 or parm decl. In fact we must not store to it, as that would
2830 invalidate any pending RSO and generate wrong gimple
2831 during inlining. */
2832 && !((TREE_CODE (var) == RESULT_DECL
2833 || TREE_CODE (var) == PARM_DECL)
2834 && DECL_BY_REFERENCE (var)))
2835 {
2836 x = build_sender_ref (ovar, ctx);
2837 gimplify_assign (var, x, olist);
2838 }
2839 }
2840 }
2841 }
2842
2843
2844 /* A convenience function to build an empty GIMPLE_COND with just the
2845 condition. */
2846
2847 static gimple
2848 gimple_build_cond_empty (tree cond)
2849 {
2850 enum tree_code pred_code;
2851 tree lhs, rhs;
2852
2853 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2854 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2855 }
2856
2857
2858 /* Build the function calls to GOMP_parallel_start etc to actually
2859 generate the parallel operation. REGION is the parallel region
2860 being expanded. BB is the block where the code is to be inserted.
2861 WS_ARGS will be set if this is a call to a combined parallel+workshare
2862 construct; it contains the list of additional arguments needed by
2863 the workshare construct. */
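/* The emitted sequence is roughly (a sketch; the data record name is
   invented):

	GOMP_parallel_start (child_fn, &.omp_data_o.1, num_threads);
	child_fn (&.omp_data_o.1);
	GOMP_parallel_end ();

   with GOMP_parallel_loop_*_start or GOMP_parallel_sections_start
   substituted for combined constructs.  */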
2864
2865 static void
2866 expand_parallel_call (struct omp_region *region, basic_block bb,
2867 gimple entry_stmt, tree ws_args)
2868 {
2869 tree t, t1, t2, val, cond, c, clauses;
2870 gimple_stmt_iterator gsi;
2871 gimple stmt;
2872 int start_ix;
2873
2874 clauses = gimple_omp_parallel_clauses (entry_stmt);
2875
2876 /* Determine what flavor of GOMP_parallel_start we will be
2877 emitting. */
2878 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2879 if (is_combined_parallel (region))
2880 {
2881 switch (region->inner->type)
2882 {
2883 case GIMPLE_OMP_FOR:
2884 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2885 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2886 + (region->inner->sched_kind
2887 == OMP_CLAUSE_SCHEDULE_RUNTIME
2888 ? 3 : region->inner->sched_kind);
2889 break;
2890 case GIMPLE_OMP_SECTIONS:
2891 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2892 break;
2893 default:
2894 gcc_unreachable ();
2895 }
2896 }
2897
2898 /* By default, the value of NUM_THREADS is zero (selected at run time)
2899 and there is no conditional. */
2900 cond = NULL_TREE;
2901 val = build_int_cst (unsigned_type_node, 0);
2902
2903 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2904 if (c)
2905 cond = OMP_CLAUSE_IF_EXPR (c);
2906
2907 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2908 if (c)
2909 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2910
2911 /* Ensure 'val' is of the correct type. */
2912 val = fold_convert (unsigned_type_node, val);
2913
2914 /* If we found the clause 'if (cond)', build either
2915 (cond != 0) or (cond ? val : 1u). */
2916 if (cond)
2917 {
2918 gimple_stmt_iterator gsi;
2919
2920 cond = gimple_boolify (cond);
2921
2922 if (integer_zerop (val))
2923 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2924 build_int_cst (TREE_TYPE (cond), 0));
2925 else
2926 {
2927 basic_block cond_bb, then_bb, else_bb;
2928 edge e, e_then, e_else;
2929 tree tmp_then, tmp_else, tmp_join, tmp_var;
2930
2931 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2932 if (gimple_in_ssa_p (cfun))
2933 {
2934 tmp_then = make_ssa_name (tmp_var, NULL);
2935 tmp_else = make_ssa_name (tmp_var, NULL);
2936 tmp_join = make_ssa_name (tmp_var, NULL);
2937 }
2938 else
2939 {
2940 tmp_then = tmp_var;
2941 tmp_else = tmp_var;
2942 tmp_join = tmp_var;
2943 }
2944
2945 e = split_block (bb, NULL);
2946 cond_bb = e->src;
2947 bb = e->dest;
2948 remove_edge (e);
2949
2950 then_bb = create_empty_bb (cond_bb);
2951 else_bb = create_empty_bb (then_bb);
2952 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2953 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2954
2955 stmt = gimple_build_cond_empty (cond);
2956 gsi = gsi_start_bb (cond_bb);
2957 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2958
2959 gsi = gsi_start_bb (then_bb);
2960 stmt = gimple_build_assign (tmp_then, val);
2961 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2962
2963 gsi = gsi_start_bb (else_bb);
2964 stmt = gimple_build_assign
2965 (tmp_else, build_int_cst (unsigned_type_node, 1));
2966 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2967
2968 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2969 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2970 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2971 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2972
2973 if (gimple_in_ssa_p (cfun))
2974 {
2975 gimple phi = create_phi_node (tmp_join, bb);
2976 SSA_NAME_DEF_STMT (tmp_join) = phi;
2977 add_phi_arg (phi, tmp_then, e_then);
2978 add_phi_arg (phi, tmp_else, e_else);
2979 }
2980
2981 val = tmp_join;
2982 }
2983
2984 gsi = gsi_start_bb (bb);
2985 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
2986 false, GSI_CONTINUE_LINKING);
2987 }
2988
2989 gsi = gsi_last_bb (bb);
2990 t = gimple_omp_parallel_data_arg (entry_stmt);
2991 if (t == NULL)
2992 t1 = null_pointer_node;
2993 else
2994 t1 = build_fold_addr_expr (t);
2995 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
2996
2997 if (ws_args)
2998 {
2999 tree args = tree_cons (NULL, t2,
3000 tree_cons (NULL, t1,
3001 tree_cons (NULL, val, ws_args)));
3002 t = build_function_call_expr (built_in_decls[start_ix], args);
3003 }
3004 else
3005 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3006
3007 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3008 false, GSI_CONTINUE_LINKING);
3009
3010 t = gimple_omp_parallel_data_arg (entry_stmt);
3011 if (t == NULL)
3012 t = null_pointer_node;
3013 else
3014 t = build_fold_addr_expr (t);
3015 t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3016 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3017 false, GSI_CONTINUE_LINKING);
3018
3019 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3020 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3021 false, GSI_CONTINUE_LINKING);
3022 }
3023
3024
3025 /* Build the function call to GOMP_task to actually
3026 generate the task operation. BB is the block where the code is to be inserted. */
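/* A sketch of the emitted call (argument names paraphrase the
   GIMPLE_OMP_TASK operands):

	GOMP_task (child_fn, &.omp_data_o, copy_fn,
		   arg_size, arg_align, if_cond, flags);  */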
3027
3028 static void
3029 expand_task_call (basic_block bb, gimple entry_stmt)
3030 {
3031 tree t, t1, t2, t3, flags, cond, c, clauses;
3032 gimple_stmt_iterator gsi;
3033
3034 clauses = gimple_omp_task_clauses (entry_stmt);
3035
3036 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3037 if (c)
3038 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3039 else
3040 cond = boolean_true_node;
3041
3042 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3043 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3044
3045 gsi = gsi_last_bb (bb);
3046 t = gimple_omp_task_data_arg (entry_stmt);
3047 if (t == NULL)
3048 t2 = null_pointer_node;
3049 else
3050 t2 = build_fold_addr_expr (t);
3051 t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
3052 t = gimple_omp_task_copy_fn (entry_stmt);
3053 if (t == NULL)
3054 t3 = null_pointer_node;
3055 else
3056 t3 = build_fold_addr_expr (t);
3057
3058 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3059 gimple_omp_task_arg_size (entry_stmt),
3060 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3061
3062 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3063 false, GSI_CONTINUE_LINKING);
3064 }
3065
3066
3067 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3068 catch handler and return it. This prevents programs from violating the
3069 structured block semantics with throws. */
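/* Roughly (a sketch):

	try { BODY }
	catch <must-not-throw> { terminate-like cleanup or __builtin_trap (); }

   so that an exception can never escape the structured block.  */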
3070
3071 static gimple_seq
3072 maybe_catch_exception (gimple_seq body)
3073 {
3074 gimple f, t;
3075
3076 if (!flag_exceptions)
3077 return body;
3078
3079 if (lang_protect_cleanup_actions)
3080 t = lang_protect_cleanup_actions ();
3081 else
3082 t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
3083
3084 f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
3085 gimple_eh_filter_set_must_not_throw (f, true);
3086
3087 t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
3088 GIMPLE_TRY_CATCH);
3089
3090 return gimple_seq_alloc_with_stmt (t);
3091 }
3092
3093 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3094
3095 static tree
3096 list2chain (tree list)
3097 {
3098 tree t;
3099
3100 for (t = list; t; t = TREE_CHAIN (t))
3101 {
3102 tree var = TREE_VALUE (t);
3103 if (TREE_CHAIN (t))
3104 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3105 else
3106 TREE_CHAIN (var) = NULL_TREE;
3107 }
3108
3109 return list ? TREE_VALUE (list) : NULL_TREE;
3110 }
3111
3112
3113 /* Remove barriers in REGION->EXIT's block. Note that this is only
3114 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3115 is an implicit barrier, any barrier that a workshare inside the
3116 GIMPLE_OMP_PARALLEL left at the end of the region can now be
3117 removed. */
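/* E.g. (a sketch):

	#pragma omp parallel
	  {
	    #pragma omp for
	    for (...) ...	   implicit barrier here
	  }			   and another implicit barrier here

   The inner barrier is redundant, so its GIMPLE_OMP_RETURN gets the
   nowait flag, unless queued tasks might still depend on it (see
   below).  */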
3118
3119 static void
3120 remove_exit_barrier (struct omp_region *region)
3121 {
3122 gimple_stmt_iterator gsi;
3123 basic_block exit_bb;
3124 edge_iterator ei;
3125 edge e;
3126 gimple stmt;
3127 int any_addressable_vars = -1;
3128
3129 exit_bb = region->exit;
3130
3131 /* If the parallel region doesn't return, we don't have REGION->EXIT
3132 block at all. */
3133 if (! exit_bb)
3134 return;
3135
3136 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3137 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3138 statements that can appear in between are extremely limited -- no
3139 memory operations at all. Here, we allow nothing at all, so the
3140 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3141 gsi = gsi_last_bb (exit_bb);
3142 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3143 gsi_prev (&gsi);
3144 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3145 return;
3146
3147 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3148 {
3149 gsi = gsi_last_bb (e->src);
3150 if (gsi_end_p (gsi))
3151 continue;
3152 stmt = gsi_stmt (gsi);
3153 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3154 && !gimple_omp_return_nowait_p (stmt))
3155 {
3156 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3157 in many cases. If there could be tasks queued, the barrier
3158 might be needed to let the tasks run before some local
3159 variable of the parallel that the task uses as shared
3160 runs out of scope. The task can be spawned either
3161 from within the current function (this would be easy to check)
3162 or from some function it calls and gets passed an address
3163 of such a variable. */
3164 if (any_addressable_vars < 0)
3165 {
3166 gimple parallel_stmt = last_stmt (region->entry);
3167 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3168 tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
3169 tree block;
3170
3171 any_addressable_vars = 0;
3172 for (; local_decls; local_decls = TREE_CHAIN (local_decls))
3173 if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
3174 {
3175 any_addressable_vars = 1;
3176 break;
3177 }
3178 for (block = gimple_block (stmt);
3179 !any_addressable_vars
3180 && block
3181 && TREE_CODE (block) == BLOCK;
3182 block = BLOCK_SUPERCONTEXT (block))
3183 {
3184 for (local_decls = BLOCK_VARS (block);
3185 local_decls;
3186 local_decls = TREE_CHAIN (local_decls))
3187 if (TREE_ADDRESSABLE (local_decls))
3188 {
3189 any_addressable_vars = 1;
3190 break;
3191 }
3192 if (block == gimple_block (parallel_stmt))
3193 break;
3194 }
3195 }
3196 if (!any_addressable_vars)
3197 gimple_omp_return_set_nowait (stmt);
3198 }
3199 }
3200 }
3201
3202 static void
3203 remove_exit_barriers (struct omp_region *region)
3204 {
3205 if (region->type == GIMPLE_OMP_PARALLEL)
3206 remove_exit_barrier (region);
3207
3208 if (region->inner)
3209 {
3210 region = region->inner;
3211 remove_exit_barriers (region);
3212 while (region->next)
3213 {
3214 region = region->next;
3215 remove_exit_barriers (region);
3216 }
3217 }
3218 }
3219
3220 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3221 calls. These can't be declared as const functions, but
3222 within one parallel body they are constant, so they can be
3223 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3224 which are declared const. Similarly for a task body, except
3225 that in an untied task omp_get_thread_num () can change at any task
3226 scheduling point. */
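/* I.e. within a parallel body (a sketch)

	tid = omp_get_thread_num ();

   is rewritten as

	tid = __builtin_omp_get_thread_num ();

   whose const declaration lets later passes CSE repeated calls.  */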
3227
3228 static void
3229 optimize_omp_library_calls (gimple entry_stmt)
3230 {
3231 basic_block bb;
3232 gimple_stmt_iterator gsi;
3233 tree thr_num_id
3234 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3235 tree num_thr_id
3236 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3237 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3238 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3239 OMP_CLAUSE_UNTIED) != NULL);
3240
3241 FOR_EACH_BB (bb)
3242 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3243 {
3244 gimple call = gsi_stmt (gsi);
3245 tree decl;
3246
3247 if (is_gimple_call (call)
3248 && (decl = gimple_call_fndecl (call))
3249 && DECL_EXTERNAL (decl)
3250 && TREE_PUBLIC (decl)
3251 && DECL_INITIAL (decl) == NULL)
3252 {
3253 tree built_in;
3254
3255 if (DECL_NAME (decl) == thr_num_id)
3256 {
3257 /* In #pragma omp task untied omp_get_thread_num () can change
3258 during the execution of the task region. */
3259 if (untied_task)
3260 continue;
3261 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3262 }
3263 else if (DECL_NAME (decl) == num_thr_id)
3264 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3265 else
3266 continue;
3267
3268 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3269 || gimple_call_num_args (call) != 0)
3270 continue;
3271
3272 if (flag_exceptions && !TREE_NOTHROW (decl))
3273 continue;
3274
3275 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3276 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3277 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3278 continue;
3279
3280 gimple_call_set_fndecl (call, built_in);
3281 }
3282 }
3283 }
3284
3285 /* Expand the OpenMP parallel or task directive starting at REGION. */
3286
3287 static void
3288 expand_omp_taskreg (struct omp_region *region)
3289 {
3290 basic_block entry_bb, exit_bb, new_bb;
3291 struct function *child_cfun;
3292 tree child_fn, block, t, ws_args, *tp;
3293 tree save_current;
3294 gimple_stmt_iterator gsi;
3295 gimple entry_stmt, stmt;
3296 edge e;
3297
3298 entry_stmt = last_stmt (region->entry);
3299 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3300 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3301 /* If this function has already been instrumented, make sure
3302 the child function isn't instrumented again. */
3303 child_cfun->after_tree_profile = cfun->after_tree_profile;
3304
3305 entry_bb = region->entry;
3306 exit_bb = region->exit;
3307
3308 if (is_combined_parallel (region))
3309 ws_args = region->ws_args;
3310 else
3311 ws_args = NULL_TREE;
3312
3313 if (child_cfun->cfg)
3314 {
3315 /* Due to inlining, it may happen that we have already outlined
3316 the region, in which case all we need to do is make the
3317 sub-graph unreachable and emit the parallel call. */
3318 edge entry_succ_e, exit_succ_e;
3319 gimple_stmt_iterator gsi;
3320
3321 entry_succ_e = single_succ_edge (entry_bb);
3322
3323 gsi = gsi_last_bb (entry_bb);
3324 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3325 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3326 gsi_remove (&gsi, true);
3327
3328 new_bb = entry_bb;
3329 if (exit_bb)
3330 {
3331 exit_succ_e = single_succ_edge (exit_bb);
3332 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3333 }
3334 remove_edge_and_dominated_blocks (entry_succ_e);
3335 }
3336 else
3337 {
3338 /* If the parallel region needs data sent from the parent
3339 function, then the very first statement (except possible
3340 tree profile counter updates) of the parallel body
3341 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3342 &.OMP_DATA_O is passed as an argument to the child function,
3343 we need to replace it with the argument as seen by the child
3344 function.
3345
3346 In most cases, this will end up being the identity assignment
3347 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3348 a function call that has been inlined, the original PARM_DECL
3349 .OMP_DATA_I may have been converted into a different local
3350 variable, in which case we need to keep the assignment. */
3351 if (gimple_omp_taskreg_data_arg (entry_stmt))
3352 {
3353 basic_block entry_succ_bb = single_succ (entry_bb);
3354 gimple_stmt_iterator gsi;
3355 tree arg, narg;
3356 gimple parcopy_stmt = NULL;
3357
3358 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3359 {
3360 gimple stmt;
3361
3362 gcc_assert (!gsi_end_p (gsi));
3363 stmt = gsi_stmt (gsi);
3364 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3365 continue;
3366
3367 if (gimple_num_ops (stmt) == 2)
3368 {
3369 tree arg = gimple_assign_rhs1 (stmt);
3370
3371 /* We ignore the subcode because we're
3372 effectively doing a STRIP_NOPS. */
3373
3374 if (TREE_CODE (arg) == ADDR_EXPR
3375 && TREE_OPERAND (arg, 0)
3376 == gimple_omp_taskreg_data_arg (entry_stmt))
3377 {
3378 parcopy_stmt = stmt;
3379 break;
3380 }
3381 }
3382 }
3383
3384 gcc_assert (parcopy_stmt != NULL);
3385 arg = DECL_ARGUMENTS (child_fn);
3386
3387 if (!gimple_in_ssa_p (cfun))
3388 {
3389 if (gimple_assign_lhs (parcopy_stmt) == arg)
3390 gsi_remove (&gsi, true);
3391 else
3392 {
3393 /* ?? Is setting the subcode really necessary ?? */
3394 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3395 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3396 }
3397 }
3398 else
3399 {
3400 /* If we are in ssa form, we must load the value from the default
3401 definition of the argument. That definition should not exist yet,
3402 since the argument is never used uninitialized. */
3403 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3404 narg = make_ssa_name (arg, gimple_build_nop ());
3405 set_default_def (arg, narg);
3406 /* ?? Is setting the subcode really necessary ?? */
3407 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3408 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3409 update_stmt (parcopy_stmt);
3410 }
3411 }
3412
3413 /* Declare local variables needed in CHILD_CFUN. */
3414 block = DECL_INITIAL (child_fn);
3415 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3416 /* The gimplifier could record temporaries in parallel/task block
3417 rather than in the containing function's local_decls chain,
3418 which would mean cgraph missed finalizing them. Do it now. */
3419 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3420 if (TREE_CODE (t) == VAR_DECL
3421 && TREE_STATIC (t)
3422 && !DECL_EXTERNAL (t))
3423 varpool_finalize_decl (t);
3424 DECL_SAVED_TREE (child_fn) = NULL;
3425 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3426 TREE_USED (block) = 1;
3427
3428 /* Reset DECL_CONTEXT on function arguments. */
3429 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3430 DECL_CONTEXT (t) = child_fn;
3431
3432 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3433 so that it can be moved to the child function. */
3434 gsi = gsi_last_bb (entry_bb);
3435 stmt = gsi_stmt (gsi);
3436 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3437 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3438 gsi_remove (&gsi, true);
3439 e = split_block (entry_bb, stmt);
3440 entry_bb = e->dest;
3441 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3442
3443 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3444 if (exit_bb)
3445 {
3446 gsi = gsi_last_bb (exit_bb);
3447 gcc_assert (!gsi_end_p (gsi)
3448 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3449 stmt = gimple_build_return (NULL);
3450 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3451 gsi_remove (&gsi, true);
3452 }
3453
3454 /* Move the parallel region into CHILD_CFUN. */
3455
3456 if (gimple_in_ssa_p (cfun))
3457 {
3458 push_cfun (child_cfun);
3459 init_tree_ssa (child_cfun);
3460 init_ssa_operands ();
3461 cfun->gimple_df->in_ssa_p = true;
3462 pop_cfun ();
3463 block = NULL_TREE;
3464 }
3465 else
3466 block = gimple_block (entry_stmt);
3467
3468 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3469 if (exit_bb)
3470 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3471
3472 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3473 for (tp = &child_cfun->local_decls; *tp; )
3474 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3475 tp = &TREE_CHAIN (*tp);
3476 else
3477 *tp = TREE_CHAIN (*tp);
3478
3479 /* Inform the callgraph about the new function. */
3480 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3481 = cfun->curr_properties;
3482 cgraph_add_new_function (child_fn, true);
3483
3484 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3485 fixed in a following pass. */
3486 push_cfun (child_cfun);
3487 save_current = current_function_decl;
3488 current_function_decl = child_fn;
3489 if (optimize)
3490 optimize_omp_library_calls (entry_stmt);
3491 rebuild_cgraph_edges ();
3492
3493 /* Some EH regions might become dead, see PR34608. If
3494 pass_cleanup_cfg isn't the first pass to happen with the
3495 new child, these dead EH edges might cause problems.
3496 Clean them up now. */
3497 if (flag_exceptions)
3498 {
3499 basic_block bb;
3500 bool changed = false;
3501
3502 FOR_EACH_BB (bb)
3503 changed |= gimple_purge_dead_eh_edges (bb);
3504 if (changed)
3505 cleanup_tree_cfg ();
3506 }
3507 if (gimple_in_ssa_p (cfun))
3508 update_ssa (TODO_update_ssa);
3509 current_function_decl = save_current;
3510 pop_cfun ();
3511 }
3512
3513 /* Emit a library call to launch the child threads. */
3514 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3515 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3516 else
3517 expand_task_call (new_bb, entry_stmt);
3518 update_ssa (TODO_update_ssa_only_virtuals);
3519 }
3520
3521
3522 /* A subroutine of expand_omp_for. Generate code for a parallel
3523 loop with any schedule. Given parameters:
3524
3525 for (V = N1; V cond N2; V += STEP) BODY;
3526
3527 where COND is "<" or ">", we generate pseudocode
3528
3529 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3530 if (more) goto L0; else goto L3;
3531 L0:
3532 V = istart0;
3533 iend = iend0;
3534 L1:
3535 BODY;
3536 V += STEP;
3537 if (V cond iend) goto L1; else goto L2;
3538 L2:
3539 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3540 L3:
3541
3542 If this is a combined omp parallel loop, instead of the call to
3543 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3544
3545 For collapsed loops, given parameters:
3546 collapse(3)
3547 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3548 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3549 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3550 BODY;
3551
3552 we generate pseudocode
3553
3554 if (cond3 is <)
3555 adj = STEP3 - 1;
3556 else
3557 adj = STEP3 + 1;
3558 count3 = (adj + N32 - N31) / STEP3;
3559 if (cond2 is <)
3560 adj = STEP2 - 1;
3561 else
3562 adj = STEP2 + 1;
3563 count2 = (adj + N22 - N21) / STEP2;
3564 if (cond1 is <)
3565 adj = STEP1 - 1;
3566 else
3567 adj = STEP1 + 1;
3568 count1 = (adj + N12 - N11) / STEP1;
3569 count = count1 * count2 * count3;
3570 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3571 if (more) goto L0; else goto L3;
3572 L0:
3573 V = istart0;
3574 T = V;
3575 V3 = N31 + (T % count3) * STEP3;
3576 T = T / count3;
3577 V2 = N21 + (T % count2) * STEP2;
3578 T = T / count2;
3579 V1 = N11 + T * STEP1;
3580 iend = iend0;
3581 L1:
3582 BODY;
3583 V += 1;
3584 if (V < iend) goto L10; else goto L2;
3585 L10:
3586 V3 += STEP3;
3587 if (V3 cond3 N32) goto L1; else goto L11;
3588 L11:
3589 V3 = N31;
3590 V2 += STEP2;
3591 if (V2 cond2 N22) goto L1; else goto L12;
3592 L12:
3593 V2 = N21;
3594 V1 += STEP1;
3595 goto L1;
3596 L2:
3597 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3598 L3:
3599
3600 */
3601
3602 static void
3603 expand_omp_for_generic (struct omp_region *region,
3604 struct omp_for_data *fd,
3605 enum built_in_function start_fn,
3606 enum built_in_function next_fn)
3607 {
3608 tree type, istart0, iend0, iend;
3609 tree t, vmain, vback, bias = NULL_TREE;
3610 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3611 basic_block l2_bb = NULL, l3_bb = NULL;
3612 gimple_stmt_iterator gsi;
3613 gimple stmt;
3614 bool in_combined_parallel = is_combined_parallel (region);
3615 bool broken_loop = region->cont == NULL;
3616 edge e, ne;
3617 tree *counts = NULL;
3618 int i;
3619
3620 gcc_assert (!broken_loop || !in_combined_parallel);
3621 gcc_assert (fd->iter_type == long_integer_type_node
3622 || !in_combined_parallel);
3623
3624 type = TREE_TYPE (fd->loop.v);
3625 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3626 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3627 TREE_ADDRESSABLE (istart0) = 1;
3628 TREE_ADDRESSABLE (iend0) = 1;
3629 if (gimple_in_ssa_p (cfun))
3630 {
3631 add_referenced_var (istart0);
3632 add_referenced_var (iend0);
3633 }
3634
3635 /* See if we need to bias by LLONG_MIN. */
3636 if (fd->iter_type == long_long_unsigned_type_node
3637 && TREE_CODE (type) == INTEGER_TYPE
3638 && !TYPE_UNSIGNED (type))
3639 {
3640 tree n1, n2;
3641
3642 if (fd->loop.cond_code == LT_EXPR)
3643 {
3644 n1 = fd->loop.n1;
3645 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3646 }
3647 else
3648 {
3649 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3650 n2 = fd->loop.n1;
3651 }
3652 if (TREE_CODE (n1) != INTEGER_CST
3653 || TREE_CODE (n2) != INTEGER_CST
3654 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3655 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3656 }
3657
3658 entry_bb = region->entry;
3659 cont_bb = region->cont;
3660 collapse_bb = NULL;
3661 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3662 gcc_assert (broken_loop
3663 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3664 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3665 l1_bb = single_succ (l0_bb);
3666 if (!broken_loop)
3667 {
3668 l2_bb = create_empty_bb (cont_bb);
3669 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3670 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3671 }
3672 else
3673 l2_bb = NULL;
3674 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3675 exit_bb = region->exit;
3676
3677 gsi = gsi_last_bb (entry_bb);
3678
3679 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3680 if (fd->collapse > 1)
3681 {
3682 /* Collapsed loops need work for expansion in SSA form. */
3683 gcc_assert (!gimple_in_ssa_p (cfun));
3684 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3685 for (i = 0; i < fd->collapse; i++)
3686 {
3687 tree itype = TREE_TYPE (fd->loops[i].v);
3688
3689 if (POINTER_TYPE_P (itype))
3690 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3691 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3692 ? -1 : 1));
3693 t = fold_build2 (PLUS_EXPR, itype,
3694 fold_convert (itype, fd->loops[i].step), t);
3695 t = fold_build2 (PLUS_EXPR, itype, t,
3696 fold_convert (itype, fd->loops[i].n2));
3697 t = fold_build2 (MINUS_EXPR, itype, t,
3698 fold_convert (itype, fd->loops[i].n1));
3699 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3700 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3701 fold_build1 (NEGATE_EXPR, itype, t),
3702 fold_build1 (NEGATE_EXPR, itype,
3703 fold_convert (itype,
3704 fd->loops[i].step)));
3705 else
3706 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3707 fold_convert (itype, fd->loops[i].step));
3708 t = fold_convert (type, t);
3709 if (TREE_CODE (t) == INTEGER_CST)
3710 counts[i] = t;
3711 else
3712 {
3713 counts[i] = create_tmp_var (type, ".count");
3714 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3715 true, GSI_SAME_STMT);
3716 stmt = gimple_build_assign (counts[i], t);
3717 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3718 }
3719 if (SSA_VAR_P (fd->loop.n2))
3720 {
3721 if (i == 0)
3722 t = counts[0];
3723 else
3724 {
3725 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3726 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3727 true, GSI_SAME_STMT);
3728 }
3729 stmt = gimple_build_assign (fd->loop.n2, t);
3730 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3731 }
3732 }
3733 }
3734 if (in_combined_parallel)
3735 {
3736 /* In a combined parallel loop, emit a call to
3737 GOMP_loop_foo_next. */
3738 t = build_call_expr (built_in_decls[next_fn], 2,
3739 build_fold_addr_expr (istart0),
3740 build_fold_addr_expr (iend0));
3741 }
3742 else
3743 {
3744 tree t0, t1, t2, t3, t4;
3745 /* If this is not a combined parallel loop, emit a call to
3746 GOMP_loop_foo_start in ENTRY_BB. */
3747 t4 = build_fold_addr_expr (iend0);
3748 t3 = build_fold_addr_expr (istart0);
3749 t2 = fold_convert (fd->iter_type, fd->loop.step);
3750 if (POINTER_TYPE_P (type)
3751 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3752 {
3753 /* Avoid casting pointers to integer of a different size. */
3754 tree itype
3755 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3756 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3757 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3758 }
3759 else
3760 {
3761 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3762 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3763 }
3764 if (bias)
3765 {
3766 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3767 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3768 }
3769 if (fd->iter_type == long_integer_type_node)
3770 {
3771 if (fd->chunk_size)
3772 {
3773 t = fold_convert (fd->iter_type, fd->chunk_size);
3774 t = build_call_expr (built_in_decls[start_fn], 6,
3775 t0, t1, t2, t, t3, t4);
3776 }
3777 else
3778 t = build_call_expr (built_in_decls[start_fn], 5,
3779 t0, t1, t2, t3, t4);
3780 }
3781 else
3782 {
3783 tree t5;
3784 tree c_bool_type;
3785
3786 	  /* The GOMP_loop_ull_*start functions have an additional boolean
3787 argument, true for < loops and false for > loops.
3788 In Fortran, the C bool type can be different from
3789 boolean_type_node. */
3790 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3791 t5 = build_int_cst (c_bool_type,
3792 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3793 if (fd->chunk_size)
3794 {
3795 t = fold_convert (fd->iter_type, fd->chunk_size);
3796 t = build_call_expr (built_in_decls[start_fn], 7,
3797 t5, t0, t1, t2, t, t3, t4);
3798 }
3799 else
3800 t = build_call_expr (built_in_decls[start_fn], 6,
3801 t5, t0, t1, t2, t3, t4);
3802 }
3803 }
3804 if (TREE_TYPE (t) != boolean_type_node)
3805 t = fold_build2 (NE_EXPR, boolean_type_node,
3806 t, build_int_cst (TREE_TYPE (t), 0));
3807 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3808 true, GSI_SAME_STMT);
3809 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3810
3811 /* Remove the GIMPLE_OMP_FOR statement. */
3812 gsi_remove (&gsi, true);
3813
3814 /* Iteration setup for sequential loop goes in L0_BB. */
3815 gsi = gsi_start_bb (l0_bb);
3816 t = istart0;
3817 if (bias)
3818 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3819 if (POINTER_TYPE_P (type))
3820 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3821 0), t);
3822 t = fold_convert (type, t);
3823 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3824 false, GSI_CONTINUE_LINKING);
3825 stmt = gimple_build_assign (fd->loop.v, t);
3826 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3827
3828 t = iend0;
3829 if (bias)
3830 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3831 if (POINTER_TYPE_P (type))
3832 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3833 0), t);
3834 t = fold_convert (type, t);
3835 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3836 false, GSI_CONTINUE_LINKING);
3837 if (fd->collapse > 1)
3838 {
3839 tree tem = create_tmp_var (type, ".tem");
3840
3841 stmt = gimple_build_assign (tem, fd->loop.v);
3842 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3843 for (i = fd->collapse - 1; i >= 0; i--)
3844 {
3845 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3846 itype = vtype;
3847 if (POINTER_TYPE_P (vtype))
3848 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3849 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3850 t = fold_convert (itype, t);
3851 t = fold_build2 (MULT_EXPR, itype, t,
3852 fold_convert (itype, fd->loops[i].step));
3853 if (POINTER_TYPE_P (vtype))
3854 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3855 fd->loops[i].n1, fold_convert (sizetype, t));
3856 else
3857 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3858 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3859 false, GSI_CONTINUE_LINKING);
3860 stmt = gimple_build_assign (fd->loops[i].v, t);
3861 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3862 if (i != 0)
3863 {
3864 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3865 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3866 false, GSI_CONTINUE_LINKING);
3867 stmt = gimple_build_assign (tem, t);
3868 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3869 }
3870 }
3871 }
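      /* As an illustration only (hypothetical loop bounds):  for a
	 collapse(2) nest

	     for (i = 0; i < 3; i++)	 -- counts[0] == 3
	       for (j = 0; j < 5; j++)	 -- counts[1] == 5

	 the combined value copied into .tem runs over [0, 15) and is
	 unwound innermost first, i.e. j == .tem % 5 and i == (.tem / 5) % 3,
	 each result then scaled by the loop step and offset by N1 as done
	 above.  */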
3872
3873 if (!broken_loop)
3874 {
3875 /* Code to control the increment and predicate for the sequential
3876 loop goes in the CONT_BB. */
3877 gsi = gsi_last_bb (cont_bb);
3878 stmt = gsi_stmt (gsi);
3879 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3880 vmain = gimple_omp_continue_control_use (stmt);
3881 vback = gimple_omp_continue_control_def (stmt);
3882
3883 if (POINTER_TYPE_P (type))
3884 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3885 fold_convert (sizetype, fd->loop.step));
3886 else
3887 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3888 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3889 true, GSI_SAME_STMT);
3890 stmt = gimple_build_assign (vback, t);
3891 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3892
3893 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3894 stmt = gimple_build_cond_empty (t);
3895 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3896
3897 /* Remove GIMPLE_OMP_CONTINUE. */
3898 gsi_remove (&gsi, true);
3899
3900 if (fd->collapse > 1)
3901 {
3902 basic_block last_bb, bb;
3903
3904 last_bb = cont_bb;
3905 for (i = fd->collapse - 1; i >= 0; i--)
3906 {
3907 tree vtype = TREE_TYPE (fd->loops[i].v);
3908
3909 bb = create_empty_bb (last_bb);
3910 gsi = gsi_start_bb (bb);
3911
3912 if (i < fd->collapse - 1)
3913 {
3914 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3915 e->probability = REG_BR_PROB_BASE / 8;
3916
3917 t = fd->loops[i + 1].n1;
3918 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3919 false, GSI_CONTINUE_LINKING);
3920 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3921 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3922 }
3923 else
3924 collapse_bb = bb;
3925
3926 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3927
3928 if (POINTER_TYPE_P (vtype))
3929 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3930 fd->loops[i].v,
3931 fold_convert (sizetype, fd->loops[i].step));
3932 else
3933 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3934 fd->loops[i].step);
3935 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3936 false, GSI_CONTINUE_LINKING);
3937 stmt = gimple_build_assign (fd->loops[i].v, t);
3938 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3939
3940 if (i > 0)
3941 {
3942 t = fd->loops[i].n2;
3943 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3944 false, GSI_CONTINUE_LINKING);
3945 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3946 fd->loops[i].v, t);
3947 stmt = gimple_build_cond_empty (t);
3948 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3949 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3950 e->probability = REG_BR_PROB_BASE * 7 / 8;
3951 }
3952 else
3953 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3954 last_bb = bb;
3955 }
3956 }
3957
3958 /* Emit code to get the next parallel iteration in L2_BB. */
3959 gsi = gsi_start_bb (l2_bb);
3960
3961 t = build_call_expr (built_in_decls[next_fn], 2,
3962 build_fold_addr_expr (istart0),
3963 build_fold_addr_expr (iend0));
3964 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3965 false, GSI_CONTINUE_LINKING);
3966 if (TREE_TYPE (t) != boolean_type_node)
3967 t = fold_build2 (NE_EXPR, boolean_type_node,
3968 t, build_int_cst (TREE_TYPE (t), 0));
3969 stmt = gimple_build_cond_empty (t);
3970 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3971 }
3972
3973 /* Add the loop cleanup function. */
3974 gsi = gsi_last_bb (exit_bb);
3975 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3976 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3977 else
3978 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
3979 stmt = gimple_build_call (t, 0);
3980 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3981 gsi_remove (&gsi, true);
3982
3983 /* Connect the new blocks. */
3984 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3985 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3986
3987 if (!broken_loop)
3988 {
3989 gimple_seq phis;
3990
3991 e = find_edge (cont_bb, l3_bb);
3992 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3993
3994 phis = phi_nodes (l3_bb);
3995 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
3996 {
3997 gimple phi = gsi_stmt (gsi);
3998 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3999 PHI_ARG_DEF_FROM_EDGE (phi, e));
4000 }
4001 remove_edge (e);
4002
4003 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4004 if (fd->collapse > 1)
4005 {
4006 e = find_edge (cont_bb, l1_bb);
4007 remove_edge (e);
4008 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4009 }
4010 else
4011 {
4012 e = find_edge (cont_bb, l1_bb);
4013 e->flags = EDGE_TRUE_VALUE;
4014 }
4015 e->probability = REG_BR_PROB_BASE * 7 / 8;
4016 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4017 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4018
4019 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4020 recompute_dominator (CDI_DOMINATORS, l2_bb));
4021 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4022 recompute_dominator (CDI_DOMINATORS, l3_bb));
4023 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4024 recompute_dominator (CDI_DOMINATORS, l0_bb));
4025 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4026 recompute_dominator (CDI_DOMINATORS, l1_bb));
4027 }
4028 }
4029
4030
4031 /* A subroutine of expand_omp_for. Generate code for a parallel
4032 loop with static schedule and no specified chunk size. Given
4033 parameters:
4034
4035 for (V = N1; V cond N2; V += STEP) BODY;
4036
4037 where COND is "<" or ">", we generate pseudocode
4038
4039 if (cond is <)
4040 adj = STEP - 1;
4041 else
4042 adj = STEP + 1;
4043 if ((__typeof (V)) -1 > 0 && cond is >)
4044 n = -(adj + N2 - N1) / -STEP;
4045 else
4046 n = (adj + N2 - N1) / STEP;
4047 q = n / nthreads;
4048 q += (q * nthreads != n);
4049 s0 = q * threadid;
4050 e0 = min(s0 + q, n);
4051 V = s0 * STEP + N1;
4052 if (s0 >= e0) goto L2; else goto L0;
4053 L0:
4054 e = e0 * STEP + N1;
4055 L1:
4056 BODY;
4057 V += STEP;
4058 if (V cond e) goto L1;
4059 L2:
4060 */
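/* Purely as an illustration (hypothetical numbers):  with

     for (V = 0; V < 100; V += 3)

   and nthreads == 4, the pseudocode above gives n == 34, q == 34 / 4 == 8,
   bumped to 9 because 8 * 4 != 34.  Thread 2 then gets s0 == 18,
   e0 == min (27, 34) == 27, starts at V == 54 and iterates while V < 81;
   thread 3 is left with the remaining 7 iterations.  */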
4061
4062 static void
4063 expand_omp_for_static_nochunk (struct omp_region *region,
4064 struct omp_for_data *fd)
4065 {
4066 tree n, q, s0, e0, e, t, nthreads, threadid;
4067 tree type, itype, vmain, vback;
4068 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4069 basic_block fin_bb;
4070 gimple_stmt_iterator gsi;
4071 gimple stmt;
4072
4073 itype = type = TREE_TYPE (fd->loop.v);
4074 if (POINTER_TYPE_P (type))
4075 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4076
4077 entry_bb = region->entry;
4078 cont_bb = region->cont;
4079 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4080 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4081 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4082 body_bb = single_succ (seq_start_bb);
4083 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4084 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4085 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4086 exit_bb = region->exit;
4087
4088 /* Iteration space partitioning goes in ENTRY_BB. */
4089 gsi = gsi_last_bb (entry_bb);
4090 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4091
4092 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4093 t = fold_convert (itype, t);
4094 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4095 true, GSI_SAME_STMT);
4096
4097 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4098 t = fold_convert (itype, t);
4099 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4100 true, GSI_SAME_STMT);
4101
4102 fd->loop.n1
4103 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4104 true, NULL_TREE, true, GSI_SAME_STMT);
4105 fd->loop.n2
4106 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4107 true, NULL_TREE, true, GSI_SAME_STMT);
4108 fd->loop.step
4109 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4110 true, NULL_TREE, true, GSI_SAME_STMT);
4111
4112 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4113 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4114 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4115 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4116 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4117 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4118 fold_build1 (NEGATE_EXPR, itype, t),
4119 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4120 else
4121 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4122 t = fold_convert (itype, t);
4123 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4124
4125 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4126 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4127
4128 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4129 t = fold_build2 (NE_EXPR, itype, t, n);
4130 t = fold_build2 (PLUS_EXPR, itype, q, t);
4131 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4132
4133 t = build2 (MULT_EXPR, itype, q, threadid);
4134 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4135
4136 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4137 t = fold_build2 (MIN_EXPR, itype, t, n);
4138 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4139
4140 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4141 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4142
4143 /* Remove the GIMPLE_OMP_FOR statement. */
4144 gsi_remove (&gsi, true);
4145
4146 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4147 gsi = gsi_start_bb (seq_start_bb);
4148
4149 t = fold_convert (itype, s0);
4150 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4151 if (POINTER_TYPE_P (type))
4152 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4153 fold_convert (sizetype, t));
4154 else
4155 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4156 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4157 false, GSI_CONTINUE_LINKING);
4158 stmt = gimple_build_assign (fd->loop.v, t);
4159 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4160
4161 t = fold_convert (itype, e0);
4162 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4163 if (POINTER_TYPE_P (type))
4164 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4165 fold_convert (sizetype, t));
4166 else
4167 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4168 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4169 false, GSI_CONTINUE_LINKING);
4170
4171 /* The code controlling the sequential loop replaces the
4172 GIMPLE_OMP_CONTINUE. */
4173 gsi = gsi_last_bb (cont_bb);
4174 stmt = gsi_stmt (gsi);
4175 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4176 vmain = gimple_omp_continue_control_use (stmt);
4177 vback = gimple_omp_continue_control_def (stmt);
4178
4179 if (POINTER_TYPE_P (type))
4180 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4181 fold_convert (sizetype, fd->loop.step));
4182 else
4183 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4184 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4185 true, GSI_SAME_STMT);
4186 stmt = gimple_build_assign (vback, t);
4187 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4188
4189 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4190 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4191
4192 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4193 gsi_remove (&gsi, true);
4194
4195 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4196 gsi = gsi_last_bb (exit_bb);
4197 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4198 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4199 false, GSI_SAME_STMT);
4200 gsi_remove (&gsi, true);
4201
4202 /* Connect all the blocks. */
4203 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4204 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4205
4206 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4207 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4208
4209 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4210 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4211 recompute_dominator (CDI_DOMINATORS, body_bb));
4212 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4213 recompute_dominator (CDI_DOMINATORS, fin_bb));
4214 }
4215
4216
4217 /* A subroutine of expand_omp_for. Generate code for a parallel
4218 loop with static schedule and a specified chunk size. Given
4219 parameters:
4220
4221 for (V = N1; V cond N2; V += STEP) BODY;
4222
4223 where COND is "<" or ">", we generate pseudocode
4224
4225 if (cond is <)
4226 adj = STEP - 1;
4227 else
4228 adj = STEP + 1;
4229 if ((__typeof (V)) -1 > 0 && cond is >)
4230 n = -(adj + N2 - N1) / -STEP;
4231 else
4232 n = (adj + N2 - N1) / STEP;
4233 trip = 0;
4234 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4235 here so that V is defined
4236 if the loop is not entered
4237 L0:
4238 s0 = (trip * nthreads + threadid) * CHUNK;
4239 e0 = min(s0 + CHUNK, n);
4240 if (s0 < n) goto L1; else goto L4;
4241 L1:
4242 V = s0 * STEP + N1;
4243 e = e0 * STEP + N1;
4244 L2:
4245 BODY;
4246 V += STEP;
4247 if (V cond e) goto L2; else goto L3;
4248 L3:
4249 trip += 1;
4250 goto L0;
4251 L4:
4252 */
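/* Purely as an illustration (hypothetical numbers):  with

     for (V = 0; V < 100; V += 3)

   nthreads == 4 and CHUNK == 4, thread 1 handles s0 == (trip * 4 + 1) * 4,
   i.e. iterations [4, 8) on trip 0, [20, 24) on trip 1, and stops once
   s0 >= n == 34.  */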
4253
4254 static void
4255 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4256 {
4257 tree n, s0, e0, e, t;
4258 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4259 tree type, itype, v_main, v_back, v_extra;
4260 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4261 basic_block trip_update_bb, cont_bb, fin_bb;
4262 gimple_stmt_iterator si;
4263 gimple stmt;
4264 edge se;
4265
4266 itype = type = TREE_TYPE (fd->loop.v);
4267 if (POINTER_TYPE_P (type))
4268 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4269
4270 entry_bb = region->entry;
4271 se = split_block (entry_bb, last_stmt (entry_bb));
4272 entry_bb = se->src;
4273 iter_part_bb = se->dest;
4274 cont_bb = region->cont;
4275 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4276 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4277 == FALLTHRU_EDGE (cont_bb)->dest);
4278 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4279 body_bb = single_succ (seq_start_bb);
4280 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4281 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4282 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4283 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4284 exit_bb = region->exit;
4285
4286 /* Trip and adjustment setup goes in ENTRY_BB. */
4287 si = gsi_last_bb (entry_bb);
4288 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4289
4290 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4291 t = fold_convert (itype, t);
4292 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4293 true, GSI_SAME_STMT);
4294
4295 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4296 t = fold_convert (itype, t);
4297 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4298 true, GSI_SAME_STMT);
4299
4300 fd->loop.n1
4301 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4302 true, NULL_TREE, true, GSI_SAME_STMT);
4303 fd->loop.n2
4304 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4305 true, NULL_TREE, true, GSI_SAME_STMT);
4306 fd->loop.step
4307 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4308 true, NULL_TREE, true, GSI_SAME_STMT);
4309 fd->chunk_size
4310 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4311 true, NULL_TREE, true, GSI_SAME_STMT);
4312
4313 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4314 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4315 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4316 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4317 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4318 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4319 fold_build1 (NEGATE_EXPR, itype, t),
4320 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4321 else
4322 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4323 t = fold_convert (itype, t);
4324 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4325 true, GSI_SAME_STMT);
4326
4327 trip_var = create_tmp_var (itype, ".trip");
4328 if (gimple_in_ssa_p (cfun))
4329 {
4330 add_referenced_var (trip_var);
4331 trip_init = make_ssa_name (trip_var, NULL);
4332 trip_main = make_ssa_name (trip_var, NULL);
4333 trip_back = make_ssa_name (trip_var, NULL);
4334 }
4335 else
4336 {
4337 trip_init = trip_var;
4338 trip_main = trip_var;
4339 trip_back = trip_var;
4340 }
4341
4342 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4343 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4344
4345 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4346 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4347 if (POINTER_TYPE_P (type))
4348 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4349 fold_convert (sizetype, t));
4350 else
4351 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4352 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4353 true, GSI_SAME_STMT);
4354
4355 /* Remove the GIMPLE_OMP_FOR. */
4356 gsi_remove (&si, true);
4357
4358 /* Iteration space partitioning goes in ITER_PART_BB. */
4359 si = gsi_last_bb (iter_part_bb);
4360
4361 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4362 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4363 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4364 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4365 false, GSI_CONTINUE_LINKING);
4366
4367 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4368 t = fold_build2 (MIN_EXPR, itype, t, n);
4369 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4370 false, GSI_CONTINUE_LINKING);
4371
4372 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4373 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4374
4375 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4376 si = gsi_start_bb (seq_start_bb);
4377
4378 t = fold_convert (itype, s0);
4379 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4380 if (POINTER_TYPE_P (type))
4381 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4382 fold_convert (sizetype, t));
4383 else
4384 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4385 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4386 false, GSI_CONTINUE_LINKING);
4387 stmt = gimple_build_assign (fd->loop.v, t);
4388 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4389
4390 t = fold_convert (itype, e0);
4391 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4392 if (POINTER_TYPE_P (type))
4393 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4394 fold_convert (sizetype, t));
4395 else
4396 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4397 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4398 false, GSI_CONTINUE_LINKING);
4399
4400 /* The code controlling the sequential loop goes in CONT_BB,
4401 replacing the GIMPLE_OMP_CONTINUE. */
4402 si = gsi_last_bb (cont_bb);
4403 stmt = gsi_stmt (si);
4404 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4405 v_main = gimple_omp_continue_control_use (stmt);
4406 v_back = gimple_omp_continue_control_def (stmt);
4407
4408 if (POINTER_TYPE_P (type))
4409 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4410 fold_convert (sizetype, fd->loop.step));
4411 else
4412 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4413 stmt = gimple_build_assign (v_back, t);
4414 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4415
4416 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4417 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4418
4419 /* Remove GIMPLE_OMP_CONTINUE. */
4420 gsi_remove (&si, true);
4421
4422 /* Trip update code goes into TRIP_UPDATE_BB. */
4423 si = gsi_start_bb (trip_update_bb);
4424
4425 t = build_int_cst (itype, 1);
4426 t = build2 (PLUS_EXPR, itype, trip_main, t);
4427 stmt = gimple_build_assign (trip_back, t);
4428 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4429
4430 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4431 si = gsi_last_bb (exit_bb);
4432 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4433 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4434 false, GSI_SAME_STMT);
4435 gsi_remove (&si, true);
4436
4437 /* Connect the new blocks. */
4438 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4439 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4440
4441 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4442 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4443
4444 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4445
4446 if (gimple_in_ssa_p (cfun))
4447 {
4448 gimple_stmt_iterator psi;
4449 gimple phi;
4450 edge re, ene;
4451 edge_var_map_vector head;
4452 edge_var_map *vm;
4453 size_t i;
4454
4455 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4456 remove arguments of the phi nodes in fin_bb. We need to create
4457 appropriate phi nodes in iter_part_bb instead. */
4458 se = single_pred_edge (fin_bb);
4459 re = single_succ_edge (trip_update_bb);
4460 head = redirect_edge_var_map_vector (re);
4461 ene = single_succ_edge (entry_bb);
4462
4463 psi = gsi_start_phis (fin_bb);
4464 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4465 gsi_next (&psi), ++i)
4466 {
4467 gimple nphi;
4468
4469 phi = gsi_stmt (psi);
4470 t = gimple_phi_result (phi);
4471 gcc_assert (t == redirect_edge_var_map_result (vm));
4472 nphi = create_phi_node (t, iter_part_bb);
4473 SSA_NAME_DEF_STMT (t) = nphi;
4474
4475 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4476 /* A special case -- fd->loop.v is not yet computed in
4477 	     iter_part_bb, so we need to use v_extra instead.  */
4478 if (t == fd->loop.v)
4479 t = v_extra;
4480 add_phi_arg (nphi, t, ene);
4481 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
4482 }
4483 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4484 redirect_edge_var_map_clear (re);
4485 while (1)
4486 {
4487 psi = gsi_start_phis (fin_bb);
4488 if (gsi_end_p (psi))
4489 break;
4490 remove_phi_node (&psi, false);
4491 }
4492
4493 /* Make phi node for trip. */
4494 phi = create_phi_node (trip_main, iter_part_bb);
4495 SSA_NAME_DEF_STMT (trip_main) = phi;
4496 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4497 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
4498 }
4499
4500 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4501 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4502 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4503 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4504 recompute_dominator (CDI_DOMINATORS, fin_bb));
4505 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4506 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4507 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4508 recompute_dominator (CDI_DOMINATORS, body_bb));
4509 }
4510
4511
4512 /* Expand the OpenMP loop defined by REGION. */
4513
4514 static void
4515 expand_omp_for (struct omp_region *region)
4516 {
4517 struct omp_for_data fd;
4518 struct omp_for_data_loop *loops;
4519
4520 loops
4521 = (struct omp_for_data_loop *)
4522 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4523 * sizeof (struct omp_for_data_loop));
4524 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4525 region->sched_kind = fd.sched_kind;
4526
4527 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4528 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4529 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4530 if (region->cont)
4531 {
4532 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4533 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4534 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4535 }
4536
4537 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4538 && !fd.have_ordered
4539 && fd.collapse == 1
4540 && region->cont != NULL)
4541 {
4542 if (fd.chunk_size == NULL)
4543 expand_omp_for_static_nochunk (region, &fd);
4544 else
4545 expand_omp_for_static_chunk (region, &fd);
4546 }
4547 else
4548 {
4549 int fn_index, start_ix, next_ix;
4550
4551 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4552 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4553 ? 3 : fd.sched_kind;
4554 fn_index += fd.have_ordered * 4;
4555 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4556 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
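      /* For example, schedule(runtime) yields fn_index == 3 and therefore
	 GOMP_loop_runtime_start / GOMP_loop_runtime_next, assuming the
	 GOMP_loop_* builtins are declared consecutively in the order
	 static, dynamic, guided, runtime, with the ordered variants
	 following; the unsigned long long entry points are selected by
	 the adjustment below.  */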
4557 if (fd.iter_type == long_long_unsigned_type_node)
4558 {
4559 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4560 - BUILT_IN_GOMP_LOOP_STATIC_START;
4561 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4562 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4563 }
4564 expand_omp_for_generic (region, &fd, start_ix, next_ix);
4565 }
4566
4567 update_ssa (TODO_update_ssa_only_virtuals);
4568 }
4569
4570
4571 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4572
4573 v = GOMP_sections_start (n);
4574 L0:
4575 switch (v)
4576 {
4577 case 0:
4578 goto L2;
4579 case 1:
4580 section 1;
4581 goto L1;
4582 case 2:
4583 ...
4584 case n:
4585 ...
4586 default:
4587 abort ();
4588 }
4589 L1:
4590 v = GOMP_sections_next ();
4591 goto L0;
4592 L2:
4593 reduction;
4594
4595 If this is a combined parallel sections region, replace the call to
4596 GOMP_sections_start with a call to GOMP_sections_next. */
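/* As a purely illustrative example, a directive such as

     #pragma omp sections
     {
       #pragma omp section
	 foo ();
       #pragma omp section
	 bar ();
     }

   (foo and bar are hypothetical) becomes a loop around GOMP_sections_next ()
   whose switch dispatches case 1 to foo (), case 2 to bar (), and case 0 to
   the exit label.  */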
4597
4598 static void
4599 expand_omp_sections (struct omp_region *region)
4600 {
4601 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4602 VEC (tree,heap) *label_vec;
4603 unsigned len;
4604 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4605 gimple_stmt_iterator si, switch_si;
4606 gimple sections_stmt, stmt, cont;
4607 edge_iterator ei;
4608 edge e;
4609 struct omp_region *inner;
4610 unsigned i, casei;
4611 bool exit_reachable = region->cont != NULL;
4612
4613 gcc_assert (exit_reachable == (region->exit != NULL));
4614 entry_bb = region->entry;
4615 l0_bb = single_succ (entry_bb);
4616 l1_bb = region->cont;
4617 l2_bb = region->exit;
4618 if (exit_reachable)
4619 {
4620 if (single_pred (l2_bb) == l0_bb)
4621 l2 = gimple_block_label (l2_bb);
4622 else
4623 {
4624 /* This can happen if there are reductions. */
4625 len = EDGE_COUNT (l0_bb->succs);
4626 gcc_assert (len > 0);
4627 e = EDGE_SUCC (l0_bb, len - 1);
4628 si = gsi_last_bb (e->dest);
4629 l2 = NULL_TREE;
4630 if (gsi_end_p (si)
4631 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4632 l2 = gimple_block_label (e->dest);
4633 else
4634 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4635 {
4636 si = gsi_last_bb (e->dest);
4637 if (gsi_end_p (si)
4638 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4639 {
4640 l2 = gimple_block_label (e->dest);
4641 break;
4642 }
4643 }
4644 }
4645 default_bb = create_empty_bb (l1_bb->prev_bb);
4646 l1 = gimple_block_label (l1_bb);
4647 }
4648 else
4649 {
4650 default_bb = create_empty_bb (l0_bb);
4651 l1 = NULL_TREE;
4652 l2 = gimple_block_label (default_bb);
4653 }
4654
4655 /* We will build a switch() with enough cases for all the
4656 GIMPLE_OMP_SECTION regions, a '0' case taken when there is no more
4657 work, and a default case to abort if something goes wrong. */
4658 len = EDGE_COUNT (l0_bb->succs);
4659
4660 /* Use VEC_quick_push on label_vec throughout, since we know the size
4661 in advance. */
4662 label_vec = VEC_alloc (tree, heap, len);
4663
4664 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4665 GIMPLE_OMP_SECTIONS statement. */
4666 si = gsi_last_bb (entry_bb);
4667 sections_stmt = gsi_stmt (si);
4668 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4669 vin = gimple_omp_sections_control (sections_stmt);
4670 if (!is_combined_parallel (region))
4671 {
4672 /* If we are not inside a combined parallel+sections region,
4673 call GOMP_sections_start. */
4674 t = build_int_cst (unsigned_type_node,
4675 exit_reachable ? len - 1 : len);
4676 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4677 stmt = gimple_build_call (u, 1, t);
4678 }
4679 else
4680 {
4681 /* Otherwise, call GOMP_sections_next. */
4682 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4683 stmt = gimple_build_call (u, 0);
4684 }
4685 gimple_call_set_lhs (stmt, vin);
4686 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4687 gsi_remove (&si, true);
4688
4689 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4690 L0_BB. */
4691 switch_si = gsi_last_bb (l0_bb);
4692 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4693 if (exit_reachable)
4694 {
4695 cont = last_stmt (l1_bb);
4696 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4697 vmain = gimple_omp_continue_control_use (cont);
4698 vnext = gimple_omp_continue_control_def (cont);
4699 }
4700 else
4701 {
4702 vmain = vin;
4703 vnext = NULL_TREE;
4704 }
4705
4706 i = 0;
4707 if (exit_reachable)
4708 {
4709 t = build3 (CASE_LABEL_EXPR, void_type_node,
4710 build_int_cst (unsigned_type_node, 0), NULL, l2);
4711 VEC_quick_push (tree, label_vec, t);
4712 i++;
4713 }
4714
4715 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4716 for (inner = region->inner, casei = 1;
4717 inner;
4718 inner = inner->next, i++, casei++)
4719 {
4720 basic_block s_entry_bb, s_exit_bb;
4721
4722 /* Skip optional reduction region. */
4723 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4724 {
4725 --i;
4726 --casei;
4727 continue;
4728 }
4729
4730 s_entry_bb = inner->entry;
4731 s_exit_bb = inner->exit;
4732
4733 t = gimple_block_label (s_entry_bb);
4734 u = build_int_cst (unsigned_type_node, casei);
4735 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4736 VEC_quick_push (tree, label_vec, u);
4737
4738 si = gsi_last_bb (s_entry_bb);
4739 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4740 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4741 gsi_remove (&si, true);
4742 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4743
4744 if (s_exit_bb == NULL)
4745 continue;
4746
4747 si = gsi_last_bb (s_exit_bb);
4748 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4749 gsi_remove (&si, true);
4750
4751 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4752 }
4753
4754 /* Error handling code goes in DEFAULT_BB. */
4755 t = gimple_block_label (default_bb);
4756 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4757 make_edge (l0_bb, default_bb, 0);
4758
4759 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4760 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4761 gsi_remove (&switch_si, true);
4762 VEC_free (tree, heap, label_vec);
4763
4764 si = gsi_start_bb (default_bb);
4765 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4766 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4767
4768 if (exit_reachable)
4769 {
4770 /* Code to get the next section goes in L1_BB. */
4771 si = gsi_last_bb (l1_bb);
4772 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4773
4774 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4775 gimple_call_set_lhs (stmt, vnext);
4776 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4777 gsi_remove (&si, true);
4778
4779 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4780
4781 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4782 si = gsi_last_bb (l2_bb);
4783 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4784 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4785 else
4786 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4787 stmt = gimple_build_call (t, 0);
4788 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4789 gsi_remove (&si, true);
4790 }
4791
4792 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4793 }
4794
4795
4796 /* Expand code for an OpenMP single directive. We've already expanded
4797 much of the code; here we simply place the GOMP_barrier call. */
4798
4799 static void
4800 expand_omp_single (struct omp_region *region)
4801 {
4802 basic_block entry_bb, exit_bb;
4803 gimple_stmt_iterator si;
4804 bool need_barrier = false;
4805
4806 entry_bb = region->entry;
4807 exit_bb = region->exit;
4808
4809 si = gsi_last_bb (entry_bb);
4810 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4811 be removed. We need to ensure that the thread that entered the single
4812 does not exit before the data is copied out by the other threads. */
4813 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4814 OMP_CLAUSE_COPYPRIVATE))
4815 need_barrier = true;
4816 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4817 gsi_remove (&si, true);
4818 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4819
4820 si = gsi_last_bb (exit_bb);
4821 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4822 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4823 false, GSI_SAME_STMT);
4824 gsi_remove (&si, true);
4825 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4826 }
4827
4828
4829 /* Generic expansion for OpenMP synchronization directives: master,
4830 ordered and critical. All we need to do here is remove the entry
4831 and exit markers for REGION. */
4832
4833 static void
4834 expand_omp_synch (struct omp_region *region)
4835 {
4836 basic_block entry_bb, exit_bb;
4837 gimple_stmt_iterator si;
4838
4839 entry_bb = region->entry;
4840 exit_bb = region->exit;
4841
4842 si = gsi_last_bb (entry_bb);
4843 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4844 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4845 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4846 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4847 gsi_remove (&si, true);
4848 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4849
4850 if (exit_bb)
4851 {
4852 si = gsi_last_bb (exit_bb);
4853 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4854 gsi_remove (&si, true);
4855 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4856 }
4857 }
4858
4859 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4860 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4861 size of the data type, and thus usable to find the index of the builtin
4862 decl. Returns false if the expression is not of the proper form. */
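/* For illustration only:  on a 4-byte int (INDEX == 2), a hypothetical

     #pragma omp atomic
       counter += 5;

   is expected to be rewritten into roughly the equivalent of
   __sync_fetch_and_add_4 (&counter, 5), with no compare-and-swap loop.  */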
4863
4864 static bool
4865 expand_omp_atomic_fetch_op (basic_block load_bb,
4866 tree addr, tree loaded_val,
4867 tree stored_val, int index)
4868 {
4869 enum built_in_function base;
4870 tree decl, itype, call;
4871 enum insn_code *optab;
4872 tree rhs;
4873 basic_block store_bb = single_succ (load_bb);
4874 gimple_stmt_iterator gsi;
4875 gimple stmt;
4876
4877 /* We expect to find the following sequences:
4878
4879 load_bb:
4880 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4881
4882 store_bb:
4883 val = tmp OP something; (or: something OP tmp)
4884 GIMPLE_OMP_ATOMIC_STORE (val)
4885
4886 ???FIXME: Allow a more flexible sequence.
4887 Perhaps use data flow to pick the statements.
4888
4889 */
4890
4891 gsi = gsi_after_labels (store_bb);
4892 stmt = gsi_stmt (gsi);
4893 if (!is_gimple_assign (stmt))
4894 return false;
4895 gsi_next (&gsi);
4896 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4897 return false;
4898
4899 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4900 return false;
4901
4902 /* Check for one of the supported fetch-op operations. */
4903 switch (gimple_assign_rhs_code (stmt))
4904 {
4905 case PLUS_EXPR:
4906 case POINTER_PLUS_EXPR:
4907 base = BUILT_IN_FETCH_AND_ADD_N;
4908 optab = sync_add_optab;
4909 break;
4910 case MINUS_EXPR:
4911 base = BUILT_IN_FETCH_AND_SUB_N;
4912 optab = sync_add_optab;
4913 break;
4914 case BIT_AND_EXPR:
4915 base = BUILT_IN_FETCH_AND_AND_N;
4916 optab = sync_and_optab;
4917 break;
4918 case BIT_IOR_EXPR:
4919 base = BUILT_IN_FETCH_AND_OR_N;
4920 optab = sync_ior_optab;
4921 break;
4922 case BIT_XOR_EXPR:
4923 base = BUILT_IN_FETCH_AND_XOR_N;
4924 optab = sync_xor_optab;
4925 break;
4926 default:
4927 return false;
4928 }
4929 /* Make sure the expression is of the proper form. */
4930 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4931 rhs = gimple_assign_rhs2 (stmt);
4932 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4933 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4934 rhs = gimple_assign_rhs1 (stmt);
4935 else
4936 return false;
4937
4938 decl = built_in_decls[base + index + 1];
4939 itype = TREE_TYPE (TREE_TYPE (decl));
4940
4941 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4942 return false;
4943
4944 gsi = gsi_last_bb (load_bb);
4945 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4946 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
4947 call = fold_convert (void_type_node, call);
4948 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4949 gsi_remove (&gsi, true);
4950
4951 gsi = gsi_last_bb (store_bb);
4952 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4953 gsi_remove (&gsi, true);
4954 gsi = gsi_last_bb (store_bb);
4955 gsi_remove (&gsi, true);
4956
4957 if (gimple_in_ssa_p (cfun))
4958 update_ssa (TODO_update_ssa_no_phi);
4959
4960 return true;
4961 }
4962
4963 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4964
4965 oldval = *addr;
4966 repeat:
4967 newval = rhs; // with oldval replacing *addr in rhs
4968 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
4969 if (oldval != newval)
4970 goto repeat;
4971
4972 INDEX is log2 of the size of the data type, and thus usable to find the
4973 index of the builtin decl. */
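/* For illustration only:  for a hypothetical

     #pragma omp atomic
       x *= 2.0f;	-- x is float, so no fetch-and-op builtin applies

   the float is view-converted to a 32-bit integer and the update is retried
   with __sync_val_compare_and_swap_4 until the value read back matches the
   value the iteration started from.  */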
4974
4975 static bool
4976 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4977 tree addr, tree loaded_val, tree stored_val,
4978 int index)
4979 {
4980 tree loadedi, storedi, initial, new_storedi, old_vali;
4981 tree type, itype, cmpxchg, iaddr;
4982 gimple_stmt_iterator si;
4983 basic_block loop_header = single_succ (load_bb);
4984 gimple phi, stmt;
4985 edge e;
4986
4987 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
4988 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4989 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
4990
4991 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
4992 return false;
4993
4994 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
4995 si = gsi_last_bb (load_bb);
4996 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
4997
4998 /* For floating-point values, we'll need to view-convert them to integers
4999 so that we can perform the atomic compare and swap. Simplify the
5000 following code by always setting up the "i"ntegral variables. */
5001 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5002 {
5003 tree iaddr_val;
5004
5005 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
5006 iaddr_val
5007 = force_gimple_operand_gsi (&si,
5008 fold_convert (TREE_TYPE (iaddr), addr),
5009 false, NULL_TREE, true, GSI_SAME_STMT);
5010 stmt = gimple_build_assign (iaddr, iaddr_val);
5011 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5012 DECL_NO_TBAA_P (iaddr) = 1;
5013 DECL_POINTER_ALIAS_SET (iaddr) = 0;
5014 loadedi = create_tmp_var (itype, NULL);
5015 if (gimple_in_ssa_p (cfun))
5016 {
5017 add_referenced_var (iaddr);
5018 add_referenced_var (loadedi);
5019 loadedi = make_ssa_name (loadedi, NULL);
5020 }
5021 }
5022 else
5023 {
5024 iaddr = addr;
5025 loadedi = loaded_val;
5026 }
5027
5028 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
5029 true, NULL_TREE, true, GSI_SAME_STMT);
5030
5031 /* Move the value to the LOADEDI temporary. */
5032 if (gimple_in_ssa_p (cfun))
5033 {
5034 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5035 phi = create_phi_node (loadedi, loop_header);
5036 SSA_NAME_DEF_STMT (loadedi) = phi;
5037 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5038 initial);
5039 }
5040 else
5041 gsi_insert_before (&si,
5042 gimple_build_assign (loadedi, initial),
5043 GSI_SAME_STMT);
5044 if (loadedi != loaded_val)
5045 {
5046 gimple_stmt_iterator gsi2;
5047 tree x;
5048
5049 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5050 gsi2 = gsi_start_bb (loop_header);
5051 if (gimple_in_ssa_p (cfun))
5052 {
5053 gimple stmt;
5054 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5055 true, GSI_SAME_STMT);
5056 stmt = gimple_build_assign (loaded_val, x);
5057 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5058 }
5059 else
5060 {
5061 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5062 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5063 true, GSI_SAME_STMT);
5064 }
5065 }
5066 gsi_remove (&si, true);
5067
5068 si = gsi_last_bb (store_bb);
5069 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5070
5071 if (iaddr == addr)
5072 storedi = stored_val;
5073 else
5074 storedi =
5075 force_gimple_operand_gsi (&si,
5076 build1 (VIEW_CONVERT_EXPR, itype,
5077 stored_val), true, NULL_TREE, true,
5078 GSI_SAME_STMT);
5079
5080 /* Build the compare&swap statement. */
5081 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5082 new_storedi = force_gimple_operand_gsi (&si,
5083 fold_convert (TREE_TYPE (loadedi),
5084 new_storedi),
5085 true, NULL_TREE,
5086 true, GSI_SAME_STMT);
5087
5088 if (gimple_in_ssa_p (cfun))
5089 old_vali = loadedi;
5090 else
5091 {
5092 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5093 if (gimple_in_ssa_p (cfun))
5094 add_referenced_var (old_vali);
5095 stmt = gimple_build_assign (old_vali, loadedi);
5096 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5097
5098 stmt = gimple_build_assign (loadedi, new_storedi);
5099 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5100 }
5101
5102 /* Note that we always perform the comparison as an integer, even for
5103 floating point. This allows the atomic operation to properly
5104 succeed even with NaNs and -0.0. */
5105 stmt = gimple_build_cond_empty
5106 (build2 (NE_EXPR, boolean_type_node,
5107 new_storedi, old_vali));
5108 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5109
5110 /* Update cfg. */
5111 e = single_succ_edge (store_bb);
5112 e->flags &= ~EDGE_FALLTHRU;
5113 e->flags |= EDGE_FALSE_VALUE;
5114
5115 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5116
5117 /* Copy the new value to loadedi (we already did that before the condition
5118 if we are not in SSA). */
5119 if (gimple_in_ssa_p (cfun))
5120 {
5121 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5122 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5123 }
5124
5125 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5126 gsi_remove (&si, true);
5127
5128 if (gimple_in_ssa_p (cfun))
5129 update_ssa (TODO_update_ssa_no_phi);
5130
5131 return true;
5132 }
5133
5134 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5135
5136 GOMP_atomic_start ();
5137 *addr = rhs;
5138 GOMP_atomic_end ();
5139
5140 The result is not globally atomic, but works so long as all parallel
5141 references are within #pragma omp atomic directives. According to
5142 responses received from omp@openmp.org, this appears to be within spec,
5143 which makes sense, since that's how several other compilers handle
5144 this situation as well.
5145 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5146 expanding. STORED_VAL is the operand of the matching
5147 GIMPLE_OMP_ATOMIC_STORE.
5148
5149 We replace
5150 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5151 loaded_val = *addr;
5152
5153 and replace
5154 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5155 *addr = stored_val;
5156 */
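/* For illustration only:  a hypothetical

     #pragma omp atomic
       ld += 1.0L;	-- ld is long double

   typically ends up here on targets without a suitably wide compare-and-swap
   (or when the type size is not a power of two), bracketing the update with
   GOMP_atomic_start () and GOMP_atomic_end ().  */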
5157
5158 static bool
5159 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5160 tree addr, tree loaded_val, tree stored_val)
5161 {
5162 gimple_stmt_iterator si;
5163 gimple stmt;
5164 tree t;
5165
5166 si = gsi_last_bb (load_bb);
5167 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5168
5169 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5170 t = build_function_call_expr (t, 0);
5171 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5172
5173 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5174 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5175 gsi_remove (&si, true);
5176
5177 si = gsi_last_bb (store_bb);
5178 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5179
5180 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
5181 stored_val);
5182 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5183
5184 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5185 t = build_function_call_expr (t, 0);
5186 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5187 gsi_remove (&si, true);
5188
5189 if (gimple_in_ssa_p (cfun))
5190 update_ssa (TODO_update_ssa_no_phi);
5191 return true;
5192 }
5193
5194 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
5195 using expand_omp_atomic_fetch_op. If that fails, we try to
5196 call expand_omp_atomic_pipeline, and if that fails too, the
5197 ultimate fallback is wrapping the operation in a mutex
5198 (expand_omp_atomic_mutex). REGION is the atomic region built
5199 by build_omp_regions_1(). */
5200
5201 static void
5202 expand_omp_atomic (struct omp_region *region)
5203 {
5204 basic_block load_bb = region->entry, store_bb = region->exit;
5205 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5206 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5207 tree addr = gimple_omp_atomic_load_rhs (load);
5208 tree stored_val = gimple_omp_atomic_store_val (store);
5209 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5210 HOST_WIDE_INT index;
5211
5212 /* Make sure the type is one of the supported sizes. */
5213 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5214 index = exact_log2 (index);
5215 if (index >= 0 && index <= 4)
5216 {
5217 unsigned int align = TYPE_ALIGN_UNIT (type);
5218
5219 /* __sync builtins require strict data alignment. */
5220 if (exact_log2 (align) >= index)
5221 {
5222 /* When possible, use specialized atomic update functions. */
5223 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5224 && store_bb == single_succ (load_bb))
5225 {
5226 if (expand_omp_atomic_fetch_op (load_bb, addr,
5227 loaded_val, stored_val, index))
5228 return;
5229 }
5230
5231 /* If we don't have specialized __sync builtins, try and implement
5232 as a compare and swap loop. */
5233 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5234 loaded_val, stored_val, index))
5235 return;
5236 }
5237 }
5238
5239 /* The ultimate fallback is wrapping the operation in a mutex. */
5240 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5241 }
5242
5243
5244 /* Expand the parallel region tree rooted at REGION. Expansion
5245 proceeds in depth-first order. Innermost regions are expanded
5246 first. This way, parallel regions that require a new function to
5247 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5248 internal dependencies in their body. */
5249
5250 static void
5251 expand_omp (struct omp_region *region)
5252 {
5253 while (region)
5254 {
5255 location_t saved_location;
5256
5257 /* First, determine whether this is a combined parallel+workshare
5258 region. */
5259 if (region->type == GIMPLE_OMP_PARALLEL)
5260 determine_parallel_type (region);
5261
5262 if (region->inner)
5263 expand_omp (region->inner);
5264
5265 saved_location = input_location;
5266 if (gimple_has_location (last_stmt (region->entry)))
5267 input_location = gimple_location (last_stmt (region->entry));
5268
5269 switch (region->type)
5270 {
5271 case GIMPLE_OMP_PARALLEL:
5272 case GIMPLE_OMP_TASK:
5273 expand_omp_taskreg (region);
5274 break;
5275
5276 case GIMPLE_OMP_FOR:
5277 expand_omp_for (region);
5278 break;
5279
5280 case GIMPLE_OMP_SECTIONS:
5281 expand_omp_sections (region);
5282 break;
5283
5284 case GIMPLE_OMP_SECTION:
5285 /* Individual omp sections are handled together with their
5286 parent GIMPLE_OMP_SECTIONS region. */
5287 break;
5288
5289 case GIMPLE_OMP_SINGLE:
5290 expand_omp_single (region);
5291 break;
5292
5293 case GIMPLE_OMP_MASTER:
5294 case GIMPLE_OMP_ORDERED:
5295 case GIMPLE_OMP_CRITICAL:
5296 expand_omp_synch (region);
5297 break;
5298
5299 case GIMPLE_OMP_ATOMIC_LOAD:
5300 expand_omp_atomic (region);
5301 break;
5302
5303 default:
5304 gcc_unreachable ();
5305 }
5306
5307 input_location = saved_location;
5308 region = region->next;
5309 }
5310 }
5311
5312
5313 /* Helper for build_omp_regions. Scan the dominator tree starting at
5314 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5315 true, the function ends once a single tree is built (otherwise, a whole
5316 forest of OMP constructs may be built). */
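/* For instance (a hypothetical input), a lowered function containing a
   GIMPLE_OMP_PARALLEL whose body holds a GIMPLE_OMP_FOR produces a parallel
   region with the for region as region->inner; the matching GIMPLE_OMP_RETURN
   statements mark each region->exit, and the GIMPLE_OMP_CONTINUE marks the
   for region's ->cont block.  */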
5317
5318 static void
5319 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5320 bool single_tree)
5321 {
5322 gimple_stmt_iterator gsi;
5323 gimple stmt;
5324 basic_block son;
5325
5326 gsi = gsi_last_bb (bb);
5327 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5328 {
5329 struct omp_region *region;
5330 enum gimple_code code;
5331
5332 stmt = gsi_stmt (gsi);
5333 code = gimple_code (stmt);
5334 if (code == GIMPLE_OMP_RETURN)
5335 {
5336 /* STMT is the return point out of region PARENT. Mark it
5337 as the exit point and make PARENT the immediately
5338 enclosing region. */
5339 gcc_assert (parent);
5340 region = parent;
5341 region->exit = bb;
5342 parent = parent->outer;
5343 }
5344 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5345 {
5346 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5347 GIMPLE_OMP_RETURN, but matches with
5348 GIMPLE_OMP_ATOMIC_LOAD. */
5349 gcc_assert (parent);
5350 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5351 region = parent;
5352 region->exit = bb;
5353 parent = parent->outer;
5354 }
5355
5356 else if (code == GIMPLE_OMP_CONTINUE)
5357 {
5358 gcc_assert (parent);
5359 parent->cont = bb;
5360 }
5361 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5362 {
5363 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5364 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5365 ;
5366 }
5367 else
5368 {
5369 /* Otherwise, this directive becomes the parent for a new
5370 region. */
5371 region = new_omp_region (bb, code, parent);
5372 parent = region;
5373 }
5374 }
5375
5376 if (single_tree && !parent)
5377 return;
5378
5379 for (son = first_dom_son (CDI_DOMINATORS, bb);
5380 son;
5381 son = next_dom_son (CDI_DOMINATORS, son))
5382 build_omp_regions_1 (son, parent, single_tree);
5383 }
5384
5385 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5386 root_omp_region. */
5387
5388 static void
5389 build_omp_regions_root (basic_block root)
5390 {
5391 gcc_assert (root_omp_region == NULL);
5392 build_omp_regions_1 (root, NULL, true);
5393 gcc_assert (root_omp_region != NULL);
5394 }
5395
5396 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
5397
5398 void
5399 omp_expand_local (basic_block head)
5400 {
5401 build_omp_regions_root (head);
5402 if (dump_file && (dump_flags & TDF_DETAILS))
5403 {
5404 fprintf (dump_file, "\nOMP region tree\n\n");
5405 dump_omp_region (dump_file, root_omp_region, 0);
5406 fprintf (dump_file, "\n");
5407 }
5408
5409 remove_exit_barriers (root_omp_region);
5410 expand_omp (root_omp_region);
5411
5412 free_omp_regions ();
5413 }
5414
5415 /* Scan the CFG and build a tree of OMP regions. The root of the
5416 OMP region tree is stored in root_omp_region. */
5417
5418 static void
5419 build_omp_regions (void)
5420 {
5421 gcc_assert (root_omp_region == NULL);
5422 calculate_dominance_info (CDI_DOMINATORS);
5423 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5424 }
5425
5426 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5427
5428 static unsigned int
5429 execute_expand_omp (void)
5430 {
5431 build_omp_regions ();
5432
5433 if (!root_omp_region)
5434 return 0;
5435
5436 if (dump_file)
5437 {
5438 fprintf (dump_file, "\nOMP region tree\n\n");
5439 dump_omp_region (dump_file, root_omp_region, 0);
5440 fprintf (dump_file, "\n");
5441 }
5442
5443 remove_exit_barriers (root_omp_region);
5444
5445 expand_omp (root_omp_region);
5446
5447 cleanup_tree_cfg ();
5448
5449 free_omp_regions ();
5450
5451 return 0;
5452 }
5453
5454 /* OMP expansion -- the default pass, run before creation of SSA form. */
5455
5456 static bool
5457 gate_expand_omp (void)
5458 {
5459 return (flag_openmp != 0 && errorcount == 0);
5460 }
5461
5462 struct gimple_opt_pass pass_expand_omp =
5463 {
5464 {
5465 GIMPLE_PASS,
5466 "ompexp", /* name */
5467 gate_expand_omp, /* gate */
5468 execute_expand_omp, /* execute */
5469 NULL, /* sub */
5470 NULL, /* next */
5471 0, /* static_pass_number */
5472 TV_NONE, /* tv_id */
5473 PROP_gimple_any, /* properties_required */
5474 0, /* properties_provided */
5475 0, /* properties_destroyed */
5476 0, /* todo_flags_start */
5477 TODO_dump_func /* todo_flags_finish */
5478 }
5479 };
5480 \f
5481 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5482
5483 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5484 CTX is the enclosing OMP context for the current statement. */
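
/* After lowering, the replacement GIMPLE_BIND assembled into NEW_BODY
   below is laid out roughly as follows (illustrative sketch):

     <clause setup from lower_rec_input_clauses>          (ilist)
     GIMPLE_OMP_SECTIONS <clauses, control var .section>
     GIMPLE_OMP_SECTIONS_SWITCH
     GIMPLE_BIND
       GIMPLE_OMP_SECTION          <- one per section; the last one also
         <lowered section body>       receives the lastprivate sequence
         GIMPLE_OMP_RETURN
       ...
     GIMPLE_OMP_CONTINUE <.section, .section>
     <reduction epilogue>                                 (olist)
     <destructor sequence>                                (dlist)
     GIMPLE_OMP_RETURN               <- nowait iff a NOWAIT clause is present

   with everything before the final GIMPLE_OMP_RETURN possibly wrapped
   by maybe_catch_exception.  */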
5485
5486 static void
5487 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5488 {
5489 tree block, control;
5490 gimple_stmt_iterator tgsi;
5491 unsigned i, len;
5492 gimple stmt, new_stmt, bind, t;
5493 gimple_seq ilist, dlist, olist, new_body, body;
5494 struct gimplify_ctx gctx;
5495
5496 stmt = gsi_stmt (*gsi_p);
5497
5498 push_gimplify_context (&gctx);
5499
5500 dlist = NULL;
5501 ilist = NULL;
5502 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5503 &ilist, &dlist, ctx);
5504
5505 tgsi = gsi_start (gimple_omp_body (stmt));
5506 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5507 continue;
5508
5509 tgsi = gsi_start (gimple_omp_body (stmt));
5510 body = NULL;
5511 for (i = 0; i < len; i++, gsi_next (&tgsi))
5512 {
5513 omp_context *sctx;
5514 gimple sec_start;
5515
5516 sec_start = gsi_stmt (tgsi);
5517 sctx = maybe_lookup_ctx (sec_start);
5518 gcc_assert (sctx);
5519
5520 gimple_seq_add_stmt (&body, sec_start);
5521
5522 lower_omp (gimple_omp_body (sec_start), sctx);
5523 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5524 gimple_omp_set_body (sec_start, NULL);
5525
5526 if (i == len - 1)
5527 {
5528 gimple_seq l = NULL;
5529 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5530 &l, ctx);
5531 gimple_seq_add_seq (&body, l);
5532 gimple_omp_section_set_last (sec_start);
5533 }
5534
5535 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5536 }
5537
5538 block = make_node (BLOCK);
5539 bind = gimple_build_bind (NULL, body, block);
5540
5541 olist = NULL;
5542 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5543
5544 block = make_node (BLOCK);
5545 new_stmt = gimple_build_bind (NULL, NULL, block);
5546
5547 pop_gimplify_context (new_stmt);
5548 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5549 BLOCK_VARS (block) = gimple_bind_vars (bind);
5550 if (BLOCK_VARS (block))
5551 TREE_USED (block) = 1;
5552
5553 new_body = NULL;
5554 gimple_seq_add_seq (&new_body, ilist);
5555 gimple_seq_add_stmt (&new_body, stmt);
5556 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5557 gimple_seq_add_stmt (&new_body, bind);
5558
5559 control = create_tmp_var (unsigned_type_node, ".section");
5560 t = gimple_build_omp_continue (control, control);
5561 gimple_omp_sections_set_control (stmt, control);
5562 gimple_seq_add_stmt (&new_body, t);
5563
5564 gimple_seq_add_seq (&new_body, olist);
5565 gimple_seq_add_seq (&new_body, dlist);
5566
5567 new_body = maybe_catch_exception (new_body);
5568
5569 t = gimple_build_omp_return
5570 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5571 OMP_CLAUSE_NOWAIT));
5572 gimple_seq_add_stmt (&new_body, t);
5573
5574 gimple_bind_set_body (new_stmt, new_body);
5575 gimple_omp_set_body (stmt, NULL);
5576
5577 gsi_replace (gsi_p, new_stmt, true);
5578 }
5579
5580
5581 /* A subroutine of lower_omp_single. Expand the simple form of
5582 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5583
5584 if (GOMP_single_start ())
5585 BODY;
5586 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5587
5588 FIXME. It may be better to delay expanding the logic of this until
5589 pass_expand_omp. The expanded logic may make the job more difficult
5590 for a synchronization analysis pass. */
5591
5592 static void
5593 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5594 {
5595 tree tlabel = create_artificial_label ();
5596 tree flabel = create_artificial_label ();
5597 gimple call, cond;
5598 tree lhs, decl;
5599
5600 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5601 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5602 call = gimple_build_call (decl, 0);
5603 gimple_call_set_lhs (call, lhs);
5604 gimple_seq_add_stmt (pre_p, call);
5605
5606 cond = gimple_build_cond (EQ_EXPR, lhs,
5607 fold_convert (TREE_TYPE (lhs), boolean_true_node),
5608 tlabel, flabel);
5609 gimple_seq_add_stmt (pre_p, cond);
5610 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5611 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5612 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5613 }
5614
5615
5616 /* A subroutine of lower_omp_single. Expand the simple form of
5617 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5618
5619 #pragma omp single copyprivate (a, b, c)
5620
5621 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5622
5623 {
5624 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5625 {
5626 BODY;
5627 copyout.a = a;
5628 copyout.b = b;
5629 copyout.c = c;
5630 GOMP_single_copy_end (&copyout);
5631 }
5632 else
5633 {
5634 a = copyout_p->a;
5635 b = copyout_p->b;
5636 c = copyout_p->c;
5637 }
5638 GOMP_barrier ();
5639 }
5640
5641 FIXME. It may be better to delay expanding the logic of this until
5642 pass_expand_omp. The expanded logic may make the job more difficult
5643 for a synchronization analysis pass. */
5644
5645 static void
5646 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5647 {
5648 tree ptr_type, t, l0, l1, l2;
5649 gimple_seq copyin_seq;
5650
5651 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5652
5653 ptr_type = build_pointer_type (ctx->record_type);
5654 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5655
5656 l0 = create_artificial_label ();
5657 l1 = create_artificial_label ();
5658 l2 = create_artificial_label ();
5659
5660 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5661 t = fold_convert (ptr_type, t);
5662 gimplify_assign (ctx->receiver_decl, t, pre_p);
5663
5664 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5665 build_int_cst (ptr_type, 0));
5666 t = build3 (COND_EXPR, void_type_node, t,
5667 build_and_jump (&l0), build_and_jump (&l1));
5668 gimplify_and_add (t, pre_p);
5669
5670 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5671
5672 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5673
5674 copyin_seq = NULL;
5675 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5676 &copyin_seq, ctx);
5677
5678 t = build_fold_addr_expr (ctx->sender_decl);
5679 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
5680 gimplify_and_add (t, pre_p);
5681
5682 t = build_and_jump (&l2);
5683 gimplify_and_add (t, pre_p);
5684
5685 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5686
5687 gimple_seq_add_seq (pre_p, copyin_seq);
5688
5689 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5690 }
5691
5692
5693 /* Lower code for an OpenMP single directive. */
5694
5695 static void
5696 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5697 {
5698 tree block;
5699 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5700 gimple_seq bind_body, dlist;
5701 struct gimplify_ctx gctx;
5702
5703 push_gimplify_context (&gctx);
5704
5705 bind_body = NULL;
5706 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5707 &bind_body, &dlist, ctx);
5708 lower_omp (gimple_omp_body (single_stmt), ctx);
5709
5710 gimple_seq_add_stmt (&bind_body, single_stmt);
5711
5712 if (ctx->record_type)
5713 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5714 else
5715 lower_omp_single_simple (single_stmt, &bind_body);
5716
5717 gimple_omp_set_body (single_stmt, NULL);
5718
5719 gimple_seq_add_seq (&bind_body, dlist);
5720
5721 bind_body = maybe_catch_exception (bind_body);
5722
5723 t = gimple_build_omp_return
5724 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5725 OMP_CLAUSE_NOWAIT));
5726 gimple_seq_add_stmt (&bind_body, t);
5727
5728 block = make_node (BLOCK);
5729 bind = gimple_build_bind (NULL, bind_body, block);
5730
5731 pop_gimplify_context (bind);
5732
5733 gimple_bind_append_vars (bind, ctx->block_vars);
5734 BLOCK_VARS (block) = ctx->block_vars;
5735 gsi_replace (gsi_p, bind, true);
5736 if (BLOCK_VARS (block))
5737 TREE_USED (block) = 1;
5738 }
5739
5740
5741 /* Lower code for an OpenMP master directive. */
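
/* Roughly, the lowering produces

     if (omp_get_thread_num () != 0) goto lab;
     <body>
   lab:

   followed by a GIMPLE_OMP_RETURN with the nowait flag set, since a
   master region has no implied barrier.  */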
5742
5743 static void
5744 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5745 {
5746 tree block, lab = NULL, x;
5747 gimple stmt = gsi_stmt (*gsi_p), bind;
5748 gimple_seq tseq;
5749 struct gimplify_ctx gctx;
5750
5751 push_gimplify_context (&gctx);
5752
5753 block = make_node (BLOCK);
5754 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5755 block);
5756
5757 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5758 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5759 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5760 tseq = NULL;
5761 gimplify_and_add (x, &tseq);
5762 gimple_bind_add_seq (bind, tseq);
5763
5764 lower_omp (gimple_omp_body (stmt), ctx);
5765 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5766 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5767 gimple_omp_set_body (stmt, NULL);
5768
5769 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5770
5771 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5772
5773 pop_gimplify_context (bind);
5774
5775 gimple_bind_append_vars (bind, ctx->block_vars);
5776 BLOCK_VARS (block) = ctx->block_vars;
5777 gsi_replace (gsi_p, bind, true);
5778 }
5779
5780
5781 /* Lower code for an OpenMP ordered directive. */
5782
5783 static void
5784 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5785 {
5786 tree block;
5787 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5788 struct gimplify_ctx gctx;
5789
5790 push_gimplify_context (&gctx);
5791
5792 block = make_node (BLOCK);
5793 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5794 block);
5795
5796 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5797 gimple_bind_add_stmt (bind, x);
5798
5799 lower_omp (gimple_omp_body (stmt), ctx);
5800 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5801 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5802 gimple_omp_set_body (stmt, NULL);
5803
5804 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5805 gimple_bind_add_stmt (bind, x);
5806
5807 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5808
5809 pop_gimplify_context (bind);
5810
5811 gimple_bind_append_vars (bind, ctx->block_vars);
5812 BLOCK_VARS (block) = gimple_bind_vars (bind);
5813 gsi_replace (gsi_p, bind, true);
5814 }
5815
5816
5817 /* Lower a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5818 substitution of a couple of function calls. But in the NAMED case,
5819 it requires that the languages coordinate a symbol name. It is
5820 therefore best put here in common code. */
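
/* As an illustration (not taken from the sources), a directive such as

     #pragma omp critical (io)

   is lowered into

     GOMP_critical_name_start (&.gomp_critical_user_io);
     <body>
     GOMP_critical_name_end (&.gomp_critical_user_io);

   where .gomp_critical_user_io is the public, common, pointer-sized
   variable created below; being common, every translation unit that
   names the same critical region ends up sharing one lock.  An unnamed
   critical uses GOMP_critical_start and GOMP_critical_end instead.  */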
5821
5822 static GTY((param1_is (tree), param2_is (tree)))
5823 splay_tree critical_name_mutexes;
5824
5825 static void
5826 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5827 {
5828 tree block;
5829 tree name, lock, unlock;
5830 gimple stmt = gsi_stmt (*gsi_p), bind;
5831 gimple_seq tbody;
5832 struct gimplify_ctx gctx;
5833
5834 name = gimple_omp_critical_name (stmt);
5835 if (name)
5836 {
5837 tree decl;
5838 splay_tree_node n;
5839
5840 if (!critical_name_mutexes)
5841 critical_name_mutexes
5842 = splay_tree_new_ggc (splay_tree_compare_pointers);
5843
5844 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5845 if (n == NULL)
5846 {
5847 char *new_str;
5848
5849 decl = create_tmp_var_raw (ptr_type_node, NULL);
5850
5851 new_str = ACONCAT ((".gomp_critical_user_",
5852 IDENTIFIER_POINTER (name), NULL));
5853 DECL_NAME (decl) = get_identifier (new_str);
5854 TREE_PUBLIC (decl) = 1;
5855 TREE_STATIC (decl) = 1;
5856 DECL_COMMON (decl) = 1;
5857 DECL_ARTIFICIAL (decl) = 1;
5858 DECL_IGNORED_P (decl) = 1;
5859 varpool_finalize_decl (decl);
5860
5861 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5862 (splay_tree_value) decl);
5863 }
5864 else
5865 decl = (tree) n->value;
5866
5867 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5868 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
5869
5870 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5871 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
5872 }
5873 else
5874 {
5875 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5876 lock = build_call_expr (lock, 0);
5877
5878 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5879 unlock = build_call_expr (unlock, 0);
5880 }
5881
5882 push_gimplify_context (&gctx);
5883
5884 block = make_node (BLOCK);
5885 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5886
5887 tbody = gimple_bind_body (bind);
5888 gimplify_and_add (lock, &tbody);
5889 gimple_bind_set_body (bind, tbody);
5890
5891 lower_omp (gimple_omp_body (stmt), ctx);
5892 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5893 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5894 gimple_omp_set_body (stmt, NULL);
5895
5896 tbody = gimple_bind_body (bind);
5897 gimplify_and_add (unlock, &tbody);
5898 gimple_bind_set_body (bind, tbody);
5899
5900 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5901
5902 pop_gimplify_context (bind);
5903 gimple_bind_append_vars (bind, ctx->block_vars);
5904 BLOCK_VARS (block) = gimple_bind_vars (bind);
5905 gsi_replace (gsi_p, bind, true);
5906 }
5907
5908
5909 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5910 for a lastprivate clause. Given a loop control predicate of (V
5911 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5912 is appended to *DLIST, iterator initialization is appended to
5913 *BODY_P. */
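
/* As a rough illustration, given

     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++) ...

   the copy-out sequence produced by lower_lastprivate_clauses is gated as

     if (i >= n) { <copy x out>; }

   (or "i == n" when the step is +-1, see below), so only a thread whose
   private I reached the end of the iteration space performs the copy.  */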
5914
5915 static void
5916 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5917 gimple_seq *dlist, struct omp_context *ctx)
5918 {
5919 tree clauses, cond, vinit;
5920 enum tree_code cond_code;
5921 gimple_seq stmts;
5922
5923 cond_code = fd->loop.cond_code;
5924 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5925
5926 /* When possible, use a strict equality expression. This can let
5927 optimizations such as VRP deduce the value and remove a copy. */
5928 if (host_integerp (fd->loop.step, 0))
5929 {
5930 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5931 if (step == 1 || step == -1)
5932 cond_code = EQ_EXPR;
5933 }
5934
5935 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5936
5937 clauses = gimple_omp_for_clauses (fd->for_stmt);
5938 stmts = NULL;
5939 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5940 if (!gimple_seq_empty_p (stmts))
5941 {
5942 gimple_seq_add_seq (&stmts, *dlist);
5943 *dlist = stmts;
5944
5945 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
5946 vinit = fd->loop.n1;
5947 if (cond_code == EQ_EXPR
5948 && host_integerp (fd->loop.n2, 0)
5949 && ! integer_zerop (fd->loop.n2))
5950 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
5951
5952 /* Initialize the iterator variable, so that threads that don't execute
5953 any iterations don't execute the lastprivate clauses by accident. */
5954 gimplify_assign (fd->loop.v, vinit, body_p);
5955 }
5956 }
5957
5958
5959 /* Lower code for an OpenMP loop directive. */
5960
5961 static void
5962 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5963 {
5964 tree *rhs_p, block;
5965 struct omp_for_data fd;
5966 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
5967 gimple_seq omp_for_body, body, dlist, ilist;
5968 size_t i;
5969 struct gimplify_ctx gctx;
5970
5971 push_gimplify_context (&gctx);
5972
5973 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
5974 lower_omp (gimple_omp_body (stmt), ctx);
5975
5976 block = make_node (BLOCK);
5977 new_stmt = gimple_build_bind (NULL, NULL, block);
5978
5979 /* Move the declarations of temporaries in the loop body into the new
5980 enclosing bind before the body goes away. */
5981 omp_for_body = gimple_omp_body (stmt);
5982 if (!gimple_seq_empty_p (omp_for_body)
5983 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
5984 {
5985 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
5986 gimple_bind_append_vars (new_stmt, vars);
5987 }
5988
5989 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
5990 ilist = NULL;
5991 dlist = NULL;
5992 body = NULL;
5993 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
5994 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
5995
5996 /* Lower the header expressions. At this point, we can assume that
5997 the header is of the form:
5998
5999 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6000
6001 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6002 using the .omp_data_s mapping, if needed. */
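
/* For instance, if the upper bound is a shared variable N, it has by now
   been rewritten in terms of the receiver record (something along the
   lines of .omp_data_i->n; the exact form depends on the mapping), which
   is not a GIMPLE invariant, so the loop below loads it into a fresh
   temporary before extract_omp_for_data inspects the bounds.  */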
6003 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6004 {
6005 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6006 if (!is_gimple_min_invariant (*rhs_p))
6007 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6008
6009 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6010 if (!is_gimple_min_invariant (*rhs_p))
6011 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6012
6013 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6014 if (!is_gimple_min_invariant (*rhs_p))
6015 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6016 }
6017
6018 /* Once lowered, extract the bounds and clauses. */
6019 extract_omp_for_data (stmt, &fd, NULL);
6020
6021 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6022
6023 gimple_seq_add_stmt (&body, stmt);
6024 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6025
6026 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6027 fd.loop.v));
6028
6029 /* After the loop, add exit clauses. */
6030 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6031 gimple_seq_add_seq (&body, dlist);
6032
6033 body = maybe_catch_exception (body);
6034
6035 /* Region exit marker goes at the end of the loop body. */
6036 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6037
6038 pop_gimplify_context (new_stmt);
6039
6040 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6041 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6042 if (BLOCK_VARS (block))
6043 TREE_USED (block) = 1;
6044
6045 gimple_bind_set_body (new_stmt, body);
6046 gimple_omp_set_body (stmt, NULL);
6047 gimple_omp_for_set_pre_body (stmt, NULL);
6048 gsi_replace (gsi_p, new_stmt, true);
6049 }
6050
6051 /* Callback for walk_gimple_seq. Check if the current statement only
6052 contains GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
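
/* *INFO (reached through WI->INFO) is a tri-state accumulator: it starts
   at 0, becomes 1 when the first workshare is seen, and collapses to -1
   on a second workshare or on any other statement (container statements
   such as GIMPLE_BIND or GIMPLE_TRY are simply walked through via
   WALK_SUBSTMTS).  The caller in lower_omp_taskreg treats a final value
   of 1 as "the parallel body is exactly one workshare" and marks the
   parallel as combined.  */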
6053
6054 static tree
6055 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6056 bool *handled_ops_p,
6057 struct walk_stmt_info *wi)
6058 {
6059 int *info = (int *) wi->info;
6060 gimple stmt = gsi_stmt (*gsi_p);
6061
6062 *handled_ops_p = true;
6063 switch (gimple_code (stmt))
6064 {
6065 WALK_SUBSTMTS;
6066
6067 case GIMPLE_OMP_FOR:
6068 case GIMPLE_OMP_SECTIONS:
6069 *info = *info == 0 ? 1 : -1;
6070 break;
6071 default:
6072 *info = -1;
6073 break;
6074 }
6075 return NULL;
6076 }
6077
6078 struct omp_taskcopy_context
6079 {
6080 /* This field must be at the beginning, as we do "inheritance": Some
6081 callback functions for tree-inline.c (e.g., omp_copy_decl)
6082 receive a copy_body_data pointer that is up-casted to an
6083 omp_context pointer. */
6084 copy_body_data cb;
6085 omp_context *ctx;
6086 };
6087
6088 static tree
6089 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6090 {
6091 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6092
6093 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6094 return create_tmp_var (TREE_TYPE (var), NULL);
6095
6096 return var;
6097 }
6098
6099 static tree
6100 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6101 {
6102 tree name, new_fields = NULL, type, f;
6103
6104 type = lang_hooks.types.make_type (RECORD_TYPE);
6105 name = DECL_NAME (TYPE_NAME (orig_type));
6106 name = build_decl (TYPE_DECL, name, type);
6107 TYPE_NAME (type) = name;
6108
6109 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6110 {
6111 tree new_f = copy_node (f);
6112 DECL_CONTEXT (new_f) = type;
6113 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6114 TREE_CHAIN (new_f) = new_fields;
6115 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6116 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6117 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6118 &tcctx->cb, NULL);
6119 new_fields = new_f;
6120 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6121 }
6122 TYPE_FIELDS (type) = nreverse (new_fields);
6123 layout_type (type);
6124 return type;
6125 }
6126
6127 /* Create task copyfn. */
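
/* The generated function takes two pointer arguments: ARG, pointing to
   the destination task record, and SARG, pointing to the sender record.
   Its body is assembled in three passes below: initialize the temporaries
   used in the (possibly remapped) record layouts, then copy shared
   variable pointers and copy-construct non-VLA firstprivate variables
   from *SARG into *ARG, and finally handle variable-sized firstprivate
   variables.  */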
6128
6129 static void
6130 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6131 {
6132 struct function *child_cfun;
6133 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6134 tree record_type, srecord_type, bind, list;
6135 bool record_needs_remap = false, srecord_needs_remap = false;
6136 splay_tree_node n;
6137 struct omp_taskcopy_context tcctx;
6138 struct gimplify_ctx gctx;
6139
6140 child_fn = gimple_omp_task_copy_fn (task_stmt);
6141 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6142 gcc_assert (child_cfun->cfg == NULL);
6143 child_cfun->dont_save_pending_sizes_p = 1;
6144 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6145
6146 /* Reset DECL_CONTEXT on function arguments. */
6147 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6148 DECL_CONTEXT (t) = child_fn;
6149
6150 /* Populate the function. */
6151 push_gimplify_context (&gctx);
6152 current_function_decl = child_fn;
6153
6154 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6155 TREE_SIDE_EFFECTS (bind) = 1;
6156 list = NULL;
6157 DECL_SAVED_TREE (child_fn) = bind;
6158 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6159
6160 /* Remap src and dst argument types if needed. */
6161 record_type = ctx->record_type;
6162 srecord_type = ctx->srecord_type;
6163 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6164 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6165 {
6166 record_needs_remap = true;
6167 break;
6168 }
6169 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6170 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6171 {
6172 srecord_needs_remap = true;
6173 break;
6174 }
6175
6176 if (record_needs_remap || srecord_needs_remap)
6177 {
6178 memset (&tcctx, '\0', sizeof (tcctx));
6179 tcctx.cb.src_fn = ctx->cb.src_fn;
6180 tcctx.cb.dst_fn = child_fn;
6181 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6182 tcctx.cb.dst_node = tcctx.cb.src_node;
6183 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6184 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6185 tcctx.cb.eh_region = -1;
6186 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6187 tcctx.cb.decl_map = pointer_map_create ();
6188 tcctx.ctx = ctx;
6189
6190 if (record_needs_remap)
6191 record_type = task_copyfn_remap_type (&tcctx, record_type);
6192 if (srecord_needs_remap)
6193 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6194 }
6195 else
6196 tcctx.cb.decl_map = NULL;
6197
6198 push_cfun (child_cfun);
6199
6200 arg = DECL_ARGUMENTS (child_fn);
6201 TREE_TYPE (arg) = build_pointer_type (record_type);
6202 sarg = TREE_CHAIN (arg);
6203 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6204
6205 /* First pass: initialize temporaries used in record_type and srecord_type
6206 sizes and field offsets. */
6207 if (tcctx.cb.decl_map)
6208 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6209 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6210 {
6211 tree *p;
6212
6213 decl = OMP_CLAUSE_DECL (c);
6214 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6215 if (p == NULL)
6216 continue;
6217 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6218 sf = (tree) n->value;
6219 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6220 src = build_fold_indirect_ref (sarg);
6221 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6222 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6223 append_to_statement_list (t, &list);
6224 }
6225
6226 /* Second pass: copy shared var pointers and copy construct non-VLA
6227 firstprivate vars. */
6228 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6229 switch (OMP_CLAUSE_CODE (c))
6230 {
6231 case OMP_CLAUSE_SHARED:
6232 decl = OMP_CLAUSE_DECL (c);
6233 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6234 if (n == NULL)
6235 break;
6236 f = (tree) n->value;
6237 if (tcctx.cb.decl_map)
6238 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6239 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6240 sf = (tree) n->value;
6241 if (tcctx.cb.decl_map)
6242 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6243 src = build_fold_indirect_ref (sarg);
6244 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6245 dst = build_fold_indirect_ref (arg);
6246 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6247 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6248 append_to_statement_list (t, &list);
6249 break;
6250 case OMP_CLAUSE_FIRSTPRIVATE:
6251 decl = OMP_CLAUSE_DECL (c);
6252 if (is_variable_sized (decl))
6253 break;
6254 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6255 if (n == NULL)
6256 break;
6257 f = (tree) n->value;
6258 if (tcctx.cb.decl_map)
6259 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6260 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6261 if (n != NULL)
6262 {
6263 sf = (tree) n->value;
6264 if (tcctx.cb.decl_map)
6265 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6266 src = build_fold_indirect_ref (sarg);
6267 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6268 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6269 src = build_fold_indirect_ref (src);
6270 }
6271 else
6272 src = decl;
6273 dst = build_fold_indirect_ref (arg);
6274 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6275 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6276 append_to_statement_list (t, &list);
6277 break;
6278 case OMP_CLAUSE_PRIVATE:
6279 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6280 break;
6281 decl = OMP_CLAUSE_DECL (c);
6282 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6283 f = (tree) n->value;
6284 if (tcctx.cb.decl_map)
6285 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6286 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6287 if (n != NULL)
6288 {
6289 sf = (tree) n->value;
6290 if (tcctx.cb.decl_map)
6291 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6292 src = build_fold_indirect_ref (sarg);
6293 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6294 if (use_pointer_for_field (decl, NULL))
6295 src = build_fold_indirect_ref (src);
6296 }
6297 else
6298 src = decl;
6299 dst = build_fold_indirect_ref (arg);
6300 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6301 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6302 append_to_statement_list (t, &list);
6303 break;
6304 default:
6305 break;
6306 }
6307
6308 /* Last pass: handle VLA firstprivates. */
6309 if (tcctx.cb.decl_map)
6310 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6311 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6312 {
6313 tree ind, ptr, df;
6314
6315 decl = OMP_CLAUSE_DECL (c);
6316 if (!is_variable_sized (decl))
6317 continue;
6318 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6319 if (n == NULL)
6320 continue;
6321 f = (tree) n->value;
6322 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6323 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6324 ind = DECL_VALUE_EXPR (decl);
6325 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6326 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6327 n = splay_tree_lookup (ctx->sfield_map,
6328 (splay_tree_key) TREE_OPERAND (ind, 0));
6329 sf = (tree) n->value;
6330 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6331 src = build_fold_indirect_ref (sarg);
6332 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6333 src = build_fold_indirect_ref (src);
6334 dst = build_fold_indirect_ref (arg);
6335 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6336 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6337 append_to_statement_list (t, &list);
6338 n = splay_tree_lookup (ctx->field_map,
6339 (splay_tree_key) TREE_OPERAND (ind, 0));
6340 df = (tree) n->value;
6341 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6342 ptr = build_fold_indirect_ref (arg);
6343 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6344 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6345 build_fold_addr_expr (dst));
6346 append_to_statement_list (t, &list);
6347 }
6348
6349 t = build1 (RETURN_EXPR, void_type_node, NULL);
6350 append_to_statement_list (t, &list);
6351
6352 if (tcctx.cb.decl_map)
6353 pointer_map_destroy (tcctx.cb.decl_map);
6354 pop_gimplify_context (NULL);
6355 BIND_EXPR_BODY (bind) = list;
6356 pop_cfun ();
6357 current_function_decl = ctx->cb.src_fn;
6358 }
6359
6360 /* Lower the OpenMP parallel or task directive in the current statement
6361 in GSI_P. CTX holds context information for the directive. */
6362
6363 static void
6364 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6365 {
6366 tree clauses;
6367 tree child_fn, t;
6368 gimple stmt = gsi_stmt (*gsi_p);
6369 gimple par_bind, bind;
6370 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6371 struct gimplify_ctx gctx;
6372
6373 clauses = gimple_omp_taskreg_clauses (stmt);
6374 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6375 par_body = gimple_bind_body (par_bind);
6376 child_fn = ctx->cb.dst_fn;
6377 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6378 && !gimple_omp_parallel_combined_p (stmt))
6379 {
6380 struct walk_stmt_info wi;
6381 int ws_num = 0;
6382
6383 memset (&wi, 0, sizeof (wi));
6384 wi.info = &ws_num;
6385 wi.val_only = true;
6386 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6387 if (ws_num == 1)
6388 gimple_omp_parallel_set_combined_p (stmt, true);
6389 }
6390 if (ctx->srecord_type)
6391 create_task_copyfn (stmt, ctx);
6392
6393 push_gimplify_context (&gctx);
6394
6395 par_olist = NULL;
6396 par_ilist = NULL;
6397 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6398 lower_omp (par_body, ctx);
6399 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6400 lower_reduction_clauses (clauses, &par_olist, ctx);
6401
6402 /* Declare all the variables created by mapping and the variables
6403 declared in the scope of the parallel body. */
6404 record_vars_into (ctx->block_vars, child_fn);
6405 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6406
6407 if (ctx->record_type)
6408 {
6409 ctx->sender_decl
6410 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6411 : ctx->record_type, ".omp_data_o");
6412 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6413 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6414 }
6415
6416 olist = NULL;
6417 ilist = NULL;
6418 lower_send_clauses (clauses, &ilist, &olist, ctx);
6419 lower_send_shared_vars (&ilist, &olist, ctx);
6420
6421 /* Once all the expansions are done, sequence all the different
6422 fragments inside gimple_omp_body. */
6423
6424 new_body = NULL;
6425
6426 if (ctx->record_type)
6427 {
6428 t = build_fold_addr_expr (ctx->sender_decl);
6429 /* fixup_child_record_type might have changed receiver_decl's type. */
6430 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
6431 gimple_seq_add_stmt (&new_body,
6432 gimple_build_assign (ctx->receiver_decl, t));
6433 }
6434
6435 gimple_seq_add_seq (&new_body, par_ilist);
6436 gimple_seq_add_seq (&new_body, par_body);
6437 gimple_seq_add_seq (&new_body, par_olist);
6438 new_body = maybe_catch_exception (new_body);
6439 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6440 gimple_omp_set_body (stmt, new_body);
6441
6442 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6443 gimple_bind_add_stmt (bind, stmt);
6444 if (ilist || olist)
6445 {
6446 gimple_seq_add_stmt (&ilist, bind);
6447 gimple_seq_add_seq (&ilist, olist);
6448 bind = gimple_build_bind (NULL, ilist, NULL);
6449 }
6450
6451 gsi_replace (gsi_p, bind, true);
6452
6453 pop_gimplify_context (NULL);
6454 }
6455
6456 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6457 regimplified. If DATA is non-NULL, lower_omp_1 is outside
6458 of an OpenMP context, but with task_shared_vars set. */
6459
6460 static tree
6461 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6462 void *data)
6463 {
6464 tree t = *tp;
6465
6466 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6467 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6468 return t;
6469
6470 if (task_shared_vars
6471 && DECL_P (t)
6472 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6473 return t;
6474
6475 /* If a global variable has been privatized, TREE_CONSTANT on
6476 ADDR_EXPR might be wrong. */
6477 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6478 recompute_tree_invariant_for_addr_expr (t);
6479
6480 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6481 return NULL_TREE;
6482 }
6483
6484 static void
6485 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6486 {
6487 gimple stmt = gsi_stmt (*gsi_p);
6488 struct walk_stmt_info wi;
6489
6490 if (gimple_has_location (stmt))
6491 input_location = gimple_location (stmt);
6492
6493 if (task_shared_vars)
6494 memset (&wi, '\0', sizeof (wi));
6495
6496 /* If we have issued syntax errors, avoid doing any heavy lifting.
6497 Just replace the OpenMP directives with a NOP to avoid
6498 confusing RTL expansion. */
6499 if (errorcount && is_gimple_omp (stmt))
6500 {
6501 gsi_replace (gsi_p, gimple_build_nop (), true);
6502 return;
6503 }
6504
6505 switch (gimple_code (stmt))
6506 {
6507 case GIMPLE_COND:
6508 if ((ctx || task_shared_vars)
6509 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6510 ctx ? NULL : &wi, NULL)
6511 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6512 ctx ? NULL : &wi, NULL)))
6513 gimple_regimplify_operands (stmt, gsi_p);
6514 break;
6515 case GIMPLE_CATCH:
6516 lower_omp (gimple_catch_handler (stmt), ctx);
6517 break;
6518 case GIMPLE_EH_FILTER:
6519 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6520 break;
6521 case GIMPLE_TRY:
6522 lower_omp (gimple_try_eval (stmt), ctx);
6523 lower_omp (gimple_try_cleanup (stmt), ctx);
6524 break;
6525 case GIMPLE_BIND:
6526 lower_omp (gimple_bind_body (stmt), ctx);
6527 break;
6528 case GIMPLE_OMP_PARALLEL:
6529 case GIMPLE_OMP_TASK:
6530 ctx = maybe_lookup_ctx (stmt);
6531 lower_omp_taskreg (gsi_p, ctx);
6532 break;
6533 case GIMPLE_OMP_FOR:
6534 ctx = maybe_lookup_ctx (stmt);
6535 gcc_assert (ctx);
6536 lower_omp_for (gsi_p, ctx);
6537 break;
6538 case GIMPLE_OMP_SECTIONS:
6539 ctx = maybe_lookup_ctx (stmt);
6540 gcc_assert (ctx);
6541 lower_omp_sections (gsi_p, ctx);
6542 break;
6543 case GIMPLE_OMP_SINGLE:
6544 ctx = maybe_lookup_ctx (stmt);
6545 gcc_assert (ctx);
6546 lower_omp_single (gsi_p, ctx);
6547 break;
6548 case GIMPLE_OMP_MASTER:
6549 ctx = maybe_lookup_ctx (stmt);
6550 gcc_assert (ctx);
6551 lower_omp_master (gsi_p, ctx);
6552 break;
6553 case GIMPLE_OMP_ORDERED:
6554 ctx = maybe_lookup_ctx (stmt);
6555 gcc_assert (ctx);
6556 lower_omp_ordered (gsi_p, ctx);
6557 break;
6558 case GIMPLE_OMP_CRITICAL:
6559 ctx = maybe_lookup_ctx (stmt);
6560 gcc_assert (ctx);
6561 lower_omp_critical (gsi_p, ctx);
6562 break;
6563 case GIMPLE_OMP_ATOMIC_LOAD:
6564 if ((ctx || task_shared_vars)
6565 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6566 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6567 gimple_regimplify_operands (stmt, gsi_p);
6568 break;
6569 default:
6570 if ((ctx || task_shared_vars)
6571 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6572 ctx ? NULL : &wi))
6573 gimple_regimplify_operands (stmt, gsi_p);
6574 break;
6575 }
6576 }
6577
6578 static void
6579 lower_omp (gimple_seq body, omp_context *ctx)
6580 {
6581 location_t saved_location = input_location;
6582 gimple_stmt_iterator gsi;
6583 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6584 lower_omp_1 (&gsi, ctx);
6585 input_location = saved_location;
6586 }
6587 \f
6588 /* Main entry point. */
6589
6590 static unsigned int
6591 execute_lower_omp (void)
6592 {
6593 gimple_seq body;
6594
6595 /* This pass always runs, to provide PROP_gimple_lomp.
6596 But there is nothing to do unless -fopenmp is given. */
6597 if (flag_openmp == 0)
6598 return 0;
6599
6600 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6601 delete_omp_context);
6602
6603 body = gimple_body (current_function_decl);
6604 scan_omp (body, NULL);
6605 gcc_assert (taskreg_nesting_level == 0);
6606
6607 if (all_contexts->root)
6608 {
6609 struct gimplify_ctx gctx;
6610
6611 if (task_shared_vars)
6612 push_gimplify_context (&gctx);
6613 lower_omp (body, NULL);
6614 if (task_shared_vars)
6615 pop_gimplify_context (NULL);
6616 }
6617
6618 if (all_contexts)
6619 {
6620 splay_tree_delete (all_contexts);
6621 all_contexts = NULL;
6622 }
6623 BITMAP_FREE (task_shared_vars);
6624 return 0;
6625 }
6626
6627 struct gimple_opt_pass pass_lower_omp =
6628 {
6629 {
6630 GIMPLE_PASS,
6631 "omplower", /* name */
6632 NULL, /* gate */
6633 execute_lower_omp, /* execute */
6634 NULL, /* sub */
6635 NULL, /* next */
6636 0, /* static_pass_number */
6637 TV_NONE, /* tv_id */
6638 PROP_gimple_any, /* properties_required */
6639 PROP_gimple_lomp, /* properties_provided */
6640 0, /* properties_destroyed */
6641 0, /* todo_flags_start */
6642 TODO_dump_func /* todo_flags_finish */
6643 }
6644 };
6645 \f
6646 /* The following is a utility to diagnose OpenMP structured block violations.
6647 It is not part of the "omplower" pass, as that's invoked too late. It
6648 should be invoked by the respective front ends after gimplification. */
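
/* As an illustration, consider

     #pragma omp single
       {
         if (cond)
           goto out;        <- invalid: branches out of the structured block
         ...
       }
     out:;

   diagnose_sb_1 records that the label OUT belongs to the outermost (NULL)
   context, and diagnose_sb_2 then rejects the GIMPLE_GOTO because its
   context (the GIMPLE_OMP_SINGLE) differs from the label's.  */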
6649
6650 static splay_tree all_labels;
6651
6652 /* Check for mismatched contexts and generate an error if needed. Return
6653 true if an error is detected. */
6654
6655 static bool
6656 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6657 gimple branch_ctx, gimple label_ctx)
6658 {
6659 if (label_ctx == branch_ctx)
6660 return false;
6661
6662
6663 /*
6664 Previously we kept track of the label's entire context in diagnose_sb_[12]
6665 so we could traverse it and issue a correct "exit" or "enter" error
6666 message upon a structured block violation.
6667
6668 We built the context by building a list with tree_cons'ing, but there is
6669 no easy counterpart in gimple tuples. It seems like far too much work
6670 for issuing exit/enter error messages. If someone really misses the
6671 distinct error message... patches welcome.
6672 */
6673
6674 #if 0
6675 /* Try to avoid confusing the user by producing an error message
6676 with correct "exit" or "enter" verbiage. We prefer "exit"
6677 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6678 if (branch_ctx == NULL)
6679 exit_p = false;
6680 else
6681 {
6682 while (label_ctx)
6683 {
6684 if (TREE_VALUE (label_ctx) == branch_ctx)
6685 {
6686 exit_p = false;
6687 break;
6688 }
6689 label_ctx = TREE_CHAIN (label_ctx);
6690 }
6691 }
6692
6693 if (exit_p)
6694 error ("invalid exit from OpenMP structured block");
6695 else
6696 error ("invalid entry to OpenMP structured block");
6697 #endif
6698
6699 /* If it's obvious we have an invalid entry, be specific about the error. */
6700 if (branch_ctx == NULL)
6701 error ("invalid entry to OpenMP structured block");
6702 else
6703 /* Otherwise, be vague and lazy, but efficient. */
6704 error ("invalid branch to/from an OpenMP structured block");
6705
6706 gsi_replace (gsi_p, gimple_build_nop (), false);
6707 return true;
6708 }
6709
6710 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6711 where each label is found. */
6712
6713 static tree
6714 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6715 struct walk_stmt_info *wi)
6716 {
6717 gimple context = (gimple) wi->info;
6718 gimple inner_context;
6719 gimple stmt = gsi_stmt (*gsi_p);
6720
6721 *handled_ops_p = true;
6722
6723 switch (gimple_code (stmt))
6724 {
6725 WALK_SUBSTMTS;
6726
6727 case GIMPLE_OMP_PARALLEL:
6728 case GIMPLE_OMP_TASK:
6729 case GIMPLE_OMP_SECTIONS:
6730 case GIMPLE_OMP_SINGLE:
6731 case GIMPLE_OMP_SECTION:
6732 case GIMPLE_OMP_MASTER:
6733 case GIMPLE_OMP_ORDERED:
6734 case GIMPLE_OMP_CRITICAL:
6735 /* The minimal context here is just the current OMP construct. */
6736 inner_context = stmt;
6737 wi->info = inner_context;
6738 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6739 wi->info = context;
6740 break;
6741
6742 case GIMPLE_OMP_FOR:
6743 inner_context = stmt;
6744 wi->info = inner_context;
6745 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6746 walk them. */
6747 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6748 diagnose_sb_1, NULL, wi);
6749 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6750 wi->info = context;
6751 break;
6752
6753 case GIMPLE_LABEL:
6754 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6755 (splay_tree_value) context);
6756 break;
6757
6758 default:
6759 break;
6760 }
6761
6762 return NULL_TREE;
6763 }
6764
6765 /* Pass 2: Check each branch and see if its context differs from that
6766 of its destination label. */
6767
6768 static tree
6769 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6770 struct walk_stmt_info *wi)
6771 {
6772 gimple context = (gimple) wi->info;
6773 splay_tree_node n;
6774 gimple stmt = gsi_stmt (*gsi_p);
6775
6776 *handled_ops_p = true;
6777
6778 switch (gimple_code (stmt))
6779 {
6780 WALK_SUBSTMTS;
6781
6782 case GIMPLE_OMP_PARALLEL:
6783 case GIMPLE_OMP_TASK:
6784 case GIMPLE_OMP_SECTIONS:
6785 case GIMPLE_OMP_SINGLE:
6786 case GIMPLE_OMP_SECTION:
6787 case GIMPLE_OMP_MASTER:
6788 case GIMPLE_OMP_ORDERED:
6789 case GIMPLE_OMP_CRITICAL:
6790 wi->info = stmt;
6791 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6792 wi->info = context;
6793 break;
6794
6795 case GIMPLE_OMP_FOR:
6796 wi->info = stmt;
6797 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6798 walk them. */
6799 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6800 diagnose_sb_2, NULL, wi);
6801 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6802 wi->info = context;
6803 break;
6804
6805 case GIMPLE_GOTO:
6806 {
6807 tree lab = gimple_goto_dest (stmt);
6808 if (TREE_CODE (lab) != LABEL_DECL)
6809 break;
6810
6811 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6812 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6813 }
6814 break;
6815
6816 case GIMPLE_SWITCH:
6817 {
6818 unsigned int i;
6819 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6820 {
6821 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6822 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6823 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6824 break;
6825 }
6826 }
6827 break;
6828
6829 case GIMPLE_RETURN:
6830 diagnose_sb_0 (gsi_p, context, NULL);
6831 break;
6832
6833 default:
6834 break;
6835 }
6836
6837 return NULL_TREE;
6838 }
6839
6840 void
6841 diagnose_omp_structured_block_errors (tree fndecl)
6842 {
6843 tree save_current = current_function_decl;
6844 struct walk_stmt_info wi;
6845 struct function *old_cfun = cfun;
6846 gimple_seq body = gimple_body (fndecl);
6847
6848 current_function_decl = fndecl;
6849 set_cfun (DECL_STRUCT_FUNCTION (fndecl));
6850
6851 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6852
6853 memset (&wi, 0, sizeof (wi));
6854 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6855
6856 memset (&wi, 0, sizeof (wi));
6857 wi.want_locations = true;
6858 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6859
6860 splay_tree_delete (all_labels);
6861 all_labels = NULL;
6862
6863 set_cfun (old_cfun);
6864 current_function_decl = save_current;
6865 }
6866
6867 #include "gt-omp-low.h"