1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic.h"
35 #include "tree-flow.h"
36 #include "timevar.h"
37 #include "flags.h"
38 #include "function.h"
39 #include "expr.h"
40 #include "toplev.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
47
48
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
55
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
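
/* For illustration only (a sketch of the overall effect, not the exact
   output of this file): a directive such as

	#pragma omp parallel shared(a)
	  body;

   has its shared state collected into a record (.omp_data_s), its body
   outlined into a child function such as foo._omp_fn.0 that receives a
   pointer to that record, and the directive itself replaced by calls
   into libgomp along the lines of

	.omp_data_o.a = a;
	GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	GOMP_parallel_end ();

   The names .omp_data_o, .omp_data_i and foo._omp_fn.0 used here and
   below are only examples.  */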
59
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
62
63 typedef struct omp_context
64 {
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
70
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
74
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
81
82 /* These are used only by task contexts, if the task firstprivate fn is
83 needed.  srecord_type is used to communicate from the thread that
84 encountered the task construct to the task firstprivate fn, while
85 record_type is allocated by GOMP_task, initialized by the task
86 firstprivate fn and passed to the task body fn.  */
87 splay_tree sfield_map;
88 tree srecord_type;
89
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
93
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
97
98 /* Nesting depth of this context. Used to beautify error messages re
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
102
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
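
/* For example, scanning

	#pragma omp parallel
	  {
	    #pragma omp for
	    for (...) ...
	  }

   creates one omp_context for the parallel directive (depth 1) and a
   nested one for the loop (depth 2, whose OUTER field points back at
   the parallel's context); FIELD_MAP and RECORD_TYPE describe the
   .omp_data_s record used to pass shared state to the child fn.  */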
106
107
108 struct omp_for_data_loop
109 {
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
112 };
113
114 /* A structure describing the main elements of a parallel loop. */
115
116 struct omp_for_data
117 {
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
126 };
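
/* For instance, for

	#pragma omp for schedule(dynamic, 4)
	for (i = 0; i < n; i++)
	  ...

   extract_omp_for_data below fills in loop.v = i, loop.n1 = 0,
   loop.n2 = n, loop.step = 1, loop.cond_code = LT_EXPR,
   sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC, chunk_size = 4 and
   collapse = 1 (an illustrative example).  */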
127
128
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
133
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
136
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
145
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
147
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
150 {
151 struct walk_stmt_info wi;
152
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
156
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
158 }
159
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
163
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
165
166 tree
167 find_omp_clause (tree clauses, enum tree_code kind)
168 {
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
172
173 return NULL_TREE;
174 }
175
176 /* Return true if CTX is for an omp parallel. */
177
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
180 {
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
182 }
183
184
185 /* Return true if CTX is for an omp task. */
186
187 static inline bool
188 is_task_ctx (omp_context *ctx)
189 {
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
191 }
192
193
194 /* Return true if CTX is for an omp parallel or omp task. */
195
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
198 {
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
201 }
202
203
204 /* Return true if REGION is a combined parallel+workshare region. */
205
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
208 {
209 return region->is_combined_parallel;
210 }
211
212
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
215
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
219 {
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225
226 fd->for_stmt = for_stmt;
227 fd->pre = NULL;
228 fd->collapse = gimple_omp_for_collapse (for_stmt);
229 if (fd->collapse > 1)
230 fd->loops = loops;
231 else
232 fd->loops = &fd->loop;
233
234 fd->have_nowait = fd->have_ordered = false;
235 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
236 fd->chunk_size = NULL_TREE;
237 collapse_iter = NULL;
238 collapse_count = NULL;
239
240 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
241 switch (OMP_CLAUSE_CODE (t))
242 {
243 case OMP_CLAUSE_NOWAIT:
244 fd->have_nowait = true;
245 break;
246 case OMP_CLAUSE_ORDERED:
247 fd->have_ordered = true;
248 break;
249 case OMP_CLAUSE_SCHEDULE:
250 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
251 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
252 break;
253 case OMP_CLAUSE_COLLAPSE:
254 if (fd->collapse > 1)
255 {
256 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
257 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
258 }
259 default:
260 break;
261 }
262
263 /* FIXME: for now map schedule(auto) to schedule(static).
264 There should be analysis to determine whether all iterations
265 are approximately the same amount of work (then schedule(static)
266 is best) or if it varies (then schedule(dynamic,N) is better). */
267 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
268 {
269 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
270 gcc_assert (fd->chunk_size == NULL);
271 }
272 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
273 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
274 gcc_assert (fd->chunk_size == NULL);
275 else if (fd->chunk_size == NULL)
276 {
277 /* We only need to compute a default chunk size for ordered
278 static loops and dynamic loops. */
279 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
280 || fd->have_ordered
281 || fd->collapse > 1)
282 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
283 ? integer_zero_node : integer_one_node;
284 }
285
286 for (i = 0; i < fd->collapse; i++)
287 {
288 if (fd->collapse == 1)
289 loop = &fd->loop;
290 else if (loops != NULL)
291 loop = loops + i;
292 else
293 loop = &dummy_loop;
294
295
296 loop->v = gimple_omp_for_index (for_stmt, i);
297 gcc_assert (SSA_VAR_P (loop->v));
298 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
299 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
300 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
301 loop->n1 = gimple_omp_for_initial (for_stmt, i);
302
303 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
304 loop->n2 = gimple_omp_for_final (for_stmt, i);
305 switch (loop->cond_code)
306 {
307 case LT_EXPR:
308 case GT_EXPR:
309 break;
310 case LE_EXPR:
311 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
312 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
313 loop->n2, size_one_node);
314 else
315 loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
316 build_int_cst (TREE_TYPE (loop->n2), 1));
317 loop->cond_code = LT_EXPR;
318 break;
319 case GE_EXPR:
320 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
321 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
322 loop->n2, size_int (-1));
323 else
324 loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
325 build_int_cst (TREE_TYPE (loop->n2), 1));
326 loop->cond_code = GT_EXPR;
327 break;
328 default:
329 gcc_unreachable ();
330 }
331
332 t = gimple_omp_for_incr (for_stmt, i);
333 gcc_assert (TREE_OPERAND (t, 0) == var);
334 switch (TREE_CODE (t))
335 {
336 case PLUS_EXPR:
337 case POINTER_PLUS_EXPR:
338 loop->step = TREE_OPERAND (t, 1);
339 break;
340 case MINUS_EXPR:
341 loop->step = TREE_OPERAND (t, 1);
342 loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
343 loop->step);
344 break;
345 default:
346 gcc_unreachable ();
347 }
348
349 if (iter_type != long_long_unsigned_type_node)
350 {
351 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
352 iter_type = long_long_unsigned_type_node;
353 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
354 && TYPE_PRECISION (TREE_TYPE (loop->v))
355 >= TYPE_PRECISION (iter_type))
356 {
357 tree n;
358
359 if (loop->cond_code == LT_EXPR)
360 n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
361 loop->n2, loop->step);
362 else
363 n = loop->n1;
364 if (TREE_CODE (n) != INTEGER_CST
365 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
366 iter_type = long_long_unsigned_type_node;
367 }
368 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
369 > TYPE_PRECISION (iter_type))
370 {
371 tree n1, n2;
372
373 if (loop->cond_code == LT_EXPR)
374 {
375 n1 = loop->n1;
376 n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
377 loop->n2, loop->step);
378 }
379 else
380 {
381 n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
383 n2 = loop->n1;
384 }
385 if (TREE_CODE (n1) != INTEGER_CST
386 || TREE_CODE (n2) != INTEGER_CST
387 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
388 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
389 iter_type = long_long_unsigned_type_node;
390 }
391 }
392
393 if (collapse_count && *collapse_count == NULL)
394 {
395 if ((i == 0 || count != NULL_TREE)
396 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
397 && TREE_CONSTANT (loop->n1)
398 && TREE_CONSTANT (loop->n2)
399 && TREE_CODE (loop->step) == INTEGER_CST)
400 {
401 tree itype = TREE_TYPE (loop->v);
402
403 if (POINTER_TYPE_P (itype))
404 itype
405 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
406 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
407 t = fold_build2 (PLUS_EXPR, itype,
408 fold_convert (itype, loop->step), t);
409 t = fold_build2 (PLUS_EXPR, itype, t,
410 fold_convert (itype, loop->n2));
411 t = fold_build2 (MINUS_EXPR, itype, t,
412 fold_convert (itype, loop->n1));
413 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
414 t = fold_build2 (TRUNC_DIV_EXPR, itype,
415 fold_build1 (NEGATE_EXPR, itype, t),
416 fold_build1 (NEGATE_EXPR, itype,
417 fold_convert (itype,
418 loop->step)));
419 else
420 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
421 fold_convert (itype, loop->step));
422 t = fold_convert (long_long_unsigned_type_node, t);
423 if (count != NULL_TREE)
424 count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
425 count, t);
426 else
427 count = t;
428 if (TREE_CODE (count) != INTEGER_CST)
429 count = NULL_TREE;
430 }
431 else
432 count = NULL_TREE;
433 }
434 }
435
436 if (count)
437 {
438 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
439 iter_type = long_long_unsigned_type_node;
440 else
441 iter_type = long_integer_type_node;
442 }
443 else if (collapse_iter && *collapse_iter != NULL)
444 iter_type = TREE_TYPE (*collapse_iter);
445 fd->iter_type = iter_type;
446 if (collapse_iter && *collapse_iter == NULL)
447 *collapse_iter = create_tmp_var (iter_type, ".iter");
448 if (collapse_count && *collapse_count == NULL)
449 {
450 if (count)
451 *collapse_count = fold_convert (iter_type, count);
452 else
453 *collapse_count = create_tmp_var (iter_type, ".count");
454 }
455
456 if (fd->collapse > 1)
457 {
458 fd->loop.v = *collapse_iter;
459 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
460 fd->loop.n2 = *collapse_count;
461 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
462 fd->loop.cond_code = LT_EXPR;
463 }
464 }
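
/* A worked example of the normalization and counting done above
   (illustrative): for

	for (i = 0; i <= 9; i += 3)

   the LE_EXPR bound is rewritten to i < 10, and when a collapse count
   is needed it is computed as
   (step + (cond == LT ? -1 : 1) + n2 - n1) / step
   = (3 - 1 + 10 - 0) / 3 = 4, matching the iterations i = 0, 3, 6, 9.  */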
465
466
467 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
468 is the immediate dominator of PAR_ENTRY_BB, return true if there
469 are no data dependencies that would prevent expanding the parallel
470 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
471
472 When expanding a combined parallel+workshare region, the call to
473 the child function may need additional arguments in the case of
474 GIMPLE_OMP_FOR regions. In some cases, these arguments are
475 computed out of variables passed in from the parent to the child
476 via 'struct .omp_data_s'. For instance:
477
478 #pragma omp parallel for schedule (guided, i * 4)
479 for (j ...)
480
481 Is lowered into:
482
483 # BLOCK 2 (PAR_ENTRY_BB)
484 .omp_data_o.i = i;
485 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
486
487 # BLOCK 3 (WS_ENTRY_BB)
488 .omp_data_i = &.omp_data_o;
489 D.1667 = .omp_data_i->i;
490 D.1598 = D.1667 * 4;
491 #pragma omp for schedule (guided, D.1598)
492
493 When we outline the parallel region, the call to the child function
494 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
495 that value is computed *after* the call site. So, in principle we
496 cannot do the transformation.
497
498 To see whether the code in WS_ENTRY_BB blocks the combined
499 parallel+workshare call, we collect all the variables used in the
500 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
501 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
502 call.
503
504 FIXME. If we had the SSA form built at this point, we could merely
505 hoist the code in block 3 into block 2 and be done with it. But at
506 this point we don't have dataflow information and though we could
507 hack something up here, it is really not worth the aggravation. */
508
509 static bool
510 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
511 {
512 struct omp_for_data fd;
513 gimple par_stmt, ws_stmt;
514
515 par_stmt = last_stmt (par_entry_bb);
516 ws_stmt = last_stmt (ws_entry_bb);
517
518 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
519 return true;
520
521 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
522
523 extract_omp_for_data (ws_stmt, &fd, NULL);
524
525 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
526 return false;
527 if (fd.iter_type != long_integer_type_node)
528 return false;
529
530 /* FIXME. We give up too easily here. If any of these arguments
531 are not constants, they will likely involve variables that have
532 been mapped into fields of .omp_data_s for sharing with the child
533 function. With appropriate data flow, it would be possible to
534 see through this. */
535 if (!is_gimple_min_invariant (fd.loop.n1)
536 || !is_gimple_min_invariant (fd.loop.n2)
537 || !is_gimple_min_invariant (fd.loop.step)
538 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
539 return false;
540
541 return true;
542 }
543
544
545 /* Collect additional arguments needed to emit a combined
546 parallel+workshare call. WS_STMT is the workshare directive being
547 expanded. */
548
549 static tree
550 get_ws_args_for (gimple ws_stmt)
551 {
552 tree t;
553
554 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
555 {
556 struct omp_for_data fd;
557 tree ws_args;
558
559 extract_omp_for_data (ws_stmt, &fd, NULL);
560
561 ws_args = NULL_TREE;
562 if (fd.chunk_size)
563 {
564 t = fold_convert (long_integer_type_node, fd.chunk_size);
565 ws_args = tree_cons (NULL, t, ws_args);
566 }
567
568 t = fold_convert (long_integer_type_node, fd.loop.step);
569 ws_args = tree_cons (NULL, t, ws_args);
570
571 t = fold_convert (long_integer_type_node, fd.loop.n2);
572 ws_args = tree_cons (NULL, t, ws_args);
573
574 t = fold_convert (long_integer_type_node, fd.loop.n1);
575 ws_args = tree_cons (NULL, t, ws_args);
576
577 return ws_args;
578 }
579 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
580 {
581 /* Number of sections is equal to the number of edges from the
582 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
583 the exit of the sections region. */
584 basic_block bb = single_succ (gimple_bb (ws_stmt));
585 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
586 t = tree_cons (NULL, t, NULL);
587 return t;
588 }
589
590 gcc_unreachable ();
591 }
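
/* Because tree_cons prepends, the list built above ends up ordered
   n1, n2, step and then (if present) chunk_size, which matches the
   trailing start, end, incr, chunk_size arguments of libgomp's
   combined GOMP_parallel_loop_*_start entry points (noted here as a
   hint; the actual call is emitted during expansion).  */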
592
593
594 /* Discover whether REGION is a combined parallel+workshare region. */
595
596 static void
597 determine_parallel_type (struct omp_region *region)
598 {
599 basic_block par_entry_bb, par_exit_bb;
600 basic_block ws_entry_bb, ws_exit_bb;
601
602 if (region == NULL || region->inner == NULL
603 || region->exit == NULL || region->inner->exit == NULL
604 || region->inner->cont == NULL)
605 return;
606
607 /* We only support parallel+for and parallel+sections. */
608 if (region->type != GIMPLE_OMP_PARALLEL
609 || (region->inner->type != GIMPLE_OMP_FOR
610 && region->inner->type != GIMPLE_OMP_SECTIONS))
611 return;
612
613 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
614 WS_EXIT_BB -> PAR_EXIT_BB. */
615 par_entry_bb = region->entry;
616 par_exit_bb = region->exit;
617 ws_entry_bb = region->inner->entry;
618 ws_exit_bb = region->inner->exit;
619
620 if (single_succ (par_entry_bb) == ws_entry_bb
621 && single_succ (ws_exit_bb) == par_exit_bb
622 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
623 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
624 || (last_and_only_stmt (ws_entry_bb)
625 && last_and_only_stmt (par_exit_bb))))
626 {
627 gimple ws_stmt = last_stmt (ws_entry_bb);
628
629 if (region->inner->type == GIMPLE_OMP_FOR)
630 {
631 /* If this is a combined parallel loop, we need to determine
632 whether or not to use the combined library calls. There
633 are two cases where we do not apply the transformation:
634 static loops and any kind of ordered loop. In the first
635 case, we already open code the loop so there is no need
636 to do anything else. In the latter case, the combined
637 parallel loop call would still need extra synchronization
638 to implement ordered semantics, so there would not be any
639 gain in using the combined call. */
640 tree clauses = gimple_omp_for_clauses (ws_stmt);
641 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
642 if (c == NULL
643 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
644 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
645 {
646 region->is_combined_parallel = false;
647 region->inner->is_combined_parallel = false;
648 return;
649 }
650 }
651
652 region->is_combined_parallel = true;
653 region->inner->is_combined_parallel = true;
654 region->ws_args = get_ws_args_for (ws_stmt);
655 }
656 }
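
/* So, for example, a region of the form

	#pragma omp parallel
	#pragma omp for schedule(dynamic)
	for (...) ...

   with nothing else in the parallel body is marked is_combined_parallel
   and can later be expanded using a single combined library entry point
   such as GOMP_parallel_loop_dynamic_start, whereas a static or ordered
   schedule keeps the two regions separate, as explained above.  */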
657
658
659 /* Return true if EXPR is variable sized. */
660
661 static inline bool
662 is_variable_sized (const_tree expr)
663 {
664 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
665 }
666
667 /* Return true if DECL is a variable that should be privatized by reference.  */
668
669 static inline bool
670 is_reference (tree decl)
671 {
672 return lang_hooks.decls.omp_privatize_by_reference (decl);
673 }
674
675 /* Look up variables in the decl or field splay trees.  The "maybe" form
676 allows the variable not to have been entered; otherwise we
677 assert that the variable must have been entered.  */
678
679 static inline tree
680 lookup_decl (tree var, omp_context *ctx)
681 {
682 tree *n;
683 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
684 return *n;
685 }
686
687 static inline tree
688 maybe_lookup_decl (const_tree var, omp_context *ctx)
689 {
690 tree *n;
691 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
692 return n ? *n : NULL_TREE;
693 }
694
695 static inline tree
696 lookup_field (tree var, omp_context *ctx)
697 {
698 splay_tree_node n;
699 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
700 return (tree) n->value;
701 }
702
703 static inline tree
704 lookup_sfield (tree var, omp_context *ctx)
705 {
706 splay_tree_node n;
707 n = splay_tree_lookup (ctx->sfield_map
708 ? ctx->sfield_map : ctx->field_map,
709 (splay_tree_key) var);
710 return (tree) n->value;
711 }
712
713 static inline tree
714 maybe_lookup_field (tree var, omp_context *ctx)
715 {
716 splay_tree_node n;
717 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
718 return n ? (tree) n->value : NULL_TREE;
719 }
720
721 /* Return true if DECL should be copied by pointer. SHARED_CTX is
722 the parallel context if DECL is to be shared. */
723
724 static bool
725 use_pointer_for_field (tree decl, omp_context *shared_ctx)
726 {
727 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
728 return true;
729
730 /* We can only use copy-in/copy-out semantics for shared variables
731 when we know the value is not accessible from an outer scope. */
732 if (shared_ctx)
733 {
734 /* ??? Trivially accessible from anywhere. But why would we even
735 be passing an address in this case? Should we simply assert
736 this to be false, or should we have a cleanup pass that removes
737 these from the list of mappings? */
738 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
739 return true;
740
741 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
742 without analyzing the expression whether or not its location
743 is accessible to anyone else. In the case of nested parallel
744 regions it certainly may be. */
745 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
746 return true;
747
748 /* Do not use copy-in/copy-out for variables that have their
749 address taken. */
750 if (TREE_ADDRESSABLE (decl))
751 return true;
752
753 /* Disallow copy-in/out in nested parallel if
754 decl is shared in outer parallel, otherwise
755 each thread could store the shared variable
756 in its own copy-in location, making the
757 variable no longer really shared. */
758 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
759 {
760 omp_context *up;
761
762 for (up = shared_ctx->outer; up; up = up->outer)
763 if (maybe_lookup_decl (decl, up))
764 break;
765
766 if (up && is_taskreg_ctx (up))
767 {
768 tree c;
769
770 for (c = gimple_omp_taskreg_clauses (up->stmt);
771 c; c = OMP_CLAUSE_CHAIN (c))
772 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
773 && OMP_CLAUSE_DECL (c) == decl)
774 break;
775
776 if (c)
777 return true;
778 }
779 }
780
781 /* For tasks avoid using copy-in/out, unless they are readonly
782 (in which case just copy-in is used). As tasks can be
783 deferred or executed in a different thread, when GOMP_task
784 returns, the task hasn't necessarily terminated.  */
785 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
786 {
787 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
788 if (is_gimple_reg (outer))
789 {
790 /* Taking address of OUTER in lower_send_shared_vars
791 might need regimplification of everything that uses the
792 variable. */
793 if (!task_shared_vars)
794 task_shared_vars = BITMAP_ALLOC (NULL);
795 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
796 TREE_ADDRESSABLE (outer) = 1;
797 }
798 return true;
799 }
800 }
801
802 return false;
803 }
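
/* For instance, a shared local scalar like "int x" whose address is
   never taken can be copied into and out of the .omp_data_s record by
   value, whereas aggregates, addressable variables, and anything also
   visible to an outer sharing context or to a deferred task are passed
   by pointer so every thread sees the one object (an informal summary
   of the tests above).  */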
804
805 /* Create a new VAR_DECL and copy information from VAR to it. */
806
807 tree
808 copy_var_decl (tree var, tree name, tree type)
809 {
810 tree copy = build_decl (VAR_DECL, name, type);
811
812 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
813 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
814 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
815 DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
816 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
817 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
818 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
819 DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
820 TREE_USED (copy) = 1;
821 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
822
823 return copy;
824 }
825
826 /* Construct a new automatic decl similar to VAR. */
827
828 static tree
829 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
830 {
831 tree copy = copy_var_decl (var, name, type);
832
833 DECL_CONTEXT (copy) = current_function_decl;
834 TREE_CHAIN (copy) = ctx->block_vars;
835 ctx->block_vars = copy;
836
837 return copy;
838 }
839
840 static tree
841 omp_copy_decl_1 (tree var, omp_context *ctx)
842 {
843 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
844 }
845
846 /* Build tree nodes to access the field for VAR on the receiver side. */
847
848 static tree
849 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
850 {
851 tree x, field = lookup_field (var, ctx);
852
853 /* If the receiver record type was remapped in the child function,
854 remap the field into the new record type. */
855 x = maybe_lookup_field (field, ctx);
856 if (x != NULL)
857 field = x;
858
859 x = build_fold_indirect_ref (ctx->receiver_decl);
860 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
861 if (by_ref)
862 x = build_fold_indirect_ref (x);
863
864 return x;
865 }
866
867 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
868 of a parallel, this is a component reference; for workshare constructs
869 this is some variable. */
870
871 static tree
872 build_outer_var_ref (tree var, omp_context *ctx)
873 {
874 tree x;
875
876 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
877 x = var;
878 else if (is_variable_sized (var))
879 {
880 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
881 x = build_outer_var_ref (x, ctx);
882 x = build_fold_indirect_ref (x);
883 }
884 else if (is_taskreg_ctx (ctx))
885 {
886 bool by_ref = use_pointer_for_field (var, NULL);
887 x = build_receiver_ref (var, by_ref, ctx);
888 }
889 else if (ctx->outer)
890 x = lookup_decl (var, ctx->outer);
891 else if (is_reference (var))
892 /* This can happen with orphaned constructs. If var is reference, it is
893 possible it is shared and as such valid. */
894 x = var;
895 else
896 gcc_unreachable ();
897
898 if (is_reference (var))
899 x = build_fold_indirect_ref (x);
900
901 return x;
902 }
903
904 /* Build tree nodes to access the field for VAR on the sender side. */
905
906 static tree
907 build_sender_ref (tree var, omp_context *ctx)
908 {
909 tree field = lookup_sfield (var, ctx);
910 return build3 (COMPONENT_REF, TREE_TYPE (field),
911 ctx->sender_decl, field, NULL);
912 }
913
914 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
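/* MASK is a bit set: bit 0 requests a field in the receiver record
   (RECORD_TYPE / FIELD_MAP) and bit 1 a field in the sender record
   used by the task copy function (SRECORD_TYPE / SFIELD_MAP), so
   callers pass 1, 2 or 3.  */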
915
916 static void
917 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
918 {
919 tree field, type, sfield = NULL_TREE;
920
921 gcc_assert ((mask & 1) == 0
922 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
923 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
924 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
925
926 type = TREE_TYPE (var);
927 if (by_ref)
928 type = build_pointer_type (type);
929 else if ((mask & 3) == 1 && is_reference (var))
930 type = TREE_TYPE (type);
931
932 field = build_decl (FIELD_DECL, DECL_NAME (var), type);
933
934 /* Remember what variable this field was created for. This does have a
935 side effect of making dwarf2out ignore this member, so for helpful
936 debugging we clear it later in delete_omp_context. */
937 DECL_ABSTRACT_ORIGIN (field) = var;
938 if (type == TREE_TYPE (var))
939 {
940 DECL_ALIGN (field) = DECL_ALIGN (var);
941 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
942 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
943 }
944 else
945 DECL_ALIGN (field) = TYPE_ALIGN (type);
946
947 if ((mask & 3) == 3)
948 {
949 insert_field_into_struct (ctx->record_type, field);
950 if (ctx->srecord_type)
951 {
952 sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
953 DECL_ABSTRACT_ORIGIN (sfield) = var;
954 DECL_ALIGN (sfield) = DECL_ALIGN (field);
955 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
956 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
957 insert_field_into_struct (ctx->srecord_type, sfield);
958 }
959 }
960 else
961 {
962 if (ctx->srecord_type == NULL_TREE)
963 {
964 tree t;
965
966 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
967 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
968 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
969 {
970 sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
971 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
972 insert_field_into_struct (ctx->srecord_type, sfield);
973 splay_tree_insert (ctx->sfield_map,
974 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
975 (splay_tree_value) sfield);
976 }
977 }
978 sfield = field;
979 insert_field_into_struct ((mask & 1) ? ctx->record_type
980 : ctx->srecord_type, field);
981 }
982
983 if (mask & 1)
984 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
985 (splay_tree_value) field);
986 if ((mask & 2) && ctx->sfield_map)
987 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
988 (splay_tree_value) sfield);
989 }
990
991 static tree
992 install_var_local (tree var, omp_context *ctx)
993 {
994 tree new_var = omp_copy_decl_1 (var, ctx);
995 insert_decl_map (&ctx->cb, var, new_var);
996 return new_var;
997 }
998
999 /* Adjust the replacement for DECL in CTX for the new context. This means
1000 copying the DECL_VALUE_EXPR, and fixing up the type. */
1001
1002 static void
1003 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1004 {
1005 tree new_decl, size;
1006
1007 new_decl = lookup_decl (decl, ctx);
1008
1009 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1010
1011 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1012 && DECL_HAS_VALUE_EXPR_P (decl))
1013 {
1014 tree ve = DECL_VALUE_EXPR (decl);
1015 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1016 SET_DECL_VALUE_EXPR (new_decl, ve);
1017 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1018 }
1019
1020 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1021 {
1022 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1023 if (size == error_mark_node)
1024 size = TYPE_SIZE (TREE_TYPE (new_decl));
1025 DECL_SIZE (new_decl) = size;
1026
1027 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1028 if (size == error_mark_node)
1029 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1030 DECL_SIZE_UNIT (new_decl) = size;
1031 }
1032 }
1033
1034 /* The callback for remap_decl. Search all containing contexts for a
1035 mapping of the variable; this avoids having to duplicate the splay
1036 tree ahead of time. We know a mapping doesn't already exist in the
1037 given context. Create new mappings to implement default semantics. */
1038
1039 static tree
1040 omp_copy_decl (tree var, copy_body_data *cb)
1041 {
1042 omp_context *ctx = (omp_context *) cb;
1043 tree new_var;
1044
1045 if (TREE_CODE (var) == LABEL_DECL)
1046 {
1047 new_var = create_artificial_label ();
1048 DECL_CONTEXT (new_var) = current_function_decl;
1049 insert_decl_map (&ctx->cb, var, new_var);
1050 return new_var;
1051 }
1052
1053 while (!is_taskreg_ctx (ctx))
1054 {
1055 ctx = ctx->outer;
1056 if (ctx == NULL)
1057 return var;
1058 new_var = maybe_lookup_decl (var, ctx);
1059 if (new_var)
1060 return new_var;
1061 }
1062
1063 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1064 return var;
1065
1066 return error_mark_node;
1067 }
1068
1069
1070 /* Return the parallel region associated with STMT. */
1071
1072 /* Debugging dumps for parallel regions. */
1073 void dump_omp_region (FILE *, struct omp_region *, int);
1074 void debug_omp_region (struct omp_region *);
1075 void debug_all_omp_regions (void);
1076
1077 /* Dump the parallel region tree rooted at REGION. */
1078
1079 void
1080 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1081 {
1082 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1083 gimple_code_name[region->type]);
1084
1085 if (region->inner)
1086 dump_omp_region (file, region->inner, indent + 4);
1087
1088 if (region->cont)
1089 {
1090 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1091 region->cont->index);
1092 }
1093
1094 if (region->exit)
1095 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1096 region->exit->index);
1097 else
1098 fprintf (file, "%*s[no exit marker]\n", indent, "");
1099
1100 if (region->next)
1101 dump_omp_region (file, region->next, indent);
1102 }
1103
1104 void
1105 debug_omp_region (struct omp_region *region)
1106 {
1107 dump_omp_region (stderr, region, 0);
1108 }
1109
1110 void
1111 debug_all_omp_regions (void)
1112 {
1113 dump_omp_region (stderr, root_omp_region, 0);
1114 }
1115
1116
1117 /* Create a new parallel region of type TYPE starting at basic block BB inside region PARENT.  */
1118
1119 struct omp_region *
1120 new_omp_region (basic_block bb, enum gimple_code type,
1121 struct omp_region *parent)
1122 {
1123 struct omp_region *region = XCNEW (struct omp_region);
1124
1125 region->outer = parent;
1126 region->entry = bb;
1127 region->type = type;
1128
1129 if (parent)
1130 {
1131 /* This is a nested region. Add it to the list of inner
1132 regions in PARENT. */
1133 region->next = parent->inner;
1134 parent->inner = region;
1135 }
1136 else
1137 {
1138 /* This is a toplevel region. Add it to the list of toplevel
1139 regions in ROOT_OMP_REGION. */
1140 region->next = root_omp_region;
1141 root_omp_region = region;
1142 }
1143
1144 return region;
1145 }
1146
1147 /* Release the memory associated with the region tree rooted at REGION. */
1148
1149 static void
1150 free_omp_region_1 (struct omp_region *region)
1151 {
1152 struct omp_region *i, *n;
1153
1154 for (i = region->inner; i ; i = n)
1155 {
1156 n = i->next;
1157 free_omp_region_1 (i);
1158 }
1159
1160 free (region);
1161 }
1162
1163 /* Release the memory for the entire omp region tree. */
1164
1165 void
1166 free_omp_regions (void)
1167 {
1168 struct omp_region *r, *n;
1169 for (r = root_omp_region; r ; r = n)
1170 {
1171 n = r->next;
1172 free_omp_region_1 (r);
1173 }
1174 root_omp_region = NULL;
1175 }
1176
1177
1178 /* Create a new context, with OUTER_CTX being the surrounding context. */
1179
1180 static omp_context *
1181 new_omp_context (gimple stmt, omp_context *outer_ctx)
1182 {
1183 omp_context *ctx = XCNEW (omp_context);
1184
1185 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1186 (splay_tree_value) ctx);
1187 ctx->stmt = stmt;
1188
1189 if (outer_ctx)
1190 {
1191 ctx->outer = outer_ctx;
1192 ctx->cb = outer_ctx->cb;
1193 ctx->cb.block = NULL;
1194 ctx->depth = outer_ctx->depth + 1;
1195 }
1196 else
1197 {
1198 ctx->cb.src_fn = current_function_decl;
1199 ctx->cb.dst_fn = current_function_decl;
1200 ctx->cb.src_node = cgraph_node (current_function_decl);
1201 ctx->cb.dst_node = ctx->cb.src_node;
1202 ctx->cb.src_cfun = cfun;
1203 ctx->cb.copy_decl = omp_copy_decl;
1204 ctx->cb.eh_region = -1;
1205 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1206 ctx->depth = 1;
1207 }
1208
1209 ctx->cb.decl_map = pointer_map_create ();
1210
1211 return ctx;
1212 }
1213
1214 static gimple_seq maybe_catch_exception (gimple_seq);
1215
1216 /* Finalize task copyfn. */
1217
1218 static void
1219 finalize_task_copyfn (gimple task_stmt)
1220 {
1221 struct function *child_cfun;
1222 tree child_fn, old_fn;
1223 gimple_seq seq, new_seq;
1224 gimple bind;
1225
1226 child_fn = gimple_omp_task_copy_fn (task_stmt);
1227 if (child_fn == NULL_TREE)
1228 return;
1229
1230 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1231
1232 /* Inform the callgraph about the new function. */
1233 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1234 = cfun->curr_properties;
1235
1236 old_fn = current_function_decl;
1237 push_cfun (child_cfun);
1238 current_function_decl = child_fn;
1239 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1240 seq = gimple_seq_alloc ();
1241 gimple_seq_add_stmt (&seq, bind);
1242 new_seq = maybe_catch_exception (seq);
1243 if (new_seq != seq)
1244 {
1245 bind = gimple_build_bind (NULL, new_seq, NULL);
1246 seq = gimple_seq_alloc ();
1247 gimple_seq_add_stmt (&seq, bind);
1248 }
1249 gimple_set_body (child_fn, seq);
1250 pop_cfun ();
1251 current_function_decl = old_fn;
1252
1253 cgraph_add_new_function (child_fn, false);
1254 }
1255
1256 /* Destroy an omp_context data structure.  Called through the splay tree
1257 value delete callback. */
1258
1259 static void
1260 delete_omp_context (splay_tree_value value)
1261 {
1262 omp_context *ctx = (omp_context *) value;
1263
1264 pointer_map_destroy (ctx->cb.decl_map);
1265
1266 if (ctx->field_map)
1267 splay_tree_delete (ctx->field_map);
1268 if (ctx->sfield_map)
1269 splay_tree_delete (ctx->sfield_map);
1270
1271 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1272 it produces corrupt debug information. */
1273 if (ctx->record_type)
1274 {
1275 tree t;
1276 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1277 DECL_ABSTRACT_ORIGIN (t) = NULL;
1278 }
1279 if (ctx->srecord_type)
1280 {
1281 tree t;
1282 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1283 DECL_ABSTRACT_ORIGIN (t) = NULL;
1284 }
1285
1286 if (is_task_ctx (ctx))
1287 finalize_task_copyfn (ctx->stmt);
1288
1289 XDELETE (ctx);
1290 }
1291
1292 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1293 context. */
1294
1295 static void
1296 fixup_child_record_type (omp_context *ctx)
1297 {
1298 tree f, type = ctx->record_type;
1299
1300 /* ??? It isn't sufficient to just call remap_type here, because
1301 variably_modified_type_p doesn't work the way we expect for
1302 record types. Testing each field for whether it needs remapping
1303 and creating a new record by hand works, however. */
1304 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1305 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1306 break;
1307 if (f)
1308 {
1309 tree name, new_fields = NULL;
1310
1311 type = lang_hooks.types.make_type (RECORD_TYPE);
1312 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1313 name = build_decl (TYPE_DECL, name, type);
1314 TYPE_NAME (type) = name;
1315
1316 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1317 {
1318 tree new_f = copy_node (f);
1319 DECL_CONTEXT (new_f) = type;
1320 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1321 TREE_CHAIN (new_f) = new_fields;
1322 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1323 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1324 &ctx->cb, NULL);
1325 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1326 &ctx->cb, NULL);
1327 new_fields = new_f;
1328
1329 /* Arrange to be able to look up the receiver field
1330 given the sender field. */
1331 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1332 (splay_tree_value) new_f);
1333 }
1334 TYPE_FIELDS (type) = nreverse (new_fields);
1335 layout_type (type);
1336 }
1337
1338 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1339 }
1340
1341 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1342 specified by CLAUSES. */
1343
1344 static void
1345 scan_sharing_clauses (tree clauses, omp_context *ctx)
1346 {
1347 tree c, decl;
1348 bool scan_array_reductions = false;
1349
1350 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1351 {
1352 bool by_ref;
1353
1354 switch (OMP_CLAUSE_CODE (c))
1355 {
1356 case OMP_CLAUSE_PRIVATE:
1357 decl = OMP_CLAUSE_DECL (c);
1358 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1359 goto do_private;
1360 else if (!is_variable_sized (decl))
1361 install_var_local (decl, ctx);
1362 break;
1363
1364 case OMP_CLAUSE_SHARED:
1365 gcc_assert (is_taskreg_ctx (ctx));
1366 decl = OMP_CLAUSE_DECL (c);
1367 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1368 || !is_variable_sized (decl));
1369 /* Global variables don't need to be copied,
1370 the receiver side will use them directly. */
1371 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1372 break;
1373 by_ref = use_pointer_for_field (decl, ctx);
1374 if (! TREE_READONLY (decl)
1375 || TREE_ADDRESSABLE (decl)
1376 || by_ref
1377 || is_reference (decl))
1378 {
1379 install_var_field (decl, by_ref, 3, ctx);
1380 install_var_local (decl, ctx);
1381 break;
1382 }
1383 /* We don't need to copy const scalar vars back. */
1384 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1385 goto do_private;
1386
1387 case OMP_CLAUSE_LASTPRIVATE:
1388 /* Let the corresponding firstprivate clause create
1389 the variable. */
1390 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1391 break;
1392 /* FALLTHRU */
1393
1394 case OMP_CLAUSE_FIRSTPRIVATE:
1395 case OMP_CLAUSE_REDUCTION:
1396 decl = OMP_CLAUSE_DECL (c);
1397 do_private:
1398 if (is_variable_sized (decl))
1399 {
1400 if (is_task_ctx (ctx))
1401 install_var_field (decl, false, 1, ctx);
1402 break;
1403 }
1404 else if (is_taskreg_ctx (ctx))
1405 {
1406 bool global
1407 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1408 by_ref = use_pointer_for_field (decl, NULL);
1409
1410 if (is_task_ctx (ctx)
1411 && (global || by_ref || is_reference (decl)))
1412 {
1413 install_var_field (decl, false, 1, ctx);
1414 if (!global)
1415 install_var_field (decl, by_ref, 2, ctx);
1416 }
1417 else if (!global)
1418 install_var_field (decl, by_ref, 3, ctx);
1419 }
1420 install_var_local (decl, ctx);
1421 break;
1422
1423 case OMP_CLAUSE_COPYPRIVATE:
1424 if (ctx->outer)
1425 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1426 /* FALLTHRU */
1427
1428 case OMP_CLAUSE_COPYIN:
1429 decl = OMP_CLAUSE_DECL (c);
1430 by_ref = use_pointer_for_field (decl, NULL);
1431 install_var_field (decl, by_ref, 3, ctx);
1432 break;
1433
1434 case OMP_CLAUSE_DEFAULT:
1435 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1436 break;
1437
1438 case OMP_CLAUSE_IF:
1439 case OMP_CLAUSE_NUM_THREADS:
1440 case OMP_CLAUSE_SCHEDULE:
1441 if (ctx->outer)
1442 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1443 break;
1444
1445 case OMP_CLAUSE_NOWAIT:
1446 case OMP_CLAUSE_ORDERED:
1447 case OMP_CLAUSE_COLLAPSE:
1448 case OMP_CLAUSE_UNTIED:
1449 break;
1450
1451 default:
1452 gcc_unreachable ();
1453 }
1454 }
1455
1456 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1457 {
1458 switch (OMP_CLAUSE_CODE (c))
1459 {
1460 case OMP_CLAUSE_LASTPRIVATE:
1461 /* Let the corresponding firstprivate clause create
1462 the variable. */
1463 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1464 scan_array_reductions = true;
1465 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1466 break;
1467 /* FALLTHRU */
1468
1469 case OMP_CLAUSE_PRIVATE:
1470 case OMP_CLAUSE_FIRSTPRIVATE:
1471 case OMP_CLAUSE_REDUCTION:
1472 decl = OMP_CLAUSE_DECL (c);
1473 if (is_variable_sized (decl))
1474 install_var_local (decl, ctx);
1475 fixup_remapped_decl (decl, ctx,
1476 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1477 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1478 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1479 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1480 scan_array_reductions = true;
1481 break;
1482
1483 case OMP_CLAUSE_SHARED:
1484 decl = OMP_CLAUSE_DECL (c);
1485 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1486 fixup_remapped_decl (decl, ctx, false);
1487 break;
1488
1489 case OMP_CLAUSE_COPYPRIVATE:
1490 case OMP_CLAUSE_COPYIN:
1491 case OMP_CLAUSE_DEFAULT:
1492 case OMP_CLAUSE_IF:
1493 case OMP_CLAUSE_NUM_THREADS:
1494 case OMP_CLAUSE_SCHEDULE:
1495 case OMP_CLAUSE_NOWAIT:
1496 case OMP_CLAUSE_ORDERED:
1497 case OMP_CLAUSE_COLLAPSE:
1498 case OMP_CLAUSE_UNTIED:
1499 break;
1500
1501 default:
1502 gcc_unreachable ();
1503 }
1504 }
1505
1506 if (scan_array_reductions)
1507 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1508 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1509 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1510 {
1511 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1512 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1513 }
1514 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1515 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1516 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1517 }
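
/* As an example of the scanning above: for "#pragma omp parallel
   shared(x)" where X is a local, non-addressable, non-const scalar,
   the first loop installs a field for X in the sharing record
   (install_var_field with mask 3) plus a remapped local copy for the
   child function; if X were a global, neither is needed because the
   child references it directly, and a read-only scalar is instead
   downgraded to firstprivate since it never has to be copied back.  */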
1518
1519 /* Create a new name for omp child function. Returns an identifier. */
1520
1521 static GTY(()) unsigned int tmp_ompfn_id_num;
1522
1523 static tree
1524 create_omp_child_function_name (bool task_copy)
1525 {
1526 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1527 size_t len = IDENTIFIER_LENGTH (name);
1528 char *tmp_name, *prefix;
1529 const char *suffix;
1530
1531 suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
1532 prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
1533 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1534 strcpy (prefix + len, suffix);
1535 #ifndef NO_DOT_IN_LABEL
1536 prefix[len] = '.';
1537 #elif !defined NO_DOLLAR_IN_LABEL
1538 prefix[len] = '$';
1539 #endif
1540 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1541 return get_identifier (tmp_name);
1542 }
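
/* E.g. within a function "foo", the outlined bodies are typically
   named along the lines of foo._omp_fn.0, foo._omp_fn.1, ... and task
   copy functions foo._omp_cpyfn.N; the exact spelling depends on
   ASM_FORMAT_PRIVATE_NAME and on whether '.' or '$' is usable in
   labels on the target.  */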
1543
1544 /* Build a decl for the omp child function.  It will not contain a body
1545 yet, just the bare decl. */
1546
1547 static void
1548 create_omp_child_function (omp_context *ctx, bool task_copy)
1549 {
1550 tree decl, type, name, t;
1551
1552 name = create_omp_child_function_name (task_copy);
1553 if (task_copy)
1554 type = build_function_type_list (void_type_node, ptr_type_node,
1555 ptr_type_node, NULL_TREE);
1556 else
1557 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1558
1559 decl = build_decl (FUNCTION_DECL, name, type);
1560 decl = lang_hooks.decls.pushdecl (decl);
1561
1562 if (!task_copy)
1563 ctx->cb.dst_fn = decl;
1564 else
1565 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1566
1567 TREE_STATIC (decl) = 1;
1568 TREE_USED (decl) = 1;
1569 DECL_ARTIFICIAL (decl) = 1;
1570 DECL_IGNORED_P (decl) = 0;
1571 TREE_PUBLIC (decl) = 0;
1572 DECL_UNINLINABLE (decl) = 1;
1573 DECL_EXTERNAL (decl) = 0;
1574 DECL_CONTEXT (decl) = NULL_TREE;
1575 DECL_INITIAL (decl) = make_node (BLOCK);
1576
1577 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1578 DECL_ARTIFICIAL (t) = 1;
1579 DECL_IGNORED_P (t) = 1;
1580 DECL_RESULT (decl) = t;
1581
1582 t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1583 DECL_ARTIFICIAL (t) = 1;
1584 DECL_ARG_TYPE (t) = ptr_type_node;
1585 DECL_CONTEXT (t) = current_function_decl;
1586 TREE_USED (t) = 1;
1587 DECL_ARGUMENTS (decl) = t;
1588 if (!task_copy)
1589 ctx->receiver_decl = t;
1590 else
1591 {
1592 t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
1593 ptr_type_node);
1594 DECL_ARTIFICIAL (t) = 1;
1595 DECL_ARG_TYPE (t) = ptr_type_node;
1596 DECL_CONTEXT (t) = current_function_decl;
1597 TREE_USED (t) = 1;
1598 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1599 DECL_ARGUMENTS (decl) = t;
1600 }
1601
1602 /* Allocate memory for the function structure. The call to
1603 push_struct_function clobbers CFUN, so we need to restore
1604 it afterward. */
1605 push_struct_function (decl);
1606 DECL_SOURCE_LOCATION (decl) = gimple_location (ctx->stmt);
1607 cfun->function_end_locus = gimple_location (ctx->stmt);
1608 pop_cfun ();
1609 }
1610
1611
1612 /* Scan an OpenMP parallel directive. */
1613
1614 static void
1615 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1616 {
1617 omp_context *ctx;
1618 tree name;
1619 gimple stmt = gsi_stmt (*gsi);
1620
1621 /* Ignore parallel directives with empty bodies, unless there
1622 are copyin clauses. */
1623 if (optimize > 0
1624 && empty_body_p (gimple_omp_body (stmt))
1625 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1626 OMP_CLAUSE_COPYIN) == NULL)
1627 {
1628 gsi_replace (gsi, gimple_build_nop (), false);
1629 return;
1630 }
1631
1632 ctx = new_omp_context (stmt, outer_ctx);
1633 if (taskreg_nesting_level > 1)
1634 ctx->is_nested = true;
1635 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1636 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1637 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1638 name = create_tmp_var_name (".omp_data_s");
1639 name = build_decl (TYPE_DECL, name, ctx->record_type);
1640 TYPE_NAME (ctx->record_type) = name;
1641 create_omp_child_function (ctx, false);
1642 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1643
1644 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1645 scan_omp (gimple_omp_body (stmt), ctx);
1646
1647 if (TYPE_FIELDS (ctx->record_type) == NULL)
1648 ctx->record_type = ctx->receiver_decl = NULL;
1649 else
1650 {
1651 layout_type (ctx->record_type);
1652 fixup_child_record_type (ctx);
1653 }
1654 }
1655
1656 /* Scan an OpenMP task directive. */
1657
1658 static void
1659 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1660 {
1661 omp_context *ctx;
1662 tree name, t;
1663 gimple stmt = gsi_stmt (*gsi);
1664
1665 /* Ignore task directives with empty bodies. */
1666 if (optimize > 0
1667 && empty_body_p (gimple_omp_body (stmt)))
1668 {
1669 gsi_replace (gsi, gimple_build_nop (), false);
1670 return;
1671 }
1672
1673 ctx = new_omp_context (stmt, outer_ctx);
1674 if (taskreg_nesting_level > 1)
1675 ctx->is_nested = true;
1676 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1677 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1678 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1679 name = create_tmp_var_name (".omp_data_s");
1680 name = build_decl (TYPE_DECL, name, ctx->record_type);
1681 TYPE_NAME (ctx->record_type) = name;
1682 create_omp_child_function (ctx, false);
1683 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1684
1685 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1686
1687 if (ctx->srecord_type)
1688 {
1689 name = create_tmp_var_name (".omp_data_a");
1690 name = build_decl (TYPE_DECL, name, ctx->srecord_type);
1691 TYPE_NAME (ctx->srecord_type) = name;
1692 create_omp_child_function (ctx, true);
1693 }
1694
1695 scan_omp (gimple_omp_body (stmt), ctx);
1696
1697 if (TYPE_FIELDS (ctx->record_type) == NULL)
1698 {
1699 ctx->record_type = ctx->receiver_decl = NULL;
1700 t = build_int_cst (long_integer_type_node, 0);
1701 gimple_omp_task_set_arg_size (stmt, t);
1702 t = build_int_cst (long_integer_type_node, 1);
1703 gimple_omp_task_set_arg_align (stmt, t);
1704 }
1705 else
1706 {
1707 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1708 /* Move VLA fields to the end. */
1709 p = &TYPE_FIELDS (ctx->record_type);
1710 while (*p)
1711 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1712 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1713 {
1714 *q = *p;
1715 *p = TREE_CHAIN (*p);
1716 TREE_CHAIN (*q) = NULL_TREE;
1717 q = &TREE_CHAIN (*q);
1718 }
1719 else
1720 p = &TREE_CHAIN (*p);
1721 *p = vla_fields;
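/* Keeping the variable-sized fields last means the fixed-size
   fields keep constant offsets inside the block that GOMP_task
   allocates; only the trailing VLA data then needs run-time
   offsets (presumably the intent of the reordering above).  */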
1722 layout_type (ctx->record_type);
1723 fixup_child_record_type (ctx);
1724 if (ctx->srecord_type)
1725 layout_type (ctx->srecord_type);
1726 t = fold_convert (long_integer_type_node,
1727 TYPE_SIZE_UNIT (ctx->record_type));
1728 gimple_omp_task_set_arg_size (stmt, t);
1729 t = build_int_cst (long_integer_type_node,
1730 TYPE_ALIGN_UNIT (ctx->record_type));
1731 gimple_omp_task_set_arg_align (stmt, t);
1732 }
1733 }
1734
1735
1736 /* Scan an OpenMP loop directive. */
1737
1738 static void
1739 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1740 {
1741 omp_context *ctx;
1742 size_t i;
1743
1744 ctx = new_omp_context (stmt, outer_ctx);
1745
1746 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1747
1748 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1749 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1750 {
1751 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1752 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1753 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1754 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1755 }
1756 scan_omp (gimple_omp_body (stmt), ctx);
1757 }
1758
1759 /* Scan an OpenMP sections directive. */
1760
1761 static void
1762 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1763 {
1764 omp_context *ctx;
1765
1766 ctx = new_omp_context (stmt, outer_ctx);
1767 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1768 scan_omp (gimple_omp_body (stmt), ctx);
1769 }
1770
1771 /* Scan an OpenMP single directive. */
1772
1773 static void
1774 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1775 {
1776 omp_context *ctx;
1777 tree name;
1778
1779 ctx = new_omp_context (stmt, outer_ctx);
1780 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1781 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1782 name = create_tmp_var_name (".omp_copy_s");
1783 name = build_decl (TYPE_DECL, name, ctx->record_type);
1784 TYPE_NAME (ctx->record_type) = name;
1785
1786 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1787 scan_omp (gimple_omp_body (stmt), ctx);
1788
1789 if (TYPE_FIELDS (ctx->record_type) == NULL)
1790 ctx->record_type = NULL;
1791 else
1792 layout_type (ctx->record_type);
1793 }
1794
1795
1796 /* Check OpenMP nesting restrictions. */
1797 static void
1798 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1799 {
1800 switch (gimple_code (stmt))
1801 {
1802 case GIMPLE_OMP_FOR:
1803 case GIMPLE_OMP_SECTIONS:
1804 case GIMPLE_OMP_SINGLE:
1805 case GIMPLE_CALL:
1806 for (; ctx != NULL; ctx = ctx->outer)
1807 switch (gimple_code (ctx->stmt))
1808 {
1809 case GIMPLE_OMP_FOR:
1810 case GIMPLE_OMP_SECTIONS:
1811 case GIMPLE_OMP_SINGLE:
1812 case GIMPLE_OMP_ORDERED:
1813 case GIMPLE_OMP_MASTER:
1814 case GIMPLE_OMP_TASK:
1815 if (is_gimple_call (stmt))
1816 {
1817 warning (0, "barrier region may not be closely nested inside "
1818 "of work-sharing, critical, ordered, master or "
1819 "explicit task region");
1820 return;
1821 }
1822 warning (0, "work-sharing region may not be closely nested inside "
1823 "of work-sharing, critical, ordered, master or explicit "
1824 "task region");
1825 return;
1826 case GIMPLE_OMP_PARALLEL:
1827 return;
1828 default:
1829 break;
1830 }
1831 break;
1832 case GIMPLE_OMP_MASTER:
1833 for (; ctx != NULL; ctx = ctx->outer)
1834 switch (gimple_code (ctx->stmt))
1835 {
1836 case GIMPLE_OMP_FOR:
1837 case GIMPLE_OMP_SECTIONS:
1838 case GIMPLE_OMP_SINGLE:
1839 case GIMPLE_OMP_TASK:
1840 warning (0, "master region may not be closely nested inside "
1841 "of work-sharing or explicit task region");
1842 return;
1843 case GIMPLE_OMP_PARALLEL:
1844 return;
1845 default:
1846 break;
1847 }
1848 break;
1849 case GIMPLE_OMP_ORDERED:
1850 for (; ctx != NULL; ctx = ctx->outer)
1851 switch (gimple_code (ctx->stmt))
1852 {
1853 case GIMPLE_OMP_CRITICAL:
1854 case GIMPLE_OMP_TASK:
1855 warning (0, "ordered region may not be closely nested inside "
1856 "of critical or explicit task region");
1857 return;
1858 case GIMPLE_OMP_FOR:
1859 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1860 OMP_CLAUSE_ORDERED) == NULL)
1861 warning (0, "ordered region must be closely nested inside "
1862 "a loop region with an ordered clause");
1863 return;
1864 case GIMPLE_OMP_PARALLEL:
1865 return;
1866 default:
1867 break;
1868 }
1869 break;
1870 case GIMPLE_OMP_CRITICAL:
1871 for (; ctx != NULL; ctx = ctx->outer)
1872 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1873 && (gimple_omp_critical_name (stmt)
1874 == gimple_omp_critical_name (ctx->stmt)))
1875 {
1876 warning (0, "critical region may not be nested inside a critical "
1877 "region with the same name");
1878 return;
1879 }
1880 break;
1881 default:
1882 break;
1883 }
1884 }
1885
1886
1887 /* Helper function for scan_omp.
1888
1889 Callback for walk_tree or for the operand walk done by
1890 walk_gimple_stmt; remaps declarations and types found in *TP. */
1891
1892 static tree
1893 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1894 {
1895 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1896 omp_context *ctx = (omp_context *) wi->info;
1897 tree t = *tp;
1898
1899 switch (TREE_CODE (t))
1900 {
1901 case VAR_DECL:
1902 case PARM_DECL:
1903 case LABEL_DECL:
1904 case RESULT_DECL:
1905 if (ctx)
1906 *tp = remap_decl (t, &ctx->cb);
1907 break;
1908
1909 default:
1910 if (ctx && TYPE_P (t))
1911 *tp = remap_type (t, &ctx->cb);
1912 else if (!DECL_P (t))
1913 *walk_subtrees = 1;
1914 break;
1915 }
1916
1917 return NULL_TREE;
1918 }
1919
1920
1921 /* Helper function for scan_omp.
1922
1923 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1924 the current statement in GSI. */
1925
1926 static tree
1927 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1928 struct walk_stmt_info *wi)
1929 {
1930 gimple stmt = gsi_stmt (*gsi);
1931 omp_context *ctx = (omp_context *) wi->info;
1932
1933 if (gimple_has_location (stmt))
1934 input_location = gimple_location (stmt);
1935
1936 /* Check the OpenMP nesting restrictions. */
1937 if (ctx != NULL)
1938 {
1939 if (is_gimple_omp (stmt))
1940 check_omp_nesting_restrictions (stmt, ctx);
1941 else if (is_gimple_call (stmt))
1942 {
1943 tree fndecl = gimple_call_fndecl (stmt);
1944 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1945 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1946 check_omp_nesting_restrictions (stmt, ctx);
1947 }
1948 }
1949
1950 *handled_ops_p = true;
1951
1952 switch (gimple_code (stmt))
1953 {
1954 case GIMPLE_OMP_PARALLEL:
1955 taskreg_nesting_level++;
1956 scan_omp_parallel (gsi, ctx);
1957 taskreg_nesting_level--;
1958 break;
1959
1960 case GIMPLE_OMP_TASK:
1961 taskreg_nesting_level++;
1962 scan_omp_task (gsi, ctx);
1963 taskreg_nesting_level--;
1964 break;
1965
1966 case GIMPLE_OMP_FOR:
1967 scan_omp_for (stmt, ctx);
1968 break;
1969
1970 case GIMPLE_OMP_SECTIONS:
1971 scan_omp_sections (stmt, ctx);
1972 break;
1973
1974 case GIMPLE_OMP_SINGLE:
1975 scan_omp_single (stmt, ctx);
1976 break;
1977
1978 case GIMPLE_OMP_SECTION:
1979 case GIMPLE_OMP_MASTER:
1980 case GIMPLE_OMP_ORDERED:
1981 case GIMPLE_OMP_CRITICAL:
1982 ctx = new_omp_context (stmt, ctx);
1983 scan_omp (gimple_omp_body (stmt), ctx);
1984 break;
1985
1986 case GIMPLE_BIND:
1987 {
1988 tree var;
1989
1990 *handled_ops_p = false;
1991 if (ctx)
1992 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
1993 insert_decl_map (&ctx->cb, var, var);
1994 }
1995 break;
1996 default:
1997 *handled_ops_p = false;
1998 break;
1999 }
2000
2001 return NULL_TREE;
2002 }
2003
2004
2005 /* Scan all the statements in BODY. CTX contains context
2006 information about the OpenMP directives and clauses found
2007 during the scan. */
2008
2009 static void
2010 scan_omp (gimple_seq body, omp_context *ctx)
2011 {
2012 location_t saved_location;
2013 struct walk_stmt_info wi;
2014
2015 memset (&wi, 0, sizeof (wi));
2016 wi.info = ctx;
2017 wi.want_locations = true;
2018
2019 saved_location = input_location;
2020 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2021 input_location = saved_location;
2022 }
2023 \f
2024 /* Re-gimplification and code generation routines. */
2025
2026 /* Build a call to GOMP_barrier. */
2027
2028 static tree
2029 build_omp_barrier (void)
2030 {
2031 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2032 }
2033
2034 /* If a context was created for STMT when it was scanned, return it. */
2035
2036 static omp_context *
2037 maybe_lookup_ctx (gimple stmt)
2038 {
2039 splay_tree_node n;
2040 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2041 return n ? (omp_context *) n->value : NULL;
2042 }
2043
2044
2045 /* Find the mapping for DECL in the innermost enclosing context of
2046 CTX that has a mapping for DECL.
2047
2048 If CTX is a nested parallel directive, we may have to use the decl
2049 mappings created in CTX's parent context. Suppose that we have the
2050 following parallel nesting (variable UIDs shown for clarity):
2051
2052 iD.1562 = 0;
2053 #omp parallel shared(iD.1562) -> outer parallel
2054 iD.1562 = iD.1562 + 1;
2055
2056 #omp parallel shared (iD.1562) -> inner parallel
2057 iD.1562 = iD.1562 - 1;
2058
2059 Each parallel structure will create a distinct .omp_data_s structure
2060 for copying iD.1562 in/out of the directive:
2061
2062 outer parallel .omp_data_s.1.i -> iD.1562
2063 inner parallel .omp_data_s.2.i -> iD.1562
2064
2065 A shared variable mapping will produce a copy-out operation before
2066 the parallel directive and a copy-in operation after it. So, in
2067 this case we would have:
2068
2069 iD.1562 = 0;
2070 .omp_data_o.1.i = iD.1562;
2071 #omp parallel shared(iD.1562) -> outer parallel
2072 .omp_data_i.1 = &.omp_data_o.1
2073 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2074
2075 .omp_data_o.2.i = iD.1562; -> **
2076 #omp parallel shared(iD.1562) -> inner parallel
2077 .omp_data_i.2 = &.omp_data_o.2
2078 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2079
2080
2081 ** This is a problem. The symbol iD.1562 cannot be referenced
2082 inside the body of the outer parallel region. But since we are
2083 emitting this copy operation while expanding the inner parallel
2084 directive, we need to access the CTX structure of the outer
2085 parallel directive to get the correct mapping:
2086
2087 .omp_data_o.2.i = .omp_data_i.1->i
2088
2089 Since there may be other workshare or parallel directives enclosing
2090 the parallel directive, it may be necessary to walk up the context
2091 parent chain. This is not a problem in general because nested
2092 parallelism happens only rarely. */
2093
2094 static tree
2095 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2096 {
2097 tree t;
2098 omp_context *up;
2099
2100 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2101 t = maybe_lookup_decl (decl, up);
2102
2103 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2104
2105 return t ? t : decl;
2106 }
2107
2108
2109 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2110 in outer contexts. */
2111
2112 static tree
2113 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2114 {
2115 tree t = NULL;
2116 omp_context *up;
2117
2118 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2119 t = maybe_lookup_decl (decl, up);
2120
2121 return t ? t : decl;
2122 }
2123
2124
2125 /* Construct the initialization value for reduction CLAUSE. */
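/* Informally, the identity element chosen below for a few common cases
   (a summary of the switch that follows, not generated code):

     reduction(+:x)   ->  x = 0         reduction(*:x)  ->  x = 1
     reduction(|:x)   ->  x = 0         reduction(&:x)  ->  x = ~0
     reduction(max:x) ->  x = smallest value of the type
     reduction(min:x) ->  x = largest value of the type

   For floating-point max/min this is -inf/+inf when infinities are
   honored, otherwise the most negative/positive finite value.  */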
2126
2127 tree
2128 omp_reduction_init (tree clause, tree type)
2129 {
2130 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2131 {
2132 case PLUS_EXPR:
2133 case MINUS_EXPR:
2134 case BIT_IOR_EXPR:
2135 case BIT_XOR_EXPR:
2136 case TRUTH_OR_EXPR:
2137 case TRUTH_ORIF_EXPR:
2138 case TRUTH_XOR_EXPR:
2139 case NE_EXPR:
2140 return fold_convert (type, integer_zero_node);
2141
2142 case MULT_EXPR:
2143 case TRUTH_AND_EXPR:
2144 case TRUTH_ANDIF_EXPR:
2145 case EQ_EXPR:
2146 return fold_convert (type, integer_one_node);
2147
2148 case BIT_AND_EXPR:
2149 return fold_convert (type, integer_minus_one_node);
2150
2151 case MAX_EXPR:
2152 if (SCALAR_FLOAT_TYPE_P (type))
2153 {
2154 REAL_VALUE_TYPE max, min;
2155 if (HONOR_INFINITIES (TYPE_MODE (type)))
2156 {
2157 real_inf (&max);
2158 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2159 }
2160 else
2161 real_maxval (&min, 1, TYPE_MODE (type));
2162 return build_real (type, min);
2163 }
2164 else
2165 {
2166 gcc_assert (INTEGRAL_TYPE_P (type));
2167 return TYPE_MIN_VALUE (type);
2168 }
2169
2170 case MIN_EXPR:
2171 if (SCALAR_FLOAT_TYPE_P (type))
2172 {
2173 REAL_VALUE_TYPE max;
2174 if (HONOR_INFINITIES (TYPE_MODE (type)))
2175 real_inf (&max);
2176 else
2177 real_maxval (&max, 0, TYPE_MODE (type));
2178 return build_real (type, max);
2179 }
2180 else
2181 {
2182 gcc_assert (INTEGRAL_TYPE_P (type));
2183 return TYPE_MAX_VALUE (type);
2184 }
2185
2186 default:
2187 gcc_unreachable ();
2188 }
2189 }
2190
2191 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2192 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2193 private variables. Initialization statements go in ILIST, while calls
2194 to destructors go in DLIST. */
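/* As a rough sketch of the output (variable and field names are
   illustrative, not the exact generated ones), firstprivate(a) on a
   parallel adds to ILIST something like

     a.priv = .omp_data_i->a;      <- copy-construct the private copy

   and copyin(t) for a threadprivate T contributes to the copyin sequence

     if (__builtin_omp_get_thread_num () != 0)
       t = .omp_data_i->t;

   the thread-number guard being added at the end of this function so the
   master thread does not copy onto itself.  */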
2195
2196 static void
2197 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2198 omp_context *ctx)
2199 {
2200 gimple_stmt_iterator diter;
2201 tree c, dtor, copyin_seq, x, ptr;
2202 bool copyin_by_ref = false;
2203 bool lastprivate_firstprivate = false;
2204 int pass;
2205
2206 *dlist = gimple_seq_alloc ();
2207 diter = gsi_start (*dlist);
2208 copyin_seq = NULL;
2209
2210 /* Do all the fixed sized types in the first pass, and the variable sized
2211 types in the second pass. This makes sure that the scalar arguments to
2212 the variable sized types are processed before we use them in the
2213 variable sized operations. */
2214 for (pass = 0; pass < 2; ++pass)
2215 {
2216 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2217 {
2218 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2219 tree var, new_var;
2220 bool by_ref;
2221
2222 switch (c_kind)
2223 {
2224 case OMP_CLAUSE_PRIVATE:
2225 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2226 continue;
2227 break;
2228 case OMP_CLAUSE_SHARED:
2229 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2230 {
2231 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2232 continue;
2233 }
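/* FALLTHRU */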
2234 case OMP_CLAUSE_FIRSTPRIVATE:
2235 case OMP_CLAUSE_COPYIN:
2236 case OMP_CLAUSE_REDUCTION:
2237 break;
2238 case OMP_CLAUSE_LASTPRIVATE:
2239 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2240 {
2241 lastprivate_firstprivate = true;
2242 if (pass != 0)
2243 continue;
2244 }
2245 break;
2246 default:
2247 continue;
2248 }
2249
2250 new_var = var = OMP_CLAUSE_DECL (c);
2251 if (c_kind != OMP_CLAUSE_COPYIN)
2252 new_var = lookup_decl (var, ctx);
2253
2254 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2255 {
2256 if (pass != 0)
2257 continue;
2258 }
2259 else if (is_variable_sized (var))
2260 {
2261 /* For variable sized types, we need to allocate the
2262 actual storage here. Call alloca and store the
2263 result in the pointer decl that we created elsewhere. */
2264 if (pass == 0)
2265 continue;
2266
2267 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2268 {
2269 gimple stmt;
2270 tree tmp;
2271
2272 ptr = DECL_VALUE_EXPR (new_var);
2273 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2274 ptr = TREE_OPERAND (ptr, 0);
2275 gcc_assert (DECL_P (ptr));
2276 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2277
2278 /* void *tmp = __builtin_alloca (x); */
2279 stmt
2280 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2281 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2282 gimple_add_tmp_var (tmp);
2283 gimple_call_set_lhs (stmt, tmp);
2284
2285 gimple_seq_add_stmt (ilist, stmt);
2286
2287 x = fold_convert (TREE_TYPE (ptr), tmp);
2288 gimplify_assign (ptr, x, ilist);
2289 }
2290 }
2291 else if (is_reference (var))
2292 {
2293 /* For references that are being privatized for Fortran,
2294 allocate new backing storage for the new pointer
2295 variable. This allows us to avoid changing all the
2296 code that expects a pointer to something that expects
2297 a direct variable. Note that this doesn't apply to
2298 C++, since reference types are disallowed in data
2299 sharing clauses there, except for NRV optimized
2300 return values. */
2301 if (pass == 0)
2302 continue;
2303
2304 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2305 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2306 {
2307 x = build_receiver_ref (var, false, ctx);
2308 x = build_fold_addr_expr (x);
2309 }
2310 else if (TREE_CONSTANT (x))
2311 {
2312 const char *name = NULL;
2313 if (DECL_NAME (var))
2314 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2315
2316 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2317 name);
2318 gimple_add_tmp_var (x);
2319 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
2320 }
2321 else
2322 {
2323 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2324 x = fold_convert (TREE_TYPE (new_var), x);
2325 }
2326
2327 gimplify_assign (new_var, x, ilist);
2328
2329 new_var = build_fold_indirect_ref (new_var);
2330 }
2331 else if (c_kind == OMP_CLAUSE_REDUCTION
2332 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2333 {
2334 if (pass == 0)
2335 continue;
2336 }
2337 else if (pass != 0)
2338 continue;
2339
2340 switch (OMP_CLAUSE_CODE (c))
2341 {
2342 case OMP_CLAUSE_SHARED:
2343 /* Shared global vars are just accessed directly. */
2344 if (is_global_var (new_var))
2345 break;
2346 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2347 needs to be delayed until after fixup_child_record_type so
2348 that we get the correct type during the dereference. */
2349 by_ref = use_pointer_for_field (var, ctx);
2350 x = build_receiver_ref (var, by_ref, ctx);
2351 SET_DECL_VALUE_EXPR (new_var, x);
2352 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2353
2354 /* ??? If VAR is not passed by reference, and the variable
2355 hasn't been initialized yet, then we'll get a warning for
2356 the store into the omp_data_s structure. Ideally, we'd be
2357 able to notice this and not store anything at all, but
2358 we're generating code too early. Suppress the warning. */
2359 if (!by_ref)
2360 TREE_NO_WARNING (var) = 1;
2361 break;
2362
2363 case OMP_CLAUSE_LASTPRIVATE:
2364 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2365 break;
2366 /* FALLTHRU */
2367
2368 case OMP_CLAUSE_PRIVATE:
2369 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2370 x = build_outer_var_ref (var, ctx);
2371 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2372 {
2373 if (is_task_ctx (ctx))
2374 x = build_receiver_ref (var, false, ctx);
2375 else
2376 x = build_outer_var_ref (var, ctx);
2377 }
2378 else
2379 x = NULL;
2380 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2381 if (x)
2382 gimplify_and_add (x, ilist);
2383 /* FALLTHRU */
2384
2385 do_dtor:
2386 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2387 if (x)
2388 {
2389 gimple_seq tseq = NULL;
2390
2391 dtor = x;
2392 gimplify_stmt (&dtor, &tseq);
2393 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2394 }
2395 break;
2396
2397 case OMP_CLAUSE_FIRSTPRIVATE:
2398 if (is_task_ctx (ctx))
2399 {
2400 if (is_reference (var) || is_variable_sized (var))
2401 goto do_dtor;
2402 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2403 ctx))
2404 || use_pointer_for_field (var, NULL))
2405 {
2406 x = build_receiver_ref (var, false, ctx);
2407 SET_DECL_VALUE_EXPR (new_var, x);
2408 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2409 goto do_dtor;
2410 }
2411 }
2412 x = build_outer_var_ref (var, ctx);
2413 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2414 gimplify_and_add (x, ilist);
2415 goto do_dtor;
2416 break;
2417
2418 case OMP_CLAUSE_COPYIN:
2419 by_ref = use_pointer_for_field (var, NULL);
2420 x = build_receiver_ref (var, by_ref, ctx);
2421 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2422 append_to_statement_list (x, &copyin_seq);
2423 copyin_by_ref |= by_ref;
2424 break;
2425
2426 case OMP_CLAUSE_REDUCTION:
2427 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2428 {
2429 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2430 x = build_outer_var_ref (var, ctx);
2431
2432 if (is_reference (var))
2433 x = build_fold_addr_expr (x);
2434 SET_DECL_VALUE_EXPR (placeholder, x);
2435 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2436 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2437 gimple_seq_add_seq (ilist,
2438 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2439 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2440 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2441 }
2442 else
2443 {
2444 x = omp_reduction_init (c, TREE_TYPE (new_var));
2445 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2446 gimplify_assign (new_var, x, ilist);
2447 }
2448 break;
2449
2450 default:
2451 gcc_unreachable ();
2452 }
2453 }
2454 }
2455
2456 /* The copyin sequence is not to be executed by the main thread, since
2457 that would result in self-copies. A self-copy may be harmless for
2458 scalars, but it certainly is not for C++ operator=. */
2459 if (copyin_seq)
2460 {
2461 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2462 x = build2 (NE_EXPR, boolean_type_node, x,
2463 build_int_cst (TREE_TYPE (x), 0));
2464 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2465 gimplify_and_add (x, ilist);
2466 }
2467
2468 /* If any copyin variable is passed by reference, we must ensure the
2469 master thread doesn't modify it before it is copied over in all
2470 threads. Similarly for variables in both firstprivate and
2471 lastprivate clauses we need to ensure the lastprivate copying
2472 happens after firstprivate copying in all threads. */
2473 if (copyin_by_ref || lastprivate_firstprivate)
2474 gimplify_and_add (build_omp_barrier (), ilist);
2475 }
2476
2477
2478 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2479 both parallel and workshare constructs. PREDICATE may be NULL if it's
2480 always true. */
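/* Sketch of the output for lastprivate(x) with a non-NULL PREDICATE
   (labels and variable names are illustrative, not the real generated
   ones):

     if (PREDICATE) goto lab_true; else goto lab_done;
   lab_true:
     x_orig = x_priv;        <- assign back to the outer variable
   lab_done:

   i.e. the copy back to the original list item happens only in the thread
   that executed the sequentially last iteration or last section.  */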
2481
2482 static void
2483 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2484 omp_context *ctx)
2485 {
2486 tree x, c, label = NULL;
2487 bool par_clauses = false;
2488
2489 /* Early exit if there are no lastprivate clauses. */
2490 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2491 if (clauses == NULL)
2492 {
2493 /* If this was a workshare clause, see if it had been combined
2494 with its parallel. In that case, look for the clauses on the
2495 parallel statement itself. */
2496 if (is_parallel_ctx (ctx))
2497 return;
2498
2499 ctx = ctx->outer;
2500 if (ctx == NULL || !is_parallel_ctx (ctx))
2501 return;
2502
2503 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2504 OMP_CLAUSE_LASTPRIVATE);
2505 if (clauses == NULL)
2506 return;
2507 par_clauses = true;
2508 }
2509
2510 if (predicate)
2511 {
2512 gimple stmt;
2513 tree label_true, arm1, arm2;
2514
2515 label = create_artificial_label ();
2516 label_true = create_artificial_label ();
2517 arm1 = TREE_OPERAND (predicate, 0);
2518 arm2 = TREE_OPERAND (predicate, 1);
2519 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2520 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2521 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2522 label_true, label);
2523 gimple_seq_add_stmt (stmt_list, stmt);
2524 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2525 }
2526
2527 for (c = clauses; c ;)
2528 {
2529 tree var, new_var;
2530
2531 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2532 {
2533 var = OMP_CLAUSE_DECL (c);
2534 new_var = lookup_decl (var, ctx);
2535
2536 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2537 {
2538 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2539 gimple_seq_add_seq (stmt_list,
2540 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2541 }
2542 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2543
2544 x = build_outer_var_ref (var, ctx);
2545 if (is_reference (var))
2546 new_var = build_fold_indirect_ref (new_var);
2547 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2548 gimplify_and_add (x, stmt_list);
2549 }
2550 c = OMP_CLAUSE_CHAIN (c);
2551 if (c == NULL && !par_clauses)
2552 {
2553 /* If this was a workshare clause, see if it had been combined
2554 with its parallel. In that case, continue looking for the
2555 clauses also on the parallel statement itself. */
2556 if (is_parallel_ctx (ctx))
2557 break;
2558
2559 ctx = ctx->outer;
2560 if (ctx == NULL || !is_parallel_ctx (ctx))
2561 break;
2562
2563 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2564 OMP_CLAUSE_LASTPRIVATE);
2565 par_clauses = true;
2566 }
2567 }
2568
2569 if (label)
2570 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2571 }
2572
2573
2574 /* Generate code to implement the REDUCTION clauses. */
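/* A sketch of the merge code generated below (variable names are
   illustrative).  With exactly one scalar reduction clause, e.g.
   reduction(+:s), a single atomic update is emitted:

     atomically:  s_orig = s_orig + s_priv;

   With more than one clause, or with an array/placeholder reduction, the
   merges are instead serialized under the global lock:

     GOMP_atomic_start ();
     s_orig = s_orig + s_priv;
     t_orig = t_orig * t_priv;
     GOMP_atomic_end ();  */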
2575
2576 static void
2577 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2578 {
2579 gimple_seq sub_seq = NULL;
2580 gimple stmt;
2581 tree x, c;
2582 int count = 0;
2583
2584 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2585 update in that case, otherwise use a lock. */
2586 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2587 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2588 {
2589 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2590 {
2591 /* Never use OMP_ATOMIC for array reductions. */
2592 count = -1;
2593 break;
2594 }
2595 count++;
2596 }
2597
2598 if (count == 0)
2599 return;
2600
2601 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2602 {
2603 tree var, ref, new_var;
2604 enum tree_code code;
2605
2606 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2607 continue;
2608
2609 var = OMP_CLAUSE_DECL (c);
2610 new_var = lookup_decl (var, ctx);
2611 if (is_reference (var))
2612 new_var = build_fold_indirect_ref (new_var);
2613 ref = build_outer_var_ref (var, ctx);
2614 code = OMP_CLAUSE_REDUCTION_CODE (c);
2615
2616 /* reduction(-:var) sums up the partial results, so it acts
2617 identically to reduction(+:var). */
2618 if (code == MINUS_EXPR)
2619 code = PLUS_EXPR;
2620
2621 if (count == 1)
2622 {
2623 tree addr = build_fold_addr_expr (ref);
2624
2625 addr = save_expr (addr);
2626 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2627 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2628 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2629 gimplify_and_add (x, stmt_seqp);
2630 return;
2631 }
2632
2633 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2634 {
2635 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2636
2637 if (is_reference (var))
2638 ref = build_fold_addr_expr (ref);
2639 SET_DECL_VALUE_EXPR (placeholder, ref);
2640 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2641 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2642 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2643 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2644 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2645 }
2646 else
2647 {
2648 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2649 ref = build_outer_var_ref (var, ctx);
2650 gimplify_assign (ref, x, &sub_seq);
2651 }
2652 }
2653
2654 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2655 gimple_seq_add_stmt (stmt_seqp, stmt);
2656
2657 gimple_seq_add_seq (stmt_seqp, sub_seq);
2658
2659 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2660 gimple_seq_add_stmt (stmt_seqp, stmt);
2661 }
2662
2663
2664 /* Generate code to implement the COPYPRIVATE clauses. */
2665
2666 static void
2667 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2668 omp_context *ctx)
2669 {
2670 tree c;
2671
2672 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2673 {
2674 tree var, ref, x;
2675 bool by_ref;
2676
2677 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2678 continue;
2679
2680 var = OMP_CLAUSE_DECL (c);
2681 by_ref = use_pointer_for_field (var, NULL);
2682
2683 ref = build_sender_ref (var, ctx);
2684 x = lookup_decl_in_outer_ctx (var, ctx);
2685 x = by_ref ? build_fold_addr_expr (x) : x;
2686 gimplify_assign (ref, x, slist);
2687
2688 ref = build_receiver_ref (var, by_ref, ctx);
2689 if (is_reference (var))
2690 {
2691 ref = build_fold_indirect_ref (ref);
2692 var = build_fold_indirect_ref (var);
2693 }
2694 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2695 gimplify_and_add (x, rlist);
2696 }
2697 }
2698
2699
2700 /* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE
2701 and REDUCTION clauses from the sender (aka parent) side. */
2702
2703 static void
2704 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2705 omp_context *ctx)
2706 {
2707 tree c;
2708
2709 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2710 {
2711 tree val, ref, x, var;
2712 bool by_ref, do_in = false, do_out = false;
2713
2714 switch (OMP_CLAUSE_CODE (c))
2715 {
2716 case OMP_CLAUSE_PRIVATE:
2717 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2718 break;
2719 continue;
2720 case OMP_CLAUSE_FIRSTPRIVATE:
2721 case OMP_CLAUSE_COPYIN:
2722 case OMP_CLAUSE_LASTPRIVATE:
2723 case OMP_CLAUSE_REDUCTION:
2724 break;
2725 default:
2726 continue;
2727 }
2728
2729 val = OMP_CLAUSE_DECL (c);
2730 var = lookup_decl_in_outer_ctx (val, ctx);
2731
2732 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2733 && is_global_var (var))
2734 continue;
2735 if (is_variable_sized (val))
2736 continue;
2737 by_ref = use_pointer_for_field (val, NULL);
2738
2739 switch (OMP_CLAUSE_CODE (c))
2740 {
2741 case OMP_CLAUSE_PRIVATE:
2742 case OMP_CLAUSE_FIRSTPRIVATE:
2743 case OMP_CLAUSE_COPYIN:
2744 do_in = true;
2745 break;
2746
2747 case OMP_CLAUSE_LASTPRIVATE:
2748 if (by_ref || is_reference (val))
2749 {
2750 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2751 continue;
2752 do_in = true;
2753 }
2754 else
2755 {
2756 do_out = true;
2757 if (lang_hooks.decls.omp_private_outer_ref (val))
2758 do_in = true;
2759 }
2760 break;
2761
2762 case OMP_CLAUSE_REDUCTION:
2763 do_in = true;
2764 do_out = !(by_ref || is_reference (val));
2765 break;
2766
2767 default:
2768 gcc_unreachable ();
2769 }
2770
2771 if (do_in)
2772 {
2773 ref = build_sender_ref (val, ctx);
2774 x = by_ref ? build_fold_addr_expr (var) : var;
2775 gimplify_assign (ref, x, ilist);
2776 if (is_task_ctx (ctx))
2777 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2778 }
2779
2780 if (do_out)
2781 {
2782 ref = build_sender_ref (val, ctx);
2783 gimplify_assign (var, ref, olist);
2784 }
2785 }
2786 }
2787
2788 /* Generate code to implement SHARED from the sender (aka parent)
2789 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2790 list things that got automatically shared. */
2791
2792 static void
2793 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2794 {
2795 tree var, ovar, nvar, f, x, record_type;
2796
2797 if (ctx->record_type == NULL)
2798 return;
2799
2800 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2801 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2802 {
2803 ovar = DECL_ABSTRACT_ORIGIN (f);
2804 nvar = maybe_lookup_decl (ovar, ctx);
2805 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2806 continue;
2807
2808 /* If CTX is a nested parallel directive, find the immediately
2809 enclosing parallel or workshare construct that contains a
2810 mapping for OVAR. */
2811 var = lookup_decl_in_outer_ctx (ovar, ctx);
2812
2813 if (use_pointer_for_field (ovar, ctx))
2814 {
2815 x = build_sender_ref (ovar, ctx);
2816 var = build_fold_addr_expr (var);
2817 gimplify_assign (x, var, ilist);
2818 }
2819 else
2820 {
2821 x = build_sender_ref (ovar, ctx);
2822 gimplify_assign (x, var, ilist);
2823
2824 if (!TREE_READONLY (var))
2825 {
2826 x = build_sender_ref (ovar, ctx);
2827 gimplify_assign (var, x, olist);
2828 }
2829 }
2830 }
2831 }
2832
2833
2834 /* A convenience function to build an empty GIMPLE_COND with just the
2835 condition. */
2836
2837 static gimple
2838 gimple_build_cond_empty (tree cond)
2839 {
2840 enum tree_code pred_code;
2841 tree lhs, rhs;
2842
2843 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2844 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2845 }
2846
2847
2848 /* Build the function calls to GOMP_parallel_start etc to actually
2849 generate the parallel operation. REGION is the parallel region
2850 being expanded. BB is the block where to insert the code. WS_ARGS
2851 will be set if this is a call to a combined parallel+workshare
2852 construct, it contains the list of additional arguments needed by
2853 the workshare construct. */
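/* The emitted sequence is essentially (a sketch; the data block name is
   illustrative, and the exact start function depends on whether this is a
   combined parallel+workshare region):

     GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   where num_threads is 0 ("pick at run time") unless a num_threads or if
   clause says otherwise.  */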
2854
2855 static void
2856 expand_parallel_call (struct omp_region *region, basic_block bb,
2857 gimple entry_stmt, tree ws_args)
2858 {
2859 tree t, t1, t2, val, cond, c, clauses;
2860 gimple_stmt_iterator gsi;
2861 gimple stmt;
2862 int start_ix;
2863
2864 clauses = gimple_omp_parallel_clauses (entry_stmt);
2865
2866 /* Determine what flavor of GOMP_parallel_start we will be
2867 emitting. */
2868 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2869 if (is_combined_parallel (region))
2870 {
2871 switch (region->inner->type)
2872 {
2873 case GIMPLE_OMP_FOR:
2874 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2875 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2876 + (region->inner->sched_kind
2877 == OMP_CLAUSE_SCHEDULE_RUNTIME
2878 ? 3 : region->inner->sched_kind);
2879 break;
2880 case GIMPLE_OMP_SECTIONS:
2881 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2882 break;
2883 default:
2884 gcc_unreachable ();
2885 }
2886 }
2887
2888 /* By default, the value of NUM_THREADS is zero (selected at run time)
2889 and there is no conditional. */
2890 cond = NULL_TREE;
2891 val = build_int_cst (unsigned_type_node, 0);
2892
2893 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2894 if (c)
2895 cond = OMP_CLAUSE_IF_EXPR (c);
2896
2897 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2898 if (c)
2899 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2900
2901 /* Ensure 'val' is of the correct type. */
2902 val = fold_convert (unsigned_type_node, val);
2903
2904 /* If we found the clause 'if (cond)', build either
2905 (cond != 0) or (cond ? val : 1u). */
2906 if (cond)
2907 {
2908 gimple_stmt_iterator gsi;
2909
2910 cond = gimple_boolify (cond);
2911
2912 if (integer_zerop (val))
2913 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2914 build_int_cst (TREE_TYPE (cond), 0));
2915 else
2916 {
2917 basic_block cond_bb, then_bb, else_bb;
2918 edge e, e_then, e_else;
2919 tree tmp_then, tmp_else, tmp_join, tmp_var;
2920
2921 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2922 if (gimple_in_ssa_p (cfun))
2923 {
2924 tmp_then = make_ssa_name (tmp_var, NULL);
2925 tmp_else = make_ssa_name (tmp_var, NULL);
2926 tmp_join = make_ssa_name (tmp_var, NULL);
2927 }
2928 else
2929 {
2930 tmp_then = tmp_var;
2931 tmp_else = tmp_var;
2932 tmp_join = tmp_var;
2933 }
2934
2935 e = split_block (bb, NULL);
2936 cond_bb = e->src;
2937 bb = e->dest;
2938 remove_edge (e);
2939
2940 then_bb = create_empty_bb (cond_bb);
2941 else_bb = create_empty_bb (then_bb);
2942 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2943 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2944
2945 stmt = gimple_build_cond_empty (cond);
2946 gsi = gsi_start_bb (cond_bb);
2947 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2948
2949 gsi = gsi_start_bb (then_bb);
2950 stmt = gimple_build_assign (tmp_then, val);
2951 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2952
2953 gsi = gsi_start_bb (else_bb);
2954 stmt = gimple_build_assign
2955 (tmp_else, build_int_cst (unsigned_type_node, 1));
2956 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2957
2958 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2959 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2960 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2961 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2962
2963 if (gimple_in_ssa_p (cfun))
2964 {
2965 gimple phi = create_phi_node (tmp_join, bb);
2966 SSA_NAME_DEF_STMT (tmp_join) = phi;
2967 add_phi_arg (phi, tmp_then, e_then);
2968 add_phi_arg (phi, tmp_else, e_else);
2969 }
2970
2971 val = tmp_join;
2972 }
2973
2974 gsi = gsi_start_bb (bb);
2975 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
2976 false, GSI_CONTINUE_LINKING);
2977 }
2978
2979 gsi = gsi_last_bb (bb);
2980 t = gimple_omp_parallel_data_arg (entry_stmt);
2981 if (t == NULL)
2982 t1 = null_pointer_node;
2983 else
2984 t1 = build_fold_addr_expr (t);
2985 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
2986
2987 if (ws_args)
2988 {
2989 tree args = tree_cons (NULL, t2,
2990 tree_cons (NULL, t1,
2991 tree_cons (NULL, val, ws_args)));
2992 t = build_function_call_expr (built_in_decls[start_ix], args);
2993 }
2994 else
2995 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
2996
2997 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
2998 false, GSI_CONTINUE_LINKING);
2999
3000 t = gimple_omp_parallel_data_arg (entry_stmt);
3001 if (t == NULL)
3002 t = null_pointer_node;
3003 else
3004 t = build_fold_addr_expr (t);
3005 t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3006 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3007 false, GSI_CONTINUE_LINKING);
3008
3009 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3010 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3011 false, GSI_CONTINUE_LINKING);
3012 }
3013
3014
3015 /* Build the function call to GOMP_task to actually
3016 generate the task operation. BB is the block where to insert the code. */
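/* The emitted call is a single (sketch; data block name illustrative,
   argument order as built below):

     GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
                if_clause_cond, flags);

   with FLAGS currently carrying only the "untied" bit.  */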
3017
3018 static void
3019 expand_task_call (basic_block bb, gimple entry_stmt)
3020 {
3021 tree t, t1, t2, t3, flags, cond, c, clauses;
3022 gimple_stmt_iterator gsi;
3023
3024 clauses = gimple_omp_task_clauses (entry_stmt);
3025
3026 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3027 if (c)
3028 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3029 else
3030 cond = boolean_true_node;
3031
3032 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3033 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3034
3035 gsi = gsi_last_bb (bb);
3036 t = gimple_omp_task_data_arg (entry_stmt);
3037 if (t == NULL)
3038 t2 = null_pointer_node;
3039 else
3040 t2 = build_fold_addr_expr (t);
3041 t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
3042 t = gimple_omp_task_copy_fn (entry_stmt);
3043 if (t == NULL)
3044 t3 = null_pointer_node;
3045 else
3046 t3 = build_fold_addr_expr (t);
3047
3048 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3049 gimple_omp_task_arg_size (entry_stmt),
3050 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3051
3052 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3053 false, GSI_CONTINUE_LINKING);
3054 }
3055
3056
3057 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3058 catch handler and return it. This prevents programs from violating the
3059 structured block semantics with throws. */
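/* Conceptually the result is (sketch only):

     try
       {
         BODY;
       }
     catch-all filter [MUST_NOT_THROW]
       {
         <language cleanup action, or __builtin_trap ()>;
       }

   so an exception escaping the structured block terminates the program
   rather than unwinding past the OpenMP region.  */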
3060
3061 static gimple_seq
3062 maybe_catch_exception (gimple_seq body)
3063 {
3064 gimple f, t;
3065
3066 if (!flag_exceptions)
3067 return body;
3068
3069 if (lang_protect_cleanup_actions)
3070 t = lang_protect_cleanup_actions ();
3071 else
3072 t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
3073
3074 f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
3075 gimple_eh_filter_set_must_not_throw (f, true);
3076
3077 t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
3078 GIMPLE_TRY_CATCH);
3079
3080 return gimple_seq_alloc_with_stmt (t);
3081 }
3082
3083 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3084
3085 static tree
3086 list2chain (tree list)
3087 {
3088 tree t;
3089
3090 for (t = list; t; t = TREE_CHAIN (t))
3091 {
3092 tree var = TREE_VALUE (t);
3093 if (TREE_CHAIN (t))
3094 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3095 else
3096 TREE_CHAIN (var) = NULL_TREE;
3097 }
3098
3099 return list ? TREE_VALUE (list) : NULL_TREE;
3100 }
3101
3102
3103 /* Remove barriers in REGION->EXIT's block. Note that this is only
3104 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3105 is an implicit barrier, any barrier that a workshare inside the
3106 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
3107 can now be removed. */
3108
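/* For example (user code; names illustrative):

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++)
         ...
       <- implicit barrier of the for loop
     }   <- implicit barrier of the parallel region

   the loop's barrier is redundant with the one at the end of the parallel
   region, so its GIMPLE_OMP_RETURN is marked nowait below.  */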
3109 static void
3110 remove_exit_barrier (struct omp_region *region)
3111 {
3112 gimple_stmt_iterator gsi;
3113 basic_block exit_bb;
3114 edge_iterator ei;
3115 edge e;
3116 gimple stmt;
3117
3118 exit_bb = region->exit;
3119
3120 /* If the parallel region doesn't return, we don't have REGION->EXIT
3121 block at all. */
3122 if (! exit_bb)
3123 return;
3124
3125 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3126 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3127 statements that can appear in between are extremely limited -- no
3128 memory operations at all. Here, we allow nothing at all, so the
3129 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3130 gsi = gsi_last_bb (exit_bb);
3131 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3132 gsi_prev (&gsi);
3133 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3134 return;
3135
3136 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3137 {
3138 gsi = gsi_last_bb (e->src);
3139 if (gsi_end_p (gsi))
3140 continue;
3141 stmt = gsi_stmt (gsi);
3142 if (gimple_code (stmt) == GIMPLE_OMP_RETURN)
3143 gimple_omp_return_set_nowait (stmt);
3144 }
3145 }
3146
3147 static void
3148 remove_exit_barriers (struct omp_region *region)
3149 {
3150 if (region->type == GIMPLE_OMP_PARALLEL)
3151 remove_exit_barrier (region);
3152
3153 if (region->inner)
3154 {
3155 region = region->inner;
3156 remove_exit_barriers (region);
3157 while (region->next)
3158 {
3159 region = region->next;
3160 remove_exit_barriers (region);
3161 }
3162 }
3163 }
3164
3165 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3166 calls. These can't be declared as const functions, but
3167 within one parallel body they are constant, so they can be
3168 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3169 which are declared const. Similarly for a task body, except
3170 that in an untied task omp_get_thread_num () can change at any
3171 task scheduling point. */
3172
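/* In other words, inside the outlined body a call like

     n = omp_get_num_threads ();

   is redirected to the const __builtin_omp_get_num_threads () (and
   likewise for omp_get_thread_num ()), provided the declaration really is
   the external libgomp one and the prototypes match.  */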
3173 static void
3174 optimize_omp_library_calls (gimple entry_stmt)
3175 {
3176 basic_block bb;
3177 gimple_stmt_iterator gsi;
3178 tree thr_num_id
3179 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3180 tree num_thr_id
3181 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3182 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3183 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3184 OMP_CLAUSE_UNTIED) != NULL);
3185
3186 FOR_EACH_BB (bb)
3187 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3188 {
3189 gimple call = gsi_stmt (gsi);
3190 tree decl;
3191
3192 if (is_gimple_call (call)
3193 && (decl = gimple_call_fndecl (call))
3194 && DECL_EXTERNAL (decl)
3195 && TREE_PUBLIC (decl)
3196 && DECL_INITIAL (decl) == NULL)
3197 {
3198 tree built_in;
3199
3200 if (DECL_NAME (decl) == thr_num_id)
3201 {
3202 /* In #pragma omp task untied omp_get_thread_num () can change
3203 during the execution of the task region. */
3204 if (untied_task)
3205 continue;
3206 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3207 }
3208 else if (DECL_NAME (decl) == num_thr_id)
3209 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3210 else
3211 continue;
3212
3213 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3214 || gimple_call_num_args (call) != 0)
3215 continue;
3216
3217 if (flag_exceptions && !TREE_NOTHROW (decl))
3218 continue;
3219
3220 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3221 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3222 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3223 continue;
3224
3225 gimple_call_set_fn (call, build_fold_addr_expr (built_in));
3226 }
3227 }
3228 }
3229
3230 /* Expand the OpenMP parallel or task directive starting at REGION. */
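/* A rough picture of what happens here for

     #pragma omp parallel shared(a)
       body;

   (names illustrative): the body's basic blocks are moved into the CFG of
   the child function created during scanning, the .OMP_DATA_I copy
   statement is rewired to the child's argument, and the directive itself
   is replaced by the call sequence built in expand_parallel_call (or
   expand_task_call for a task), roughly

     GOMP_parallel_start (child_fn, &.omp_data_o, 0);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();  */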
3231
3232 static void
3233 expand_omp_taskreg (struct omp_region *region)
3234 {
3235 basic_block entry_bb, exit_bb, new_bb;
3236 struct function *child_cfun;
3237 tree child_fn, block, t, ws_args, *tp;
3238 gimple_stmt_iterator gsi;
3239 gimple entry_stmt, stmt;
3240 edge e;
3241
3242 entry_stmt = last_stmt (region->entry);
3243 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3244 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3245 /* If this function has been already instrumented, make sure
3246 the child function isn't instrumented again. */
3247 child_cfun->after_tree_profile = cfun->after_tree_profile;
3248
3249 entry_bb = region->entry;
3250 exit_bb = region->exit;
3251
3252 if (is_combined_parallel (region))
3253 ws_args = region->ws_args;
3254 else
3255 ws_args = NULL_TREE;
3256
3257 if (child_cfun->cfg)
3258 {
3259 /* Due to inlining, it may happen that we have already outlined
3260 the region, in which case all we need to do is make the
3261 sub-graph unreachable and emit the parallel call. */
3262 edge entry_succ_e, exit_succ_e;
3263 gimple_stmt_iterator gsi;
3264
3265 entry_succ_e = single_succ_edge (entry_bb);
3266
3267 gsi = gsi_last_bb (entry_bb);
3268 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3269 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3270 gsi_remove (&gsi, true);
3271
3272 new_bb = entry_bb;
3273 if (exit_bb)
3274 {
3275 exit_succ_e = single_succ_edge (exit_bb);
3276 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3277 }
3278 remove_edge_and_dominated_blocks (entry_succ_e);
3279 }
3280 else
3281 {
3282 /* If the parallel region needs data sent from the parent
3283 function, then the very first statement (except possible
3284 tree profile counter updates) of the parallel body
3285 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3286 &.OMP_DATA_O is passed as an argument to the child function,
3287 we need to replace it with the argument as seen by the child
3288 function.
3289
3290 In most cases, this will end up being the identity assignment
3291 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3292 a function call that has been inlined, the original PARM_DECL
3293 .OMP_DATA_I may have been converted into a different local
3294 variable, in which case we need to keep the assignment. */
3295 if (gimple_omp_taskreg_data_arg (entry_stmt))
3296 {
3297 basic_block entry_succ_bb = single_succ (entry_bb);
3298 gimple_stmt_iterator gsi;
3299 tree arg, narg;
3300 gimple parcopy_stmt = NULL;
3301
3302 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3303 {
3304 gimple stmt;
3305
3306 gcc_assert (!gsi_end_p (gsi));
3307 stmt = gsi_stmt (gsi);
3308 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3309 continue;
3310
3311 if (gimple_num_ops (stmt) == 2)
3312 {
3313 tree arg = gimple_assign_rhs1 (stmt);
3314
3315 /* We're ignoring the subcode because we're
3316 effectively doing a STRIP_NOPS. */
3317
3318 if (TREE_CODE (arg) == ADDR_EXPR
3319 && TREE_OPERAND (arg, 0)
3320 == gimple_omp_taskreg_data_arg (entry_stmt))
3321 {
3322 parcopy_stmt = stmt;
3323 break;
3324 }
3325 }
3326 }
3327
3328 gcc_assert (parcopy_stmt != NULL);
3329 arg = DECL_ARGUMENTS (child_fn);
3330
3331 if (!gimple_in_ssa_p (cfun))
3332 {
3333 if (gimple_assign_lhs (parcopy_stmt) == arg)
3334 gsi_remove (&gsi, true);
3335 else
3336 {
3337 /* ?? Is setting the subcode really necessary ?? */
3338 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3339 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3340 }
3341 }
3342 else
3343 {
3344 /* If we are in ssa form, we must load the value from the default
3345 definition of the argument. That definition should not exist yet,
3346 since the argument is never used uninitialized. */
3347 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3348 narg = make_ssa_name (arg, gimple_build_nop ());
3349 set_default_def (arg, narg);
3350 /* ?? Is setting the subcode really necessary ?? */
3351 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3352 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3353 update_stmt (parcopy_stmt);
3354 }
3355 }
3356
3357 /* Declare local variables needed in CHILD_CFUN. */
3358 block = DECL_INITIAL (child_fn);
3359 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3360 DECL_SAVED_TREE (child_fn) = NULL;
3361 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3362 TREE_USED (block) = 1;
3363
3364 /* Reset DECL_CONTEXT on function arguments. */
3365 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3366 DECL_CONTEXT (t) = child_fn;
3367
3368 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3369 so that it can be moved to the child function. */
3370 gsi = gsi_last_bb (entry_bb);
3371 stmt = gsi_stmt (gsi);
3372 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3373 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3374 gsi_remove (&gsi, true);
3375 e = split_block (entry_bb, stmt);
3376 entry_bb = e->dest;
3377 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3378
3379 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3380 if (exit_bb)
3381 {
3382 gsi = gsi_last_bb (exit_bb);
3383 gcc_assert (!gsi_end_p (gsi)
3384 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3385 stmt = gimple_build_return (NULL);
3386 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3387 gsi_remove (&gsi, true);
3388 }
3389
3390 /* Move the parallel region into CHILD_CFUN. */
3391
3392 if (gimple_in_ssa_p (cfun))
3393 {
3394 push_cfun (child_cfun);
3395 init_tree_ssa (child_cfun);
3396 init_ssa_operands ();
3397 cfun->gimple_df->in_ssa_p = true;
3398 pop_cfun ();
3399 block = NULL_TREE;
3400 }
3401 else
3402 block = gimple_block (entry_stmt);
3403
3404 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3405 if (exit_bb)
3406 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3407
3408 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3409 for (tp = &child_cfun->local_decls; *tp; )
3410 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3411 tp = &TREE_CHAIN (*tp);
3412 else
3413 *tp = TREE_CHAIN (*tp);
3414
3415 /* Inform the callgraph about the new function. */
3416 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3417 = cfun->curr_properties;
3418 cgraph_add_new_function (child_fn, true);
3419
3420 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3421 fixed in a following pass. */
3422 push_cfun (child_cfun);
3423 if (optimize)
3424 optimize_omp_library_calls (entry_stmt);
3425 rebuild_cgraph_edges ();
3426
3427 /* Some EH regions might become dead, see PR34608. If
3428 pass_cleanup_cfg isn't the first pass to happen with the
3429 new child, these dead EH edges might cause problems.
3430 Clean them up now. */
3431 if (flag_exceptions)
3432 {
3433 basic_block bb;
3434 tree save_current = current_function_decl;
3435 bool changed = false;
3436
3437 current_function_decl = child_fn;
3438 FOR_EACH_BB (bb)
3439 changed |= gimple_purge_dead_eh_edges (bb);
3440 if (changed)
3441 cleanup_tree_cfg ();
3442 current_function_decl = save_current;
3443 }
3444 pop_cfun ();
3445 }
3446
3447 /* Emit a library call to launch the children threads. */
3448 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3449 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3450 else
3451 expand_task_call (new_bb, entry_stmt);
3452 update_ssa (TODO_update_ssa_only_virtuals);
3453 }
3454
3455
3456 /* A subroutine of expand_omp_for. Generate code for a parallel
3457 loop with any schedule. Given parameters:
3458
3459 for (V = N1; V cond N2; V += STEP) BODY;
3460
3461 where COND is "<" or ">", we generate pseudocode
3462
3463 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3464 if (more) goto L0; else goto L3;
3465 L0:
3466 V = istart0;
3467 iend = iend0;
3468 L1:
3469 BODY;
3470 V += STEP;
3471 if (V cond iend) goto L1; else goto L2;
3472 L2:
3473 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3474 L3:
3475
3476 If this is a combined omp parallel loop, instead of the call to
3477 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3478
3479 For collapsed loops, given parameters:
3480 collapse(3)
3481 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3482 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3483 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3484 BODY;
3485
3486 we generate pseudocode
3487
3488 if (cond3 is <)
3489 adj = STEP3 - 1;
3490 else
3491 adj = STEP3 + 1;
3492 count3 = (adj + N32 - N31) / STEP3;
3493 if (cond2 is <)
3494 adj = STEP2 - 1;
3495 else
3496 adj = STEP2 + 1;
3497 count2 = (adj + N22 - N21) / STEP2;
3498 if (cond1 is <)
3499 adj = STEP1 - 1;
3500 else
3501 adj = STEP1 + 1;
3502 count1 = (adj + N12 - N11) / STEP1;
3503 count = count1 * count2 * count3;
3504 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3505 if (more) goto L0; else goto L3;
3506 L0:
3507 V = istart0;
3508 T = V;
3509 V3 = N31 + (T % count3) * STEP3;
3510 T = T / count3;
3511 V2 = N21 + (T % count2) * STEP2;
3512 T = T / count2;
3513 V1 = N11 + T * STEP1;
3514 iend = iend0;
3515 L1:
3516 BODY;
3517 V += 1;
3518 if (V < iend) goto L10; else goto L2;
3519 L10:
3520 V3 += STEP3;
3521 if (V3 cond3 N32) goto L1; else goto L11;
3522 L11:
3523 V3 = N31;
3524 V2 += STEP2;
3525 if (V2 cond2 N22) goto L1; else goto L12;
3526 L12:
3527 V2 = N21;
3528 V1 += STEP1;
3529 goto L1;
3530 L2:
3531 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3532 L3:
3533
3534 */
3535
3536 static void
3537 expand_omp_for_generic (struct omp_region *region,
3538 struct omp_for_data *fd,
3539 enum built_in_function start_fn,
3540 enum built_in_function next_fn)
3541 {
3542 tree type, istart0, iend0, iend;
3543 tree t, vmain, vback, bias = NULL_TREE;
3544 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3545 basic_block l2_bb = NULL, l3_bb = NULL;
3546 gimple_stmt_iterator gsi;
3547 gimple stmt;
3548 bool in_combined_parallel = is_combined_parallel (region);
3549 bool broken_loop = region->cont == NULL;
3550 edge e, ne;
3551 tree *counts = NULL;
3552 int i;
3553
3554 gcc_assert (!broken_loop || !in_combined_parallel);
3555 gcc_assert (fd->iter_type == long_integer_type_node
3556 || !in_combined_parallel);
3557
3558 type = TREE_TYPE (fd->loop.v);
3559 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3560 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3561 TREE_ADDRESSABLE (istart0) = 1;
3562 TREE_ADDRESSABLE (iend0) = 1;
3563 if (gimple_in_ssa_p (cfun))
3564 {
3565 add_referenced_var (istart0);
3566 add_referenced_var (iend0);
3567 }
3568
3569 /* See if we need to bias by LLONG_MIN. */
3570 if (fd->iter_type == long_long_unsigned_type_node
3571 && TREE_CODE (type) == INTEGER_TYPE
3572 && !TYPE_UNSIGNED (type))
3573 {
3574 tree n1, n2;
3575
3576 if (fd->loop.cond_code == LT_EXPR)
3577 {
3578 n1 = fd->loop.n1;
3579 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3580 }
3581 else
3582 {
3583 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3584 n2 = fd->loop.n1;
3585 }
3586 if (TREE_CODE (n1) != INTEGER_CST
3587 || TREE_CODE (n2) != INTEGER_CST
3588 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3589 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3590 }
3591
3592 entry_bb = region->entry;
3593 cont_bb = region->cont;
3594 collapse_bb = NULL;
3595 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3596 gcc_assert (broken_loop
3597 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3598 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3599 l1_bb = single_succ (l0_bb);
3600 if (!broken_loop)
3601 {
3602 l2_bb = create_empty_bb (cont_bb);
3603 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3604 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3605 }
3606 else
3607 l2_bb = NULL;
3608 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3609 exit_bb = region->exit;
3610
3611 gsi = gsi_last_bb (entry_bb);
3612
3613 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3614 if (fd->collapse > 1)
3615 {
3616 /* Collapsed loops need work for expansion in SSA form. */
3617 gcc_assert (!gimple_in_ssa_p (cfun));
3618 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3619 for (i = 0; i < fd->collapse; i++)
3620 {
3621 tree itype = TREE_TYPE (fd->loops[i].v);
3622
3623 if (POINTER_TYPE_P (itype))
3624 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3625 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3626 ? -1 : 1));
3627 t = fold_build2 (PLUS_EXPR, itype,
3628 fold_convert (itype, fd->loops[i].step), t);
3629 t = fold_build2 (PLUS_EXPR, itype, t,
3630 fold_convert (itype, fd->loops[i].n2));
3631 t = fold_build2 (MINUS_EXPR, itype, t,
3632 fold_convert (itype, fd->loops[i].n1));
3633 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3634 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3635 fold_build1 (NEGATE_EXPR, itype, t),
3636 fold_build1 (NEGATE_EXPR, itype,
3637 fold_convert (itype,
3638 fd->loops[i].step)));
3639 else
3640 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3641 fold_convert (itype, fd->loops[i].step));
3642 t = fold_convert (type, t);
3643 if (TREE_CODE (t) == INTEGER_CST)
3644 counts[i] = t;
3645 else
3646 {
3647 counts[i] = create_tmp_var (type, ".count");
3648 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3649 true, GSI_SAME_STMT);
3650 stmt = gimple_build_assign (counts[i], t);
3651 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3652 }
3653 if (SSA_VAR_P (fd->loop.n2))
3654 {
3655 if (i == 0)
3656 t = counts[0];
3657 else
3658 {
3659 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3660 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3661 true, GSI_SAME_STMT);
3662 }
3663 stmt = gimple_build_assign (fd->loop.n2, t);
3664 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3665 }
3666 }
3667 }
3668 if (in_combined_parallel)
3669 {
3670 /* In a combined parallel loop, emit a call to
3671 GOMP_loop_foo_next. */
3672 t = build_call_expr (built_in_decls[next_fn], 2,
3673 build_fold_addr_expr (istart0),
3674 build_fold_addr_expr (iend0));
3675 }
3676 else
3677 {
3678 tree t0, t1, t2, t3, t4;
3679 /* If this is not a combined parallel loop, emit a call to
3680 GOMP_loop_foo_start in ENTRY_BB. */
3681 t4 = build_fold_addr_expr (iend0);
3682 t3 = build_fold_addr_expr (istart0);
3683 t2 = fold_convert (fd->iter_type, fd->loop.step);
3684 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3685 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3686 if (bias)
3687 {
3688 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3689 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3690 }
3691 if (fd->iter_type == long_integer_type_node)
3692 {
3693 if (fd->chunk_size)
3694 {
3695 t = fold_convert (fd->iter_type, fd->chunk_size);
3696 t = build_call_expr (built_in_decls[start_fn], 6,
3697 t0, t1, t2, t, t3, t4);
3698 }
3699 else
3700 t = build_call_expr (built_in_decls[start_fn], 5,
3701 t0, t1, t2, t3, t4);
3702 }
3703 else
3704 {
3705 tree t5;
3706 tree c_bool_type;
3707
3708 /* The GOMP_loop_ull_*start functions have an additional boolean
3709 argument, true for < loops and false for > loops.
3710 In Fortran, the C bool type can be different from
3711 boolean_type_node. */
3712 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3713 t5 = build_int_cst (c_bool_type,
3714 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3715 if (fd->chunk_size)
3716 {
3717 t = fold_convert (fd->iter_type, fd->chunk_size);
3718 t = build_call_expr (built_in_decls[start_fn], 7,
3719 t5, t0, t1, t2, t, t3, t4);
3720 }
3721 else
3722 t = build_call_expr (built_in_decls[start_fn], 6,
3723 t5, t0, t1, t2, t3, t4);
3724 }
3725 }
3726 if (TREE_TYPE (t) != boolean_type_node)
3727 t = fold_build2 (NE_EXPR, boolean_type_node,
3728 t, build_int_cst (TREE_TYPE (t), 0));
3729 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3730 true, GSI_SAME_STMT);
3731 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3732
3733 /* Remove the GIMPLE_OMP_FOR statement. */
3734 gsi_remove (&gsi, true);
3735
3736 /* Iteration setup for sequential loop goes in L0_BB. */
3737 gsi = gsi_start_bb (l0_bb);
3738 if (bias)
3739 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3740 istart0, bias));
3741 else
3742 t = fold_convert (type, istart0);
3743 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3744 false, GSI_CONTINUE_LINKING);
3745 stmt = gimple_build_assign (fd->loop.v, t);
3746 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3747
3748 if (bias)
3749 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3750 iend0, bias));
3751 else
3752 t = fold_convert (type, iend0);
3753 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3754 false, GSI_CONTINUE_LINKING);
3755 if (fd->collapse > 1)
3756 {
3757 tree tem = create_tmp_var (type, ".tem");
3758
3759 stmt = gimple_build_assign (tem, fd->loop.v);
3760 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3761 for (i = fd->collapse - 1; i >= 0; i--)
3762 {
3763 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3764 itype = vtype;
3765 if (POINTER_TYPE_P (vtype))
3766 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3767 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3768 t = fold_convert (itype, t);
3769 t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i].step);
3770 if (POINTER_TYPE_P (vtype))
3771 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3772 fd->loops[i].n1, fold_convert (sizetype, t));
3773 else
3774 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3775 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3776 false, GSI_CONTINUE_LINKING);
3777 stmt = gimple_build_assign (fd->loops[i].v, t);
3778 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3779 if (i != 0)
3780 {
3781 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3782 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3783 false, GSI_CONTINUE_LINKING);
3784 stmt = gimple_build_assign (tem, t);
3785 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3786 }
3787 }
3788 }
3789
3790 if (!broken_loop)
3791 {
3792 /* Code to control the increment and predicate for the sequential
3793 loop goes in the CONT_BB. */
3794 gsi = gsi_last_bb (cont_bb);
3795 stmt = gsi_stmt (gsi);
3796 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3797 vmain = gimple_omp_continue_control_use (stmt);
3798 vback = gimple_omp_continue_control_def (stmt);
3799
3800 if (POINTER_TYPE_P (type))
3801 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3802 fold_convert (sizetype, fd->loop.step));
3803 else
3804 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3805 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3806 true, GSI_SAME_STMT);
3807 stmt = gimple_build_assign (vback, t);
3808 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3809
3810 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3811 stmt = gimple_build_cond_empty (t);
3812 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3813
3814 /* Remove GIMPLE_OMP_CONTINUE. */
3815 gsi_remove (&gsi, true);
3816
3817 if (fd->collapse > 1)
3818 {
3819 basic_block last_bb, bb;
3820
3821 last_bb = cont_bb;
3822 for (i = fd->collapse - 1; i >= 0; i--)
3823 {
3824 tree vtype = TREE_TYPE (fd->loops[i].v);
3825
3826 bb = create_empty_bb (last_bb);
3827 gsi = gsi_start_bb (bb);
3828
3829 if (i < fd->collapse - 1)
3830 {
3831 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3832 e->probability = REG_BR_PROB_BASE / 8;
3833
3834 t = fd->loops[i + 1].n1;
3835 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3836 false, GSI_CONTINUE_LINKING);
3837 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3838 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3839 }
3840 else
3841 collapse_bb = bb;
3842
3843 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3844
3845 if (POINTER_TYPE_P (vtype))
3846 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3847 fd->loops[i].v,
3848 fold_convert (sizetype, fd->loops[i].step));
3849 else
3850 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3851 fd->loops[i].step);
3852 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3853 false, GSI_CONTINUE_LINKING);
3854 stmt = gimple_build_assign (fd->loops[i].v, t);
3855 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3856
3857 if (i > 0)
3858 {
3859 t = fd->loops[i].n2;
3860 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3861 false, GSI_CONTINUE_LINKING);
3862 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3863 fd->loops[i].v, t);
3864 stmt = gimple_build_cond_empty (t);
3865 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3866 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3867 e->probability = REG_BR_PROB_BASE * 7 / 8;
3868 }
3869 else
3870 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3871 last_bb = bb;
3872 }
3873 }
3874
3875 /* Emit code to get the next parallel iteration in L2_BB. */
3876 gsi = gsi_start_bb (l2_bb);
3877
3878 t = build_call_expr (built_in_decls[next_fn], 2,
3879 build_fold_addr_expr (istart0),
3880 build_fold_addr_expr (iend0));
3881 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3882 false, GSI_CONTINUE_LINKING);
3883 if (TREE_TYPE (t) != boolean_type_node)
3884 t = fold_build2 (NE_EXPR, boolean_type_node,
3885 t, build_int_cst (TREE_TYPE (t), 0));
3886 stmt = gimple_build_cond_empty (t);
3887 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3888 }
3889
3890 /* Add the loop cleanup function. */
3891 gsi = gsi_last_bb (exit_bb);
3892 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3893 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3894 else
3895 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
3896 stmt = gimple_build_call (t, 0);
3897 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3898 gsi_remove (&gsi, true);
3899
3900 /* Connect the new blocks. */
3901 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3902 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3903
3904 if (!broken_loop)
3905 {
3906 gimple_seq phis;
3907
3908 e = find_edge (cont_bb, l3_bb);
3909 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3910
3911 phis = phi_nodes (l3_bb);
3912 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
3913 {
3914 gimple phi = gsi_stmt (gsi);
3915 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3916 PHI_ARG_DEF_FROM_EDGE (phi, e));
3917 }
3918 remove_edge (e);
3919
3920 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
3921 if (fd->collapse > 1)
3922 {
3923 e = find_edge (cont_bb, l1_bb);
3924 remove_edge (e);
3925 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
3926 }
3927 else
3928 {
3929 e = find_edge (cont_bb, l1_bb);
3930 e->flags = EDGE_TRUE_VALUE;
3931 }
3932 e->probability = REG_BR_PROB_BASE * 7 / 8;
3933 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
3934 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
3935
3936 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
3937 recompute_dominator (CDI_DOMINATORS, l2_bb));
3938 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
3939 recompute_dominator (CDI_DOMINATORS, l3_bb));
3940 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
3941 recompute_dominator (CDI_DOMINATORS, l0_bb));
3942 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
3943 recompute_dominator (CDI_DOMINATORS, l1_bb));
3944 }
3945 }
3946
3947
3948 /* A subroutine of expand_omp_for. Generate code for a parallel
3949 loop with static schedule and no specified chunk size. Given
3950 parameters:
3951
3952 for (V = N1; V cond N2; V += STEP) BODY;
3953
3954 where COND is "<" or ">", we generate pseudocode
3955
3956 if (cond is <)
3957 adj = STEP - 1;
3958 else
3959 adj = STEP + 1;
3960 if ((__typeof (V)) -1 > 0 && cond is >)
3961 n = -(adj + N2 - N1) / -STEP;
3962 else
3963 n = (adj + N2 - N1) / STEP;
3964 q = n / nthreads;
3965 q += (q * nthreads != n);
3966 s0 = q * threadid;
3967 e0 = min(s0 + q, n);
3968 V = s0 * STEP + N1;
3969 if (s0 >= e0) goto L2; else goto L0;
3970 L0:
3971 e = e0 * STEP + N1;
3972 L1:
3973 BODY;
3974 V += STEP;
3975 if (V cond e) goto L1;
3976 L2:
3977 */
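/* For illustration (hypothetical values, not tied to any particular target):
   with "for (V = 0; V < 10; V++)" we have N1 = 0, N2 = 10, STEP = 1 and
   cond "<", so adj = 0 and n = 10.  Assuming nthreads = 4:

     q = 10 / 4 = 2;  q * 4 != 10, so q = 3
     thread 0: s0 = 0, e0 = 3            -> iterations 0..2
     thread 1: s0 = 3, e0 = 6            -> iterations 3..5
     thread 2: s0 = 6, e0 = 9            -> iterations 6..8
     thread 3: s0 = 9, e0 = min (12, 10) -> iteration 9

   i.e. each thread executes one contiguous block of at most q iterations,
   and a thread whose s0 is already >= e0 skips the body entirely.  */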
3978
3979 static void
3980 expand_omp_for_static_nochunk (struct omp_region *region,
3981 struct omp_for_data *fd)
3982 {
3983 tree n, q, s0, e0, e, t, nthreads, threadid;
3984 tree type, itype, vmain, vback;
3985 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
3986 basic_block fin_bb;
3987 gimple_stmt_iterator gsi;
3988 gimple stmt;
3989
3990 itype = type = TREE_TYPE (fd->loop.v);
3991 if (POINTER_TYPE_P (type))
3992 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3993
3994 entry_bb = region->entry;
3995 cont_bb = region->cont;
3996 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3997 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3998 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3999 body_bb = single_succ (seq_start_bb);
4000 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4001 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4002 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4003 exit_bb = region->exit;
4004
4005 /* Iteration space partitioning goes in ENTRY_BB. */
4006 gsi = gsi_last_bb (entry_bb);
4007 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4008
4009 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4010 t = fold_convert (itype, t);
4011 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4012 true, GSI_SAME_STMT);
4013
4014 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4015 t = fold_convert (itype, t);
4016 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4017 true, GSI_SAME_STMT);
4018
4019 fd->loop.n1
4020 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4021 true, NULL_TREE, true, GSI_SAME_STMT);
4022 fd->loop.n2
4023 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4024 true, NULL_TREE, true, GSI_SAME_STMT);
4025 fd->loop.step
4026 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4027 true, NULL_TREE, true, GSI_SAME_STMT);
4028
4029 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4030 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4031 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4032 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4033 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4034 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4035 fold_build1 (NEGATE_EXPR, itype, t),
4036 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4037 else
4038 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4039 t = fold_convert (itype, t);
4040 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4041
4042 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4043 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4044
4045 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4046 t = fold_build2 (NE_EXPR, itype, t, n);
4047 t = fold_build2 (PLUS_EXPR, itype, q, t);
4048 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4049
4050 t = build2 (MULT_EXPR, itype, q, threadid);
4051 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4052
4053 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4054 t = fold_build2 (MIN_EXPR, itype, t, n);
4055 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4056
4057 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4058 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4059
4060 /* Remove the GIMPLE_OMP_FOR statement. */
4061 gsi_remove (&gsi, true);
4062
4063 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4064 gsi = gsi_start_bb (seq_start_bb);
4065
4066 t = fold_convert (itype, s0);
4067 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4068 if (POINTER_TYPE_P (type))
4069 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4070 fold_convert (sizetype, t));
4071 else
4072 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4073 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4074 false, GSI_CONTINUE_LINKING);
4075 stmt = gimple_build_assign (fd->loop.v, t);
4076 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4077
4078 t = fold_convert (itype, e0);
4079 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4080 if (POINTER_TYPE_P (type))
4081 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4082 fold_convert (sizetype, t));
4083 else
4084 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4085 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4086 false, GSI_CONTINUE_LINKING);
4087
4088 /* The code controlling the sequential loop replaces the
4089 GIMPLE_OMP_CONTINUE. */
4090 gsi = gsi_last_bb (cont_bb);
4091 stmt = gsi_stmt (gsi);
4092 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4093 vmain = gimple_omp_continue_control_use (stmt);
4094 vback = gimple_omp_continue_control_def (stmt);
4095
4096 if (POINTER_TYPE_P (type))
4097 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4098 fold_convert (sizetype, fd->loop.step));
4099 else
4100 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4101 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4102 true, GSI_SAME_STMT);
4103 stmt = gimple_build_assign (vback, t);
4104 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4105
4106 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4107 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4108
4109 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4110 gsi_remove (&gsi, true);
4111
4112 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4113 gsi = gsi_last_bb (exit_bb);
4114 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4115 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4116 false, GSI_SAME_STMT);
4117 gsi_remove (&gsi, true);
4118
4119 /* Connect all the blocks. */
4120 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4121 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4122
4123 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4124 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4125
4126 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4127 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4128 recompute_dominator (CDI_DOMINATORS, body_bb));
4129 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4130 recompute_dominator (CDI_DOMINATORS, fin_bb));
4131 }
4132
4133
4134 /* A subroutine of expand_omp_for. Generate code for a parallel
4135 loop with static schedule and a specified chunk size. Given
4136 parameters:
4137
4138 for (V = N1; V cond N2; V += STEP) BODY;
4139
4140 where COND is "<" or ">", we generate pseudocode
4141
4142 if (cond is <)
4143 adj = STEP - 1;
4144 else
4145 adj = STEP + 1;
4146 if ((__typeof (V)) -1 > 0 && cond is >)
4147 n = -(adj + N2 - N1) / -STEP;
4148 else
4149 n = (adj + N2 - N1) / STEP;
4150 trip = 0;
4151 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4152 here so that V is defined
4153 if the loop is not entered
4154 L0:
4155 s0 = (trip * nthreads + threadid) * CHUNK;
4156 e0 = min(s0 + CHUNK, n);
4157 if (s0 < n) goto L1; else goto L4;
4158 L1:
4159 V = s0 * STEP + N1;
4160 e = e0 * STEP + N1;
4161 L2:
4162 BODY;
4163 V += STEP;
4164 if (V cond e) goto L2; else goto L3;
4165 L3:
4166 trip += 1;
4167 goto L0;
4168 L4:
4169 */
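/* For illustration (hypothetical values): with n = 10 iterations,
   nthreads = 4 and CHUNK = 2, the chunks are handed out round-robin:

     thread 0: trip 0 -> s0 = 0, e0 = 2;  trip 1 -> s0 = 8, e0 = 10
     thread 1: trip 0 -> s0 = 2, e0 = 4;  trip 1 -> s0 = 10 >= n, done
     thread 2: trip 0 -> s0 = 4, e0 = 6;  trip 1 -> s0 = 12 >= n, done
     thread 3: trip 0 -> s0 = 6, e0 = 8;  trip 1 -> s0 = 14 >= n, done

   so thread 0 ends up with chunks [0,2) and [8,10) while the others get
   one chunk each; the trip counter is what advances a thread from one of
   its chunks to the next.  */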
4170
4171 static void
4172 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4173 {
4174 tree n, s0, e0, e, t;
4175 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4176 tree type, itype, v_main, v_back, v_extra;
4177 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4178 basic_block trip_update_bb, cont_bb, fin_bb;
4179 gimple_stmt_iterator si;
4180 gimple stmt;
4181 edge se;
4182
4183 itype = type = TREE_TYPE (fd->loop.v);
4184 if (POINTER_TYPE_P (type))
4185 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4186
4187 entry_bb = region->entry;
4188 se = split_block (entry_bb, last_stmt (entry_bb));
4189 entry_bb = se->src;
4190 iter_part_bb = se->dest;
4191 cont_bb = region->cont;
4192 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4193 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4194 == FALLTHRU_EDGE (cont_bb)->dest);
4195 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4196 body_bb = single_succ (seq_start_bb);
4197 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4198 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4199 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4200 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4201 exit_bb = region->exit;
4202
4203 /* Trip and adjustment setup goes in ENTRY_BB. */
4204 si = gsi_last_bb (entry_bb);
4205 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4206
4207 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4208 t = fold_convert (itype, t);
4209 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4210 true, GSI_SAME_STMT);
4211
4212 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4213 t = fold_convert (itype, t);
4214 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4215 true, GSI_SAME_STMT);
4216
4217 fd->loop.n1
4218 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4219 true, NULL_TREE, true, GSI_SAME_STMT);
4220 fd->loop.n2
4221 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4222 true, NULL_TREE, true, GSI_SAME_STMT);
4223 fd->loop.step
4224 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4225 true, NULL_TREE, true, GSI_SAME_STMT);
4226 fd->chunk_size
4227 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4228 true, NULL_TREE, true, GSI_SAME_STMT);
4229
4230 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4231 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4232 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4233 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4234 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4235 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4236 fold_build1 (NEGATE_EXPR, itype, t),
4237 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4238 else
4239 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4240 t = fold_convert (itype, t);
4241 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4242 true, GSI_SAME_STMT);
4243
4244 trip_var = create_tmp_var (itype, ".trip");
4245 if (gimple_in_ssa_p (cfun))
4246 {
4247 add_referenced_var (trip_var);
4248 trip_init = make_ssa_name (trip_var, NULL);
4249 trip_main = make_ssa_name (trip_var, NULL);
4250 trip_back = make_ssa_name (trip_var, NULL);
4251 }
4252 else
4253 {
4254 trip_init = trip_var;
4255 trip_main = trip_var;
4256 trip_back = trip_var;
4257 }
4258
4259 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4260 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4261
4262 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4263 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4264 if (POINTER_TYPE_P (type))
4265 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4266 fold_convert (sizetype, t));
4267 else
4268 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4269 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4270 true, GSI_SAME_STMT);
4271
4272 /* Remove the GIMPLE_OMP_FOR. */
4273 gsi_remove (&si, true);
4274
4275 /* Iteration space partitioning goes in ITER_PART_BB. */
4276 si = gsi_last_bb (iter_part_bb);
4277
4278 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4279 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4280 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4281 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4282 false, GSI_CONTINUE_LINKING);
4283
4284 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4285 t = fold_build2 (MIN_EXPR, itype, t, n);
4286 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4287 false, GSI_CONTINUE_LINKING);
4288
4289 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4290 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4291
4292 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4293 si = gsi_start_bb (seq_start_bb);
4294
4295 t = fold_convert (itype, s0);
4296 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4297 if (POINTER_TYPE_P (type))
4298 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4299 fold_convert (sizetype, t));
4300 else
4301 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4302 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4303 false, GSI_CONTINUE_LINKING);
4304 stmt = gimple_build_assign (fd->loop.v, t);
4305 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4306
4307 t = fold_convert (itype, e0);
4308 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4309 if (POINTER_TYPE_P (type))
4310 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4311 fold_convert (sizetype, t));
4312 else
4313 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4314 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4315 false, GSI_CONTINUE_LINKING);
4316
4317 /* The code controlling the sequential loop goes in CONT_BB,
4318 replacing the GIMPLE_OMP_CONTINUE. */
4319 si = gsi_last_bb (cont_bb);
4320 stmt = gsi_stmt (si);
4321 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4322 v_main = gimple_omp_continue_control_use (stmt);
4323 v_back = gimple_omp_continue_control_def (stmt);
4324
4325 if (POINTER_TYPE_P (type))
4326 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4327 fold_convert (sizetype, fd->loop.step));
4328 else
4329 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4330 stmt = gimple_build_assign (v_back, t);
4331 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4332
4333 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4334 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4335
4336 /* Remove GIMPLE_OMP_CONTINUE. */
4337 gsi_remove (&si, true);
4338
4339 /* Trip update code goes into TRIP_UPDATE_BB. */
4340 si = gsi_start_bb (trip_update_bb);
4341
4342 t = build_int_cst (itype, 1);
4343 t = build2 (PLUS_EXPR, itype, trip_main, t);
4344 stmt = gimple_build_assign (trip_back, t);
4345 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4346
4347 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4348 si = gsi_last_bb (exit_bb);
4349 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4350 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4351 false, GSI_SAME_STMT);
4352 gsi_remove (&si, true);
4353
4354 /* Connect the new blocks. */
4355 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4356 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4357
4358 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4359 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4360
4361 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4362
4363 if (gimple_in_ssa_p (cfun))
4364 {
4365 gimple_stmt_iterator psi;
4366 gimple phi;
4367 edge re, ene;
4368 edge_var_map_vector head;
4369 edge_var_map *vm;
4370 size_t i;
4371
4372 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4373 remove arguments of the phi nodes in fin_bb. We need to create
4374 appropriate phi nodes in iter_part_bb instead. */
4375 se = single_pred_edge (fin_bb);
4376 re = single_succ_edge (trip_update_bb);
4377 head = redirect_edge_var_map_vector (re);
4378 ene = single_succ_edge (entry_bb);
4379
4380 psi = gsi_start_phis (fin_bb);
4381 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4382 gsi_next (&psi), ++i)
4383 {
4384 gimple nphi;
4385
4386 phi = gsi_stmt (psi);
4387 t = gimple_phi_result (phi);
4388 gcc_assert (t == redirect_edge_var_map_result (vm));
4389 nphi = create_phi_node (t, iter_part_bb);
4390 SSA_NAME_DEF_STMT (t) = nphi;
4391
4392 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4393 /* A special case -- fd->loop.v is not yet computed in
4394 iter_part_bb; we need to use v_extra instead. */
4395 if (t == fd->loop.v)
4396 t = v_extra;
4397 add_phi_arg (nphi, t, ene);
4398 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
4399 }
4400 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4401 redirect_edge_var_map_clear (re);
4402 while (1)
4403 {
4404 psi = gsi_start_phis (fin_bb);
4405 if (gsi_end_p (psi))
4406 break;
4407 remove_phi_node (&psi, false);
4408 }
4409
4410 /* Make phi node for trip. */
4411 phi = create_phi_node (trip_main, iter_part_bb);
4412 SSA_NAME_DEF_STMT (trip_main) = phi;
4413 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4414 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
4415 }
4416
4417 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4418 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4419 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4420 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4421 recompute_dominator (CDI_DOMINATORS, fin_bb));
4422 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4423 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4424 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4425 recompute_dominator (CDI_DOMINATORS, body_bb));
4426 }
4427
4428
4429 /* Expand the OpenMP loop defined by REGION. */
4430
4431 static void
4432 expand_omp_for (struct omp_region *region)
4433 {
4434 struct omp_for_data fd;
4435 struct omp_for_data_loop *loops;
4436
4437 loops
4438 = (struct omp_for_data_loop *)
4439 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4440 * sizeof (struct omp_for_data_loop));
4441 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4442 region->sched_kind = fd.sched_kind;
4443
4444 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4445 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4446 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4447 if (region->cont)
4448 {
4449 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4450 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4451 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4452 }
4453
4454 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4455 && !fd.have_ordered
4456 && fd.collapse == 1
4457 && region->cont != NULL)
4458 {
4459 if (fd.chunk_size == NULL)
4460 expand_omp_for_static_nochunk (region, &fd);
4461 else
4462 expand_omp_for_static_chunk (region, &fd);
4463 }
4464 else
4465 {
4466 int fn_index, start_ix, next_ix;
4467
4468 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4469 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4470 ? 3 : fd.sched_kind;
4471 fn_index += fd.have_ordered * 4;
4472 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4473 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4474 if (fd.iter_type == long_long_unsigned_type_node)
4475 {
4476 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4477 - BUILT_IN_GOMP_LOOP_STATIC_START;
4478 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4479 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4480 }
4481 expand_omp_for_generic (region, &fd, start_ix, next_ix);
4482 }
4483
4484 update_ssa (TODO_update_ssa_only_virtuals);
4485 }
4486
4487
4488 /* Expand code for an OpenMP sections directive. In pseudocode, we generate
4489
4490 v = GOMP_sections_start (n);
4491 L0:
4492 switch (v)
4493 {
4494 case 0:
4495 goto L2;
4496 case 1:
4497 section 1;
4498 goto L1;
4499 case 2:
4500 ...
4501 case n:
4502 ...
4503 default:
4504 abort ();
4505 }
4506 L1:
4507 v = GOMP_sections_next ();
4508 goto L0;
4509 L2:
4510 reduction;
4511
4512 If this is a combined parallel sections, replace the call to
4513 GOMP_sections_start with call to GOMP_sections_next. */
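/* For illustration, a directive with two sections, say

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   is expanded (outside a combined parallel) into

     v = GOMP_sections_start (2);
   L0:
     switch (v)
       {
       case 0: goto L2;
       case 1: foo (); goto L1;
       case 2: bar (); goto L1;
       default: abort ();
       }
   L1:
     v = GOMP_sections_next ();
     goto L0;
   L2:
     <reduction code, if any>

   so each thread keeps asking the runtime for section numbers until it
   receives 0.  foo and bar are hypothetical calls used only as an example.  */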
4514
4515 static void
4516 expand_omp_sections (struct omp_region *region)
4517 {
4518 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4519 VEC (tree,heap) *label_vec;
4520 unsigned len;
4521 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4522 gimple_stmt_iterator si, switch_si;
4523 gimple sections_stmt, stmt, cont;
4524 edge_iterator ei;
4525 edge e;
4526 struct omp_region *inner;
4527 unsigned i, casei;
4528 bool exit_reachable = region->cont != NULL;
4529
4530 gcc_assert (exit_reachable == (region->exit != NULL));
4531 entry_bb = region->entry;
4532 l0_bb = single_succ (entry_bb);
4533 l1_bb = region->cont;
4534 l2_bb = region->exit;
4535 if (exit_reachable)
4536 {
4537 if (single_pred (l2_bb) == l0_bb)
4538 l2 = gimple_block_label (l2_bb);
4539 else
4540 {
4541 /* This can happen if there are reductions. */
4542 len = EDGE_COUNT (l0_bb->succs);
4543 gcc_assert (len > 0);
4544 e = EDGE_SUCC (l0_bb, len - 1);
4545 si = gsi_last_bb (e->dest);
4546 l2 = NULL_TREE;
4547 if (gsi_end_p (si)
4548 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4549 l2 = gimple_block_label (e->dest);
4550 else
4551 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4552 {
4553 si = gsi_last_bb (e->dest);
4554 if (gsi_end_p (si)
4555 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4556 {
4557 l2 = gimple_block_label (e->dest);
4558 break;
4559 }
4560 }
4561 }
4562 default_bb = create_empty_bb (l1_bb->prev_bb);
4563 l1 = gimple_block_label (l1_bb);
4564 }
4565 else
4566 {
4567 default_bb = create_empty_bb (l0_bb);
4568 l1 = NULL_TREE;
4569 l2 = gimple_block_label (default_bb);
4570 }
4571
4572 /* We will build a switch() with enough cases for all the
4573 GIMPLE_OMP_SECTION regions, a '0' case to signal that no more work remains,
4574 and a default case to abort if something goes wrong. */
4575 len = EDGE_COUNT (l0_bb->succs);
4576
4577 /* Use VEC_quick_push on label_vec throughout, since we know the size
4578 in advance. */
4579 label_vec = VEC_alloc (tree, heap, len);
4580
4581 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4582 GIMPLE_OMP_SECTIONS statement. */
4583 si = gsi_last_bb (entry_bb);
4584 sections_stmt = gsi_stmt (si);
4585 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4586 vin = gimple_omp_sections_control (sections_stmt);
4587 if (!is_combined_parallel (region))
4588 {
4589 /* If we are not inside a combined parallel+sections region,
4590 call GOMP_sections_start. */
4591 t = build_int_cst (unsigned_type_node,
4592 exit_reachable ? len - 1 : len);
4593 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4594 stmt = gimple_build_call (u, 1, t);
4595 }
4596 else
4597 {
4598 /* Otherwise, call GOMP_sections_next. */
4599 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4600 stmt = gimple_build_call (u, 0);
4601 }
4602 gimple_call_set_lhs (stmt, vin);
4603 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4604 gsi_remove (&si, true);
4605
4606 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4607 L0_BB. */
4608 switch_si = gsi_last_bb (l0_bb);
4609 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4610 if (exit_reachable)
4611 {
4612 cont = last_stmt (l1_bb);
4613 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4614 vmain = gimple_omp_continue_control_use (cont);
4615 vnext = gimple_omp_continue_control_def (cont);
4616 }
4617 else
4618 {
4619 vmain = vin;
4620 vnext = NULL_TREE;
4621 }
4622
4623 i = 0;
4624 if (exit_reachable)
4625 {
4626 t = build3 (CASE_LABEL_EXPR, void_type_node,
4627 build_int_cst (unsigned_type_node, 0), NULL, l2);
4628 VEC_quick_push (tree, label_vec, t);
4629 i++;
4630 }
4631
4632 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4633 for (inner = region->inner, casei = 1;
4634 inner;
4635 inner = inner->next, i++, casei++)
4636 {
4637 basic_block s_entry_bb, s_exit_bb;
4638
4639 /* Skip optional reduction region. */
4640 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4641 {
4642 --i;
4643 --casei;
4644 continue;
4645 }
4646
4647 s_entry_bb = inner->entry;
4648 s_exit_bb = inner->exit;
4649
4650 t = gimple_block_label (s_entry_bb);
4651 u = build_int_cst (unsigned_type_node, casei);
4652 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4653 VEC_quick_push (tree, label_vec, u);
4654
4655 si = gsi_last_bb (s_entry_bb);
4656 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4657 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4658 gsi_remove (&si, true);
4659 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4660
4661 if (s_exit_bb == NULL)
4662 continue;
4663
4664 si = gsi_last_bb (s_exit_bb);
4665 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4666 gsi_remove (&si, true);
4667
4668 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4669 }
4670
4671 /* Error handling code goes in DEFAULT_BB. */
4672 t = gimple_block_label (default_bb);
4673 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4674 make_edge (l0_bb, default_bb, 0);
4675
4676 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4677 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4678 gsi_remove (&switch_si, true);
4679 VEC_free (tree, heap, label_vec);
4680
4681 si = gsi_start_bb (default_bb);
4682 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4683 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4684
4685 if (exit_reachable)
4686 {
4687 /* Code to get the next section goes in L1_BB. */
4688 si = gsi_last_bb (l1_bb);
4689 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4690
4691 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4692 gimple_call_set_lhs (stmt, vnext);
4693 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4694 gsi_remove (&si, true);
4695
4696 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4697
4698 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4699 si = gsi_last_bb (l2_bb);
4700 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4701 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4702 else
4703 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4704 stmt = gimple_build_call (t, 0);
4705 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4706 gsi_remove (&si, true);
4707 }
4708
4709 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4710 }
4711
4712
4713 /* Expand code for an OpenMP single directive. We've already expanded
4714 much of the code; here we simply place the GOMP_barrier call. */
4715
4716 static void
4717 expand_omp_single (struct omp_region *region)
4718 {
4719 basic_block entry_bb, exit_bb;
4720 gimple_stmt_iterator si;
4721 bool need_barrier = false;
4722
4723 entry_bb = region->entry;
4724 exit_bb = region->exit;
4725
4726 si = gsi_last_bb (entry_bb);
4727 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4728 be removed. We need to ensure that the thread that entered the single
4729 does not exit before the data is copied out by the other threads. */
4730 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4731 OMP_CLAUSE_COPYPRIVATE))
4732 need_barrier = true;
4733 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4734 gsi_remove (&si, true);
4735 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4736
4737 si = gsi_last_bb (exit_bb);
4738 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4739 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4740 false, GSI_SAME_STMT);
4741 gsi_remove (&si, true);
4742 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4743 }
4744
4745
4746 /* Generic expansion for OpenMP synchronization directives: master,
4747 ordered and critical. All we need to do here is remove the entry
4748 and exit markers for REGION. */
4749
4750 static void
4751 expand_omp_synch (struct omp_region *region)
4752 {
4753 basic_block entry_bb, exit_bb;
4754 gimple_stmt_iterator si;
4755
4756 entry_bb = region->entry;
4757 exit_bb = region->exit;
4758
4759 si = gsi_last_bb (entry_bb);
4760 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4761 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4762 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4763 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4764 gsi_remove (&si, true);
4765 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4766
4767 if (exit_bb)
4768 {
4769 si = gsi_last_bb (exit_bb);
4770 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4771 gsi_remove (&si, true);
4772 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4773 }
4774 }
4775
4776 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4777 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4778 size of the data type, and thus usable to find the index of the builtin
4779 decl. Returns false if the expression is not of the proper form. */
4780
4781 static bool
4782 expand_omp_atomic_fetch_op (basic_block load_bb,
4783 tree addr, tree loaded_val,
4784 tree stored_val, int index)
4785 {
4786 enum built_in_function base;
4787 tree decl, itype, call;
4788 enum insn_code *optab;
4789 tree rhs;
4790 basic_block store_bb = single_succ (load_bb);
4791 gimple_stmt_iterator gsi;
4792 gimple stmt;
4793
4794 /* We expect to find the following sequences:
4795
4796 load_bb:
4797 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4798
4799 store_bb:
4800 val = tmp OP something; (or: something OP tmp)
4801 GIMPLE_OMP_ATOMIC_STORE (val)
4802
4803 ???FIXME: Allow a more flexible sequence.
4804 Perhaps use data flow to pick the statements.
4805
4806 */
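/* For illustration (assuming a 4-byte int X and a target that provides the
   corresponding sync optab): for

     #pragma omp atomic
       x += 3;

   load_bb ends in GIMPLE_OMP_ATOMIC_LOAD (tmp, &x) and store_bb contains
   "val = tmp + 3" followed by GIMPLE_OMP_ATOMIC_STORE (val), which matches
   the PLUS_EXPR case below; the whole pair is then replaced by a single
   call to __sync_fetch_and_add_4 (&x, 3).  */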
4807
4808 gsi = gsi_after_labels (store_bb);
4809 stmt = gsi_stmt (gsi);
4810 if (!is_gimple_assign (stmt))
4811 return false;
4812 gsi_next (&gsi);
4813 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4814 return false;
4815
4816 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4817 return false;
4818
4819 /* Check for one of the supported fetch-op operations. */
4820 switch (gimple_assign_rhs_code (stmt))
4821 {
4822 case PLUS_EXPR:
4823 case POINTER_PLUS_EXPR:
4824 base = BUILT_IN_FETCH_AND_ADD_N;
4825 optab = sync_add_optab;
4826 break;
4827 case MINUS_EXPR:
4828 base = BUILT_IN_FETCH_AND_SUB_N;
4829 optab = sync_add_optab;
4830 break;
4831 case BIT_AND_EXPR:
4832 base = BUILT_IN_FETCH_AND_AND_N;
4833 optab = sync_and_optab;
4834 break;
4835 case BIT_IOR_EXPR:
4836 base = BUILT_IN_FETCH_AND_OR_N;
4837 optab = sync_ior_optab;
4838 break;
4839 case BIT_XOR_EXPR:
4840 base = BUILT_IN_FETCH_AND_XOR_N;
4841 optab = sync_xor_optab;
4842 break;
4843 default:
4844 return false;
4845 }
4846 /* Make sure the expression is of the proper form. */
4847 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4848 rhs = gimple_assign_rhs2 (stmt);
4849 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4850 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4851 rhs = gimple_assign_rhs1 (stmt);
4852 else
4853 return false;
4854
4855 decl = built_in_decls[base + index + 1];
4856 itype = TREE_TYPE (TREE_TYPE (decl));
4857
4858 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4859 return false;
4860
4861 gsi = gsi_last_bb (load_bb);
4862 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4863 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
4864 call = fold_convert (void_type_node, call);
4865 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4866 gsi_remove (&gsi, true);
4867
4868 gsi = gsi_last_bb (store_bb);
4869 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4870 gsi_remove (&gsi, true);
4871 gsi = gsi_last_bb (store_bb);
4872 gsi_remove (&gsi, true);
4873
4874 if (gimple_in_ssa_p (cfun))
4875 update_ssa (TODO_update_ssa_no_phi);
4876
4877 return true;
4878 }
4879
4880 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4881
4882 oldval = *addr;
4883 repeat:
4884 newval = rhs; // with oldval replacing *addr in rhs
4885 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
4886 if (oldval != newval)
4887 goto repeat;
4888
4889 INDEX is log2 of the size of the data type, and thus usable to find the
4890 index of the builtin decl. */
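/* For illustration (assuming a 4-byte float F and a target with a 4-byte
   compare-and-swap): for

     #pragma omp atomic
       f *= 2.0f;

   there is no fetch-and-multiply builtin, so the value is view-converted
   to a 4-byte integer and the update is retried with
   __sync_val_compare_and_swap_4 until the value read back matches the
   value that was used to compute the product.  */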
4891
4892 static bool
4893 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4894 tree addr, tree loaded_val, tree stored_val,
4895 int index)
4896 {
4897 tree loadedi, storedi, initial, new_storedi, old_vali;
4898 tree type, itype, cmpxchg, iaddr;
4899 gimple_stmt_iterator si;
4900 basic_block loop_header = single_succ (load_bb);
4901 gimple phi, stmt;
4902 edge e;
4903
4904 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
4905 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4906 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
4907
4908 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
4909 return false;
4910
4911 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
4912 si = gsi_last_bb (load_bb);
4913 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
4914
4915 /* For floating-point values, we'll need to view-convert them to integers
4916 so that we can perform the atomic compare and swap. Simplify the
4917 following code by always setting up the "i"ntegral variables. */
4918 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
4919 {
4920 tree iaddr_val;
4921
4922 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
4923 iaddr_val
4924 = force_gimple_operand_gsi (&si,
4925 fold_convert (TREE_TYPE (iaddr), addr),
4926 false, NULL_TREE, true, GSI_SAME_STMT);
4927 stmt = gimple_build_assign (iaddr, iaddr_val);
4928 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4929 DECL_NO_TBAA_P (iaddr) = 1;
4930 DECL_POINTER_ALIAS_SET (iaddr) = 0;
4931 loadedi = create_tmp_var (itype, NULL);
4932 if (gimple_in_ssa_p (cfun))
4933 {
4934 add_referenced_var (iaddr);
4935 add_referenced_var (loadedi);
4936 loadedi = make_ssa_name (loadedi, NULL);
4937 }
4938 }
4939 else
4940 {
4941 iaddr = addr;
4942 loadedi = loaded_val;
4943 }
4944
4945 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
4946 true, NULL_TREE, true, GSI_SAME_STMT);
4947
4948 /* Move the value to the LOADEDI temporary. */
4949 if (gimple_in_ssa_p (cfun))
4950 {
4951 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
4952 phi = create_phi_node (loadedi, loop_header);
4953 SSA_NAME_DEF_STMT (loadedi) = phi;
4954 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
4955 initial);
4956 }
4957 else
4958 gsi_insert_before (&si,
4959 gimple_build_assign (loadedi, initial),
4960 GSI_SAME_STMT);
4961 if (loadedi != loaded_val)
4962 {
4963 gimple_stmt_iterator gsi2;
4964 tree x;
4965
4966 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
4967 gsi2 = gsi_start_bb (loop_header);
4968 if (gimple_in_ssa_p (cfun))
4969 {
4970 gimple stmt;
4971 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
4972 true, GSI_SAME_STMT);
4973 stmt = gimple_build_assign (loaded_val, x);
4974 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
4975 }
4976 else
4977 {
4978 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
4979 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
4980 true, GSI_SAME_STMT);
4981 }
4982 }
4983 gsi_remove (&si, true);
4984
4985 si = gsi_last_bb (store_bb);
4986 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
4987
4988 if (iaddr == addr)
4989 storedi = stored_val;
4990 else
4991 storedi =
4992 force_gimple_operand_gsi (&si,
4993 build1 (VIEW_CONVERT_EXPR, itype,
4994 stored_val), true, NULL_TREE, true,
4995 GSI_SAME_STMT);
4996
4997 /* Build the compare&swap statement. */
4998 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
4999 new_storedi = force_gimple_operand_gsi (&si,
5000 fold_convert (itype, new_storedi),
5001 true, NULL_TREE,
5002 true, GSI_SAME_STMT);
5003
5004 if (gimple_in_ssa_p (cfun))
5005 old_vali = loadedi;
5006 else
5007 {
5008 old_vali = create_tmp_var (itype, NULL);
5009 if (gimple_in_ssa_p (cfun))
5010 add_referenced_var (old_vali);
5011 stmt = gimple_build_assign (old_vali, loadedi);
5012 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5013
5014 stmt = gimple_build_assign (loadedi, new_storedi);
5015 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5016 }
5017
5018 /* Note that we always perform the comparison as an integer, even for
5019 floating point. This allows the atomic operation to properly
5020 succeed even with NaNs and -0.0. */
5021 stmt = gimple_build_cond_empty
5022 (build2 (NE_EXPR, boolean_type_node,
5023 new_storedi, old_vali));
5024 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5025
5026 /* Update cfg. */
5027 e = single_succ_edge (store_bb);
5028 e->flags &= ~EDGE_FALLTHRU;
5029 e->flags |= EDGE_FALSE_VALUE;
5030
5031 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5032
5033 /* Copy the new value to loadedi (we already did that before the condition
5034 if we are not in SSA). */
5035 if (gimple_in_ssa_p (cfun))
5036 {
5037 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5038 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5039 }
5040
5041 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5042 gsi_remove (&si, true);
5043
5044 if (gimple_in_ssa_p (cfun))
5045 update_ssa (TODO_update_ssa_no_phi);
5046
5047 return true;
5048 }
5049
5050 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5051
5052 GOMP_atomic_start ();
5053 *addr = rhs;
5054 GOMP_atomic_end ();
5055
5056 The result is not globally atomic, but works so long as all parallel
5057 references are within #pragma omp atomic directives. According to
5058 responses received from omp@openmp.org, this appears to be within
5059 spec, which makes sense, since that's how several other compilers
5060 handle this situation as well.
5061 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5062 expanding. STORED_VAL is the operand of the matching
5063 GIMPLE_OMP_ATOMIC_STORE.
5064
5065 We replace
5066 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5067 loaded_val = *addr;
5068
5069 and replace
5070 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5071 *addr = stored_val;
5072 */
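/* For illustration (assuming a target where long double occupies 12 bytes):
   for a hypothetical long double LD and

     #pragma omp atomic
       ld += 1.0L;

   the size is not one of the power-of-two sizes handled by the __sync
   builtins, so expand_omp_atomic falls back to this routine and the update
   is simply bracketed by GOMP_atomic_start ()/GOMP_atomic_end ().  */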
5073
5074 static bool
5075 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5076 tree addr, tree loaded_val, tree stored_val)
5077 {
5078 gimple_stmt_iterator si;
5079 gimple stmt;
5080 tree t;
5081
5082 si = gsi_last_bb (load_bb);
5083 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5084
5085 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5086 t = build_function_call_expr (t, 0);
5087 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5088
5089 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5090 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5091 gsi_remove (&si, true);
5092
5093 si = gsi_last_bb (store_bb);
5094 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5095
5096 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
5097 stored_val);
5098 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5099
5100 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5101 t = build_function_call_expr (t, 0);
5102 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5103 gsi_remove (&si, true);
5104
5105 if (gimple_in_ssa_p (cfun))
5106 update_ssa (TODO_update_ssa_no_phi);
5107 return true;
5108 }
5109
5110 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
5111 using expand_omp_atomic_fetch_op. If that fails, we try to
5112 call expand_omp_atomic_pipeline, and if that fails too, the
5113 ultimate fallback is wrapping the operation in a mutex
5114 (expand_omp_atomic_mutex). REGION is the atomic region built
5115 by build_omp_regions_1(). */
5116
5117 static void
5118 expand_omp_atomic (struct omp_region *region)
5119 {
5120 basic_block load_bb = region->entry, store_bb = region->exit;
5121 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5122 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5123 tree addr = gimple_omp_atomic_load_rhs (load);
5124 tree stored_val = gimple_omp_atomic_store_val (store);
5125 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5126 HOST_WIDE_INT index;
5127
5128 /* Make sure the type is one of the supported sizes. */
5129 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5130 index = exact_log2 (index);
5131 if (index >= 0 && index <= 4)
5132 {
5133 unsigned int align = TYPE_ALIGN_UNIT (type);
5134
5135 /* __sync builtins require strict data alignment. */
5136 if (exact_log2 (align) >= index)
5137 {
5138 /* When possible, use specialized atomic update functions. */
5139 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5140 && store_bb == single_succ (load_bb))
5141 {
5142 if (expand_omp_atomic_fetch_op (load_bb, addr,
5143 loaded_val, stored_val, index))
5144 return;
5145 }
5146
5147 /* If we don't have specialized __sync builtins, try to implement
5148 it as a compare and swap loop. */
5149 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5150 loaded_val, stored_val, index))
5151 return;
5152 }
5153 }
5154
5155 /* The ultimate fallback is wrapping the operation in a mutex. */
5156 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5157 }
5158
5159
5160 /* Expand the parallel region tree rooted at REGION. Expansion
5161 proceeds in depth-first order. Innermost regions are expanded
5162 first. This way, parallel regions that require a new function to
5163 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5164 internal dependencies in their body. */
5165
5166 static void
5167 expand_omp (struct omp_region *region)
5168 {
5169 while (region)
5170 {
5171 location_t saved_location;
5172
5173 /* First, determine whether this is a combined parallel+workshare
5174 region. */
5175 if (region->type == GIMPLE_OMP_PARALLEL)
5176 determine_parallel_type (region);
5177
5178 if (region->inner)
5179 expand_omp (region->inner);
5180
5181 saved_location = input_location;
5182 if (gimple_has_location (last_stmt (region->entry)))
5183 input_location = gimple_location (last_stmt (region->entry));
5184
5185 switch (region->type)
5186 {
5187 case GIMPLE_OMP_PARALLEL:
5188 case GIMPLE_OMP_TASK:
5189 expand_omp_taskreg (region);
5190 break;
5191
5192 case GIMPLE_OMP_FOR:
5193 expand_omp_for (region);
5194 break;
5195
5196 case GIMPLE_OMP_SECTIONS:
5197 expand_omp_sections (region);
5198 break;
5199
5200 case GIMPLE_OMP_SECTION:
5201 /* Individual omp sections are handled together with their
5202 parent GIMPLE_OMP_SECTIONS region. */
5203 break;
5204
5205 case GIMPLE_OMP_SINGLE:
5206 expand_omp_single (region);
5207 break;
5208
5209 case GIMPLE_OMP_MASTER:
5210 case GIMPLE_OMP_ORDERED:
5211 case GIMPLE_OMP_CRITICAL:
5212 expand_omp_synch (region);
5213 break;
5214
5215 case GIMPLE_OMP_ATOMIC_LOAD:
5216 expand_omp_atomic (region);
5217 break;
5218
5219 default:
5220 gcc_unreachable ();
5221 }
5222
5223 input_location = saved_location;
5224 region = region->next;
5225 }
5226 }
5227
5228
5229 /* Helper for build_omp_regions. Scan the dominator tree starting at
5230 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5231 true, the function ends once a single tree is built (otherwise, a whole
5232 forest of OMP constructs may be built). */
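/* For illustration, a function body containing

     #pragma omp parallel
       {
         #pragma omp for
           for (...) ...
       }

   yields a GIMPLE_OMP_PARALLEL region whose inner list holds a
   GIMPLE_OMP_FOR region: the GIMPLE_OMP_RETURN that ends each construct
   becomes that region's exit block, and the GIMPLE_OMP_CONTINUE of the
   loop becomes the inner region's cont block.  */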
5233
5234 static void
5235 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5236 bool single_tree)
5237 {
5238 gimple_stmt_iterator gsi;
5239 gimple stmt;
5240 basic_block son;
5241
5242 gsi = gsi_last_bb (bb);
5243 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5244 {
5245 struct omp_region *region;
5246 enum gimple_code code;
5247
5248 stmt = gsi_stmt (gsi);
5249 code = gimple_code (stmt);
5250 if (code == GIMPLE_OMP_RETURN)
5251 {
5252 /* STMT is the return point out of region PARENT. Mark it
5253 as the exit point and make PARENT the immediately
5254 enclosing region. */
5255 gcc_assert (parent);
5256 region = parent;
5257 region->exit = bb;
5258 parent = parent->outer;
5259 }
5260 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5261 {
5262 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5263 GIMPLE_OMP_RETURN, but matches with
5264 GIMPLE_OMP_ATOMIC_LOAD. */
5265 gcc_assert (parent);
5266 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5267 region = parent;
5268 region->exit = bb;
5269 parent = parent->outer;
5270 }
5271
5272 else if (code == GIMPLE_OMP_CONTINUE)
5273 {
5274 gcc_assert (parent);
5275 parent->cont = bb;
5276 }
5277 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5278 {
5279 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5280 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5281 ;
5282 }
5283 else
5284 {
5285 /* Otherwise, this directive becomes the parent for a new
5286 region. */
5287 region = new_omp_region (bb, code, parent);
5288 parent = region;
5289 }
5290 }
5291
5292 if (single_tree && !parent)
5293 return;
5294
5295 for (son = first_dom_son (CDI_DOMINATORS, bb);
5296 son;
5297 son = next_dom_son (CDI_DOMINATORS, son))
5298 build_omp_regions_1 (son, parent, single_tree);
5299 }
5300
5301 /* Builds the tree of OMP regions rooted at ROOT, storing it in
5302 root_omp_region. */
5303
5304 static void
5305 build_omp_regions_root (basic_block root)
5306 {
5307 gcc_assert (root_omp_region == NULL);
5308 build_omp_regions_1 (root, NULL, true);
5309 gcc_assert (root_omp_region != NULL);
5310 }
5311
5312 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
5313
5314 void
5315 omp_expand_local (basic_block head)
5316 {
5317 build_omp_regions_root (head);
5318 if (dump_file && (dump_flags & TDF_DETAILS))
5319 {
5320 fprintf (dump_file, "\nOMP region tree\n\n");
5321 dump_omp_region (dump_file, root_omp_region, 0);
5322 fprintf (dump_file, "\n");
5323 }
5324
5325 remove_exit_barriers (root_omp_region);
5326 expand_omp (root_omp_region);
5327
5328 free_omp_regions ();
5329 }
5330
5331 /* Scan the CFG and build a tree of OMP regions, storing the root of
5332 the OMP region tree in root_omp_region. */
5333
5334 static void
5335 build_omp_regions (void)
5336 {
5337 gcc_assert (root_omp_region == NULL);
5338 calculate_dominance_info (CDI_DOMINATORS);
5339 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5340 }
5341
5342 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5343
5344 static unsigned int
5345 execute_expand_omp (void)
5346 {
5347 build_omp_regions ();
5348
5349 if (!root_omp_region)
5350 return 0;
5351
5352 if (dump_file)
5353 {
5354 fprintf (dump_file, "\nOMP region tree\n\n");
5355 dump_omp_region (dump_file, root_omp_region, 0);
5356 fprintf (dump_file, "\n");
5357 }
5358
5359 remove_exit_barriers (root_omp_region);
5360
5361 expand_omp (root_omp_region);
5362
5363 cleanup_tree_cfg ();
5364
5365 free_omp_regions ();
5366
5367 return 0;
5368 }
5369
5370 /* OMP expansion -- the default pass, run before creation of SSA form. */
5371
5372 static bool
5373 gate_expand_omp (void)
5374 {
5375 return (flag_openmp != 0 && errorcount == 0);
5376 }
5377
5378 struct gimple_opt_pass pass_expand_omp =
5379 {
5380 {
5381 GIMPLE_PASS,
5382 "ompexp", /* name */
5383 gate_expand_omp, /* gate */
5384 execute_expand_omp, /* execute */
5385 NULL, /* sub */
5386 NULL, /* next */
5387 0, /* static_pass_number */
5388 0, /* tv_id */
5389 PROP_gimple_any, /* properties_required */
5390 PROP_gimple_lomp, /* properties_provided */
5391 0, /* properties_destroyed */
5392 0, /* todo_flags_start */
5393 TODO_dump_func /* todo_flags_finish */
5394 }
5395 };
5396 \f
5397 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5398
5399 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5400 CTX is the enclosing OMP context for the current statement. */
5401
5402 static void
5403 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5404 {
5405 tree block, control;
5406 gimple_stmt_iterator tgsi;
5407 unsigned i, len;
5408 gimple stmt, new_stmt, bind, t;
5409 gimple_seq ilist, dlist, olist, new_body, body;
5410 struct gimplify_ctx gctx;
5411
5412 stmt = gsi_stmt (*gsi_p);
5413
5414 push_gimplify_context (&gctx);
5415
5416 dlist = NULL;
5417 ilist = NULL;
5418 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5419 &ilist, &dlist, ctx);
5420
5421 tgsi = gsi_start (gimple_omp_body (stmt));
5422 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5423 continue;
5424
5425 tgsi = gsi_start (gimple_omp_body (stmt));
5426 body = NULL;
5427 for (i = 0; i < len; i++, gsi_next (&tgsi))
5428 {
5429 omp_context *sctx;
5430 gimple sec_start;
5431
5432 sec_start = gsi_stmt (tgsi);
5433 sctx = maybe_lookup_ctx (sec_start);
5434 gcc_assert (sctx);
5435
5436 gimple_seq_add_stmt (&body, sec_start);
5437
5438 lower_omp (gimple_omp_body (sec_start), sctx);
5439 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5440 gimple_omp_set_body (sec_start, NULL);
5441
5442 if (i == len - 1)
5443 {
5444 gimple_seq l = NULL;
5445 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5446 &l, ctx);
5447 gimple_seq_add_seq (&body, l);
5448 gimple_omp_section_set_last (sec_start);
5449 }
5450
5451 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5452 }
5453
5454 block = make_node (BLOCK);
5455 bind = gimple_build_bind (NULL, body, block);
5456
5457 olist = NULL;
5458 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5459
5460 block = make_node (BLOCK);
5461 new_stmt = gimple_build_bind (NULL, NULL, block);
5462
5463 pop_gimplify_context (new_stmt);
5464 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5465 BLOCK_VARS (block) = gimple_bind_vars (bind);
5466 if (BLOCK_VARS (block))
5467 TREE_USED (block) = 1;
5468
5469 new_body = NULL;
5470 gimple_seq_add_seq (&new_body, ilist);
5471 gimple_seq_add_stmt (&new_body, stmt);
5472 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5473 gimple_seq_add_stmt (&new_body, bind);
5474
5475 control = create_tmp_var (unsigned_type_node, ".section");
5476 t = gimple_build_omp_continue (control, control);
5477 gimple_omp_sections_set_control (stmt, control);
5478 gimple_seq_add_stmt (&new_body, t);
5479
5480 gimple_seq_add_seq (&new_body, olist);
5481 gimple_seq_add_seq (&new_body, dlist);
5482
5483 new_body = maybe_catch_exception (new_body);
5484
5485 t = gimple_build_omp_return
5486 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5487 OMP_CLAUSE_NOWAIT));
5488 gimple_seq_add_stmt (&new_body, t);
5489
5490 gimple_bind_set_body (new_stmt, new_body);
5491 gimple_omp_set_body (stmt, NULL);
5492
5493 gsi_replace (gsi_p, new_stmt, true);
5494 }
5495
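/* As an illustrative sketch only (names such as .section.N below are
   stand-ins, not literal dump output), a directive like

     #pragma omp sections firstprivate (x)
     {
       #pragma omp section
         a = x;
       #pragma omp section
         b = x;
     }

   is assembled by the routine above into roughly

     <ilist: firstprivate setup for x>
     GIMPLE_OMP_SECTIONS <clauses, control variable .section.N>
     GIMPLE_OMP_SECTIONS_SWITCH
     GIMPLE_BIND
       GIMPLE_OMP_SECTION         a = x;   GIMPLE_OMP_RETURN
       GIMPLE_OMP_SECTION (last)  b = x;   <lastprivate>  GIMPLE_OMP_RETURN
     GIMPLE_OMP_CONTINUE <.section.N, .section.N>
     <olist: reduction merges>  <dlist: destructors>
     GIMPLE_OMP_RETURN  (nowait iff an OMP_CLAUSE_NOWAIT is present)

   pass_expand_omp later rewrites the region into the GOMP_sections_*
   scheduling calls.  */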
5496
5497 /* A subroutine of lower_omp_single. Expand the simple form of
5498 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5499
5500 if (GOMP_single_start ())
5501 BODY;
5502 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5503
5504 FIXME. It may be better to delay expanding the logic of this until
5505 pass_expand_omp. The expanded logic may make the job more difficult
5506 for a synchronization analysis pass. */
5507
5508 static void
5509 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5510 {
5511 tree tlabel = create_artificial_label ();
5512 tree flabel = create_artificial_label ();
5513 gimple call, cond;
5514 tree lhs, decl;
5515
5516 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5517 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5518 call = gimple_build_call (decl, 0);
5519 gimple_call_set_lhs (call, lhs);
5520 gimple_seq_add_stmt (pre_p, call);
5521
5522 cond = gimple_build_cond (EQ_EXPR, lhs,
5523 fold_convert (TREE_TYPE (lhs), boolean_true_node),
5524 tlabel, flabel);
5525 gimple_seq_add_stmt (pre_p, cond);
5526 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5527 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5528 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5529 }
5530
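/* A minimal sketch of the sequence built above (the temporary and the
   label names are invented for illustration):

     D.1 = GOMP_single_start ();
     if (D.1 == 1) goto <tlabel>; else goto <flabel>;
   <tlabel>:
     BODY;
   <flabel>:

   The optional barrier from the comment above is not emitted here; it
   is represented by the GIMPLE_OMP_RETURN that lower_omp_single
   appends and is expected to be materialized by pass_expand_omp unless
   'nowait' was given.  */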
5531
5532 /* A subroutine of lower_omp_single. Expand the copyprivate form of
5533 a GIMPLE_OMP_SINGLE, i.e. one carrying a copyprivate clause:
5534
5535 #pragma omp single copyprivate (a, b, c)
5536
5537 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5538
5539 {
5540 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5541 {
5542 BODY;
5543 copyout.a = a;
5544 copyout.b = b;
5545 copyout.c = c;
5546 GOMP_single_copy_end (&copyout);
5547 }
5548 else
5549 {
5550 a = copyout_p->a;
5551 b = copyout_p->b;
5552 c = copyout_p->c;
5553 }
5554 GOMP_barrier ();
5555 }
5556
5557 FIXME. It may be better to delay expanding the logic of this until
5558 pass_expand_omp. The expanded logic may make the job more difficult
5559 for a synchronization analysis pass. */
5560
5561 static void
5562 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5563 {
5564 tree ptr_type, t, l0, l1, l2;
5565 gimple_seq copyin_seq;
5566
5567 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5568
5569 ptr_type = build_pointer_type (ctx->record_type);
5570 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5571
5572 l0 = create_artificial_label ();
5573 l1 = create_artificial_label ();
5574 l2 = create_artificial_label ();
5575
5576 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5577 t = fold_convert (ptr_type, t);
5578 gimplify_assign (ctx->receiver_decl, t, pre_p);
5579
5580 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5581 build_int_cst (ptr_type, 0));
5582 t = build3 (COND_EXPR, void_type_node, t,
5583 build_and_jump (&l0), build_and_jump (&l1));
5584 gimplify_and_add (t, pre_p);
5585
5586 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5587
5588 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5589
5590 copyin_seq = NULL;
5591 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5592 &copyin_seq, ctx);
5593
5594 t = build_fold_addr_expr (ctx->sender_decl);
5595 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
5596 gimplify_and_add (t, pre_p);
5597
5598 t = build_and_jump (&l2);
5599 gimplify_and_add (t, pre_p);
5600
5601 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5602
5603 gimple_seq_add_seq (pre_p, copyin_seq);
5604
5605 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5606 }
5607
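/* Note on the sketch in the comment above lower_omp_single_copy: the
   "copyout.a = a" stores and the "a = copyout_p->a" loads are not built
   here directly.  They come from lower_copyprivate_clauses, which
   appends one sequence straight to PRE_P (before the
   GOMP_single_copy_end call) and collects the other in COPYIN_SEQ,
   emitted after label l1.  */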
5608
5609 /* Lower code for an OpenMP single directive. */
5610
5611 static void
5612 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5613 {
5614 tree block;
5615 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5616 gimple_seq bind_body, dlist;
5617 struct gimplify_ctx gctx;
5618
5619 push_gimplify_context (&gctx);
5620
5621 bind_body = NULL;
5622 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5623 &bind_body, &dlist, ctx);
5624 lower_omp (gimple_omp_body (single_stmt), ctx);
5625
5626 gimple_seq_add_stmt (&bind_body, single_stmt);
5627
5628 if (ctx->record_type)
5629 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5630 else
5631 lower_omp_single_simple (single_stmt, &bind_body);
5632
5633 gimple_omp_set_body (single_stmt, NULL);
5634
5635 gimple_seq_add_seq (&bind_body, dlist);
5636
5637 bind_body = maybe_catch_exception (bind_body);
5638
5639 t = gimple_build_omp_return
5640 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5641 OMP_CLAUSE_NOWAIT));
5642 gimple_seq_add_stmt (&bind_body, t);
5643
5644 block = make_node (BLOCK);
5645 bind = gimple_build_bind (NULL, bind_body, block);
5646
5647 pop_gimplify_context (bind);
5648
5649 gimple_bind_append_vars (bind, ctx->block_vars);
5650 BLOCK_VARS (block) = ctx->block_vars;
5651 gsi_replace (gsi_p, bind, true);
5652 if (BLOCK_VARS (block))
5653 TREE_USED (block) = 1;
5654 }
5655
5656
5657 /* Lower code for an OpenMP master directive. */
5658
5659 static void
5660 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5661 {
5662 tree block, lab = NULL, x;
5663 gimple stmt = gsi_stmt (*gsi_p), bind;
5664 gimple_seq tseq;
5665 struct gimplify_ctx gctx;
5666
5667 push_gimplify_context (&gctx);
5668
5669 block = make_node (BLOCK);
5670 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5671 block);
5672
5673 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5674 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5675 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5676 tseq = NULL;
5677 gimplify_and_add (x, &tseq);
5678 gimple_bind_add_seq (bind, tseq);
5679
5680 lower_omp (gimple_omp_body (stmt), ctx);
5681 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5682 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5683 gimple_omp_set_body (stmt, NULL);
5684
5685 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5686
5687 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5688
5689 pop_gimplify_context (bind);
5690
5691 gimple_bind_append_vars (bind, ctx->block_vars);
5692 BLOCK_VARS (block) = ctx->block_vars;
5693 gsi_replace (gsi_p, bind, true);
5694 }
5695
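/* Sketched shape of the lowering above (the label name is invented):

     D.1 = __builtin_omp_get_thread_num ();
     if (D.1 == 0) <fall through>; else goto <lab>;
     BODY;
   <lab>:
     GIMPLE_OMP_RETURN

   Only the master thread executes BODY; OpenMP implies no barrier at
   the end of a master region, hence the gimple_build_omp_return (true)
   above.  */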
5696
5697 /* Lower code for an OpenMP ordered directive. */
5698
5699 static void
5700 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5701 {
5702 tree block;
5703 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5704 struct gimplify_ctx gctx;
5705
5706 push_gimplify_context (&gctx);
5707
5708 block = make_node (BLOCK);
5709 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5710 block);
5711
5712 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5713 gimple_bind_add_stmt (bind, x);
5714
5715 lower_omp (gimple_omp_body (stmt), ctx);
5716 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5717 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5718 gimple_omp_set_body (stmt, NULL);
5719
5720 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5721 gimple_bind_add_stmt (bind, x);
5722
5723 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5724
5725 pop_gimplify_context (bind);
5726
5727 gimple_bind_append_vars (bind, ctx->block_vars);
5728 BLOCK_VARS (block) = gimple_bind_vars (bind);
5729 gsi_replace (gsi_p, bind, true);
5730 }
5731
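/* The resulting lowering is simply, in sketch form:

     GOMP_ordered_start ();
     BODY;
     GOMP_ordered_end ();
     GIMPLE_OMP_RETURN

   The actual ordering semantics (threads entering in loop-iteration
   order) are implemented inside libgomp by the two calls.  */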
5732
5733 /* Lower a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5734 substitution of a couple of function calls. But in the NAMED case,
5735 it requires that the languages coordinate a symbol name. It is
5736 therefore best put here in common code. */
5737
5738 static GTY((param1_is (tree), param2_is (tree)))
5739 splay_tree critical_name_mutexes;
5740
5741 static void
5742 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5743 {
5744 tree block;
5745 tree name, lock, unlock;
5746 gimple stmt = gsi_stmt (*gsi_p), bind;
5747 gimple_seq tbody;
5748 struct gimplify_ctx gctx;
5749
5750 name = gimple_omp_critical_name (stmt);
5751 if (name)
5752 {
5753 tree decl;
5754 splay_tree_node n;
5755
5756 if (!critical_name_mutexes)
5757 critical_name_mutexes
5758 = splay_tree_new_ggc (splay_tree_compare_pointers);
5759
5760 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5761 if (n == NULL)
5762 {
5763 char *new_str;
5764
5765 decl = create_tmp_var_raw (ptr_type_node, NULL);
5766
5767 new_str = ACONCAT ((".gomp_critical_user_",
5768 IDENTIFIER_POINTER (name), NULL));
5769 DECL_NAME (decl) = get_identifier (new_str);
5770 TREE_PUBLIC (decl) = 1;
5771 TREE_STATIC (decl) = 1;
5772 DECL_COMMON (decl) = 1;
5773 DECL_ARTIFICIAL (decl) = 1;
5774 DECL_IGNORED_P (decl) = 1;
5775 varpool_finalize_decl (decl);
5776
5777 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5778 (splay_tree_value) decl);
5779 }
5780 else
5781 decl = (tree) n->value;
5782
5783 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5784 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
5785
5786 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5787 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
5788 }
5789 else
5790 {
5791 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5792 lock = build_call_expr (lock, 0);
5793
5794 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5795 unlock = build_call_expr (unlock, 0);
5796 }
5797
5798 push_gimplify_context (&gctx);
5799
5800 block = make_node (BLOCK);
5801 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5802
5803 tbody = gimple_bind_body (bind);
5804 gimplify_and_add (lock, &tbody);
5805 gimple_bind_set_body (bind, tbody);
5806
5807 lower_omp (gimple_omp_body (stmt), ctx);
5808 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5809 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5810 gimple_omp_set_body (stmt, NULL);
5811
5812 tbody = gimple_bind_body (bind);
5813 gimplify_and_add (unlock, &tbody);
5814 gimple_bind_set_body (bind, tbody);
5815
5816 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5817
5818 pop_gimplify_context (bind);
5819 gimple_bind_append_vars (bind, ctx->block_vars);
5820 BLOCK_VARS (block) = gimple_bind_vars (bind);
5821 gsi_replace (gsi_p, bind, true);
5822 }
5823
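/* For example (an illustrative sketch), "#pragma omp critical (lock1)"
   ends up bracketed by

     GOMP_critical_name_start (&.gomp_critical_user_lock1);
     BODY;
     GOMP_critical_name_end (&.gomp_critical_user_lock1);

   where .gomp_critical_user_lock1 is the common, public pointer-sized
   variable created above, so every translation unit that uses the same
   critical name shares a single mutex.  The unnamed form uses
   GOMP_critical_start / GOMP_critical_end with no argument.  */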
5824
5825 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5826 for a lastprivate clause. Given a loop control predicate of (V
5827 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5828 is appended to *DLIST, and the iterator initialization is appended
5829 to *BODY_P. */
5830
5831 static void
5832 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5833 gimple_seq *dlist, struct omp_context *ctx)
5834 {
5835 tree clauses, cond, vinit;
5836 enum tree_code cond_code;
5837 gimple_seq stmts;
5838
5839 cond_code = fd->loop.cond_code;
5840 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5841
5842 /* When possible, use a strict equality expression. This can let
5843 VRP-style optimizations deduce the value and remove a copy. */
5844 if (host_integerp (fd->loop.step, 0))
5845 {
5846 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5847 if (step == 1 || step == -1)
5848 cond_code = EQ_EXPR;
5849 }
5850
5851 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5852
5853 clauses = gimple_omp_for_clauses (fd->for_stmt);
5854 stmts = NULL;
5855 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5856 if (!gimple_seq_empty_p (stmts))
5857 {
5858 gimple_seq_add_seq (&stmts, *dlist);
5859 *dlist = stmts;
5860
5861 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
5862 vinit = fd->loop.n1;
5863 if (cond_code == EQ_EXPR
5864 && host_integerp (fd->loop.n2, 0)
5865 && ! integer_zerop (fd->loop.n2))
5866 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
5867
5868 /* Initialize the iterator variable, so that threads that don't execute
5869 any iterations don't execute the lastprivate clauses by accident. */
5870 gimplify_assign (fd->loop.v, vinit, body_p);
5871 }
5872 }
5873
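/* A worked example of the gating above, assuming compile-time constant
   bounds:  for

     #pragma omp for lastprivate (x)
     for (i = 5; i < 100; i++) ...

   cond_code is flipped from LT_EXPR to GE_EXPR and then, because the
   step is 1, narrowed to EQ_EXPR, so the lastprivate copy-back is
   guarded by "i == 100".  Since 100 is a nonzero constant, vinit
   becomes 0 rather than 5, so a thread that receives no iterations
   cannot satisfy the guard by accident (and "i = 0" is the cheaper
   initializer anyway).  */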
5874
5875 /* Lower code for an OpenMP loop directive. */
5876
5877 static void
5878 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5879 {
5880 tree *rhs_p, block;
5881 struct omp_for_data fd;
5882 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
5883 gimple_seq omp_for_body, body, dlist, ilist;
5884 size_t i;
5885 struct gimplify_ctx gctx;
5886
5887 push_gimplify_context (&gctx);
5888
5889 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
5890 lower_omp (gimple_omp_body (stmt), ctx);
5891
5892 block = make_node (BLOCK);
5893 new_stmt = gimple_build_bind (NULL, NULL, block);
5894
5895 /* Move the declarations of temporaries in the loop body out of it
5896 before we make the body go away. */
5897 omp_for_body = gimple_omp_body (stmt);
5898 if (!gimple_seq_empty_p (omp_for_body)
5899 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
5900 {
5901 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
5902 gimple_bind_append_vars (new_stmt, vars);
5903 }
5904
5905 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
5906 ilist = NULL;
5907 dlist = NULL;
5908 body = NULL;
5909 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
5910 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
5911
5912 /* Lower the header expressions. At this point, we can assume that
5913 the header is of the form:
5914
5915 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
5916
5917 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
5918 using the .omp_data_s mapping, if needed. */
5919 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
5920 {
5921 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
5922 if (!is_gimple_min_invariant (*rhs_p))
5923 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
5924
5925 rhs_p = gimple_omp_for_final_ptr (stmt, i);
5926 if (!is_gimple_min_invariant (*rhs_p))
5927 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
5928
5929 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
5930 if (!is_gimple_min_invariant (*rhs_p))
5931 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
5932 }
5933
5934 /* Once lowered, extract the bounds and clauses. */
5935 extract_omp_for_data (stmt, &fd, NULL);
5936
5937 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
5938
5939 gimple_seq_add_stmt (&body, stmt);
5940 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
5941
5942 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
5943 fd.loop.v));
5944
5945 /* After the loop, add exit clauses. */
5946 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
5947 gimple_seq_add_seq (&body, dlist);
5948
5949 body = maybe_catch_exception (body);
5950
5951 /* Region exit marker goes at the end of the loop body. */
5952 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
5953
5954 pop_gimplify_context (new_stmt);
5955
5956 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5957 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
5958 if (BLOCK_VARS (block))
5959 TREE_USED (block) = 1;
5960
5961 gimple_bind_set_body (new_stmt, body);
5962 gimple_omp_set_body (stmt, NULL);
5963 gimple_omp_for_set_pre_body (stmt, NULL);
5964 gsi_replace (gsi_p, new_stmt, true);
5965 }
5966
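/* Putting the pieces together, the GIMPLE_BIND built above holds, in
   order (an approximate sketch):

     <rec input clauses: private/firstprivate/reduction setup>
     <pre-body statements>
     <temporaries for non-invariant VAL1/VAL2/VAL3>
     <lastprivate guard initialization, e.g. V = 0>
     GIMPLE_OMP_FOR <header, clauses>
     <lowered loop body>
     GIMPLE_OMP_CONTINUE <V, V>
     <reduction merges>
     <dlist: lastprivate copy-back and destructors>
     GIMPLE_OMP_RETURN  (nowait iff fd.have_nowait)  */
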
5967 /* Callback for walk_gimple_seq. Check if the current statement only contains
5968 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
5969
5970 static tree
5971 check_combined_parallel (gimple_stmt_iterator *gsi_p,
5972 bool *handled_ops_p,
5973 struct walk_stmt_info *wi)
5974 {
5975 int *info = (int *) wi->info;
5976 gimple stmt = gsi_stmt (*gsi_p);
5977
5978 *handled_ops_p = true;
5979 switch (gimple_code (stmt))
5980 {
5981 WALK_SUBSTMTS;
5982
5983 case GIMPLE_OMP_FOR:
5984 case GIMPLE_OMP_SECTIONS:
5985 *info = *info == 0 ? 1 : -1;
5986 break;
5987 default:
5988 *info = -1;
5989 break;
5990 }
5991 return NULL;
5992 }
5993
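/* The walk above leaves *INFO (ws_num in lower_omp_taskreg) at 1 only
   when the parallel body contains exactly one work-sharing construct
   (a GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS) and no other non-container
   statements; any other statement, or a second work-sharing construct,
   forces it to -1.  The caller uses the value 1 to mark the parallel
   as combined.  */
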
5994 struct omp_taskcopy_context
5995 {
5996 /* This field must be at the beginning, as we do "inheritance": Some
5997 callback functions for tree-inline.c (e.g., omp_copy_decl)
5998 receive a copy_body_data pointer that is up-casted to an
5999 omp_context pointer. */
6000 copy_body_data cb;
6001 omp_context *ctx;
6002 };
6003
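/* Callback for copy_body_data::copy_decl, used by create_task_copyfn.
   Variables that have an sfield mapping get a fresh local temporary in
   the copy function; everything else is left untouched.  */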
6004 static tree
6005 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6006 {
6007 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6008
6009 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6010 return create_tmp_var (TREE_TYPE (var), NULL);
6011
6012 return var;
6013 }
6014
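/* Return a copy of the record type ORIG_TYPE in which field types and
   size expressions have been remapped through TCCTX->cb (needed when
   the record contains variably modified types).  Each original
   FIELD_DECL is mapped to its copy in TCCTX->cb.decl_map.  */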
6015 static tree
6016 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6017 {
6018 tree name, new_fields = NULL, type, f;
6019
6020 type = lang_hooks.types.make_type (RECORD_TYPE);
6021 name = DECL_NAME (TYPE_NAME (orig_type));
6022 name = build_decl (TYPE_DECL, name, type);
6023 TYPE_NAME (type) = name;
6024
6025 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6026 {
6027 tree new_f = copy_node (f);
6028 DECL_CONTEXT (new_f) = type;
6029 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6030 TREE_CHAIN (new_f) = new_fields;
6031 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6032 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6033 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6034 &tcctx->cb, NULL);
6035 new_fields = new_f;
6036 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6037 }
6038 TYPE_FIELDS (type) = nreverse (new_fields);
6039 layout_type (type);
6040 return type;
6041 }
6042
6043 /* Create the copy function GOMP_task uses to set up TASK_STMT's data. */
6044
6045 static void
6046 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6047 {
6048 struct function *child_cfun;
6049 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6050 tree record_type, srecord_type, bind, list;
6051 bool record_needs_remap = false, srecord_needs_remap = false;
6052 splay_tree_node n;
6053 struct omp_taskcopy_context tcctx;
6054 struct gimplify_ctx gctx;
6055
6056 child_fn = gimple_omp_task_copy_fn (task_stmt);
6057 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6058 gcc_assert (child_cfun->cfg == NULL);
6059 child_cfun->dont_save_pending_sizes_p = 1;
6060 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6061
6062 /* Reset DECL_CONTEXT on function arguments. */
6063 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6064 DECL_CONTEXT (t) = child_fn;
6065
6066 /* Populate the function. */
6067 push_gimplify_context (&gctx);
6068 current_function_decl = child_fn;
6069
6070 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6071 TREE_SIDE_EFFECTS (bind) = 1;
6072 list = NULL;
6073 DECL_SAVED_TREE (child_fn) = bind;
6074 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6075
6076 /* Remap src and dst argument types if needed. */
6077 record_type = ctx->record_type;
6078 srecord_type = ctx->srecord_type;
6079 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6080 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6081 {
6082 record_needs_remap = true;
6083 break;
6084 }
6085 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6086 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6087 {
6088 srecord_needs_remap = true;
6089 break;
6090 }
6091
6092 if (record_needs_remap || srecord_needs_remap)
6093 {
6094 memset (&tcctx, '\0', sizeof (tcctx));
6095 tcctx.cb.src_fn = ctx->cb.src_fn;
6096 tcctx.cb.dst_fn = child_fn;
6097 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6098 tcctx.cb.dst_node = tcctx.cb.src_node;
6099 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6100 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6101 tcctx.cb.eh_region = -1;
6102 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6103 tcctx.cb.decl_map = pointer_map_create ();
6104 tcctx.ctx = ctx;
6105
6106 if (record_needs_remap)
6107 record_type = task_copyfn_remap_type (&tcctx, record_type);
6108 if (srecord_needs_remap)
6109 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6110 }
6111 else
6112 tcctx.cb.decl_map = NULL;
6113
6114 push_cfun (child_cfun);
6115
6116 arg = DECL_ARGUMENTS (child_fn);
6117 TREE_TYPE (arg) = build_pointer_type (record_type);
6118 sarg = TREE_CHAIN (arg);
6119 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6120
6121 /* First pass: initialize temporaries used in record_type and srecord_type
6122 sizes and field offsets. */
6123 if (tcctx.cb.decl_map)
6124 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6125 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6126 {
6127 tree *p;
6128
6129 decl = OMP_CLAUSE_DECL (c);
6130 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6131 if (p == NULL)
6132 continue;
6133 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6134 sf = (tree) n->value;
6135 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6136 src = build_fold_indirect_ref (sarg);
6137 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6138 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6139 append_to_statement_list (t, &list);
6140 }
6141
6142 /* Second pass: copy shared var pointers and copy-construct non-VLA
6143 firstprivate vars. */
6144 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6145 switch (OMP_CLAUSE_CODE (c))
6146 {
6147 case OMP_CLAUSE_SHARED:
6148 decl = OMP_CLAUSE_DECL (c);
6149 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6150 if (n == NULL)
6151 break;
6152 f = (tree) n->value;
6153 if (tcctx.cb.decl_map)
6154 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6155 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6156 sf = (tree) n->value;
6157 if (tcctx.cb.decl_map)
6158 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6159 src = build_fold_indirect_ref (sarg);
6160 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6161 dst = build_fold_indirect_ref (arg);
6162 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6163 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6164 append_to_statement_list (t, &list);
6165 break;
6166 case OMP_CLAUSE_FIRSTPRIVATE:
6167 decl = OMP_CLAUSE_DECL (c);
6168 if (is_variable_sized (decl))
6169 break;
6170 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6171 if (n == NULL)
6172 break;
6173 f = (tree) n->value;
6174 if (tcctx.cb.decl_map)
6175 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6176 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6177 if (n != NULL)
6178 {
6179 sf = (tree) n->value;
6180 if (tcctx.cb.decl_map)
6181 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6182 src = build_fold_indirect_ref (sarg);
6183 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6184 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6185 src = build_fold_indirect_ref (src);
6186 }
6187 else
6188 src = decl;
6189 dst = build_fold_indirect_ref (arg);
6190 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6191 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6192 append_to_statement_list (t, &list);
6193 break;
6194 case OMP_CLAUSE_PRIVATE:
6195 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6196 break;
6197 decl = OMP_CLAUSE_DECL (c);
6198 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6199 f = (tree) n->value;
6200 if (tcctx.cb.decl_map)
6201 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6202 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6203 if (n != NULL)
6204 {
6205 sf = (tree) n->value;
6206 if (tcctx.cb.decl_map)
6207 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6208 src = build_fold_indirect_ref (sarg);
6209 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6210 if (use_pointer_for_field (decl, NULL))
6211 src = build_fold_indirect_ref (src);
6212 }
6213 else
6214 src = decl;
6215 dst = build_fold_indirect_ref (arg);
6216 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6217 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6218 append_to_statement_list (t, &list);
6219 break;
6220 default:
6221 break;
6222 }
6223
6224 /* Last pass: handle VLA firstprivates. */
6225 if (tcctx.cb.decl_map)
6226 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6227 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6228 {
6229 tree ind, ptr, df;
6230
6231 decl = OMP_CLAUSE_DECL (c);
6232 if (!is_variable_sized (decl))
6233 continue;
6234 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6235 if (n == NULL)
6236 continue;
6237 f = (tree) n->value;
6238 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6239 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6240 ind = DECL_VALUE_EXPR (decl);
6241 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6242 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6243 n = splay_tree_lookup (ctx->sfield_map,
6244 (splay_tree_key) TREE_OPERAND (ind, 0));
6245 sf = (tree) n->value;
6246 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6247 src = build_fold_indirect_ref (sarg);
6248 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6249 src = build_fold_indirect_ref (src);
6250 dst = build_fold_indirect_ref (arg);
6251 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6252 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6253 append_to_statement_list (t, &list);
6254 n = splay_tree_lookup (ctx->field_map,
6255 (splay_tree_key) TREE_OPERAND (ind, 0));
6256 df = (tree) n->value;
6257 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6258 ptr = build_fold_indirect_ref (arg);
6259 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6260 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6261 build_fold_addr_expr (dst));
6262 append_to_statement_list (t, &list);
6263 }
6264
6265 t = build1 (RETURN_EXPR, void_type_node, NULL);
6266 append_to_statement_list (t, &list);
6267
6268 if (tcctx.cb.decl_map)
6269 pointer_map_destroy (tcctx.cb.decl_map);
6270 pop_gimplify_context (NULL);
6271 BIND_EXPR_BODY (bind) = list;
6272 pop_cfun ();
6273 current_function_decl = ctx->cb.src_fn;
6274 }
6275
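/* As a hedged illustration only (the field and function names below
   are invented, not the identifiers the compiler generates), for

     #pragma omp task firstprivate (v) shared (s)

   the function built above behaves roughly like

     void cpyfn (struct task_data *dst, struct sender_data *src)
     {
       dst->s = src->s;
       dst->v = copy-construct (src->v);
       return;
     }

   i.e. shared variables have their field copied verbatim while
   firstprivate variables are copy-constructed; VLA firstprivates
   additionally get their pointer field in DST re-pointed at the
   freshly copied storage.  GOMP_task invokes this function to fill in
   the task's data block before the task body runs.  */
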
6276 /* Lower the OpenMP parallel or task directive in the current statement
6277 in GSI_P. CTX holds context information for the directive. */
6278
6279 static void
6280 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6281 {
6282 tree clauses;
6283 tree child_fn, t;
6284 gimple stmt = gsi_stmt (*gsi_p);
6285 gimple par_bind, bind;
6286 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6287 struct gimplify_ctx gctx;
6288
6289 clauses = gimple_omp_taskreg_clauses (stmt);
6290 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6291 par_body = gimple_bind_body (par_bind);
6292 child_fn = ctx->cb.dst_fn;
6293 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6294 && !gimple_omp_parallel_combined_p (stmt))
6295 {
6296 struct walk_stmt_info wi;
6297 int ws_num = 0;
6298
6299 memset (&wi, 0, sizeof (wi));
6300 wi.info = &ws_num;
6301 wi.val_only = true;
6302 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6303 if (ws_num == 1)
6304 gimple_omp_parallel_set_combined_p (stmt, true);
6305 }
6306 if (ctx->srecord_type)
6307 create_task_copyfn (stmt, ctx);
6308
6309 push_gimplify_context (&gctx);
6310
6311 par_olist = NULL;
6312 par_ilist = NULL;
6313 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6314 lower_omp (par_body, ctx);
6315 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6316 lower_reduction_clauses (clauses, &par_olist, ctx);
6317
6318 /* Declare all the variables created by mapping and the variables
6319 declared in the scope of the parallel body. */
6320 record_vars_into (ctx->block_vars, child_fn);
6321 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6322
6323 if (ctx->record_type)
6324 {
6325 ctx->sender_decl
6326 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6327 : ctx->record_type, ".omp_data_o");
6328 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6329 }
6330
6331 olist = NULL;
6332 ilist = NULL;
6333 lower_send_clauses (clauses, &ilist, &olist, ctx);
6334 lower_send_shared_vars (&ilist, &olist, ctx);
6335
6336 /* Once all the expansions are done, sequence all the different
6337 fragments inside gimple_omp_body. */
6338
6339 new_body = NULL;
6340
6341 if (ctx->record_type)
6342 {
6343 t = build_fold_addr_expr (ctx->sender_decl);
6344 /* fixup_child_record_type might have changed receiver_decl's type. */
6345 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
6346 gimple_seq_add_stmt (&new_body,
6347 gimple_build_assign (ctx->receiver_decl, t));
6348 }
6349
6350 gimple_seq_add_seq (&new_body, par_ilist);
6351 gimple_seq_add_seq (&new_body, par_body);
6352 gimple_seq_add_seq (&new_body, par_olist);
6353 new_body = maybe_catch_exception (new_body);
6354 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6355 gimple_omp_set_body (stmt, new_body);
6356
6357 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6358 gimple_bind_add_stmt (bind, stmt);
6359 if (ilist || olist)
6360 {
6361 gimple_seq_add_stmt (&ilist, bind);
6362 gimple_seq_add_seq (&ilist, olist);
6363 bind = gimple_build_bind (NULL, ilist, NULL);
6364 }
6365
6366 gsi_replace (gsi_p, bind, true);
6367
6368 pop_gimplify_context (NULL);
6369 }
6370
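/* After this routine the directive has, in sketch form (the receiver
   and sender names follow the .omp_data_i / .omp_data_o convention
   used by the scan phase; treat them as illustrative here):

     GIMPLE_BIND
       <ilist: send clauses, shared-var setup in the encountering thread>
       GIMPLE_BIND
         GIMPLE_OMP_PARALLEL / GIMPLE_OMP_TASK <clauses, child fn, .omp_data_o>
           .omp_data_i = &.omp_data_o;    (only when a record type exists)
           <par_ilist>  <lowered body>  <par_olist>
           GIMPLE_OMP_RETURN
       <olist: copy-back in the encountering thread>

   pass_expand_omp later outlines the region body into the child
   function and replaces the directive with GOMP_parallel_* or
   GOMP_task calls.  */
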
6371 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6372 regimplified. If DATA is non-NULL, lower_omp_1 is being called outside
6373 of an OpenMP context, but with task_shared_vars set. */
6374
6375 static tree
6376 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6377 void *data)
6378 {
6379 tree t = *tp;
6380
6381 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6382 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6383 return t;
6384
6385 if (task_shared_vars
6386 && DECL_P (t)
6387 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6388 return t;
6389
6390 /* If a global variable has been privatized, TREE_CONSTANT on
6391 ADDR_EXPR might be wrong. */
6392 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6393 recompute_tree_invariant_for_addr_expr (t);
6394
6395 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6396 return NULL_TREE;
6397 }
6398
6399 static void
6400 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6401 {
6402 gimple stmt = gsi_stmt (*gsi_p);
6403 struct walk_stmt_info wi;
6404
6405 if (gimple_has_location (stmt))
6406 input_location = gimple_location (stmt);
6407
6408 if (task_shared_vars)
6409 memset (&wi, '\0', sizeof (wi));
6410
6411 /* If we have issued syntax errors, avoid doing any heavy lifting.
6412 Just replace the OpenMP directives with a NOP to avoid
6413 confusing RTL expansion. */
6414 if (errorcount && is_gimple_omp (stmt))
6415 {
6416 gsi_replace (gsi_p, gimple_build_nop (), true);
6417 return;
6418 }
6419
6420 switch (gimple_code (stmt))
6421 {
6422 case GIMPLE_COND:
6423 if ((ctx || task_shared_vars)
6424 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6425 ctx ? NULL : &wi, NULL)
6426 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6427 ctx ? NULL : &wi, NULL)))
6428 gimple_regimplify_operands (stmt, gsi_p);
6429 break;
6430 case GIMPLE_CATCH:
6431 lower_omp (gimple_catch_handler (stmt), ctx);
6432 break;
6433 case GIMPLE_EH_FILTER:
6434 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6435 break;
6436 case GIMPLE_TRY:
6437 lower_omp (gimple_try_eval (stmt), ctx);
6438 lower_omp (gimple_try_cleanup (stmt), ctx);
6439 break;
6440 case GIMPLE_BIND:
6441 lower_omp (gimple_bind_body (stmt), ctx);
6442 break;
6443 case GIMPLE_OMP_PARALLEL:
6444 case GIMPLE_OMP_TASK:
6445 ctx = maybe_lookup_ctx (stmt);
6446 lower_omp_taskreg (gsi_p, ctx);
6447 break;
6448 case GIMPLE_OMP_FOR:
6449 ctx = maybe_lookup_ctx (stmt);
6450 gcc_assert (ctx);
6451 lower_omp_for (gsi_p, ctx);
6452 break;
6453 case GIMPLE_OMP_SECTIONS:
6454 ctx = maybe_lookup_ctx (stmt);
6455 gcc_assert (ctx);
6456 lower_omp_sections (gsi_p, ctx);
6457 break;
6458 case GIMPLE_OMP_SINGLE:
6459 ctx = maybe_lookup_ctx (stmt);
6460 gcc_assert (ctx);
6461 lower_omp_single (gsi_p, ctx);
6462 break;
6463 case GIMPLE_OMP_MASTER:
6464 ctx = maybe_lookup_ctx (stmt);
6465 gcc_assert (ctx);
6466 lower_omp_master (gsi_p, ctx);
6467 break;
6468 case GIMPLE_OMP_ORDERED:
6469 ctx = maybe_lookup_ctx (stmt);
6470 gcc_assert (ctx);
6471 lower_omp_ordered (gsi_p, ctx);
6472 break;
6473 case GIMPLE_OMP_CRITICAL:
6474 ctx = maybe_lookup_ctx (stmt);
6475 gcc_assert (ctx);
6476 lower_omp_critical (gsi_p, ctx);
6477 break;
6478 case GIMPLE_OMP_ATOMIC_LOAD:
6479 if ((ctx || task_shared_vars)
6480 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6481 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6482 gimple_regimplify_operands (stmt, gsi_p);
6483 break;
6484 default:
6485 if ((ctx || task_shared_vars)
6486 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6487 ctx ? NULL : &wi))
6488 gimple_regimplify_operands (stmt, gsi_p);
6489 break;
6490 }
6491 }
6492
6493 static void
6494 lower_omp (gimple_seq body, omp_context *ctx)
6495 {
6496 location_t saved_location = input_location;
6497 gimple_stmt_iterator gsi;
6498 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6499 lower_omp_1 (&gsi, ctx);
6500 input_location = saved_location;
6501 }
6502 \f
6503 /* Main entry point. */
6504
6505 static unsigned int
6506 execute_lower_omp (void)
6507 {
6508 gimple_seq body;
6509
6510 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6511 delete_omp_context);
6512
6513 body = gimple_body (current_function_decl);
6514 scan_omp (body, NULL);
6515 gcc_assert (taskreg_nesting_level == 0);
6516
6517 if (all_contexts->root)
6518 {
6519 struct gimplify_ctx gctx;
6520
6521 if (task_shared_vars)
6522 push_gimplify_context (&gctx);
6523 lower_omp (body, NULL);
6524 if (task_shared_vars)
6525 pop_gimplify_context (NULL);
6526 }
6527
6528 if (all_contexts)
6529 {
6530 splay_tree_delete (all_contexts);
6531 all_contexts = NULL;
6532 }
6533 BITMAP_FREE (task_shared_vars);
6534 return 0;
6535 }
6536
6537 static bool
6538 gate_lower_omp (void)
6539 {
6540 return flag_openmp != 0;
6541 }
6542
6543 struct gimple_opt_pass pass_lower_omp =
6544 {
6545 {
6546 GIMPLE_PASS,
6547 "omplower", /* name */
6548 gate_lower_omp, /* gate */
6549 execute_lower_omp, /* execute */
6550 NULL, /* sub */
6551 NULL, /* next */
6552 0, /* static_pass_number */
6553 0, /* tv_id */
6554 PROP_gimple_any, /* properties_required */
6555 PROP_gimple_lomp, /* properties_provided */
6556 0, /* properties_destroyed */
6557 0, /* todo_flags_start */
6558 TODO_dump_func /* todo_flags_finish */
6559 }
6560 };
6561 \f
6562 /* The following is a utility to diagnose OpenMP structured block violations.
6563 It is not part of the "omplower" pass, as that's invoked too late. It
6564 should be invoked by the respective front ends after gimplification. */
6565
6566 static splay_tree all_labels;
6567
6568 /* Check for mismatched contexts and generate an error if needed. Return
6569 true if an error is detected. */
6570
6571 static bool
6572 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6573 gimple branch_ctx, gimple label_ctx)
6574 {
6575 if (label_ctx == branch_ctx)
6576 return false;
6577
6578
6579 /*
6580 Previously we kept track of the label's entire context in diagnose_sb_[12]
6581 so we could traverse it and issue a correct "exit" or "enter" error
6582 message upon a structured block violation.
6583
6584 We built that context as a list via tree_cons'ing, but there is
6585 no easy counterpart in gimple tuples. It seems like far too much work
6586 for issuing exit/enter error messages. If someone really misses the
6587 distinct error message... patches welcome.
6588 */
6589
6590 #if 0
6591 /* Try to avoid confusing the user by producing an error message
6592 with correct "exit" or "enter" verbiage. We prefer "exit"
6593 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6594 if (branch_ctx == NULL)
6595 exit_p = false;
6596 else
6597 {
6598 while (label_ctx)
6599 {
6600 if (TREE_VALUE (label_ctx) == branch_ctx)
6601 {
6602 exit_p = false;
6603 break;
6604 }
6605 label_ctx = TREE_CHAIN (label_ctx);
6606 }
6607 }
6608
6609 if (exit_p)
6610 error ("invalid exit from OpenMP structured block");
6611 else
6612 error ("invalid entry to OpenMP structured block");
6613 #endif
6614
6615 /* If it's obvious we have an invalid entry, be specific about the error. */
6616 if (branch_ctx == NULL)
6617 error ("invalid entry to OpenMP structured block");
6618 else
6619 /* Otherwise, be vague and lazy, but efficient. */
6620 error ("invalid branch to/from an OpenMP structured block");
6621
6622 gsi_replace (gsi_p, gimple_build_nop (), false);
6623 return true;
6624 }
6625
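/* For instance (a sketch), given

     #pragma omp parallel
     {
       goto out;
     }
   out:;

   pass 2 below sees the GIMPLE_GOTO with the GIMPLE_OMP_PARALLEL as its
   branch context while the label "out" was recorded with a NULL
   context, so the generic "invalid branch to/from an OpenMP structured
   block" error is emitted.  A goto jumping the other way, into the
   parallel, hits the BRANCH_CTX == NULL case and gets the more
   specific "invalid entry" message.  */
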
6626 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6627 where each label is found. */
6628
6629 static tree
6630 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6631 struct walk_stmt_info *wi)
6632 {
6633 gimple context = (gimple) wi->info;
6634 gimple inner_context;
6635 gimple stmt = gsi_stmt (*gsi_p);
6636
6637 *handled_ops_p = true;
6638
6639 switch (gimple_code (stmt))
6640 {
6641 WALK_SUBSTMTS;
6642
6643 case GIMPLE_OMP_PARALLEL:
6644 case GIMPLE_OMP_TASK:
6645 case GIMPLE_OMP_SECTIONS:
6646 case GIMPLE_OMP_SINGLE:
6647 case GIMPLE_OMP_SECTION:
6648 case GIMPLE_OMP_MASTER:
6649 case GIMPLE_OMP_ORDERED:
6650 case GIMPLE_OMP_CRITICAL:
6651 /* The minimal context here is just the current OMP construct. */
6652 inner_context = stmt;
6653 wi->info = inner_context;
6654 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6655 wi->info = context;
6656 break;
6657
6658 case GIMPLE_OMP_FOR:
6659 inner_context = stmt;
6660 wi->info = inner_context;
6661 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6662 walk them. */
6663 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6664 diagnose_sb_1, NULL, wi);
6665 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6666 wi->info = context;
6667 break;
6668
6669 case GIMPLE_LABEL:
6670 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6671 (splay_tree_value) context);
6672 break;
6673
6674 default:
6675 break;
6676 }
6677
6678 return NULL_TREE;
6679 }
6680
6681 /* Pass 2: Check each branch and see if its context differs from
6682 the context of the destination label. */
6683
6684 static tree
6685 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6686 struct walk_stmt_info *wi)
6687 {
6688 gimple context = (gimple) wi->info;
6689 splay_tree_node n;
6690 gimple stmt = gsi_stmt (*gsi_p);
6691
6692 *handled_ops_p = true;
6693
6694 switch (gimple_code (stmt))
6695 {
6696 WALK_SUBSTMTS;
6697
6698 case GIMPLE_OMP_PARALLEL:
6699 case GIMPLE_OMP_TASK:
6700 case GIMPLE_OMP_SECTIONS:
6701 case GIMPLE_OMP_SINGLE:
6702 case GIMPLE_OMP_SECTION:
6703 case GIMPLE_OMP_MASTER:
6704 case GIMPLE_OMP_ORDERED:
6705 case GIMPLE_OMP_CRITICAL:
6706 wi->info = stmt;
6707 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6708 wi->info = context;
6709 break;
6710
6711 case GIMPLE_OMP_FOR:
6712 wi->info = stmt;
6713 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6714 walk them. */
6715 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6716 diagnose_sb_2, NULL, wi);
6717 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6718 wi->info = context;
6719 break;
6720
6721 case GIMPLE_GOTO:
6722 {
6723 tree lab = gimple_goto_dest (stmt);
6724 if (TREE_CODE (lab) != LABEL_DECL)
6725 break;
6726
6727 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6728 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6729 }
6730 break;
6731
6732 case GIMPLE_SWITCH:
6733 {
6734 unsigned int i;
6735 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6736 {
6737 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6738 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6739 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6740 break;
6741 }
6742 }
6743 break;
6744
6745 case GIMPLE_RETURN:
6746 diagnose_sb_0 (gsi_p, context, NULL);
6747 break;
6748
6749 default:
6750 break;
6751 }
6752
6753 return NULL_TREE;
6754 }
6755
6756 void
6757 diagnose_omp_structured_block_errors (tree fndecl)
6758 {
6759 tree save_current = current_function_decl;
6760 struct walk_stmt_info wi;
6761 struct function *old_cfun = cfun;
6762 gimple_seq body = gimple_body (fndecl);
6763
6764 current_function_decl = fndecl;
6765 set_cfun (DECL_STRUCT_FUNCTION (fndecl));
6766
6767 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6768
6769 memset (&wi, 0, sizeof (wi));
6770 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6771
6772 memset (&wi, 0, sizeof (wi));
6773 wi.want_locations = true;
6774 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6775
6776 splay_tree_delete (all_labels);
6777 all_labels = NULL;
6778
6779 set_cfun (old_cfun);
6780 current_function_decl = save_current;
6781 }
6782
6783 #include "gt-omp-low.h"