/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"


/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn;
     record_type is allocated by GOMP_task, initialized by task
     firstprivate fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;
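
/* For a parallel region, e.g., RECORD_TYPE describes the ".omp_data_s"
   structure through which the encountering thread (SENDER_DECL, the
   ".omp_data_o" object) passes shared and firstprivate values to the
   child function (RECEIVER_DECL, its ".omp_data_i" argument).  */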


struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
         || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
                      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        break;
      case OMP_CLAUSE_SCHEDULE:
        fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_COLLAPSE:
        if (fd->collapse > 1)
          {
            collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
            collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
          }
      default:
        break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered
          || fd->collapse > 1)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
        loop = &fd->loop;
      else if (loops != NULL)
        loop = loops + i;
      else
        loop = &dummy_loop;


      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
        {
        case LT_EXPR:
        case GT_EXPR:
          break;
        case LE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
                                    loop->n2, size_one_node);
          else
            loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                                    build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = LT_EXPR;
          break;
        case GE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
                                    loop->n2, size_int (-1));
          else
            loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                                    build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = GT_EXPR;
          break;
        default:
          gcc_unreachable ();
        }

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
        {
        case PLUS_EXPR:
        case POINTER_PLUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          break;
        case MINUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
                                    loop->step);
          break;
        default:
          gcc_unreachable ();
        }

      if (iter_type != long_long_unsigned_type_node)
        {
          if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
            iter_type = long_long_unsigned_type_node;
          else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
                   && TYPE_PRECISION (TREE_TYPE (loop->v))
                      >= TYPE_PRECISION (iter_type))
            {
              tree n;

              if (loop->cond_code == LT_EXPR)
                n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
                                 loop->n2, loop->step);
              else
                n = loop->n1;
              if (TREE_CODE (n) != INTEGER_CST
                  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
                iter_type = long_long_unsigned_type_node;
            }
          else if (TYPE_PRECISION (TREE_TYPE (loop->v))
                   > TYPE_PRECISION (iter_type))
            {
              tree n1, n2;

              if (loop->cond_code == LT_EXPR)
                {
                  n1 = loop->n1;
                  n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
                                    loop->n2, loop->step);
                }
              else
                {
                  n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
                                    loop->n2, loop->step);
                  n2 = loop->n1;
                }
              if (TREE_CODE (n1) != INTEGER_CST
                  || TREE_CODE (n2) != INTEGER_CST
                  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
                  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
                iter_type = long_long_unsigned_type_node;
            }
        }

      if (collapse_count && *collapse_count == NULL)
        {
          if ((i == 0 || count != NULL_TREE)
              && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
              && TREE_CONSTANT (loop->n1)
              && TREE_CONSTANT (loop->n2)
              && TREE_CODE (loop->step) == INTEGER_CST)
            {
              tree itype = TREE_TYPE (loop->v);

              if (POINTER_TYPE_P (itype))
                itype
                  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
              t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
              t = fold_build2 (PLUS_EXPR, itype,
                               fold_convert (itype, loop->step), t);
              t = fold_build2 (PLUS_EXPR, itype, t,
                               fold_convert (itype, loop->n2));
              t = fold_build2 (MINUS_EXPR, itype, t,
                               fold_convert (itype, loop->n1));
              if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
                t = fold_build2 (TRUNC_DIV_EXPR, itype,
                                 fold_build1 (NEGATE_EXPR, itype, t),
                                 fold_build1 (NEGATE_EXPR, itype,
                                              fold_convert (itype,
                                                            loop->step)));
              else
                t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
                                 fold_convert (itype, loop->step));
              t = fold_convert (long_long_unsigned_type_node, t);
              if (count != NULL_TREE)
                count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
                                     count, t);
              else
                count = t;
              if (TREE_CODE (count) != INTEGER_CST)
                count = NULL_TREE;
            }
          else
            count = NULL_TREE;
        }
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
        iter_type = long_long_unsigned_type_node;
      else
        iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
        *collapse_count = fold_convert (iter_type, count);
      else
        *collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
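
/* As an illustration of the normalization done above: given

       #pragma omp for collapse(2)
       for (i = 0; i < N; i++)
         for (j = 0; j <= M; j++)

   the inner condition becomes j < M + 1, and when N and M are integer
   constants the two trip counts are folded into one logical iteration
   space, so fd->loop runs a ".iter" variable from 0 to
   ".count" = N * (M + 1) in steps of 1 under LT_EXPR.  */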


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple par_stmt, ws_stmt;

  par_stmt = last_stmt (par_entry_bb);
  ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static tree
get_ws_args_for (gimple ws_stmt)
{
  tree t;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = NULL_TREE;
      if (fd.chunk_size)
        {
          t = fold_convert (long_integer_type_node, fd.chunk_size);
          ws_args = tree_cons (NULL, t, ws_args);
        }

      t = fold_convert (long_integer_type_node, fd.loop.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.loop.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.loop.n1);
      ws_args = tree_cons (NULL, t, ws_args);

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
         GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
         the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}
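
/* E.g., for

       #pragma omp parallel for schedule (dynamic, 4)
       for (i = 0; i < n; i++)

   the arguments are consed in reverse, so the resulting list is
   (n1, n2, step, chunk) == (0, n, 1, 4), ready to be appended to the
   argument list of the combined GOMP_parallel_loop_* libgomp entry
   point.  */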


/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
          && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
          || (last_and_only_stmt (ws_entry_bb)
              && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
        {
          /* If this is a combined parallel loop, we need to determine
             whether or not to use the combined library calls.  There
             are two cases where we do not apply the transformation:
             static loops and any kind of ordered loop.  In the first
             case, we already open code the loop so there is no need
             to do anything else.  In the latter case, the combined
             parallel loop call would still need extra synchronization
             to implement ordered semantics, so there would not be any
             gain in using the combined call.  */
          tree clauses = gimple_omp_for_clauses (ws_stmt);
          tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
          if (c == NULL
              || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
              || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
            {
              region->is_combined_parallel = false;
              region->inner->is_combined_parallel = false;
              return;
            }
        }

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows the variable to not have been entered; otherwise we assert
   that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
                         ? ctx->sfield_map : ctx->field_map,
                         (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
         be passing an address in this case?  Should we simply assert
         this to be false, or should we have a cleanup pass that removes
         these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
        return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
         without analyzing the expression whether or not its location
         is accessible to anyone else.  In the case of nested parallel
         regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
        return true;

      /* Do not use copy-in/copy-out for variables that have their
         address taken.  */
      if (TREE_ADDRESSABLE (decl))
        return true;

      /* Disallow copy-in/out in nested parallel if
         decl is shared in outer parallel, otherwise
         each thread could store the shared variable
         in its own copy-in location, making the
         variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
        {
          omp_context *up;

          for (up = shared_ctx->outer; up; up = up->outer)
            if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
              break;

          if (up)
            {
              tree c;

              for (c = gimple_omp_taskreg_clauses (up->stmt);
                   c; c = OMP_CLAUSE_CHAIN (c))
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
                    && OMP_CLAUSE_DECL (c) == decl)
                  break;

              if (c)
                return true;
            }
        }

      /* For tasks avoid using copy-in/out, unless they are readonly
         (in which case just copy-in is used).  As tasks can be
         deferred or executed in a different thread, when GOMP_task
         returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
        {
          tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
          if (is_gimple_reg (outer))
            {
              /* Taking address of OUTER in lower_send_shared_vars
                 might need regimplification of everything that uses the
                 variable.  */
              if (!task_shared_vars)
                task_shared_vars = BITMAP_ALLOC (NULL);
              bitmap_set_bit (task_shared_vars, DECL_UID (outer));
              TREE_ADDRESSABLE (outer) = 1;
            }
          return true;
        }
    }

  return false;
}
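
/* E.g., a local "int x" shared in a parallel region can use
   copy-in/copy-out, whereas an array, a static or extern variable, or
   any variable whose address has been taken must be passed by pointer
   so that every thread refers to the one shared object.  */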

/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
                 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
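/* MASK is a bitmask: bit 0 requests a field in CTX->RECORD_TYPE (what
   the receiver side reads), bit 1 a field in CTX->SRECORD_TYPE (what
   the task sender fills in), and 3 requests both.  E.g.,
   scan_sharing_clauses installs shared variables with mask 3, while a
   task firstprivate variable gets mask 1 and, if not global, mask 2.  */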

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
              || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
              || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
        {
          sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
          DECL_ABSTRACT_ORIGIN (sfield) = var;
          DECL_ALIGN (sfield) = DECL_ALIGN (field);
          DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
          TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
          insert_field_into_struct (ctx->srecord_type, sfield);
        }
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
        {
          tree t;

          ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
          ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
          for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
            {
              sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
              DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
              insert_field_into_struct (ctx->srecord_type, sfield);
              splay_tree_insert (ctx->sfield_map,
                                 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
                                 (splay_tree_value) sfield);
            }
        }
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
                                : ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
                       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
                       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label ();
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
           gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
               region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
             region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
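
/* E.g., for a parallel region enclosing a single loop region, the dump
   (inner regions indented by four columns) looks roughly like

       bb 2: GIMPLE_OMP_PARALLEL
           bb 3: GIMPLE_OMP_FOR
           bb 5: GIMPLE_OMP_CONTINUE
           bb 6: GIMPLE_OMP_RETURN
       bb 7: GIMPLE_OMP_RETURN

   with the basic block numbers depending on the flowgraph.  */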


/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
                struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
         regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
         regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_region = -1;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          TREE_CHAIN (new_f) = new_fields;
          walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
          walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field.  */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            goto do_private;
          else if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          gcc_assert (is_taskreg_ctx (ctx));
          decl = OMP_CLAUSE_DECL (c);
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          /* Global variables don't need to be copied,
             the receiver side will use them directly.  */
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            break;
          by_ref = use_pointer_for_field (decl, ctx);
          if (! TREE_READONLY (decl)
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              install_var_field (decl, by_ref, 3, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back.  */
          OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if (is_variable_sized (decl))
            {
              if (is_task_ctx (ctx))
                install_var_field (decl, false, 1, ctx);
              break;
            }
          else if (is_taskreg_ctx (ctx))
            {
              bool global
                = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
              by_ref = use_pointer_for_field (decl, NULL);

              if (is_task_ctx (ctx)
                  && (global || by_ref || is_reference (decl)))
                {
                  install_var_field (decl, false, 1, ctx);
                  if (!global)
                    install_var_field (decl, by_ref, 2, ctx);
                }
              else if (!global)
                install_var_field (decl, by_ref, 3, ctx);
            }
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
          /* FALLTHRU */

        case OMP_CLAUSE_COPYIN:
          decl = OMP_CLAUSE_DECL (c);
          by_ref = use_pointer_for_field (decl, NULL);
          install_var_field (decl, by_ref, 3, ctx);
          break;

        case OMP_CLAUSE_DEFAULT:
          ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
          break;

        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
          break;

        default:
          gcc_unreachable ();
        }
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            scan_array_reductions = true;
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
          if (is_variable_sized (decl))
            install_var_local (decl, ctx);
          fixup_remapped_decl (decl, ctx,
                               OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
                               && OMP_CLAUSE_PRIVATE_DEBUG (c));
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
              && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            scan_array_reductions = true;
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            fixup_remapped_decl (decl, ctx, false);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_DEFAULT:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
          && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
        }
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
               && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
        scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  tree name = DECL_ASSEMBLER_NAME (current_function_decl);
  size_t len = IDENTIFIER_LENGTH (name);
  char *tmp_name, *prefix;
  const char *suffix;

  suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
  prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
  memcpy (prefix, IDENTIFIER_POINTER (name), len);
  strcpy (prefix + len, suffix);
#ifndef NO_DOT_IN_LABEL
  prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
  prefix[len] = '$';
#endif
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
  return get_identifier (tmp_name);
}
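
/* E.g., within a function "foo", the child functions created here are
   named along the lines of "foo.omp_fn.0", "foo.omp_fn.1", ... on
   targets that allow '.' in labels; the trailing number comes from
   TMP_OMPFN_ID_NUM and makes each name unique.  */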

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
                                     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (FUNCTION_DECL, name, type);
  decl = lang_hooks.decls.pushdecl (decl);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
                      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  DECL_SOURCE_LOCATION (decl) = gimple_location (ctx->stmt);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}


/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
                          OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (TYPE_DECL, name, ctx->srecord_type);
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
        if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
            || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
          {
            *q = *p;
            *p = TREE_CHAIN (*p);
            TREE_CHAIN (*q) = NULL_TREE;
            q = &TREE_CHAIN (*q);
          }
        else
          p = &TREE_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
        layout_type (ctx->srecord_type);
      t = fold_convert (long_integer_type_node,
                        TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
                         TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}


/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);
}

/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}


/* Check OpenMP nesting restrictions.  */
static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_FOR:
          case GIMPLE_OMP_SECTIONS:
          case GIMPLE_OMP_SINGLE:
          case GIMPLE_OMP_ORDERED:
          case GIMPLE_OMP_MASTER:
          case GIMPLE_OMP_TASK:
            if (is_gimple_call (stmt))
              {
                warning (0, "barrier region may not be closely nested inside "
                            "of work-sharing, critical, ordered, master or "
                            "explicit task region");
                return;
              }
            warning (0, "work-sharing region may not be closely nested inside "
                        "of work-sharing, critical, ordered, master or explicit "
                        "task region");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_FOR:
          case GIMPLE_OMP_SECTIONS:
          case GIMPLE_OMP_SINGLE:
          case GIMPLE_OMP_TASK:
            warning (0, "master region may not be closely nested inside "
                        "of work-sharing or explicit task region");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_CRITICAL:
          case GIMPLE_OMP_TASK:
            warning (0, "ordered region may not be closely nested inside "
                        "of critical or explicit task region");
            return;
          case GIMPLE_OMP_FOR:
            if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
                                 OMP_CLAUSE_ORDERED) == NULL)
              warning (0, "ordered region must be closely nested inside "
                          "a loop region with an ordered clause");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
        if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
            && (gimple_omp_critical_name (stmt)
                == gimple_omp_critical_name (ctx->stmt)))
          {
            warning (0, "critical region may not be nested inside a critical "
                        "region with the same name");
            return;
          }
      break;
    default:
      break;
    }
}
1886
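/* As an illustration of the restrictions enforced above (example user
   code, not part of this file), the inner loop below is diagnosed
   because it is closely nested in a single region instead of a
   parallel:

     #pragma omp parallel
     #pragma omp single
       {
     #pragma omp for    <-- "work-sharing region may not be closely
                            nested inside of work-sharing, critical,
                            ordered, master or explicit task region"
         for (i = 0; i < 16; i++)
           work (i);
       }

   An intervening #pragma omp parallel would make the nesting legal,
   which is why the walk up the context chain stops as soon as it
   reaches a GIMPLE_OMP_PARALLEL. */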
1887
1888 /* Helper function for scan_omp.
1889
1890 Callback for walk_tree or for operands in walk_gimple_stmt, used
1891 to scan for OpenMP directives in TP. */
1892
1893 static tree
1894 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1895 {
1896 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1897 omp_context *ctx = (omp_context *) wi->info;
1898 tree t = *tp;
1899
1900 switch (TREE_CODE (t))
1901 {
1902 case VAR_DECL:
1903 case PARM_DECL:
1904 case LABEL_DECL:
1905 case RESULT_DECL:
1906 if (ctx)
1907 *tp = remap_decl (t, &ctx->cb);
1908 break;
1909
1910 default:
1911 if (ctx && TYPE_P (t))
1912 *tp = remap_type (t, &ctx->cb);
1913 else if (!DECL_P (t))
1914 *walk_subtrees = 1;
1915 break;
1916 }
1917
1918 return NULL_TREE;
1919 }
1920
1921
1922 /* Helper function for scan_omp.
1923
1924 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1925 the current statement in GSI. */
1926
1927 static tree
1928 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1929 struct walk_stmt_info *wi)
1930 {
1931 gimple stmt = gsi_stmt (*gsi);
1932 omp_context *ctx = (omp_context *) wi->info;
1933
1934 if (gimple_has_location (stmt))
1935 input_location = gimple_location (stmt);
1936
1937 /* Check the OpenMP nesting restrictions. */
1938 if (ctx != NULL)
1939 {
1940 if (is_gimple_omp (stmt))
1941 check_omp_nesting_restrictions (stmt, ctx);
1942 else if (is_gimple_call (stmt))
1943 {
1944 tree fndecl = gimple_call_fndecl (stmt);
1945 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1946 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1947 check_omp_nesting_restrictions (stmt, ctx);
1948 }
1949 }
1950
1951 *handled_ops_p = true;
1952
1953 switch (gimple_code (stmt))
1954 {
1955 case GIMPLE_OMP_PARALLEL:
1956 taskreg_nesting_level++;
1957 scan_omp_parallel (gsi, ctx);
1958 taskreg_nesting_level--;
1959 break;
1960
1961 case GIMPLE_OMP_TASK:
1962 taskreg_nesting_level++;
1963 scan_omp_task (gsi, ctx);
1964 taskreg_nesting_level--;
1965 break;
1966
1967 case GIMPLE_OMP_FOR:
1968 scan_omp_for (stmt, ctx);
1969 break;
1970
1971 case GIMPLE_OMP_SECTIONS:
1972 scan_omp_sections (stmt, ctx);
1973 break;
1974
1975 case GIMPLE_OMP_SINGLE:
1976 scan_omp_single (stmt, ctx);
1977 break;
1978
1979 case GIMPLE_OMP_SECTION:
1980 case GIMPLE_OMP_MASTER:
1981 case GIMPLE_OMP_ORDERED:
1982 case GIMPLE_OMP_CRITICAL:
1983 ctx = new_omp_context (stmt, ctx);
1984 scan_omp (gimple_omp_body (stmt), ctx);
1985 break;
1986
1987 case GIMPLE_BIND:
1988 {
1989 tree var;
1990
1991 *handled_ops_p = false;
1992 if (ctx)
1993 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
1994 insert_decl_map (&ctx->cb, var, var);
1995 }
1996 break;
1997 default:
1998 *handled_ops_p = false;
1999 break;
2000 }
2001
2002 return NULL_TREE;
2003 }
2004
2005
2006 /* Scan all the statements starting at the current statement. CTX
2007 contains context information about the OpenMP directives and
2008 clauses found during the scan. */
2009
2010 static void
2011 scan_omp (gimple_seq body, omp_context *ctx)
2012 {
2013 location_t saved_location;
2014 struct walk_stmt_info wi;
2015
2016 memset (&wi, 0, sizeof (wi));
2017 wi.info = ctx;
2018 wi.want_locations = true;
2019
2020 saved_location = input_location;
2021 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2022 input_location = saved_location;
2023 }
2024 \f
2025 /* Re-gimplification and code generation routines. */
2026
2027 /* Build a call to GOMP_barrier. */
2028
2029 static tree
2030 build_omp_barrier (void)
2031 {
2032 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2033 }
2034
2035 /* If a context was created for STMT when it was scanned, return it. */
2036
2037 static omp_context *
2038 maybe_lookup_ctx (gimple stmt)
2039 {
2040 splay_tree_node n;
2041 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2042 return n ? (omp_context *) n->value : NULL;
2043 }
2044
2045
2046 /* Find the mapping for DECL in CTX or the immediately enclosing
2047 context that has a mapping for DECL.
2048
2049 If CTX is a nested parallel directive, we may have to use the decl
2050 mappings created in CTX's parent context. Suppose that we have the
2051 following parallel nesting (variable UIDs shown for clarity):
2052
2053 iD.1562 = 0;
2054 #omp parallel shared(iD.1562) -> outer parallel
2055 iD.1562 = iD.1562 + 1;
2056
2057 #omp parallel shared (iD.1562) -> inner parallel
2058 iD.1562 = iD.1562 - 1;
2059
2060 Each parallel structure will create a distinct .omp_data_s structure
2061 for copying iD.1562 in/out of the directive:
2062
2063 outer parallel .omp_data_s.1.i -> iD.1562
2064 inner parallel .omp_data_s.2.i -> iD.1562
2065
2066 A shared variable mapping will produce a copy-out operation before
2067 the parallel directive and a copy-in operation after it. So, in
2068 this case we would have:
2069
2070 iD.1562 = 0;
2071 .omp_data_o.1.i = iD.1562;
2072 #omp parallel shared(iD.1562) -> outer parallel
2073 .omp_data_i.1 = &.omp_data_o.1
2074 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2075
2076 .omp_data_o.2.i = iD.1562; -> **
2077 #omp parallel shared(iD.1562) -> inner parallel
2078 .omp_data_i.2 = &.omp_data_o.2
2079 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2080
2081
2082 ** This is a problem. The symbol iD.1562 cannot be referenced
2083 inside the body of the outer parallel region. But since we are
2084 emitting this copy operation while expanding the inner parallel
2085 directive, we need to access the CTX structure of the outer
2086 parallel directive to get the correct mapping:
2087
2088 .omp_data_o.2.i = .omp_data_i.1->i
2089
2090 Since there may be other workshare or parallel directives enclosing
2091 the parallel directive, it may be necessary to walk up the context
2092 parent chain. This is not a problem in general because nested
2093 parallelism happens only rarely. */
2094
2095 static tree
2096 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2097 {
2098 tree t;
2099 omp_context *up;
2100
2101 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2102 t = maybe_lookup_decl (decl, up);
2103
2104 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2105
2106 return t ? t : decl;
2107 }
2108
2109
2110 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2111 in outer contexts. */
2112
2113 static tree
2114 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2115 {
2116 tree t;
2117 omp_context *up;
2118
2119 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2120 t = maybe_lookup_decl (decl, up);
2121
2122 return t ? t : decl;
2123 }
2124
2125
2126 /* Construct the initialization value for reduction CLAUSE. */
2127
2128 tree
2129 omp_reduction_init (tree clause, tree type)
2130 {
2131 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2132 {
2133 case PLUS_EXPR:
2134 case MINUS_EXPR:
2135 case BIT_IOR_EXPR:
2136 case BIT_XOR_EXPR:
2137 case TRUTH_OR_EXPR:
2138 case TRUTH_ORIF_EXPR:
2139 case TRUTH_XOR_EXPR:
2140 case NE_EXPR:
2141 return fold_convert (type, integer_zero_node);
2142
2143 case MULT_EXPR:
2144 case TRUTH_AND_EXPR:
2145 case TRUTH_ANDIF_EXPR:
2146 case EQ_EXPR:
2147 return fold_convert (type, integer_one_node);
2148
2149 case BIT_AND_EXPR:
2150 return fold_convert (type, integer_minus_one_node);
2151
2152 case MAX_EXPR:
2153 if (SCALAR_FLOAT_TYPE_P (type))
2154 {
2155 REAL_VALUE_TYPE max, min;
2156 if (HONOR_INFINITIES (TYPE_MODE (type)))
2157 {
2158 real_inf (&max);
2159 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2160 }
2161 else
2162 real_maxval (&min, 1, TYPE_MODE (type));
2163 return build_real (type, min);
2164 }
2165 else
2166 {
2167 gcc_assert (INTEGRAL_TYPE_P (type));
2168 return TYPE_MIN_VALUE (type);
2169 }
2170
2171 case MIN_EXPR:
2172 if (SCALAR_FLOAT_TYPE_P (type))
2173 {
2174 REAL_VALUE_TYPE max;
2175 if (HONOR_INFINITIES (TYPE_MODE (type)))
2176 real_inf (&max);
2177 else
2178 real_maxval (&max, 0, TYPE_MODE (type));
2179 return build_real (type, max);
2180 }
2181 else
2182 {
2183 gcc_assert (INTEGRAL_TYPE_P (type));
2184 return TYPE_MAX_VALUE (type);
2185 }
2186
2187 default:
2188 gcc_unreachable ();
2189 }
2190 }
2191
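/* For illustration (not part of this file), with an int reduction
   variable the initializers chosen above are:

     reduction(+:x), (-:x), (|:x), (^:x)  ->  x = 0
     reduction(*:x), (&&:x)               ->  x = 1
     reduction(&:x)                       ->  x = ~0 (i.e. -1)
     reduction(max:x)                     ->  x = INT_MIN
     reduction(min:x)                     ->  x = INT_MAX

   For floating-point max/min the code prefers -inf/+inf when the mode
   honors infinities, and the extreme finite values otherwise. */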
2192 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2193 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2194 private variables. Initialization statements go in ILIST, while calls
2195 to destructors go in DLIST. */
2196
2197 static void
2198 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2199 omp_context *ctx)
2200 {
2201 gimple_stmt_iterator diter;
2202 tree c, dtor, copyin_seq, x, ptr;
2203 bool copyin_by_ref = false;
2204 bool lastprivate_firstprivate = false;
2205 int pass;
2206
2207 *dlist = gimple_seq_alloc ();
2208 diter = gsi_start (*dlist);
2209 copyin_seq = NULL;
2210
2211 /* Do all the fixed sized types in the first pass, and the variable sized
2212 types in the second pass. This makes sure that the scalar arguments to
2213 the variable sized types are processed before we use them in the
2214 variable sized operations. */
2215 for (pass = 0; pass < 2; ++pass)
2216 {
2217 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2218 {
2219 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2220 tree var, new_var;
2221 bool by_ref;
2222
2223 switch (c_kind)
2224 {
2225 case OMP_CLAUSE_PRIVATE:
2226 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2227 continue;
2228 break;
2229 case OMP_CLAUSE_SHARED:
2230 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2231 {
2232 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2233 continue;
2234 }
 /* FALLTHRU */
2235 case OMP_CLAUSE_FIRSTPRIVATE:
2236 case OMP_CLAUSE_COPYIN:
2237 case OMP_CLAUSE_REDUCTION:
2238 break;
2239 case OMP_CLAUSE_LASTPRIVATE:
2240 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2241 {
2242 lastprivate_firstprivate = true;
2243 if (pass != 0)
2244 continue;
2245 }
2246 break;
2247 default:
2248 continue;
2249 }
2250
2251 new_var = var = OMP_CLAUSE_DECL (c);
2252 if (c_kind != OMP_CLAUSE_COPYIN)
2253 new_var = lookup_decl (var, ctx);
2254
2255 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2256 {
2257 if (pass != 0)
2258 continue;
2259 }
2260 else if (is_variable_sized (var))
2261 {
2262 /* For variable sized types, we need to allocate the
2263 actual storage here. Call alloca and store the
2264 result in the pointer decl that we created elsewhere. */
2265 if (pass == 0)
2266 continue;
2267
2268 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2269 {
2270 gimple stmt;
2271 tree tmp;
2272
2273 ptr = DECL_VALUE_EXPR (new_var);
2274 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2275 ptr = TREE_OPERAND (ptr, 0);
2276 gcc_assert (DECL_P (ptr));
2277 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2278
2279 /* void *tmp = __builtin_alloca (x); */
2280 stmt
2281 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2282 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2283 gimple_add_tmp_var (tmp);
2284 gimple_call_set_lhs (stmt, tmp);
2285
2286 gimple_seq_add_stmt (ilist, stmt);
2287
2288 x = fold_convert (TREE_TYPE (ptr), tmp);
2289 gimplify_assign (ptr, x, ilist);
2290 }
2291 }
2292 else if (is_reference (var))
2293 {
2294 /* For references that are being privatized for Fortran,
2295 allocate new backing storage for the new pointer
2296 variable. This allows us to avoid changing all the
2297 code that expects a pointer to something that expects
2298 a direct variable. Note that this doesn't apply to
2299 C++, since reference types are disallowed in data
2300 sharing clauses there, except for NRV optimized
2301 return values. */
2302 if (pass == 0)
2303 continue;
2304
2305 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2306 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2307 {
2308 x = build_receiver_ref (var, false, ctx);
2309 x = build_fold_addr_expr (x);
2310 }
2311 else if (TREE_CONSTANT (x))
2312 {
2313 const char *name = NULL;
2314 if (DECL_NAME (var))
2315 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2316
2317 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2318 name);
2319 gimple_add_tmp_var (x);
2320 TREE_ADDRESSABLE (x) = 1;
2321 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
2322 }
2323 else
2324 {
2325 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2326 x = fold_convert (TREE_TYPE (new_var), x);
2327 }
2328
2329 gimplify_assign (new_var, x, ilist);
2330
2331 new_var = build_fold_indirect_ref (new_var);
2332 }
2333 else if (c_kind == OMP_CLAUSE_REDUCTION
2334 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2335 {
2336 if (pass == 0)
2337 continue;
2338 }
2339 else if (pass != 0)
2340 continue;
2341
2342 switch (OMP_CLAUSE_CODE (c))
2343 {
2344 case OMP_CLAUSE_SHARED:
2345 /* Shared global vars are just accessed directly. */
2346 if (is_global_var (new_var))
2347 break;
2348 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2349 needs to be delayed until after fixup_child_record_type so
2350 that we get the correct type during the dereference. */
2351 by_ref = use_pointer_for_field (var, ctx);
2352 x = build_receiver_ref (var, by_ref, ctx);
2353 SET_DECL_VALUE_EXPR (new_var, x);
2354 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2355
2356 /* ??? If VAR is not passed by reference, and the variable
2357 hasn't been initialized yet, then we'll get a warning for
2358 the store into the omp_data_s structure. Ideally, we'd be
2359 able to notice this and not store anything at all, but
2360 we're generating code too early. Suppress the warning. */
2361 if (!by_ref)
2362 TREE_NO_WARNING (var) = 1;
2363 break;
2364
2365 case OMP_CLAUSE_LASTPRIVATE:
2366 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2367 break;
2368 /* FALLTHRU */
2369
2370 case OMP_CLAUSE_PRIVATE:
2371 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2372 x = build_outer_var_ref (var, ctx);
2373 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2374 {
2375 if (is_task_ctx (ctx))
2376 x = build_receiver_ref (var, false, ctx);
2377 else
2378 x = build_outer_var_ref (var, ctx);
2379 }
2380 else
2381 x = NULL;
2382 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2383 if (x)
2384 gimplify_and_add (x, ilist);
2385 /* FALLTHRU */
2386
2387 do_dtor:
2388 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2389 if (x)
2390 {
2391 gimple_seq tseq = NULL;
2392
2393 dtor = x;
2394 gimplify_stmt (&dtor, &tseq);
2395 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2396 }
2397 break;
2398
2399 case OMP_CLAUSE_FIRSTPRIVATE:
2400 if (is_task_ctx (ctx))
2401 {
2402 if (is_reference (var) || is_variable_sized (var))
2403 goto do_dtor;
2404 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2405 ctx))
2406 || use_pointer_for_field (var, NULL))
2407 {
2408 x = build_receiver_ref (var, false, ctx);
2409 SET_DECL_VALUE_EXPR (new_var, x);
2410 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2411 goto do_dtor;
2412 }
2413 }
2414 x = build_outer_var_ref (var, ctx);
2415 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2416 gimplify_and_add (x, ilist);
2417 goto do_dtor;
2419
2420 case OMP_CLAUSE_COPYIN:
2421 by_ref = use_pointer_for_field (var, NULL);
2422 x = build_receiver_ref (var, by_ref, ctx);
2423 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2424 append_to_statement_list (x, &copyin_seq);
2425 copyin_by_ref |= by_ref;
2426 break;
2427
2428 case OMP_CLAUSE_REDUCTION:
2429 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2430 {
2431 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2432 x = build_outer_var_ref (var, ctx);
2433
2434 if (is_reference (var))
2435 x = build_fold_addr_expr (x);
2436 SET_DECL_VALUE_EXPR (placeholder, x);
2437 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2438 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2439 gimple_seq_add_seq (ilist,
2440 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2441 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2442 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2443 }
2444 else
2445 {
2446 x = omp_reduction_init (c, TREE_TYPE (new_var));
2447 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2448 gimplify_assign (new_var, x, ilist);
2449 }
2450 break;
2451
2452 default:
2453 gcc_unreachable ();
2454 }
2455 }
2456 }
2457
2458 /* The copyin sequence is not to be executed by the main thread, since
2459 that would result in self-copies. That may be harmless for scalars,
2460 but it certainly is not for a C++ operator=. */
2461 if (copyin_seq)
2462 {
2463 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2464 x = build2 (NE_EXPR, boolean_type_node, x,
2465 build_int_cst (TREE_TYPE (x), 0));
2466 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2467 gimplify_and_add (x, ilist);
2468 }
2469
2470 /* If any copyin variable is passed by reference, we must ensure the
2471 master thread doesn't modify it before it is copied over in all
2472 threads. Similarly for variables in both firstprivate and
2473 lastprivate clauses we need to ensure the lastprivate copying
2474 happens after firstprivate copying in all threads. */
2475 if (copyin_by_ref || lastprivate_firstprivate)
2476 gimplify_and_add (build_omp_barrier (), ilist);
2477 }
2478
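/* A sketch of the receiver-side code the above produces (illustrative
   pseudo-GIMPLE; field and temporary names are invented):

     #pragma omp parallel firstprivate(a) private(b)

   yields roughly, at the start of the child function (ILIST):

     a.1 = .omp_data_i->a;        <-- firstprivate copy-constructed
     b.2 = <default-construct>;   <-- private, if the language needs it

   while any destructor calls for a.1 and b.2 are accumulated in DLIST
   and emitted when the construct ends. */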
2479
2480 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2481 both parallel and workshare constructs. PREDICATE may be NULL if it's
2482 always true. */
2483
2484 static void
2485 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2486 omp_context *ctx)
2487 {
2488 tree x, c, label = NULL;
2489 bool par_clauses = false;
2490
2491 /* Early exit if there are no lastprivate clauses. */
2492 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2493 if (clauses == NULL)
2494 {
2495 /* If this was a workshare clause, see if it had been combined
2496 with its parallel. In that case, look for the clauses on the
2497 parallel statement itself. */
2498 if (is_parallel_ctx (ctx))
2499 return;
2500
2501 ctx = ctx->outer;
2502 if (ctx == NULL || !is_parallel_ctx (ctx))
2503 return;
2504
2505 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2506 OMP_CLAUSE_LASTPRIVATE);
2507 if (clauses == NULL)
2508 return;
2509 par_clauses = true;
2510 }
2511
2512 if (predicate)
2513 {
2514 gimple stmt;
2515 tree label_true, arm1, arm2;
2516
2517 label = create_artificial_label ();
2518 label_true = create_artificial_label ();
2519 arm1 = TREE_OPERAND (predicate, 0);
2520 arm2 = TREE_OPERAND (predicate, 1);
2521 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2522 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2523 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2524 label_true, label);
2525 gimple_seq_add_stmt (stmt_list, stmt);
2526 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2527 }
2528
2529 for (c = clauses; c ;)
2530 {
2531 tree var, new_var;
2532
2533 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2534 {
2535 var = OMP_CLAUSE_DECL (c);
2536 new_var = lookup_decl (var, ctx);
2537
2538 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2539 {
2540 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2541 gimple_seq_add_seq (stmt_list,
2542 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2543 }
2544 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2545
2546 x = build_outer_var_ref (var, ctx);
2547 if (is_reference (var))
2548 new_var = build_fold_indirect_ref (new_var);
2549 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2550 gimplify_and_add (x, stmt_list);
2551 }
2552 c = OMP_CLAUSE_CHAIN (c);
2553 if (c == NULL && !par_clauses)
2554 {
2555 /* If this was a workshare clause, see if it had been combined
2556 with its parallel. In that case, continue looking for the
2557 clauses also on the parallel statement itself. */
2558 if (is_parallel_ctx (ctx))
2559 break;
2560
2561 ctx = ctx->outer;
2562 if (ctx == NULL || !is_parallel_ctx (ctx))
2563 break;
2564
2565 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2566 OMP_CLAUSE_LASTPRIVATE);
2567 par_clauses = true;
2568 }
2569 }
2570
2571 if (label)
2572 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2573 }
2574
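/* A sketch of the sequence built above (illustrative pseudo-GIMPLE).
   For

     #pragma omp for lastprivate(x)

   with PREDICATE of the form "V == LAST", the tail of the loop body
   becomes roughly:

     if (V == LAST) goto label_true; else goto label;
     label_true:
       x = x.3;        <-- copy the private value back out
     label:

   so only the thread that ran the sequentially last iteration stores
   to the original variable. */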
2575
2576 /* Generate code to implement the REDUCTION clauses. */
2577
2578 static void
2579 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2580 {
2581 gimple_seq sub_seq = NULL;
2582 gimple stmt;
2583 tree x, c;
2584 int count = 0;
2585
2586 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2587 update in that case, otherwise use a lock. */
2588 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2589 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2590 {
2591 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2592 {
2593 /* Never use OMP_ATOMIC for array reductions. */
2594 count = -1;
2595 break;
2596 }
2597 count++;
2598 }
2599
2600 if (count == 0)
2601 return;
2602
2603 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2604 {
2605 tree var, ref, new_var;
2606 enum tree_code code;
2607
2608 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2609 continue;
2610
2611 var = OMP_CLAUSE_DECL (c);
2612 new_var = lookup_decl (var, ctx);
2613 if (is_reference (var))
2614 new_var = build_fold_indirect_ref (new_var);
2615 ref = build_outer_var_ref (var, ctx);
2616 code = OMP_CLAUSE_REDUCTION_CODE (c);
2617
2618 /* reduction(-:var) sums up the partial results, so it acts
2619 identically to reduction(+:var). */
2620 if (code == MINUS_EXPR)
2621 code = PLUS_EXPR;
2622
2623 if (count == 1)
2624 {
2625 tree addr = build_fold_addr_expr (ref);
2626
2627 addr = save_expr (addr);
2628 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2629 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2630 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2631 gimplify_and_add (x, stmt_seqp);
2632 return;
2633 }
2634
2635 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2636 {
2637 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2638
2639 if (is_reference (var))
2640 ref = build_fold_addr_expr (ref);
2641 SET_DECL_VALUE_EXPR (placeholder, ref);
2642 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2643 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2644 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2645 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2646 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2647 }
2648 else
2649 {
2650 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2651 ref = build_outer_var_ref (var, ctx);
2652 gimplify_assign (ref, x, &sub_seq);
2653 }
2654 }
2655
2656 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2657 gimple_seq_add_stmt (stmt_seqp, stmt);
2658
2659 gimple_seq_add_seq (stmt_seqp, sub_seq);
2660
2661 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2662 gimple_seq_add_stmt (stmt_seqp, stmt);
2663 }
2664
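/* A sketch of the merge code built above (illustrative pseudo-GIMPLE).
   With exactly one scalar reduction(+:s) clause the update is done
   with an atomic operation on the shared copy:

     *&s = *&s + s.4;             <-- OMP_ATOMIC

   With several clauses, or an array/placeholder reduction, the merges
   are instead serialized under a global lock:

     GOMP_atomic_start ();
     s = s + s.4;
     t = t * t.5;
     GOMP_atomic_end (); */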
2665
2666 /* Generate code to implement the COPYPRIVATE clauses. */
2667
2668 static void
2669 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2670 omp_context *ctx)
2671 {
2672 tree c;
2673
2674 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2675 {
2676 tree var, ref, x;
2677 bool by_ref;
2678
2679 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2680 continue;
2681
2682 var = OMP_CLAUSE_DECL (c);
2683 by_ref = use_pointer_for_field (var, NULL);
2684
2685 ref = build_sender_ref (var, ctx);
2686 x = lookup_decl_in_outer_ctx (var, ctx);
2687 x = by_ref ? build_fold_addr_expr (x) : x;
2688 gimplify_assign (ref, x, slist);
2689
2690 ref = build_receiver_ref (var, by_ref, ctx);
2691 if (is_reference (var))
2692 {
2693 ref = build_fold_indirect_ref (ref);
2694 var = build_fold_indirect_ref (var);
2695 }
2696 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2697 gimplify_and_add (x, rlist);
2698 }
2699 }
2700
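/* Illustrative use of the two sequences built above: for

     #pragma omp single copyprivate(x)

   SLIST runs in the thread that executed the single region and stores
   x (or &x, when passed by reference) into the copy structure, while
   RLIST runs in every other thread and assigns the broadcast value
   back into that thread's x. */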
2701
2702 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2703 and REDUCTION from the sender (aka parent) side. */
2704
2705 static void
2706 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2707 omp_context *ctx)
2708 {
2709 tree c;
2710
2711 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2712 {
2713 tree val, ref, x, var;
2714 bool by_ref, do_in = false, do_out = false;
2715
2716 switch (OMP_CLAUSE_CODE (c))
2717 {
2718 case OMP_CLAUSE_PRIVATE:
2719 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2720 break;
2721 continue;
2722 case OMP_CLAUSE_FIRSTPRIVATE:
2723 case OMP_CLAUSE_COPYIN:
2724 case OMP_CLAUSE_LASTPRIVATE:
2725 case OMP_CLAUSE_REDUCTION:
2726 break;
2727 default:
2728 continue;
2729 }
2730
2731 val = OMP_CLAUSE_DECL (c);
2732 var = lookup_decl_in_outer_ctx (val, ctx);
2733
2734 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2735 && is_global_var (var))
2736 continue;
2737 if (is_variable_sized (val))
2738 continue;
2739 by_ref = use_pointer_for_field (val, NULL);
2740
2741 switch (OMP_CLAUSE_CODE (c))
2742 {
2743 case OMP_CLAUSE_PRIVATE:
2744 case OMP_CLAUSE_FIRSTPRIVATE:
2745 case OMP_CLAUSE_COPYIN:
2746 do_in = true;
2747 break;
2748
2749 case OMP_CLAUSE_LASTPRIVATE:
2750 if (by_ref || is_reference (val))
2751 {
2752 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2753 continue;
2754 do_in = true;
2755 }
2756 else
2757 {
2758 do_out = true;
2759 if (lang_hooks.decls.omp_private_outer_ref (val))
2760 do_in = true;
2761 }
2762 break;
2763
2764 case OMP_CLAUSE_REDUCTION:
2765 do_in = true;
2766 do_out = !(by_ref || is_reference (val));
2767 break;
2768
2769 default:
2770 gcc_unreachable ();
2771 }
2772
2773 if (do_in)
2774 {
2775 ref = build_sender_ref (val, ctx);
2776 x = by_ref ? build_fold_addr_expr (var) : var;
2777 gimplify_assign (ref, x, ilist);
2778 if (is_task_ctx (ctx))
2779 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2780 }
2781
2782 if (do_out)
2783 {
2784 ref = build_sender_ref (val, ctx);
2785 gimplify_assign (var, ref, olist);
2786 }
2787 }
2788 }
2789
2790 /* Generate code to implement SHARED from the sender (aka parent)
2791 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2792 list things that got automatically shared. */
2793
2794 static void
2795 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2796 {
2797 tree var, ovar, nvar, f, x, record_type;
2798
2799 if (ctx->record_type == NULL)
2800 return;
2801
2802 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2803 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2804 {
2805 ovar = DECL_ABSTRACT_ORIGIN (f);
2806 nvar = maybe_lookup_decl (ovar, ctx);
2807 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2808 continue;
2809
2810 /* If CTX is a nested parallel directive, find the immediately
2811 enclosing parallel or workshare construct that contains a
2812 mapping for OVAR. */
2813 var = lookup_decl_in_outer_ctx (ovar, ctx);
2814
2815 if (use_pointer_for_field (ovar, ctx))
2816 {
2817 x = build_sender_ref (ovar, ctx);
2818 var = build_fold_addr_expr (var);
2819 gimplify_assign (x, var, ilist);
2820 }
2821 else
2822 {
2823 x = build_sender_ref (ovar, ctx);
2824 gimplify_assign (x, var, ilist);
2825
2826 if (!TREE_READONLY (var)
2827 /* We don't need to receive a new reference to a result
2828 or parm decl. In fact we may not store to it as we will
2829 invalidate any pending RSO and generate wrong gimple
2830 during inlining. */
2831 && !((TREE_CODE (var) == RESULT_DECL
2832 || TREE_CODE (var) == PARM_DECL)
2833 && DECL_BY_REFERENCE (var)))
2834 {
2835 x = build_sender_ref (ovar, ctx);
2836 gimplify_assign (var, x, olist);
2837 }
2838 }
2839 }
2840 }
2841
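/* A sketch of the sender-side code built above (illustrative
   pseudo-GIMPLE). For a variable "i" shared by value:

     .omp_data_o.1.i = i;         <-- ILIST, before the region starts
     ...
     i = .omp_data_o.1.i;         <-- OLIST, after the region ends

   whereas for a variable for which use_pointer_for_field is true only
   its address is sent and no copy-back is needed:

     .omp_data_o.1.v = &v;        <-- ILIST only */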
2842
2843 /* A convenience function to build an empty GIMPLE_COND with just the
2844 condition. */
2845
2846 static gimple
2847 gimple_build_cond_empty (tree cond)
2848 {
2849 enum tree_code pred_code;
2850 tree lhs, rhs;
2851
2852 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2853 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2854 }
2855
2856
2857 /* Build the function calls to GOMP_parallel_start etc to actually
2858 generate the parallel operation. REGION is the parallel region
2859 being expanded. BB is the block where to insert the code. WS_ARGS
2860 will be set if this is a call to a combined parallel+workshare
2861 construct, it contains the list of additional arguments needed by
2862 the workshare construct. */
2863
2864 static void
2865 expand_parallel_call (struct omp_region *region, basic_block bb,
2866 gimple entry_stmt, tree ws_args)
2867 {
2868 tree t, t1, t2, val, cond, c, clauses;
2869 gimple_stmt_iterator gsi;
2870 gimple stmt;
2871 int start_ix;
2872
2873 clauses = gimple_omp_parallel_clauses (entry_stmt);
2874
2875 /* Determine what flavor of GOMP_parallel_start we will be
2876 emitting. */
2877 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2878 if (is_combined_parallel (region))
2879 {
2880 switch (region->inner->type)
2881 {
2882 case GIMPLE_OMP_FOR:
2883 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2884 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2885 + (region->inner->sched_kind
2886 == OMP_CLAUSE_SCHEDULE_RUNTIME
2887 ? 3 : region->inner->sched_kind);
2888 break;
2889 case GIMPLE_OMP_SECTIONS:
2890 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2891 break;
2892 default:
2893 gcc_unreachable ();
2894 }
2895 }
2896
2897 /* By default, the value of NUM_THREADS is zero (selected at run time)
2898 and there is no conditional. */
2899 cond = NULL_TREE;
2900 val = build_int_cst (unsigned_type_node, 0);
2901
2902 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2903 if (c)
2904 cond = OMP_CLAUSE_IF_EXPR (c);
2905
2906 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2907 if (c)
2908 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2909
2910 /* Ensure 'val' is of the correct type. */
2911 val = fold_convert (unsigned_type_node, val);
2912
2913 /* If we found the clause 'if (cond)', build either (cond == 0)
2914 (0, i.e. runtime-selected, when COND holds, else 1u) or (cond ? val : 1u). */
2915 if (cond)
2916 {
2917 gimple_stmt_iterator gsi;
2918
2919 cond = gimple_boolify (cond);
2920
2921 if (integer_zerop (val))
2922 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2923 build_int_cst (TREE_TYPE (cond), 0));
2924 else
2925 {
2926 basic_block cond_bb, then_bb, else_bb;
2927 edge e, e_then, e_else;
2928 tree tmp_then, tmp_else, tmp_join, tmp_var;
2929
2930 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2931 if (gimple_in_ssa_p (cfun))
2932 {
2933 tmp_then = make_ssa_name (tmp_var, NULL);
2934 tmp_else = make_ssa_name (tmp_var, NULL);
2935 tmp_join = make_ssa_name (tmp_var, NULL);
2936 }
2937 else
2938 {
2939 tmp_then = tmp_var;
2940 tmp_else = tmp_var;
2941 tmp_join = tmp_var;
2942 }
2943
2944 e = split_block (bb, NULL);
2945 cond_bb = e->src;
2946 bb = e->dest;
2947 remove_edge (e);
2948
2949 then_bb = create_empty_bb (cond_bb);
2950 else_bb = create_empty_bb (then_bb);
2951 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2952 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2953
2954 stmt = gimple_build_cond_empty (cond);
2955 gsi = gsi_start_bb (cond_bb);
2956 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2957
2958 gsi = gsi_start_bb (then_bb);
2959 stmt = gimple_build_assign (tmp_then, val);
2960 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2961
2962 gsi = gsi_start_bb (else_bb);
2963 stmt = gimple_build_assign
2964 (tmp_else, build_int_cst (unsigned_type_node, 1));
2965 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2966
2967 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2968 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2969 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2970 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2971
2972 if (gimple_in_ssa_p (cfun))
2973 {
2974 gimple phi = create_phi_node (tmp_join, bb);
2975 SSA_NAME_DEF_STMT (tmp_join) = phi;
2976 add_phi_arg (phi, tmp_then, e_then);
2977 add_phi_arg (phi, tmp_else, e_else);
2978 }
2979
2980 val = tmp_join;
2981 }
2982
2983 gsi = gsi_start_bb (bb);
2984 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
2985 false, GSI_CONTINUE_LINKING);
2986 }
2987
2988 gsi = gsi_last_bb (bb);
2989 t = gimple_omp_parallel_data_arg (entry_stmt);
2990 if (t == NULL)
2991 t1 = null_pointer_node;
2992 else
2993 t1 = build_fold_addr_expr (t);
2994 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
2995
2996 if (ws_args)
2997 {
2998 tree args = tree_cons (NULL, t2,
2999 tree_cons (NULL, t1,
3000 tree_cons (NULL, val, ws_args)));
3001 t = build_function_call_expr (built_in_decls[start_ix], args);
3002 }
3003 else
3004 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3005
3006 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3007 false, GSI_CONTINUE_LINKING);
3008
3009 t = gimple_omp_parallel_data_arg (entry_stmt);
3010 if (t == NULL)
3011 t = null_pointer_node;
3012 else
3013 t = build_fold_addr_expr (t);
3014 t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3015 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3016 false, GSI_CONTINUE_LINKING);
3017
3018 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3019 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3020 false, GSI_CONTINUE_LINKING);
3021 }
3022
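/* The net effect of the expansion above, e.g. for

     #pragma omp parallel num_threads (4)

   is roughly the following sequence in the parent (illustrative; a
   combined parallel+workshare region calls one of the
   GOMP_parallel_loop_* or GOMP_parallel_sections_start entry points
   instead, with WS_ARGS appended):

     __builtin_GOMP_parallel_start (child_fn, &.omp_data_o, 4);
     child_fn (&.omp_data_o);     <-- the parent works as one thread
     __builtin_GOMP_parallel_end (); */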
3023
3024 /* Build the function call to GOMP_task to actually
3025 generate the task operation. BB is the block where to insert the code. */
3026
3027 static void
3028 expand_task_call (basic_block bb, gimple entry_stmt)
3029 {
3030 tree t, t1, t2, t3, flags, cond, c, clauses;
3031 gimple_stmt_iterator gsi;
3032
3033 clauses = gimple_omp_task_clauses (entry_stmt);
3034
3035 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3036 if (c)
3037 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3038 else
3039 cond = boolean_true_node;
3040
3041 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3042 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3043
3044 gsi = gsi_last_bb (bb);
3045 t = gimple_omp_task_data_arg (entry_stmt);
3046 if (t == NULL)
3047 t2 = null_pointer_node;
3048 else
3049 t2 = build_fold_addr_expr (t);
3050 t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
3051 t = gimple_omp_task_copy_fn (entry_stmt);
3052 if (t == NULL)
3053 t3 = null_pointer_node;
3054 else
3055 t3 = build_fold_addr_expr (t);
3056
3057 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3058 gimple_omp_task_arg_size (entry_stmt),
3059 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3060
3061 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3062 false, GSI_CONTINUE_LINKING);
3063 }
3064
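/* The resulting call, e.g. for

     #pragma omp task if (0) untied

   is roughly (illustrative):

     __builtin_GOMP_task (child_fn, &.omp_data_o, cpyfn,
                          arg_size, arg_align,
                          cond,     <-- boolified IF clause, here false
                          flags);   <-- bit 0 set by the untied clause

   With a false COND the runtime executes the task immediately rather
   than queueing it. */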
3065
3066 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3067 catch handler and return it. This prevents programs from violating the
3068 structured block semantics with throws. */
3069
3070 static gimple_seq
3071 maybe_catch_exception (gimple_seq body)
3072 {
3073 gimple f, t;
3074
3075 if (!flag_exceptions)
3076 return body;
3077
3078 if (lang_protect_cleanup_actions)
3079 t = lang_protect_cleanup_actions ();
3080 else
3081 t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
3082
3083 f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
3084 gimple_eh_filter_set_must_not_throw (f, true);
3085
3086 t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
3087 GIMPLE_TRY_CATCH);
3088
3089 return gimple_seq_alloc_with_stmt (t);
3090 }
3091
3092 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3093
3094 static tree
3095 list2chain (tree list)
3096 {
3097 tree t;
3098
3099 for (t = list; t; t = TREE_CHAIN (t))
3100 {
3101 tree var = TREE_VALUE (t);
3102 if (TREE_CHAIN (t))
3103 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3104 else
3105 TREE_CHAIN (var) = NULL_TREE;
3106 }
3107
3108 return list ? TREE_VALUE (list) : NULL_TREE;
3109 }
3110
3111
3112 /* Remove barriers in REGION->EXIT's block. Note that this is only
3113 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3114 is an implicit barrier, any barrier that a workshare inside the
3115 GIMPLE_OMP_PARALLEL left immediately before the end of the region
3116 is redundant and can now be removed. */
3117
3118 static void
3119 remove_exit_barrier (struct omp_region *region)
3120 {
3121 gimple_stmt_iterator gsi;
3122 basic_block exit_bb;
3123 edge_iterator ei;
3124 edge e;
3125 gimple stmt;
3126 int any_addressable_vars = -1;
3127
3128 exit_bb = region->exit;
3129
3130 /* If the parallel region doesn't return, we don't have REGION->EXIT
3131 block at all. */
3132 if (! exit_bb)
3133 return;
3134
3135 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3136 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3137 statements that can appear in between are extremely limited -- no
3138 memory operations at all. Here, we allow nothing at all, so the
3139 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3140 gsi = gsi_last_bb (exit_bb);
3141 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3142 gsi_prev (&gsi);
3143 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3144 return;
3145
3146 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3147 {
3148 gsi = gsi_last_bb (e->src);
3149 if (gsi_end_p (gsi))
3150 continue;
3151 stmt = gsi_stmt (gsi);
3152 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3153 && !gimple_omp_return_nowait_p (stmt))
3154 {
3155 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3156 in many cases. If tasks could be queued, the barrier
3157 might be needed to let those tasks run before some local
3158 variable of the parallel that a task uses as shared
3159 goes out of scope. The task can be spawned either
3160 from within the current function (this would be easy to check)
3161 or from some function it calls, which gets passed the address
3162 of such a variable. */
3163 if (any_addressable_vars < 0)
3164 {
3165 gimple parallel_stmt = last_stmt (region->entry);
3166 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3167 tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
3168 tree block;
3169
3170 any_addressable_vars = 0;
3171 for (; local_decls; local_decls = TREE_CHAIN (local_decls))
3172 if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
3173 {
3174 any_addressable_vars = 1;
3175 break;
3176 }
3177 for (block = gimple_block (stmt);
3178 !any_addressable_vars
3179 && block
3180 && TREE_CODE (block) == BLOCK;
3181 block = BLOCK_SUPERCONTEXT (block))
3182 {
3183 for (local_decls = BLOCK_VARS (block);
3184 local_decls;
3185 local_decls = TREE_CHAIN (local_decls))
3186 if (TREE_ADDRESSABLE (local_decls))
3187 {
3188 any_addressable_vars = 1;
3189 break;
3190 }
3191 if (block == gimple_block (parallel_stmt))
3192 break;
3193 }
3194 }
3195 if (!any_addressable_vars)
3196 gimple_omp_return_set_nowait (stmt);
3197 }
3198 }
3199 }
3200
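/* For example (illustrative user code), in

     #pragma omp parallel
       {
     #pragma omp for
         for (i = 0; i < n; i++)
           a[i] = i;              <-- implicit barrier after the loop
       }                          <-- implicit barrier at region end

   the loop's trailing barrier is immediately followed by the
   parallel's own barrier, so its GIMPLE_OMP_RETURN can be marked
   nowait, unless addressable locals might still be in use by queued
   tasks as checked above. */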
3201 static void
3202 remove_exit_barriers (struct omp_region *region)
3203 {
3204 if (region->type == GIMPLE_OMP_PARALLEL)
3205 remove_exit_barrier (region);
3206
3207 if (region->inner)
3208 {
3209 region = region->inner;
3210 remove_exit_barriers (region);
3211 while (region->next)
3212 {
3213 region = region->next;
3214 remove_exit_barriers (region);
3215 }
3216 }
3217 }
3218
3219 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3220 calls. These can't be declared as const functions, but
3221 within one parallel body they are constant, so they can be
3222 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3223 which are declared const. The same applies to a task body, except
3224 that in an untied task omp_get_thread_num () can change at any task
3225 scheduling point. */
3226
3227 static void
3228 optimize_omp_library_calls (gimple entry_stmt)
3229 {
3230 basic_block bb;
3231 gimple_stmt_iterator gsi;
3232 tree thr_num_id
3233 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3234 tree num_thr_id
3235 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3236 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3237 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3238 OMP_CLAUSE_UNTIED) != NULL);
3239
3240 FOR_EACH_BB (bb)
3241 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3242 {
3243 gimple call = gsi_stmt (gsi);
3244 tree decl;
3245
3246 if (is_gimple_call (call)
3247 && (decl = gimple_call_fndecl (call))
3248 && DECL_EXTERNAL (decl)
3249 && TREE_PUBLIC (decl)
3250 && DECL_INITIAL (decl) == NULL)
3251 {
3252 tree built_in;
3253
3254 if (DECL_NAME (decl) == thr_num_id)
3255 {
3256 /* In #pragma omp task untied omp_get_thread_num () can change
3257 during the execution of the task region. */
3258 if (untied_task)
3259 continue;
3260 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3261 }
3262 else if (DECL_NAME (decl) == num_thr_id)
3263 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3264 else
3265 continue;
3266
3267 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3268 || gimple_call_num_args (call) != 0)
3269 continue;
3270
3271 if (flag_exceptions && !TREE_NOTHROW (decl))
3272 continue;
3273
3274 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3275 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3276 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3277 continue;
3278
3279 gimple_call_set_fndecl (call, built_in);
3280 }
3281 }
3282 }
3283
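/* Illustrative effect of the replacement above: inside a parallel
   body

     tid = omp_get_thread_num ();

   becomes

     tid = __builtin_omp_get_thread_num ();

   which later passes can CSE across the body, since the builtin is
   declared const while the external library function cannot be. */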
3284 /* Expand the OpenMP parallel or task directive starting at REGION. */
3285
3286 static void
3287 expand_omp_taskreg (struct omp_region *region)
3288 {
3289 basic_block entry_bb, exit_bb, new_bb;
3290 struct function *child_cfun;
3291 tree child_fn, block, t, ws_args, *tp;
3292 tree save_current;
3293 gimple_stmt_iterator gsi;
3294 gimple entry_stmt, stmt;
3295 edge e;
3296
3297 entry_stmt = last_stmt (region->entry);
3298 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3299 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3300 /* If this function has already been instrumented, make sure
3301 the child function isn't instrumented again. */
3302 child_cfun->after_tree_profile = cfun->after_tree_profile;
3303
3304 entry_bb = region->entry;
3305 exit_bb = region->exit;
3306
3307 if (is_combined_parallel (region))
3308 ws_args = region->ws_args;
3309 else
3310 ws_args = NULL_TREE;
3311
3312 if (child_cfun->cfg)
3313 {
3314 /* Due to inlining, it may happen that we have already outlined
3315 the region, in which case all we need to do is make the
3316 sub-graph unreachable and emit the parallel call. */
3317 edge entry_succ_e, exit_succ_e;
3318 gimple_stmt_iterator gsi;
3319
3320 entry_succ_e = single_succ_edge (entry_bb);
3321
3322 gsi = gsi_last_bb (entry_bb);
3323 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3324 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3325 gsi_remove (&gsi, true);
3326
3327 new_bb = entry_bb;
3328 if (exit_bb)
3329 {
3330 exit_succ_e = single_succ_edge (exit_bb);
3331 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3332 }
3333 remove_edge_and_dominated_blocks (entry_succ_e);
3334 }
3335 else
3336 {
3337 /* If the parallel region needs data sent from the parent
3338 function, then the very first statement (except possible
3339 tree profile counter updates) of the parallel body
3340 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3341 &.OMP_DATA_O is passed as an argument to the child function,
3342 we need to replace it with the argument as seen by the child
3343 function.
3344
3345 In most cases, this will end up being the identity assignment
3346 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3347 a function call that has been inlined, the original PARM_DECL
3348 .OMP_DATA_I may have been converted into a different local
3349 variable, in which case we need to keep the assignment. */
3350 if (gimple_omp_taskreg_data_arg (entry_stmt))
3351 {
3352 basic_block entry_succ_bb = single_succ (entry_bb);
3353 gimple_stmt_iterator gsi;
3354 tree arg, narg;
3355 gimple parcopy_stmt = NULL;
3356
3357 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3358 {
3359 gimple stmt;
3360
3361 gcc_assert (!gsi_end_p (gsi));
3362 stmt = gsi_stmt (gsi);
3363 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3364 continue;
3365
3366 if (gimple_num_ops (stmt) == 2)
3367 {
3368 tree arg = gimple_assign_rhs1 (stmt);
3369
3370 /* We ignore the subcode because we're
3371 effectively doing a STRIP_NOPS. */
3372
3373 if (TREE_CODE (arg) == ADDR_EXPR
3374 && TREE_OPERAND (arg, 0)
3375 == gimple_omp_taskreg_data_arg (entry_stmt))
3376 {
3377 parcopy_stmt = stmt;
3378 break;
3379 }
3380 }
3381 }
3382
3383 gcc_assert (parcopy_stmt != NULL);
3384 arg = DECL_ARGUMENTS (child_fn);
3385
3386 if (!gimple_in_ssa_p (cfun))
3387 {
3388 if (gimple_assign_lhs (parcopy_stmt) == arg)
3389 gsi_remove (&gsi, true);
3390 else
3391 {
3392 /* ?? Is setting the subcode really necessary ?? */
3393 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3394 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3395 }
3396 }
3397 else
3398 {
3399 /* If we are in ssa form, we must load the value from the default
3400 definition of the argument. That should not be defined now,
3401 since the argument is not used uninitialized. */
3402 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3403 narg = make_ssa_name (arg, gimple_build_nop ());
3404 set_default_def (arg, narg);
3405 /* ?? Is setting the subcode really necessary ?? */
3406 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3407 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3408 update_stmt (parcopy_stmt);
3409 }
3410 }
3411
3412 /* Declare local variables needed in CHILD_CFUN. */
3413 block = DECL_INITIAL (child_fn);
3414 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3415 DECL_SAVED_TREE (child_fn) = NULL;
3416 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3417 TREE_USED (block) = 1;
3418
3419 /* Reset DECL_CONTEXT on function arguments. */
3420 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3421 DECL_CONTEXT (t) = child_fn;
3422
3423 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3424 so that it can be moved to the child function. */
3425 gsi = gsi_last_bb (entry_bb);
3426 stmt = gsi_stmt (gsi);
3427 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3428 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3429 gsi_remove (&gsi, true);
3430 e = split_block (entry_bb, stmt);
3431 entry_bb = e->dest;
3432 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3433
3434 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3435 if (exit_bb)
3436 {
3437 gsi = gsi_last_bb (exit_bb);
3438 gcc_assert (!gsi_end_p (gsi)
3439 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3440 stmt = gimple_build_return (NULL);
3441 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3442 gsi_remove (&gsi, true);
3443 }
3444
3445 /* Move the parallel region into CHILD_CFUN. */
3446
3447 if (gimple_in_ssa_p (cfun))
3448 {
3449 push_cfun (child_cfun);
3450 init_tree_ssa (child_cfun);
3451 init_ssa_operands ();
3452 cfun->gimple_df->in_ssa_p = true;
3453 pop_cfun ();
3454 block = NULL_TREE;
3455 }
3456 else
3457 block = gimple_block (entry_stmt);
3458
3459 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3460 if (exit_bb)
3461 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3462
3463 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3464 for (tp = &child_cfun->local_decls; *tp; )
3465 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3466 tp = &TREE_CHAIN (*tp);
3467 else
3468 *tp = TREE_CHAIN (*tp);
3469
3470 /* Inform the callgraph about the new function. */
3471 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3472 = cfun->curr_properties;
3473 cgraph_add_new_function (child_fn, true);
3474
3475 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3476 fixed in a following pass. */
3477 push_cfun (child_cfun);
3478 save_current = current_function_decl;
3479 current_function_decl = child_fn;
3480 if (optimize)
3481 optimize_omp_library_calls (entry_stmt);
3482 rebuild_cgraph_edges ();
3483
3484 /* Some EH regions might become dead, see PR34608. If
3485 pass_cleanup_cfg isn't the first pass to happen with the
3486 new child, these dead EH edges might cause problems.
3487 Clean them up now. */
3488 if (flag_exceptions)
3489 {
3490 basic_block bb;
3491 bool changed = false;
3492
3493 FOR_EACH_BB (bb)
3494 changed |= gimple_purge_dead_eh_edges (bb);
3495 if (changed)
3496 cleanup_tree_cfg ();
3497 }
3498 if (gimple_in_ssa_p (cfun))
3499 update_ssa (TODO_update_ssa);
3500 current_function_decl = save_current;
3501 pop_cfun ();
3502 }
3503
3504 /* Emit a library call to launch the children threads. */
3505 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3506 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3507 else
3508 expand_task_call (new_bb, entry_stmt);
3509 update_ssa (TODO_update_ssa_only_virtuals);
3510 }
3511
3512
3513 /* A subroutine of expand_omp_for. Generate code for a parallel
3514 loop with any schedule. Given parameters:
3515
3516 for (V = N1; V cond N2; V += STEP) BODY;
3517
3518 where COND is "<" or ">", we generate pseudocode
3519
3520 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3521 if (more) goto L0; else goto L3;
3522 L0:
3523 V = istart0;
3524 iend = iend0;
3525 L1:
3526 BODY;
3527 V += STEP;
3528 if (V cond iend) goto L1; else goto L2;
3529 L2:
3530 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3531 L3:
3532
3533 If this is a combined omp parallel loop, instead of the call to
3534 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3535
3536 For collapsed loops, given parameters:
3537 collapse(3)
3538 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3539 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3540 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3541 BODY;
3542
3543 we generate pseudocode
3544
3545 if (cond3 is <)
3546 adj = STEP3 - 1;
3547 else
3548 adj = STEP3 + 1;
3549 count3 = (adj + N32 - N31) / STEP3;
3550 if (cond2 is <)
3551 adj = STEP2 - 1;
3552 else
3553 adj = STEP2 + 1;
3554 count2 = (adj + N22 - N21) / STEP2;
3555 if (cond1 is <)
3556 adj = STEP1 - 1;
3557 else
3558 adj = STEP1 + 1;
3559 count1 = (adj + N12 - N11) / STEP1;
3560 count = count1 * count2 * count3;
3561 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3562 if (more) goto L0; else goto L3;
3563 L0:
3564 V = istart0;
3565 T = V;
3566 V3 = N31 + (T % count3) * STEP3;
3567 T = T / count3;
3568 V2 = N21 + (T % count2) * STEP2;
3569 T = T / count2;
3570 V1 = N11 + T * STEP1;
3571 iend = iend0;
3572 L1:
3573 BODY;
3574 V += 1;
3575 if (V < iend) goto L10; else goto L2;
3576 L10:
3577 V3 += STEP3;
3578 if (V3 cond3 N32) goto L1; else goto L11;
3579 L11:
3580 V3 = N31;
3581 V2 += STEP2;
3582 if (V2 cond2 N22) goto L1; else goto L12;
3583 L12:
3584 V2 = N21;
3585 V1 += STEP1;
3586 goto L1;
3587 L2:
3588 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3589 L3:
3590
3591 */
3592
3593 static void
3594 expand_omp_for_generic (struct omp_region *region,
3595 struct omp_for_data *fd,
3596 enum built_in_function start_fn,
3597 enum built_in_function next_fn)
3598 {
3599 tree type, istart0, iend0, iend;
3600 tree t, vmain, vback, bias = NULL_TREE;
3601 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3602 basic_block l2_bb = NULL, l3_bb = NULL;
3603 gimple_stmt_iterator gsi;
3604 gimple stmt;
3605 bool in_combined_parallel = is_combined_parallel (region);
3606 bool broken_loop = region->cont == NULL;
3607 edge e, ne;
3608 tree *counts = NULL;
3609 int i;
3610
3611 gcc_assert (!broken_loop || !in_combined_parallel);
3612 gcc_assert (fd->iter_type == long_integer_type_node
3613 || !in_combined_parallel);
3614
3615 type = TREE_TYPE (fd->loop.v);
3616 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3617 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3618 TREE_ADDRESSABLE (istart0) = 1;
3619 TREE_ADDRESSABLE (iend0) = 1;
3620 if (gimple_in_ssa_p (cfun))
3621 {
3622 add_referenced_var (istart0);
3623 add_referenced_var (iend0);
3624 }
3625
3626 /* See if we need to bias by LLONG_MIN. */
3627 if (fd->iter_type == long_long_unsigned_type_node
3628 && TREE_CODE (type) == INTEGER_TYPE
3629 && !TYPE_UNSIGNED (type))
3630 {
3631 tree n1, n2;
3632
3633 if (fd->loop.cond_code == LT_EXPR)
3634 {
3635 n1 = fd->loop.n1;
3636 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3637 }
3638 else
3639 {
3640 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3641 n2 = fd->loop.n1;
3642 }
3643 if (TREE_CODE (n1) != INTEGER_CST
3644 || TREE_CODE (n2) != INTEGER_CST
3645 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3646 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3647 }
3648
3649 entry_bb = region->entry;
3650 cont_bb = region->cont;
3651 collapse_bb = NULL;
3652 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3653 gcc_assert (broken_loop
3654 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3655 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3656 l1_bb = single_succ (l0_bb);
3657 if (!broken_loop)
3658 {
3659 l2_bb = create_empty_bb (cont_bb);
3660 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3661 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3662 }
3663 else
3664 l2_bb = NULL;
3665 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3666 exit_bb = region->exit;
3667
3668 gsi = gsi_last_bb (entry_bb);
3669
3670 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3671 if (fd->collapse > 1)
3672 {
3673 /* Collapsed loops need work for expansion in SSA form. */
3674 gcc_assert (!gimple_in_ssa_p (cfun));
3675 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3676 for (i = 0; i < fd->collapse; i++)
3677 {
3678 tree itype = TREE_TYPE (fd->loops[i].v);
3679
3680 if (POINTER_TYPE_P (itype))
3681 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3682 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3683 ? -1 : 1));
3684 t = fold_build2 (PLUS_EXPR, itype,
3685 fold_convert (itype, fd->loops[i].step), t);
3686 t = fold_build2 (PLUS_EXPR, itype, t,
3687 fold_convert (itype, fd->loops[i].n2));
3688 t = fold_build2 (MINUS_EXPR, itype, t,
3689 fold_convert (itype, fd->loops[i].n1));
3690 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3691 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3692 fold_build1 (NEGATE_EXPR, itype, t),
3693 fold_build1 (NEGATE_EXPR, itype,
3694 fold_convert (itype,
3695 fd->loops[i].step)));
3696 else
3697 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3698 fold_convert (itype, fd->loops[i].step));
3699 t = fold_convert (type, t);
3700 if (TREE_CODE (t) == INTEGER_CST)
3701 counts[i] = t;
3702 else
3703 {
3704 counts[i] = create_tmp_var (type, ".count");
3705 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3706 true, GSI_SAME_STMT);
3707 stmt = gimple_build_assign (counts[i], t);
3708 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3709 }
3710 if (SSA_VAR_P (fd->loop.n2))
3711 {
3712 if (i == 0)
3713 t = counts[0];
3714 else
3715 {
3716 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3717 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3718 true, GSI_SAME_STMT);
3719 }
3720 stmt = gimple_build_assign (fd->loop.n2, t);
3721 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3722 }
3723 }
3724 }
3725 if (in_combined_parallel)
3726 {
3727 /* In a combined parallel loop, emit a call to
3728 GOMP_loop_foo_next. */
3729 t = build_call_expr (built_in_decls[next_fn], 2,
3730 build_fold_addr_expr (istart0),
3731 build_fold_addr_expr (iend0));
3732 }
3733 else
3734 {
3735 tree t0, t1, t2, t3, t4;
3736 /* If this is not a combined parallel loop, emit a call to
3737 GOMP_loop_foo_start in ENTRY_BB. */
3738 t4 = build_fold_addr_expr (iend0);
3739 t3 = build_fold_addr_expr (istart0);
3740 t2 = fold_convert (fd->iter_type, fd->loop.step);
3741 if (POINTER_TYPE_P (type)
3742 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3743 {
3744 /* Avoid casting pointers to integer of a different size. */
3745 tree itype
3746 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3747 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3748 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3749 }
3750 else
3751 {
3752 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3753 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3754 }
3755 if (bias)
3756 {
3757 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3758 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3759 }
3760 if (fd->iter_type == long_integer_type_node)
3761 {
3762 if (fd->chunk_size)
3763 {
3764 t = fold_convert (fd->iter_type, fd->chunk_size);
3765 t = build_call_expr (built_in_decls[start_fn], 6,
3766 t0, t1, t2, t, t3, t4);
3767 }
3768 else
3769 t = build_call_expr (built_in_decls[start_fn], 5,
3770 t0, t1, t2, t3, t4);
3771 }
3772 else
3773 {
3774 tree t5;
3775 tree c_bool_type;
3776
3777 /* The GOMP_loop_ull_*start functions have an additional boolean
3778 argument, true for < loops and false for > loops.
3779 In Fortran, the C bool type can be different from
3780 boolean_type_node. */
3781 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3782 t5 = build_int_cst (c_bool_type,
3783 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3784 if (fd->chunk_size)
3785 {
3786 t = fold_convert (fd->iter_type, fd->chunk_size);
3787 t = build_call_expr (built_in_decls[start_fn], 7,
3788 t5, t0, t1, t2, t, t3, t4);
3789 }
3790 else
3791 t = build_call_expr (built_in_decls[start_fn], 6,
3792 t5, t0, t1, t2, t3, t4);
3793 }
3794 }
3795 if (TREE_TYPE (t) != boolean_type_node)
3796 t = fold_build2 (NE_EXPR, boolean_type_node,
3797 t, build_int_cst (TREE_TYPE (t), 0));
3798 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3799 true, GSI_SAME_STMT);
3800 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3801
3802 /* Remove the GIMPLE_OMP_FOR statement. */
3803 gsi_remove (&gsi, true);
3804
3805 /* Iteration setup for sequential loop goes in L0_BB. */
3806 gsi = gsi_start_bb (l0_bb);
3807 if (bias)
3808 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3809 istart0, bias));
3810 else
3811 t = fold_convert (type, istart0);
3812 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3813 false, GSI_CONTINUE_LINKING);
3814 stmt = gimple_build_assign (fd->loop.v, t);
3815 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3816
3817 if (bias)
3818 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3819 iend0, bias));
3820 else
3821 t = fold_convert (type, iend0);
3822 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3823 false, GSI_CONTINUE_LINKING);
3824 if (fd->collapse > 1)
3825 {
3826 tree tem = create_tmp_var (type, ".tem");
3827
3828 stmt = gimple_build_assign (tem, fd->loop.v);
3829 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3830 for (i = fd->collapse - 1; i >= 0; i--)
3831 {
3832 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3833 itype = vtype;
3834 if (POINTER_TYPE_P (vtype))
3835 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3836 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3837 t = fold_convert (itype, t);
3838 t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i].step);
3839 if (POINTER_TYPE_P (vtype))
3840 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3841 fd->loops[i].n1, fold_convert (sizetype, t));
3842 else
3843 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3844 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3845 false, GSI_CONTINUE_LINKING);
3846 stmt = gimple_build_assign (fd->loops[i].v, t);
3847 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3848 if (i != 0)
3849 {
3850 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3851 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3852 false, GSI_CONTINUE_LINKING);
3853 stmt = gimple_build_assign (tem, t);
3854 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3855 }
3856 }
3857 }
3858
3859 if (!broken_loop)
3860 {
3861 /* Code to control the increment and predicate for the sequential
3862 loop goes in the CONT_BB. */
3863 gsi = gsi_last_bb (cont_bb);
3864 stmt = gsi_stmt (gsi);
3865 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3866 vmain = gimple_omp_continue_control_use (stmt);
3867 vback = gimple_omp_continue_control_def (stmt);
3868
3869 if (POINTER_TYPE_P (type))
3870 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3871 fold_convert (sizetype, fd->loop.step));
3872 else
3873 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3874 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3875 true, GSI_SAME_STMT);
3876 stmt = gimple_build_assign (vback, t);
3877 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3878
3879 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3880 stmt = gimple_build_cond_empty (t);
3881 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3882
3883 /* Remove GIMPLE_OMP_CONTINUE. */
3884 gsi_remove (&gsi, true);
3885
3886 if (fd->collapse > 1)
3887 {
3888 basic_block last_bb, bb;
3889
3890 last_bb = cont_bb;
3891 for (i = fd->collapse - 1; i >= 0; i--)
3892 {
3893 tree vtype = TREE_TYPE (fd->loops[i].v);
3894
3895 bb = create_empty_bb (last_bb);
3896 gsi = gsi_start_bb (bb);
3897
3898 if (i < fd->collapse - 1)
3899 {
3900 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3901 e->probability = REG_BR_PROB_BASE / 8;
3902
3903 t = fd->loops[i + 1].n1;
3904 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3905 false, GSI_CONTINUE_LINKING);
3906 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3907 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3908 }
3909 else
3910 collapse_bb = bb;
3911
3912 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3913
3914 if (POINTER_TYPE_P (vtype))
3915 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3916 fd->loops[i].v,
3917 fold_convert (sizetype, fd->loops[i].step));
3918 else
3919 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3920 fd->loops[i].step);
3921 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3922 false, GSI_CONTINUE_LINKING);
3923 stmt = gimple_build_assign (fd->loops[i].v, t);
3924 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3925
3926 if (i > 0)
3927 {
3928 t = fd->loops[i].n2;
3929 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3930 false, GSI_CONTINUE_LINKING);
3931 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3932 fd->loops[i].v, t);
3933 stmt = gimple_build_cond_empty (t);
3934 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3935 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3936 e->probability = REG_BR_PROB_BASE * 7 / 8;
3937 }
3938 else
3939 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3940 last_bb = bb;
3941 }
3942 }
3943
3944 /* Emit code to get the next parallel iteration in L2_BB. */
3945 gsi = gsi_start_bb (l2_bb);
3946
3947 t = build_call_expr (built_in_decls[next_fn], 2,
3948 build_fold_addr_expr (istart0),
3949 build_fold_addr_expr (iend0));
3950 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3951 false, GSI_CONTINUE_LINKING);
3952 if (TREE_TYPE (t) != boolean_type_node)
3953 t = fold_build2 (NE_EXPR, boolean_type_node,
3954 t, build_int_cst (TREE_TYPE (t), 0));
3955 stmt = gimple_build_cond_empty (t);
3956 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3957 }
3958
3959 /* Add the loop cleanup function. */
3960 gsi = gsi_last_bb (exit_bb);
3961 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3962 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3963 else
3964 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
3965 stmt = gimple_build_call (t, 0);
3966 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3967 gsi_remove (&gsi, true);
3968
3969 /* Connect the new blocks. */
3970 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3971 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3972
3973 if (!broken_loop)
3974 {
3975 gimple_seq phis;
3976
3977 e = find_edge (cont_bb, l3_bb);
3978 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3979
3980 phis = phi_nodes (l3_bb);
3981 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
3982 {
3983 gimple phi = gsi_stmt (gsi);
3984 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3985 PHI_ARG_DEF_FROM_EDGE (phi, e));
3986 }
3987 remove_edge (e);
3988
3989 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
3990 if (fd->collapse > 1)
3991 {
3992 e = find_edge (cont_bb, l1_bb);
3993 remove_edge (e);
3994 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
3995 }
3996 else
3997 {
3998 e = find_edge (cont_bb, l1_bb);
3999 e->flags = EDGE_TRUE_VALUE;
4000 }
4001 e->probability = REG_BR_PROB_BASE * 7 / 8;
4002 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4003 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4004
4005 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4006 recompute_dominator (CDI_DOMINATORS, l2_bb));
4007 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4008 recompute_dominator (CDI_DOMINATORS, l3_bb));
4009 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4010 recompute_dominator (CDI_DOMINATORS, l0_bb));
4011 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4012 recompute_dominator (CDI_DOMINATORS, l1_bb));
4013 }
4014 }
4015
4016
4017 /* A subroutine of expand_omp_for. Generate code for a parallel
4018 loop with static schedule and no specified chunk size. Given
4019 parameters:
4020
4021 for (V = N1; V cond N2; V += STEP) BODY;
4022
4023 where COND is "<" or ">", we generate pseudocode
4024
4025 if (cond is <)
4026 adj = STEP - 1;
4027 else
4028 adj = STEP + 1;
4029 if ((__typeof (V)) -1 > 0 && cond is >)
4030 n = -(adj + N2 - N1) / -STEP;
4031 else
4032 n = (adj + N2 - N1) / STEP;
4033 q = n / nthreads;
4034 q += (q * nthreads != n);
4035 s0 = q * threadid;
4036 e0 = min(s0 + q, n);
4037 V = s0 * STEP + N1;
4038 if (s0 >= e0) goto L2; else goto L0;
4039 L0:
4040 e = e0 * STEP + N1;
4041 L1:
4042 BODY;
4043 V += STEP;
4044 if (V cond e) goto L1;
4045 L2:
4046 */
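/* Editorial aside: the partitioning above can be modelled with a
   small self-contained C helper (a sketch, not part of this pass;
   NTHREADS and THREADID stand for the values of
   omp_get_num_threads () and omp_get_thread_num ()):

     static void
     static_nochunk_block (unsigned n, unsigned nthreads,
                           unsigned threadid,
                           unsigned *s0, unsigned *e0)
     {
       unsigned q = n / nthreads;
       q += (q * nthreads != n);
       *s0 = q * threadid;
       *e0 = *s0 + q < n ? *s0 + q : n;
     }

   The second statement rounds Q up, and the last line computes
   min (s0 + q, n).  For n = 10 and nthreads = 4 the blocks come
   out as [0,3), [3,6), [6,9) and [9,10).  */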
4047
4048 static void
4049 expand_omp_for_static_nochunk (struct omp_region *region,
4050 struct omp_for_data *fd)
4051 {
4052 tree n, q, s0, e0, e, t, nthreads, threadid;
4053 tree type, itype, vmain, vback;
4054 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4055 basic_block fin_bb;
4056 gimple_stmt_iterator gsi;
4057 gimple stmt;
4058
4059 itype = type = TREE_TYPE (fd->loop.v);
4060 if (POINTER_TYPE_P (type))
4061 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4062
4063 entry_bb = region->entry;
4064 cont_bb = region->cont;
4065 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4066 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4067 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4068 body_bb = single_succ (seq_start_bb);
4069 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4070 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4071 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4072 exit_bb = region->exit;
4073
4074 /* Iteration space partitioning goes in ENTRY_BB. */
4075 gsi = gsi_last_bb (entry_bb);
4076 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4077
4078 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4079 t = fold_convert (itype, t);
4080 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4081 true, GSI_SAME_STMT);
4082
4083 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4084 t = fold_convert (itype, t);
4085 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4086 true, GSI_SAME_STMT);
4087
4088 fd->loop.n1
4089 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4090 true, NULL_TREE, true, GSI_SAME_STMT);
4091 fd->loop.n2
4092 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4093 true, NULL_TREE, true, GSI_SAME_STMT);
4094 fd->loop.step
4095 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4096 true, NULL_TREE, true, GSI_SAME_STMT);
4097
4098 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4099 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4100 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4101 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4102 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4103 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4104 fold_build1 (NEGATE_EXPR, itype, t),
4105 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4106 else
4107 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4108 t = fold_convert (itype, t);
4109 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4110
4111 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4112 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4113
4114 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4115 t = fold_build2 (NE_EXPR, itype, t, n);
4116 t = fold_build2 (PLUS_EXPR, itype, q, t);
4117 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4118
4119 t = build2 (MULT_EXPR, itype, q, threadid);
4120 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4121
4122 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4123 t = fold_build2 (MIN_EXPR, itype, t, n);
4124 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4125
4126 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4127 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4128
4129 /* Remove the GIMPLE_OMP_FOR statement. */
4130 gsi_remove (&gsi, true);
4131
4132 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4133 gsi = gsi_start_bb (seq_start_bb);
4134
4135 t = fold_convert (itype, s0);
4136 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4137 if (POINTER_TYPE_P (type))
4138 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4139 fold_convert (sizetype, t));
4140 else
4141 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4142 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4143 false, GSI_CONTINUE_LINKING);
4144 stmt = gimple_build_assign (fd->loop.v, t);
4145 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4146
4147 t = fold_convert (itype, e0);
4148 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4149 if (POINTER_TYPE_P (type))
4150 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4151 fold_convert (sizetype, t));
4152 else
4153 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4154 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4155 false, GSI_CONTINUE_LINKING);
4156
4157 /* The code controlling the sequential loop replaces the
4158 GIMPLE_OMP_CONTINUE. */
4159 gsi = gsi_last_bb (cont_bb);
4160 stmt = gsi_stmt (gsi);
4161 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4162 vmain = gimple_omp_continue_control_use (stmt);
4163 vback = gimple_omp_continue_control_def (stmt);
4164
4165 if (POINTER_TYPE_P (type))
4166 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4167 fold_convert (sizetype, fd->loop.step));
4168 else
4169 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4170 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4171 true, GSI_SAME_STMT);
4172 stmt = gimple_build_assign (vback, t);
4173 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4174
4175 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4176 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4177
4178 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4179 gsi_remove (&gsi, true);
4180
4181 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4182 gsi = gsi_last_bb (exit_bb);
4183 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4184 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4185 false, GSI_SAME_STMT);
4186 gsi_remove (&gsi, true);
4187
4188 /* Connect all the blocks. */
4189 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4190 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4191
4192 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4193 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4194
4195 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4196 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4197 recompute_dominator (CDI_DOMINATORS, body_bb));
4198 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4199 recompute_dominator (CDI_DOMINATORS, fin_bb));
4200 }
4201
4202
4203 /* A subroutine of expand_omp_for. Generate code for a parallel
4204 loop with static schedule and a specified chunk size. Given
4205 parameters:
4206
4207 for (V = N1; V cond N2; V += STEP) BODY;
4208
4209 where COND is "<" or ">", we generate pseudocode
4210
4211 if (cond is <)
4212 adj = STEP - 1;
4213 else
4214 adj = STEP + 1;
4215 if ((__typeof (V)) -1 > 0 && cond is >)
4216 n = -(adj + N2 - N1) / -STEP;
4217 else
4218 n = (adj + N2 - N1) / STEP;
4219 trip = 0;
4220 V = threadid * CHUNK * STEP + N1;
4221 -- this extra definition of V is here so that
4222 V is defined if the loop is not entered
4223 L0:
4224 s0 = (trip * nthreads + threadid) * CHUNK;
4225 e0 = min(s0 + CHUNK, n);
4226 if (s0 < n) goto L1; else goto L4;
4227 L1:
4228 V = s0 * STEP + N1;
4229 e = e0 * STEP + N1;
4230 L2:
4231 BODY;
4232 V += STEP;
4233 if (V cond e) goto L2; else goto L3;
4234 L3:
4235 trip += 1;
4236 goto L0;
4237 L4:
4238 */
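/* Editorial aside: the trip-count iteration above, modelled as
   self-contained C (a sketch, not part of this pass; printf merely
   prints each block a thread would run):

     #include <stdio.h>

     static void
     static_chunk_blocks (unsigned n, unsigned chunk,
                          unsigned nthreads, unsigned threadid)
     {
       unsigned trip;
       for (trip = 0; ; trip++)
         {
           unsigned s0 = (trip * nthreads + threadid) * chunk;
           if (s0 >= n)
             break;
           unsigned e0 = s0 + chunk < n ? s0 + chunk : n;
           printf ("thread %u: [%u, %u)\n", threadid, s0, e0);
         }
     }

   The break corresponds to the "goto L4" exit above.  With n = 10,
   chunk = 2 and nthreads = 3, thread 0 runs [0,2) and [6,8),
   thread 1 runs [2,4) and [8,10), and thread 2 runs [4,6).  */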
4239
4240 static void
4241 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4242 {
4243 tree n, s0, e0, e, t;
4244 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4245 tree type, itype, v_main, v_back, v_extra;
4246 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4247 basic_block trip_update_bb, cont_bb, fin_bb;
4248 gimple_stmt_iterator si;
4249 gimple stmt;
4250 edge se;
4251
4252 itype = type = TREE_TYPE (fd->loop.v);
4253 if (POINTER_TYPE_P (type))
4254 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4255
4256 entry_bb = region->entry;
4257 se = split_block (entry_bb, last_stmt (entry_bb));
4258 entry_bb = se->src;
4259 iter_part_bb = se->dest;
4260 cont_bb = region->cont;
4261 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4262 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4263 == FALLTHRU_EDGE (cont_bb)->dest);
4264 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4265 body_bb = single_succ (seq_start_bb);
4266 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4267 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4268 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4269 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4270 exit_bb = region->exit;
4271
4272 /* Trip and adjustment setup goes in ENTRY_BB. */
4273 si = gsi_last_bb (entry_bb);
4274 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4275
4276 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4277 t = fold_convert (itype, t);
4278 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4279 true, GSI_SAME_STMT);
4280
4281 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4282 t = fold_convert (itype, t);
4283 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4284 true, GSI_SAME_STMT);
4285
4286 fd->loop.n1
4287 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4288 true, NULL_TREE, true, GSI_SAME_STMT);
4289 fd->loop.n2
4290 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4291 true, NULL_TREE, true, GSI_SAME_STMT);
4292 fd->loop.step
4293 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4294 true, NULL_TREE, true, GSI_SAME_STMT);
4295 fd->chunk_size
4296 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4297 true, NULL_TREE, true, GSI_SAME_STMT);
4298
4299 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4300 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4301 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4302 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4303 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4304 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4305 fold_build1 (NEGATE_EXPR, itype, t),
4306 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4307 else
4308 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4309 t = fold_convert (itype, t);
4310 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4311 true, GSI_SAME_STMT);
4312
4313 trip_var = create_tmp_var (itype, ".trip");
4314 if (gimple_in_ssa_p (cfun))
4315 {
4316 add_referenced_var (trip_var);
4317 trip_init = make_ssa_name (trip_var, NULL);
4318 trip_main = make_ssa_name (trip_var, NULL);
4319 trip_back = make_ssa_name (trip_var, NULL);
4320 }
4321 else
4322 {
4323 trip_init = trip_var;
4324 trip_main = trip_var;
4325 trip_back = trip_var;
4326 }
4327
4328 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4329 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4330
4331 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4332 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4333 if (POINTER_TYPE_P (type))
4334 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4335 fold_convert (sizetype, t));
4336 else
4337 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4338 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4339 true, GSI_SAME_STMT);
4340
4341 /* Remove the GIMPLE_OMP_FOR. */
4342 gsi_remove (&si, true);
4343
4344 /* Iteration space partitioning goes in ITER_PART_BB. */
4345 si = gsi_last_bb (iter_part_bb);
4346
4347 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4348 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4349 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4350 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4351 false, GSI_CONTINUE_LINKING);
4352
4353 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4354 t = fold_build2 (MIN_EXPR, itype, t, n);
4355 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4356 false, GSI_CONTINUE_LINKING);
4357
4358 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4359 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4360
4361 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4362 si = gsi_start_bb (seq_start_bb);
4363
4364 t = fold_convert (itype, s0);
4365 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4366 if (POINTER_TYPE_P (type))
4367 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4368 fold_convert (sizetype, t));
4369 else
4370 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4371 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4372 false, GSI_CONTINUE_LINKING);
4373 stmt = gimple_build_assign (fd->loop.v, t);
4374 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4375
4376 t = fold_convert (itype, e0);
4377 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4378 if (POINTER_TYPE_P (type))
4379 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4380 fold_convert (sizetype, t));
4381 else
4382 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4383 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4384 false, GSI_CONTINUE_LINKING);
4385
4386 /* The code controlling the sequential loop goes in CONT_BB,
4387 replacing the GIMPLE_OMP_CONTINUE. */
4388 si = gsi_last_bb (cont_bb);
4389 stmt = gsi_stmt (si);
4390 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4391 v_main = gimple_omp_continue_control_use (stmt);
4392 v_back = gimple_omp_continue_control_def (stmt);
4393
4394 if (POINTER_TYPE_P (type))
4395 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4396 fold_convert (sizetype, fd->loop.step));
4397 else
4398 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4399 stmt = gimple_build_assign (v_back, t);
4400 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4401
4402 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4403 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4404
4405 /* Remove GIMPLE_OMP_CONTINUE. */
4406 gsi_remove (&si, true);
4407
4408 /* Trip update code goes into TRIP_UPDATE_BB. */
4409 si = gsi_start_bb (trip_update_bb);
4410
4411 t = build_int_cst (itype, 1);
4412 t = build2 (PLUS_EXPR, itype, trip_main, t);
4413 stmt = gimple_build_assign (trip_back, t);
4414 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4415
4416 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4417 si = gsi_last_bb (exit_bb);
4418 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4419 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4420 false, GSI_SAME_STMT);
4421 gsi_remove (&si, true);
4422
4423 /* Connect the new blocks. */
4424 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4425 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4426
4427 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4428 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4429
4430 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4431
4432 if (gimple_in_ssa_p (cfun))
4433 {
4434 gimple_stmt_iterator psi;
4435 gimple phi;
4436 edge re, ene;
4437 edge_var_map_vector head;
4438 edge_var_map *vm;
4439 size_t i;
4440
4441 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4442 remove arguments of the phi nodes in fin_bb. We need to create
4443 appropriate phi nodes in iter_part_bb instead. */
4444 se = single_pred_edge (fin_bb);
4445 re = single_succ_edge (trip_update_bb);
4446 head = redirect_edge_var_map_vector (re);
4447 ene = single_succ_edge (entry_bb);
4448
4449 psi = gsi_start_phis (fin_bb);
4450 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4451 gsi_next (&psi), ++i)
4452 {
4453 gimple nphi;
4454
4455 phi = gsi_stmt (psi);
4456 t = gimple_phi_result (phi);
4457 gcc_assert (t == redirect_edge_var_map_result (vm));
4458 nphi = create_phi_node (t, iter_part_bb);
4459 SSA_NAME_DEF_STMT (t) = nphi;
4460
4461 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4462 /* A special case -- fd->loop.v is not yet computed in
4463 iter_part_bb; we need to use v_extra instead. */
4464 if (t == fd->loop.v)
4465 t = v_extra;
4466 add_phi_arg (nphi, t, ene);
4467 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
4468 }
4469 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4470 redirect_edge_var_map_clear (re);
4471 while (1)
4472 {
4473 psi = gsi_start_phis (fin_bb);
4474 if (gsi_end_p (psi))
4475 break;
4476 remove_phi_node (&psi, false);
4477 }
4478
4479 /* Make phi node for trip. */
4480 phi = create_phi_node (trip_main, iter_part_bb);
4481 SSA_NAME_DEF_STMT (trip_main) = phi;
4482 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4483 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
4484 }
4485
4486 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4487 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4488 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4489 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4490 recompute_dominator (CDI_DOMINATORS, fin_bb));
4491 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4492 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4493 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4494 recompute_dominator (CDI_DOMINATORS, body_bb));
4495 }
4496
4497
4498 /* Expand the OpenMP loop defined by REGION. */
4499
4500 static void
4501 expand_omp_for (struct omp_region *region)
4502 {
4503 struct omp_for_data fd;
4504 struct omp_for_data_loop *loops;
4505
4506 loops
4507 = (struct omp_for_data_loop *)
4508 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4509 * sizeof (struct omp_for_data_loop));
4510 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4511 region->sched_kind = fd.sched_kind;
4512
4513 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4514 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4515 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4516 if (region->cont)
4517 {
4518 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4519 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4520 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4521 }
4522
4523 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4524 && !fd.have_ordered
4525 && fd.collapse == 1
4526 && region->cont != NULL)
4527 {
4528 if (fd.chunk_size == NULL)
4529 expand_omp_for_static_nochunk (region, &fd);
4530 else
4531 expand_omp_for_static_chunk (region, &fd);
4532 }
4533 else
4534 {
4535 int fn_index, start_ix, next_ix;
4536
4537 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4538 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4539 ? 3 : fd.sched_kind;
4540 fn_index += fd.have_ordered * 4;
4541 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4542 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4543 if (fd.iter_type == long_long_unsigned_type_node)
4544 {
4545 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4546 - BUILT_IN_GOMP_LOOP_STATIC_START;
4547 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4548 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4549 }
4550 expand_omp_for_generic (region, &fd, start_ix, next_ix);
4551 }
4552
4553 update_ssa (TODO_update_ssa_only_virtuals);
4554 }
4555
4556
4557 /* Expand code for an OpenMP sections directive. In pseudocode, we generate
4558
4559 v = GOMP_sections_start (n);
4560 L0:
4561 switch (v)
4562 {
4563 case 0:
4564 goto L2;
4565 case 1:
4566 section 1;
4567 goto L1;
4568 case 2:
4569 ...
4570 case n:
4571 ...
4572 default:
4573 abort ();
4574 }
4575 L1:
4576 v = GOMP_sections_next ();
4577 goto L0;
4578 L2:
4579 reduction;
4580
4581 If this is a combined parallel sections region, replace the call to
4582 GOMP_sections_start with a call to GOMP_sections_next. */
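/* Editorial aside: the dispatch loop above written as source-level
   C for a two-section construct (a sketch; the pass emits GIMPLE,
   and do_section_1/do_section_2 are hypothetical stand-ins for the
   section bodies, while the GOMP_* declarations match the libgomp
   entry points used below):

     extern unsigned GOMP_sections_start (unsigned);
     extern unsigned GOMP_sections_next (void);
     extern void GOMP_sections_end (void);
     static void do_section_1 (void);
     static void do_section_2 (void);

     static void
     sections_fn (void)
     {
       unsigned v;
       for (v = GOMP_sections_start (2); v != 0;
            v = GOMP_sections_next ())
         switch (v)
           {
           case 1: do_section_1 (); break;
           case 2: do_section_2 (); break;
           default: __builtin_trap ();
           }
       GOMP_sections_end ();
     }

   Case 0, the "no more work" return value, is what terminates the
   for loop, matching the "goto L2" in the pseudocode above.  */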
4583
4584 static void
4585 expand_omp_sections (struct omp_region *region)
4586 {
4587 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4588 VEC (tree,heap) *label_vec;
4589 unsigned len;
4590 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4591 gimple_stmt_iterator si, switch_si;
4592 gimple sections_stmt, stmt, cont;
4593 edge_iterator ei;
4594 edge e;
4595 struct omp_region *inner;
4596 unsigned i, casei;
4597 bool exit_reachable = region->cont != NULL;
4598
4599 gcc_assert (exit_reachable == (region->exit != NULL));
4600 entry_bb = region->entry;
4601 l0_bb = single_succ (entry_bb);
4602 l1_bb = region->cont;
4603 l2_bb = region->exit;
4604 if (exit_reachable)
4605 {
4606 if (single_pred (l2_bb) == l0_bb)
4607 l2 = gimple_block_label (l2_bb);
4608 else
4609 {
4610 /* This can happen if there are reductions. */
4611 len = EDGE_COUNT (l0_bb->succs);
4612 gcc_assert (len > 0);
4613 e = EDGE_SUCC (l0_bb, len - 1);
4614 si = gsi_last_bb (e->dest);
4615 l2 = NULL_TREE;
4616 if (gsi_end_p (si)
4617 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4618 l2 = gimple_block_label (e->dest);
4619 else
4620 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4621 {
4622 si = gsi_last_bb (e->dest);
4623 if (gsi_end_p (si)
4624 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4625 {
4626 l2 = gimple_block_label (e->dest);
4627 break;
4628 }
4629 }
4630 }
4631 default_bb = create_empty_bb (l1_bb->prev_bb);
4632 l1 = gimple_block_label (l1_bb);
4633 }
4634 else
4635 {
4636 default_bb = create_empty_bb (l0_bb);
4637 l1 = NULL_TREE;
4638 l2 = gimple_block_label (default_bb);
4639 }
4640
4641 /* We will build a switch() with enough cases for all the
4642 GIMPLE_OMP_SECTION regions, a '0' case to signal that no more work remains,
4643 and a default case to abort if something goes wrong. */
4644 len = EDGE_COUNT (l0_bb->succs);
4645
4646 /* Use VEC_quick_push on label_vec throughout, since we know the size
4647 in advance. */
4648 label_vec = VEC_alloc (tree, heap, len);
4649
4650 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4651 GIMPLE_OMP_SECTIONS statement. */
4652 si = gsi_last_bb (entry_bb);
4653 sections_stmt = gsi_stmt (si);
4654 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4655 vin = gimple_omp_sections_control (sections_stmt);
4656 if (!is_combined_parallel (region))
4657 {
4658 /* If we are not inside a combined parallel+sections region,
4659 call GOMP_sections_start. */
4660 t = build_int_cst (unsigned_type_node,
4661 exit_reachable ? len - 1 : len);
4662 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4663 stmt = gimple_build_call (u, 1, t);
4664 }
4665 else
4666 {
4667 /* Otherwise, call GOMP_sections_next. */
4668 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4669 stmt = gimple_build_call (u, 0);
4670 }
4671 gimple_call_set_lhs (stmt, vin);
4672 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4673 gsi_remove (&si, true);
4674
4675 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4676 L0_BB. */
4677 switch_si = gsi_last_bb (l0_bb);
4678 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4679 if (exit_reachable)
4680 {
4681 cont = last_stmt (l1_bb);
4682 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4683 vmain = gimple_omp_continue_control_use (cont);
4684 vnext = gimple_omp_continue_control_def (cont);
4685 }
4686 else
4687 {
4688 vmain = vin;
4689 vnext = NULL_TREE;
4690 }
4691
4692 i = 0;
4693 if (exit_reachable)
4694 {
4695 t = build3 (CASE_LABEL_EXPR, void_type_node,
4696 build_int_cst (unsigned_type_node, 0), NULL, l2);
4697 VEC_quick_push (tree, label_vec, t);
4698 i++;
4699 }
4700
4701 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4702 for (inner = region->inner, casei = 1;
4703 inner;
4704 inner = inner->next, i++, casei++)
4705 {
4706 basic_block s_entry_bb, s_exit_bb;
4707
4708 /* Skip optional reduction region. */
4709 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4710 {
4711 --i;
4712 --casei;
4713 continue;
4714 }
4715
4716 s_entry_bb = inner->entry;
4717 s_exit_bb = inner->exit;
4718
4719 t = gimple_block_label (s_entry_bb);
4720 u = build_int_cst (unsigned_type_node, casei);
4721 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4722 VEC_quick_push (tree, label_vec, u);
4723
4724 si = gsi_last_bb (s_entry_bb);
4725 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4726 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4727 gsi_remove (&si, true);
4728 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4729
4730 if (s_exit_bb == NULL)
4731 continue;
4732
4733 si = gsi_last_bb (s_exit_bb);
4734 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4735 gsi_remove (&si, true);
4736
4737 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4738 }
4739
4740 /* Error handling code goes in DEFAULT_BB. */
4741 t = gimple_block_label (default_bb);
4742 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4743 make_edge (l0_bb, default_bb, 0);
4744
4745 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4746 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4747 gsi_remove (&switch_si, true);
4748 VEC_free (tree, heap, label_vec);
4749
4750 si = gsi_start_bb (default_bb);
4751 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4752 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4753
4754 if (exit_reachable)
4755 {
4756 /* Code to get the next section goes in L1_BB. */
4757 si = gsi_last_bb (l1_bb);
4758 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4759
4760 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4761 gimple_call_set_lhs (stmt, vnext);
4762 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4763 gsi_remove (&si, true);
4764
4765 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4766
4767 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4768 si = gsi_last_bb (l2_bb);
4769 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4770 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4771 else
4772 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4773 stmt = gimple_build_call (t, 0);
4774 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4775 gsi_remove (&si, true);
4776 }
4777
4778 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4779 }
4780
4781
4782 /* Expand code for an OpenMP single directive. We've already expanded
4783 much of the code; here we simply place the GOMP_barrier call. */
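/* Editorial aside: a construct that exercises the copyprivate path
   below (compute () is a hypothetical stand-in):

     #pragma omp single copyprivate (x)
       x = compute ();

   The thread that executes the body broadcasts x to its siblings,
   so the barrier emitted below cannot be dropped even when the
   region would otherwise be nowait.  */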
4784
4785 static void
4786 expand_omp_single (struct omp_region *region)
4787 {
4788 basic_block entry_bb, exit_bb;
4789 gimple_stmt_iterator si;
4790 bool need_barrier = false;
4791
4792 entry_bb = region->entry;
4793 exit_bb = region->exit;
4794
4795 si = gsi_last_bb (entry_bb);
4796 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4797 be removed. We need to ensure that the thread that entered the single
4798 does not exit before the data is copied out by the other threads. */
4799 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4800 OMP_CLAUSE_COPYPRIVATE))
4801 need_barrier = true;
4802 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4803 gsi_remove (&si, true);
4804 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4805
4806 si = gsi_last_bb (exit_bb);
4807 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4808 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4809 false, GSI_SAME_STMT);
4810 gsi_remove (&si, true);
4811 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4812 }
4813
4814
4815 /* Generic expansion for OpenMP synchronization directives: master,
4816 ordered and critical. All we need to do here is remove the entry
4817 and exit markers for REGION. */
4818
4819 static void
4820 expand_omp_synch (struct omp_region *region)
4821 {
4822 basic_block entry_bb, exit_bb;
4823 gimple_stmt_iterator si;
4824
4825 entry_bb = region->entry;
4826 exit_bb = region->exit;
4827
4828 si = gsi_last_bb (entry_bb);
4829 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4830 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4831 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4832 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4833 gsi_remove (&si, true);
4834 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4835
4836 if (exit_bb)
4837 {
4838 si = gsi_last_bb (exit_bb);
4839 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4840 gsi_remove (&si, true);
4841 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4842 }
4843 }
4844
4845 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4846 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4847 size of the data type, and thus usable to find the index of the builtin
4848 decl. Returns false if the expression is not of the proper form. */
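/* Editorial aside: the effect of this function at the source level
   (a sketch; the real rewrite happens on GIMPLE).  Given

     #pragma omp atomic
     x += delta;

   with int x, the load/store pair described below collapses into
   the equivalent of

     (void) __sync_fetch_and_add (&x, delta);

   with the fetched value discarded.  */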
4849
4850 static bool
4851 expand_omp_atomic_fetch_op (basic_block load_bb,
4852 tree addr, tree loaded_val,
4853 tree stored_val, int index)
4854 {
4855 enum built_in_function base;
4856 tree decl, itype, call;
4857 enum insn_code *optab;
4858 tree rhs;
4859 basic_block store_bb = single_succ (load_bb);
4860 gimple_stmt_iterator gsi;
4861 gimple stmt;
4862
4863 /* We expect to find the following sequences:
4864
4865 load_bb:
4866 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4867
4868 store_bb:
4869 val = tmp OP something; (or: something OP tmp)
4870 GIMPLE_OMP_ATOMIC_STORE (val)
4871
4872 ??? FIXME: Allow a more flexible sequence.
4873 Perhaps use data flow to pick the statements.
4874
4875 */
4876
4877 gsi = gsi_after_labels (store_bb);
4878 stmt = gsi_stmt (gsi);
4879 if (!is_gimple_assign (stmt))
4880 return false;
4881 gsi_next (&gsi);
4882 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4883 return false;
4884
4885 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4886 return false;
4887
4888 /* Check for one of the supported fetch-op operations. */
4889 switch (gimple_assign_rhs_code (stmt))
4890 {
4891 case PLUS_EXPR:
4892 case POINTER_PLUS_EXPR:
4893 base = BUILT_IN_FETCH_AND_ADD_N;
4894 optab = sync_add_optab;
4895 break;
4896 case MINUS_EXPR:
4897 base = BUILT_IN_FETCH_AND_SUB_N;
4898 optab = sync_add_optab;
4899 break;
4900 case BIT_AND_EXPR:
4901 base = BUILT_IN_FETCH_AND_AND_N;
4902 optab = sync_and_optab;
4903 break;
4904 case BIT_IOR_EXPR:
4905 base = BUILT_IN_FETCH_AND_OR_N;
4906 optab = sync_ior_optab;
4907 break;
4908 case BIT_XOR_EXPR:
4909 base = BUILT_IN_FETCH_AND_XOR_N;
4910 optab = sync_xor_optab;
4911 break;
4912 default:
4913 return false;
4914 }
4915 /* Make sure the expression is of the proper form. */
4916 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4917 rhs = gimple_assign_rhs2 (stmt);
4918 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4919 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4920 rhs = gimple_assign_rhs1 (stmt);
4921 else
4922 return false;
4923
4924 decl = built_in_decls[base + index + 1];
4925 itype = TREE_TYPE (TREE_TYPE (decl));
4926
4927 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4928 return false;
4929
4930 gsi = gsi_last_bb (load_bb);
4931 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4932 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
4933 call = fold_convert (void_type_node, call);
4934 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4935 gsi_remove (&gsi, true);
4936
4937 gsi = gsi_last_bb (store_bb);
4938 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4939 gsi_remove (&gsi, true);
4940 gsi = gsi_last_bb (store_bb);
4941 gsi_remove (&gsi, true);
4942
4943 if (gimple_in_ssa_p (cfun))
4944 update_ssa (TODO_update_ssa_no_phi);
4945
4946 return true;
4947 }
4948
4949 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4950
4951 oldval = *addr;
4952 repeat:
4953 newval = rhs; // with oldval replacing *addr in rhs
4954 actual = __sync_val_compare_and_swap (addr, oldval, newval);
4955 if (actual != oldval)
4956 { oldval = actual; goto repeat; }
4957
4958 INDEX is log2 of the size of the data type, and thus usable to find the
4959 index of the builtin decl. */
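/* Editorial aside: the same loop as self-contained C for an int
   operand, where oldval + delta plays the role of RHS (a sketch;
   for floating-point operands the pass first view-converts the
   value to a same-sized integer, as done below):

     static void
     atomic_update_cas (int *addr, int delta)
     {
       int oldval = *addr;
       for (;;)
         {
           int newval = oldval + delta;
           int actual = __sync_val_compare_and_swap (addr, oldval,
                                                     newval);
           if (actual == oldval)
             break;
           oldval = actual;
         }
     }
   */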
4960
4961 static bool
4962 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4963 tree addr, tree loaded_val, tree stored_val,
4964 int index)
4965 {
4966 tree loadedi, storedi, initial, new_storedi, old_vali;
4967 tree type, itype, cmpxchg, iaddr;
4968 gimple_stmt_iterator si;
4969 basic_block loop_header = single_succ (load_bb);
4970 gimple phi, stmt;
4971 edge e;
4972
4973 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
4974 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4975 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
4976
4977 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
4978 return false;
4979
4980 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
4981 si = gsi_last_bb (load_bb);
4982 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
4983
4984 /* For floating-point values, we'll need to view-convert them to integers
4985 so that we can perform the atomic compare and swap. Simplify the
4986 following code by always setting up the "i"ntegral variables. */
4987 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
4988 {
4989 tree iaddr_val;
4990
4991 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
4992 iaddr_val
4993 = force_gimple_operand_gsi (&si,
4994 fold_convert (TREE_TYPE (iaddr), addr),
4995 false, NULL_TREE, true, GSI_SAME_STMT);
4996 stmt = gimple_build_assign (iaddr, iaddr_val);
4997 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4998 DECL_NO_TBAA_P (iaddr) = 1;
4999 DECL_POINTER_ALIAS_SET (iaddr) = 0;
5000 loadedi = create_tmp_var (itype, NULL);
5001 if (gimple_in_ssa_p (cfun))
5002 {
5003 add_referenced_var (iaddr);
5004 add_referenced_var (loadedi);
5005 loadedi = make_ssa_name (loadedi, NULL);
5006 }
5007 }
5008 else
5009 {
5010 iaddr = addr;
5011 loadedi = loaded_val;
5012 }
5013
5014 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
5015 true, NULL_TREE, true, GSI_SAME_STMT);
5016
5017 /* Move the value to the LOADEDI temporary. */
5018 if (gimple_in_ssa_p (cfun))
5019 {
5020 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5021 phi = create_phi_node (loadedi, loop_header);
5022 SSA_NAME_DEF_STMT (loadedi) = phi;
5023 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5024 initial);
5025 }
5026 else
5027 gsi_insert_before (&si,
5028 gimple_build_assign (loadedi, initial),
5029 GSI_SAME_STMT);
5030 if (loadedi != loaded_val)
5031 {
5032 gimple_stmt_iterator gsi2;
5033 tree x;
5034
5035 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5036 gsi2 = gsi_start_bb (loop_header);
5037 if (gimple_in_ssa_p (cfun))
5038 {
5039 gimple stmt;
5040 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5041 true, GSI_SAME_STMT);
5042 stmt = gimple_build_assign (loaded_val, x);
5043 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5044 }
5045 else
5046 {
5047 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5048 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5049 true, GSI_SAME_STMT);
5050 }
5051 }
5052 gsi_remove (&si, true);
5053
5054 si = gsi_last_bb (store_bb);
5055 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5056
5057 if (iaddr == addr)
5058 storedi = stored_val;
5059 else
5060 storedi =
5061 force_gimple_operand_gsi (&si,
5062 build1 (VIEW_CONVERT_EXPR, itype,
5063 stored_val), true, NULL_TREE, true,
5064 GSI_SAME_STMT);
5065
5066 /* Build the compare&swap statement. */
5067 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5068 new_storedi = force_gimple_operand_gsi (&si,
5069 fold_convert (itype, new_storedi),
5070 true, NULL_TREE,
5071 true, GSI_SAME_STMT);
5072
5073 if (gimple_in_ssa_p (cfun))
5074 old_vali = loadedi;
5075 else
5076 {
5077 old_vali = create_tmp_var (itype, NULL);
5078 if (gimple_in_ssa_p (cfun))
5079 add_referenced_var (old_vali);
5080 stmt = gimple_build_assign (old_vali, loadedi);
5081 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5082
5083 stmt = gimple_build_assign (loadedi, new_storedi);
5084 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5085 }
5086
5087 /* Note that we always perform the comparison as an integer, even for
5088 floating point. This allows the atomic operation to properly
5089 succeed even with NaNs and -0.0. */
5090 stmt = gimple_build_cond_empty
5091 (build2 (NE_EXPR, boolean_type_node,
5092 new_storedi, old_vali));
5093 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5094
5095 /* Update cfg. */
5096 e = single_succ_edge (store_bb);
5097 e->flags &= ~EDGE_FALLTHRU;
5098 e->flags |= EDGE_FALSE_VALUE;
5099
5100 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5101
5102 /* Copy the new value to loadedi (we already did that before the condition
5103 if we are not in SSA). */
5104 if (gimple_in_ssa_p (cfun))
5105 {
5106 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5107 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5108 }
5109
5110 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5111 gsi_remove (&si, true);
5112
5113 if (gimple_in_ssa_p (cfun))
5114 update_ssa (TODO_update_ssa_no_phi);
5115
5116 return true;
5117 }
5118
5119 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5120
5121 GOMP_atomic_start ();
5122 *addr = rhs;
5123 GOMP_atomic_end ();
5124
5125 The result is not globally atomic, but works so long as all parallel
5126 references are within #pragma omp atomic directives. According to
5127 responses received from omp@openmp.org, this appears to be within
5128 spec; it is also how several other compilers handle the
5129 situation.
5130 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5131 expanding. STORED_VAL is the operand of the matching
5132 GIMPLE_OMP_ATOMIC_STORE.
5133
5134 We replace
5135 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5136 loaded_val = *addr;
5137
5138 and replace
5139 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5140 *addr = stored_val;
5141 */
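/* Editorial aside: the fallback at the source level (a sketch; the
   extern declarations match the libgomp entry points used below,
   and OP is a hypothetical stand-in for the statements between the
   atomic load and the atomic store):

     extern void GOMP_atomic_start (void);
     extern void GOMP_atomic_end (void);

     static void
     atomic_update_mutex (double *addr, double (*op) (double))
     {
       GOMP_atomic_start ();
       double loaded_val = *addr;
       *addr = op (loaded_val);
       GOMP_atomic_end ();
     }
   */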
5142
5143 static bool
5144 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5145 tree addr, tree loaded_val, tree stored_val)
5146 {
5147 gimple_stmt_iterator si;
5148 gimple stmt;
5149 tree t;
5150
5151 si = gsi_last_bb (load_bb);
5152 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5153
5154 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5155 t = build_function_call_expr (t, 0);
5156 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5157
5158 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5159 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5160 gsi_remove (&si, true);
5161
5162 si = gsi_last_bb (store_bb);
5163 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5164
5165 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
5166 stored_val);
5167 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5168
5169 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5170 t = build_function_call_expr (t, 0);
5171 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5172 gsi_remove (&si, true);
5173
5174 if (gimple_in_ssa_p (cfun))
5175 update_ssa (TODO_update_ssa_no_phi);
5176 return true;
5177 }
5178
5179 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
5180 using expand_omp_atomic_fetch_op. If that fails, we try
5181 expand_omp_atomic_pipeline, and if that fails too, the
5182 ultimate fallback is wrapping the operation in a mutex
5183 (expand_omp_atomic_mutex). REGION is the atomic region built
5184 by build_omp_regions_1(). */
5185
5186 static void
5187 expand_omp_atomic (struct omp_region *region)
5188 {
5189 basic_block load_bb = region->entry, store_bb = region->exit;
5190 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5191 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5192 tree addr = gimple_omp_atomic_load_rhs (load);
5193 tree stored_val = gimple_omp_atomic_store_val (store);
5194 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5195 HOST_WIDE_INT index;
5196
5197 /* Make sure the type is one of the supported sizes. */
5198 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5199 index = exact_log2 (index);
5200 if (index >= 0 && index <= 4)
5201 {
5202 unsigned int align = TYPE_ALIGN_UNIT (type);
5203
5204 /* __sync builtins require strict data alignment. */
5205 if (exact_log2 (align) >= index)
5206 {
5207 /* When possible, use specialized atomic update functions. */
5208 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5209 && store_bb == single_succ (load_bb))
5210 {
5211 if (expand_omp_atomic_fetch_op (load_bb, addr,
5212 loaded_val, stored_val, index))
5213 return;
5214 }
5215
5216 /* If we don't have specialized __sync builtins, try to implement
5217 it as a compare-and-swap loop. */
5218 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5219 loaded_val, stored_val, index))
5220 return;
5221 }
5222 }
5223
5224 /* The ultimate fallback is wrapping the operation in a mutex. */
5225 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5226 }
5227
5228
5229 /* Expand the parallel region tree rooted at REGION. Expansion
5230 proceeds in depth-first order. Innermost regions are expanded
5231 first. This way, parallel regions that require a new function to
5232 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5233 internal dependencies in their body. */
5234
5235 static void
5236 expand_omp (struct omp_region *region)
5237 {
5238 while (region)
5239 {
5240 location_t saved_location;
5241
5242 /* First, determine whether this is a combined parallel+workshare
5243 region. */
5244 if (region->type == GIMPLE_OMP_PARALLEL)
5245 determine_parallel_type (region);
5246
5247 if (region->inner)
5248 expand_omp (region->inner);
5249
5250 saved_location = input_location;
5251 if (gimple_has_location (last_stmt (region->entry)))
5252 input_location = gimple_location (last_stmt (region->entry));
5253
5254 switch (region->type)
5255 {
5256 case GIMPLE_OMP_PARALLEL:
5257 case GIMPLE_OMP_TASK:
5258 expand_omp_taskreg (region);
5259 break;
5260
5261 case GIMPLE_OMP_FOR:
5262 expand_omp_for (region);
5263 break;
5264
5265 case GIMPLE_OMP_SECTIONS:
5266 expand_omp_sections (region);
5267 break;
5268
5269 case GIMPLE_OMP_SECTION:
5270 /* Individual omp sections are handled together with their
5271 parent GIMPLE_OMP_SECTIONS region. */
5272 break;
5273
5274 case GIMPLE_OMP_SINGLE:
5275 expand_omp_single (region);
5276 break;
5277
5278 case GIMPLE_OMP_MASTER:
5279 case GIMPLE_OMP_ORDERED:
5280 case GIMPLE_OMP_CRITICAL:
5281 expand_omp_synch (region);
5282 break;
5283
5284 case GIMPLE_OMP_ATOMIC_LOAD:
5285 expand_omp_atomic (region);
5286 break;
5287
5288 default:
5289 gcc_unreachable ();
5290 }
5291
5292 input_location = saved_location;
5293 region = region->next;
5294 }
5295 }
5296
5297
5298 /* Helper for build_omp_regions. Scan the dominator tree starting at
5299 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5300 true, the function ends once a single tree is built (otherwise, a
5301 whole forest of OMP constructs may be built). */
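/* Editorial aside: for input such as

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++)
         body (i);
     }

   (body () being a hypothetical stand-in), the walk below yields
   the region tree

     parallel
       for

   where each region's exit is set at its matching GIMPLE_OMP_RETURN
   and its cont at the GIMPLE_OMP_CONTINUE.  */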
5302
5303 static void
5304 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5305 bool single_tree)
5306 {
5307 gimple_stmt_iterator gsi;
5308 gimple stmt;
5309 basic_block son;
5310
5311 gsi = gsi_last_bb (bb);
5312 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5313 {
5314 struct omp_region *region;
5315 enum gimple_code code;
5316
5317 stmt = gsi_stmt (gsi);
5318 code = gimple_code (stmt);
5319 if (code == GIMPLE_OMP_RETURN)
5320 {
5321 /* STMT is the return point out of region PARENT. Mark it
5322 as the exit point and make PARENT the immediately
5323 enclosing region. */
5324 gcc_assert (parent);
5325 region = parent;
5326 region->exit = bb;
5327 parent = parent->outer;
5328 }
5329 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5330 {
5331 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5332 GIMPLE_OMP_RETURN, but matches with
5333 GIMPLE_OMP_ATOMIC_LOAD. */
5334 gcc_assert (parent);
5335 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5336 region = parent;
5337 region->exit = bb;
5338 parent = parent->outer;
5339 }
5340
5341 else if (code == GIMPLE_OMP_CONTINUE)
5342 {
5343 gcc_assert (parent);
5344 parent->cont = bb;
5345 }
5346 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5347 {
5348 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5349 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5350 ;
5351 }
5352 else
5353 {
5354 /* Otherwise, this directive becomes the parent for a new
5355 region. */
5356 region = new_omp_region (bb, code, parent);
5357 parent = region;
5358 }
5359 }
5360
5361 if (single_tree && !parent)
5362 return;
5363
5364 for (son = first_dom_son (CDI_DOMINATORS, bb);
5365 son;
5366 son = next_dom_son (CDI_DOMINATORS, son))
5367 build_omp_regions_1 (son, parent, single_tree);
5368 }
5369
5370 /* Builds the tree of OMP regions rooted at ROOT, storing it in
5371 root_omp_region. */
5372
5373 static void
5374 build_omp_regions_root (basic_block root)
5375 {
5376 gcc_assert (root_omp_region == NULL);
5377 build_omp_regions_1 (root, NULL, true);
5378 gcc_assert (root_omp_region != NULL);
5379 }
5380
5381 /* Expands the OMP construct (and its subconstructs) starting at HEAD. */
5382
5383 void
5384 omp_expand_local (basic_block head)
5385 {
5386 build_omp_regions_root (head);
5387 if (dump_file && (dump_flags & TDF_DETAILS))
5388 {
5389 fprintf (dump_file, "\nOMP region tree\n\n");
5390 dump_omp_region (dump_file, root_omp_region, 0);
5391 fprintf (dump_file, "\n");
5392 }
5393
5394 remove_exit_barriers (root_omp_region);
5395 expand_omp (root_omp_region);
5396
5397 free_omp_regions ();
5398 }
5399
5400 /* Scan the CFG and build a tree of OMP regions, storing its root in
5401 root_omp_region. */
5402
5403 static void
5404 build_omp_regions (void)
5405 {
5406 gcc_assert (root_omp_region == NULL);
5407 calculate_dominance_info (CDI_DOMINATORS);
5408 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5409 }
5410
5411 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5412
5413 static unsigned int
5414 execute_expand_omp (void)
5415 {
5416 build_omp_regions ();
5417
5418 if (!root_omp_region)
5419 return 0;
5420
5421 if (dump_file)
5422 {
5423 fprintf (dump_file, "\nOMP region tree\n\n");
5424 dump_omp_region (dump_file, root_omp_region, 0);
5425 fprintf (dump_file, "\n");
5426 }
5427
5428 remove_exit_barriers (root_omp_region);
5429
5430 expand_omp (root_omp_region);
5431
5432 cleanup_tree_cfg ();
5433
5434 free_omp_regions ();
5435
5436 return 0;
5437 }
5438
5439 /* OMP expansion -- the default pass, run before creation of SSA form. */
5440
5441 static bool
5442 gate_expand_omp (void)
5443 {
5444 return (flag_openmp != 0 && errorcount == 0);
5445 }
5446
5447 struct gimple_opt_pass pass_expand_omp =
5448 {
5449 {
5450 GIMPLE_PASS,
5451 "ompexp", /* name */
5452 gate_expand_omp, /* gate */
5453 execute_expand_omp, /* execute */
5454 NULL, /* sub */
5455 NULL, /* next */
5456 0, /* static_pass_number */
5457 0, /* tv_id */
5458 PROP_gimple_any, /* properties_required */
5459 0, /* properties_provided */
5460 0, /* properties_destroyed */
5461 0, /* todo_flags_start */
5462 TODO_dump_func /* todo_flags_finish */
5463 }
5464 };
5465 \f
5466 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5467
5468 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5469 CTX is the enclosing OMP context for the current statement. */
5470
5471 static void
5472 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5473 {
5474 tree block, control;
5475 gimple_stmt_iterator tgsi;
5476 unsigned i, len;
5477 gimple stmt, new_stmt, bind, t;
5478 gimple_seq ilist, dlist, olist, new_body, body;
5479 struct gimplify_ctx gctx;
5480
5481 stmt = gsi_stmt (*gsi_p);
5482
5483 push_gimplify_context (&gctx);
5484
5485 dlist = NULL;
5486 ilist = NULL;
5487 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5488 &ilist, &dlist, ctx);
5489
5490 tgsi = gsi_start (gimple_omp_body (stmt));
5491 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5492 continue;
5493
5494 tgsi = gsi_start (gimple_omp_body (stmt));
5495 body = NULL;
5496 for (i = 0; i < len; i++, gsi_next (&tgsi))
5497 {
5498 omp_context *sctx;
5499 gimple sec_start;
5500
5501 sec_start = gsi_stmt (tgsi);
5502 sctx = maybe_lookup_ctx (sec_start);
5503 gcc_assert (sctx);
5504
5505 gimple_seq_add_stmt (&body, sec_start);
5506
5507 lower_omp (gimple_omp_body (sec_start), sctx);
5508 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5509 gimple_omp_set_body (sec_start, NULL);
5510
5511 if (i == len - 1)
5512 {
5513 gimple_seq l = NULL;
5514 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5515 &l, ctx);
5516 gimple_seq_add_seq (&body, l);
5517 gimple_omp_section_set_last (sec_start);
5518 }
5519
5520 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5521 }
5522
5523 block = make_node (BLOCK);
5524 bind = gimple_build_bind (NULL, body, block);
5525
5526 olist = NULL;
5527 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5528
5529 block = make_node (BLOCK);
5530 new_stmt = gimple_build_bind (NULL, NULL, block);
5531
5532 pop_gimplify_context (new_stmt);
5533 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5534 BLOCK_VARS (block) = gimple_bind_vars (bind);
5535 if (BLOCK_VARS (block))
5536 TREE_USED (block) = 1;
5537
5538 new_body = NULL;
5539 gimple_seq_add_seq (&new_body, ilist);
5540 gimple_seq_add_stmt (&new_body, stmt);
5541 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5542 gimple_seq_add_stmt (&new_body, bind);
5543
5544 control = create_tmp_var (unsigned_type_node, ".section");
5545 t = gimple_build_omp_continue (control, control);
5546 gimple_omp_sections_set_control (stmt, control);
5547 gimple_seq_add_stmt (&new_body, t);
5548
5549 gimple_seq_add_seq (&new_body, olist);
5550 gimple_seq_add_seq (&new_body, dlist);
5551
5552 new_body = maybe_catch_exception (new_body);
5553
5554 t = gimple_build_omp_return
5555 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5556 OMP_CLAUSE_NOWAIT));
5557 gimple_seq_add_stmt (&new_body, t);
5558
5559 gimple_bind_set_body (new_stmt, new_body);
5560 gimple_omp_set_body (stmt, NULL);
5561
5562 gsi_replace (gsi_p, new_stmt, true);
5563 }
5564
5565
5566 /* A subroutine of lower_omp_single. Expand the simple form of
5567 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5568
5569 if (GOMP_single_start ())
5570 BODY;
5571 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5572
5573 FIXME. It may be better to delay expanding this logic until
5574 pass_expand_omp. The expanded logic may make the job of a
5575 synchronization analysis pass more difficult. */
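/* Concretely, the sequence emitted below is label-based rather than a
   structured `if' (the temporary and label names are illustrative):

	.t = GOMP_single_start ();
	if (.t == true) goto tlabel; else goto flabel;
     tlabel:
	BODY;
     flabel:  */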
5576
5577 static void
5578 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5579 {
5580 tree tlabel = create_artificial_label ();
5581 tree flabel = create_artificial_label ();
5582 gimple call, cond;
5583 tree lhs, decl;
5584
5585 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5586 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5587 call = gimple_build_call (decl, 0);
5588 gimple_call_set_lhs (call, lhs);
5589 gimple_seq_add_stmt (pre_p, call);
5590
5591 cond = gimple_build_cond (EQ_EXPR, lhs,
5592 fold_convert (TREE_TYPE (lhs), boolean_true_node),
5593 tlabel, flabel);
5594 gimple_seq_add_stmt (pre_p, cond);
5595 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5596 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5597 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5598 }
5599
5600
5601 /* A subroutine of lower_omp_single. Expand the form of a
5602 GIMPLE_OMP_SINGLE that carries a copyprivate clause:
5603
5604 #pragma omp single copyprivate (a, b, c)
5605
5606 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5607
5608 {
5609 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5610 {
5611 BODY;
5612 copyout.a = a;
5613 copyout.b = b;
5614 copyout.c = c;
5615 GOMP_single_copy_end (&copyout);
5616 }
5617 else
5618 {
5619 a = copyout_p->a;
5620 b = copyout_p->b;
5621 c = copyout_p->c;
5622 }
5623 GOMP_barrier ();
5624 }
5625
5626 FIXME. It may be better to delay expanding this logic until
5627 pass_expand_omp. The expanded logic may make the job of a
5628 synchronization analysis pass more difficult. */
5629
5630 static void
5631 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5632 {
5633 tree ptr_type, t, l0, l1, l2;
5634 gimple_seq copyin_seq;
5635
5636 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5637
5638 ptr_type = build_pointer_type (ctx->record_type);
5639 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5640
5641 l0 = create_artificial_label ();
5642 l1 = create_artificial_label ();
5643 l2 = create_artificial_label ();
5644
5645 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5646 t = fold_convert (ptr_type, t);
5647 gimplify_assign (ctx->receiver_decl, t, pre_p);
5648
5649 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5650 build_int_cst (ptr_type, 0));
5651 t = build3 (COND_EXPR, void_type_node, t,
5652 build_and_jump (&l0), build_and_jump (&l1));
5653 gimplify_and_add (t, pre_p);
5654
5655 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5656
5657 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5658
5659 copyin_seq = NULL;
5660 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5661 &copyin_seq, ctx);
5662
5663 t = build_fold_addr_expr (ctx->sender_decl);
5664 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
5665 gimplify_and_add (t, pre_p);
5666
5667 t = build_and_jump (&l2);
5668 gimplify_and_add (t, pre_p);
5669
5670 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5671
5672 gimple_seq_add_seq (pre_p, copyin_seq);
5673
5674 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5675 }
5676
5677
5678 /* Lower code for an OpenMP single directive. */
5679
5680 static void
5681 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5682 {
5683 tree block;
5684 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5685 gimple_seq bind_body, dlist;
5686 struct gimplify_ctx gctx;
5687
5688 push_gimplify_context (&gctx);
5689
5690 bind_body = NULL;
5691 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5692 &bind_body, &dlist, ctx);
5693 lower_omp (gimple_omp_body (single_stmt), ctx);
5694
5695 gimple_seq_add_stmt (&bind_body, single_stmt);
5696
5697 if (ctx->record_type)
5698 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5699 else
5700 lower_omp_single_simple (single_stmt, &bind_body);
5701
5702 gimple_omp_set_body (single_stmt, NULL);
5703
5704 gimple_seq_add_seq (&bind_body, dlist);
5705
5706 bind_body = maybe_catch_exception (bind_body);
5707
5708 t = gimple_build_omp_return
5709 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5710 OMP_CLAUSE_NOWAIT));
5711 gimple_seq_add_stmt (&bind_body, t);
5712
5713 block = make_node (BLOCK);
5714 bind = gimple_build_bind (NULL, bind_body, block);
5715
5716 pop_gimplify_context (bind);
5717
5718 gimple_bind_append_vars (bind, ctx->block_vars);
5719 BLOCK_VARS (block) = ctx->block_vars;
5720 gsi_replace (gsi_p, bind, true);
5721 if (BLOCK_VARS (block))
5722 TREE_USED (block) = 1;
5723 }
5724
5725
5726 /* Lower code for an OpenMP master directive. */
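/* Roughly, the master construct lowers to (LAB is an artificial label):

	if (omp_get_thread_num () != 0) goto lab;
	BODY;
     lab:
	GIMPLE_OMP_RETURN (nowait)	<-- master has no implied barrier  */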
5727
5728 static void
5729 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5730 {
5731 tree block, lab = NULL, x;
5732 gimple stmt = gsi_stmt (*gsi_p), bind;
5733 gimple_seq tseq;
5734 struct gimplify_ctx gctx;
5735
5736 push_gimplify_context (&gctx);
5737
5738 block = make_node (BLOCK);
5739 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5740 block);
5741
5742 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5743 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5744 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5745 tseq = NULL;
5746 gimplify_and_add (x, &tseq);
5747 gimple_bind_add_seq (bind, tseq);
5748
5749 lower_omp (gimple_omp_body (stmt), ctx);
5750 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5751 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5752 gimple_omp_set_body (stmt, NULL);
5753
5754 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5755
5756 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5757
5758 pop_gimplify_context (bind);
5759
5760 gimple_bind_append_vars (bind, ctx->block_vars);
5761 BLOCK_VARS (block) = ctx->block_vars;
5762 gsi_replace (gsi_p, bind, true);
5763 }
5764
5765
5766 /* Lower code for an OpenMP ordered directive. */
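/* Roughly, the ordered construct lowers to:

	GOMP_ordered_start ();
	BODY;
	GOMP_ordered_end ();
	GIMPLE_OMP_RETURN (nowait)  */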
5767
5768 static void
5769 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5770 {
5771 tree block;
5772 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5773 struct gimplify_ctx gctx;
5774
5775 push_gimplify_context (&gctx);
5776
5777 block = make_node (BLOCK);
5778 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5779 block);
5780
5781 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5782 gimple_bind_add_stmt (bind, x);
5783
5784 lower_omp (gimple_omp_body (stmt), ctx);
5785 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5786 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5787 gimple_omp_set_body (stmt, NULL);
5788
5789 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5790 gimple_bind_add_stmt (bind, x);
5791
5792 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5793
5794 pop_gimplify_context (bind);
5795
5796 gimple_bind_append_vars (bind, ctx->block_vars);
5797 BLOCK_VARS (block) = gimple_bind_vars (bind);
5798 gsi_replace (gsi_p, bind, true);
5799 }
5800
5801
5802 /* Lower a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5803 substitution of a couple of function calls, but the NAMED case
5804 requires that the front ends coordinate a symbol name. It is
5805 therefore best handled here in common code. */
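/* Roughly, the two forms lower to:

	unnamed:  GOMP_critical_start (); BODY; GOMP_critical_end ();
	named:	  GOMP_critical_name_start (&.gomp_critical_user_NAME);
		  BODY;
		  GOMP_critical_name_end (&.gomp_critical_user_NAME);

   The per-name mutex symbol is created once, TREE_PUBLIC and DECL_COMMON,
   so that every translation unit using the same name shares one lock.  */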
5806
5807 static GTY((param1_is (tree), param2_is (tree)))
5808 splay_tree critical_name_mutexes;
5809
5810 static void
5811 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5812 {
5813 tree block;
5814 tree name, lock, unlock;
5815 gimple stmt = gsi_stmt (*gsi_p), bind;
5816 gimple_seq tbody;
5817 struct gimplify_ctx gctx;
5818
5819 name = gimple_omp_critical_name (stmt);
5820 if (name)
5821 {
5822 tree decl;
5823 splay_tree_node n;
5824
5825 if (!critical_name_mutexes)
5826 critical_name_mutexes
5827 = splay_tree_new_ggc (splay_tree_compare_pointers);
5828
5829 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5830 if (n == NULL)
5831 {
5832 char *new_str;
5833
5834 decl = create_tmp_var_raw (ptr_type_node, NULL);
5835
5836 new_str = ACONCAT ((".gomp_critical_user_",
5837 IDENTIFIER_POINTER (name), NULL));
5838 DECL_NAME (decl) = get_identifier (new_str);
5839 TREE_PUBLIC (decl) = 1;
5840 TREE_STATIC (decl) = 1;
5841 DECL_COMMON (decl) = 1;
5842 DECL_ARTIFICIAL (decl) = 1;
5843 DECL_IGNORED_P (decl) = 1;
5844 varpool_finalize_decl (decl);
5845
5846 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5847 (splay_tree_value) decl);
5848 }
5849 else
5850 decl = (tree) n->value;
5851
5852 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5853 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
5854
5855 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5856 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
5857 }
5858 else
5859 {
5860 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5861 lock = build_call_expr (lock, 0);
5862
5863 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5864 unlock = build_call_expr (unlock, 0);
5865 }
5866
5867 push_gimplify_context (&gctx);
5868
5869 block = make_node (BLOCK);
5870 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5871
5872 tbody = gimple_bind_body (bind);
5873 gimplify_and_add (lock, &tbody);
5874 gimple_bind_set_body (bind, tbody);
5875
5876 lower_omp (gimple_omp_body (stmt), ctx);
5877 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5878 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5879 gimple_omp_set_body (stmt, NULL);
5880
5881 tbody = gimple_bind_body (bind);
5882 gimplify_and_add (unlock, &tbody);
5883 gimple_bind_set_body (bind, tbody);
5884
5885 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5886
5887 pop_gimplify_context (bind);
5888 gimple_bind_append_vars (bind, ctx->block_vars);
5889 BLOCK_VARS (block) = gimple_bind_vars (bind);
5890 gsi_replace (gsi_p, bind, true);
5891 }
5892
5893
5894 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5895 for a lastprivate clause. Given a loop control predicate of (V
5896 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5897 is appended to *DLIST; the iterator initialization is appended to
5898 *BODY_P. */
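/* For example, for the canonical loop

	#pragma omp for lastprivate (x)
	for (V = N1; V < N2; V += 1)

   the clause is gated on (V >= N2); since the step is +1, this is
   strengthened below to the strict test (V == N2), and V is
   pre-initialized so that threads receiving no iterations do not run
   the lastprivate copies by accident.  */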
5899
5900 static void
5901 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5902 gimple_seq *dlist, struct omp_context *ctx)
5903 {
5904 tree clauses, cond, vinit;
5905 enum tree_code cond_code;
5906 gimple_seq stmts;
5907
5908 cond_code = fd->loop.cond_code;
5909 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5910
5911 /* When possible, use a strict equality expression. This lets
5912 VRP-style optimizations deduce the value and remove a copy. */
5913 if (host_integerp (fd->loop.step, 0))
5914 {
5915 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5916 if (step == 1 || step == -1)
5917 cond_code = EQ_EXPR;
5918 }
5919
5920 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5921
5922 clauses = gimple_omp_for_clauses (fd->for_stmt);
5923 stmts = NULL;
5924 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5925 if (!gimple_seq_empty_p (stmts))
5926 {
5927 gimple_seq_add_seq (&stmts, *dlist);
5928 *dlist = stmts;
5929
5930 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
5931 vinit = fd->loop.n1;
5932 if (cond_code == EQ_EXPR
5933 && host_integerp (fd->loop.n2, 0)
5934 && ! integer_zerop (fd->loop.n2))
5935 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
5936
5937 /* Initialize the iterator variable, so that threads that don't execute
5938 any iterations don't execute the lastprivate clauses by accident. */
5939 gimplify_assign (fd->loop.v, vinit, body_p);
5940 }
5941 }
5942
5943
5944 /* Lower code for an OpenMP loop directive. */
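/* The emitted layout is approximately (names illustrative):

	<body: input clause setup>
	<pre-body>
	<lowered VAL1, VAL2, VAL3 temporaries>
	GIMPLE_OMP_FOR <clauses, header>	<-- kept for pass_expand_omp
	  BODY
	GIMPLE_OMP_CONTINUE (V, V)
	<reduction clauses>
	<dlist: destructors and lastprivate>
	GIMPLE_OMP_RETURN (nowait?)  */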
5945
5946 static void
5947 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5948 {
5949 tree *rhs_p, block;
5950 struct omp_for_data fd;
5951 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
5952 gimple_seq omp_for_body, body, dlist, ilist;
5953 size_t i;
5954 struct gimplify_ctx gctx;
5955
5956 push_gimplify_context (&gctx);
5957
5958 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
5959 lower_omp (gimple_omp_body (stmt), ctx);
5960
5961 block = make_node (BLOCK);
5962 new_stmt = gimple_build_bind (NULL, NULL, block);
5963
5964 /* Move the declarations of temporaries in the loop body up into the
5965 new bind, before the loop body itself goes away. */
5966 omp_for_body = gimple_omp_body (stmt);
5967 if (!gimple_seq_empty_p (omp_for_body)
5968 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
5969 {
5970 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
5971 gimple_bind_append_vars (new_stmt, vars);
5972 }
5973
5974 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
5975 ilist = NULL;
5976 dlist = NULL;
5977 body = NULL;
5978 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
5979 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
5980
5981 /* Lower the header expressions. At this point, we can assume that
5982 the header is of the form:
5983
5984 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
5985
5986 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
5987 using the .omp_data_s mapping, if needed. */
5988 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
5989 {
5990 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
5991 if (!is_gimple_min_invariant (*rhs_p))
5992 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
5993
5994 rhs_p = gimple_omp_for_final_ptr (stmt, i);
5995 if (!is_gimple_min_invariant (*rhs_p))
5996 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
5997
5998 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
5999 if (!is_gimple_min_invariant (*rhs_p))
6000 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6001 }
6002
6003 /* Once lowered, extract the bounds and clauses. */
6004 extract_omp_for_data (stmt, &fd, NULL);
6005
6006 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6007
6008 gimple_seq_add_stmt (&body, stmt);
6009 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6010
6011 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6012 fd.loop.v));
6013
6014 /* After the loop, add exit clauses. */
6015 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6016 gimple_seq_add_seq (&body, dlist);
6017
6018 body = maybe_catch_exception (body);
6019
6020 /* Region exit marker goes at the end of the loop body. */
6021 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6022
6023 pop_gimplify_context (new_stmt);
6024
6025 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6026 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6027 if (BLOCK_VARS (block))
6028 TREE_USED (block) = 1;
6029
6030 gimple_bind_set_body (new_stmt, body);
6031 gimple_omp_set_body (stmt, NULL);
6032 gimple_omp_for_set_pre_body (stmt, NULL);
6033 gsi_replace (gsi_p, new_stmt, true);
6034 }
6035
6036 /* Callback for walk_stmts. Check whether the statement sequence contains
6037 exactly one GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS and nothing else. */
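/* For instance, in

	#pragma omp parallel
	#pragma omp for
	for (...) ...

   the walk finds exactly one workshare (*info ends up 1), and the caller
   then marks the parallel as combined; any other statement forces -1.  */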
6038
6039 static tree
6040 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6041 bool *handled_ops_p,
6042 struct walk_stmt_info *wi)
6043 {
6044 int *info = (int *) wi->info;
6045 gimple stmt = gsi_stmt (*gsi_p);
6046
6047 *handled_ops_p = true;
6048 switch (gimple_code (stmt))
6049 {
6050 WALK_SUBSTMTS;
6051
6052 case GIMPLE_OMP_FOR:
6053 case GIMPLE_OMP_SECTIONS:
6054 *info = *info == 0 ? 1 : -1;
6055 break;
6056 default:
6057 *info = -1;
6058 break;
6059 }
6060 return NULL;
6061 }
6062
6063 struct omp_taskcopy_context
6064 {
6065 /* This field must be at the beginning, as we do "inheritance": Some
6066 callback functions for tree-inline.c (e.g., omp_copy_decl)
6067 receive a copy_body_data pointer that is up-casted to an
6068 omp_context pointer. */
6069 copy_body_data cb;
6070 omp_context *ctx;
6071 };
6072
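/* copy_body_data::copy_decl callback used by create_task_copyfn: variables
   that live in the task's firstprivate record get a fresh temporary;
   everything else is returned unchanged.  */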
6073 static tree
6074 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6075 {
6076 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6077
6078 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6079 return create_tmp_var (TREE_TYPE (var), NULL);
6080
6081 return var;
6082 }
6083
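/* Build a copy of record type ORIG_TYPE with each field's (possibly
   variably modified) type remapped into TCCTX's destination function.  */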
6084 static tree
6085 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6086 {
6087 tree name, new_fields = NULL, type, f;
6088
6089 type = lang_hooks.types.make_type (RECORD_TYPE);
6090 name = DECL_NAME (TYPE_NAME (orig_type));
6091 name = build_decl (TYPE_DECL, name, type);
6092 TYPE_NAME (type) = name;
6093
6094 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6095 {
6096 tree new_f = copy_node (f);
6097 DECL_CONTEXT (new_f) = type;
6098 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6099 TREE_CHAIN (new_f) = new_fields;
6100 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6101 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6102 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6103 &tcctx->cb, NULL);
6104 new_fields = new_f;
6105 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6106 }
6107 TYPE_FIELDS (type) = nreverse (new_fields);
6108 layout_type (type);
6109 return type;
6110 }
6111
6112 /* Create the task copy function: it copy-constructs a task's firstprivate data from the record filled in by the encountering thread. */
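/* Conceptually, the generated function is (struct and field names
   illustrative):

	void copyfn (struct .omp_data_s *dst, struct .omp_data_s1 *src)
	{
	  dst->shared_p = src->shared_p;     <-- shared: copy the pointer
	  dst->fp = copy-ctor (src->fp);     <-- firstprivate: copy the value
	  ... VLA firstprivates are copied last, and dst's pointer fields
	      are then fixed up to point at the fresh copies ...
	}  */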
6113
6114 static void
6115 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6116 {
6117 struct function *child_cfun;
6118 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6119 tree record_type, srecord_type, bind, list;
6120 bool record_needs_remap = false, srecord_needs_remap = false;
6121 splay_tree_node n;
6122 struct omp_taskcopy_context tcctx;
6123 struct gimplify_ctx gctx;
6124
6125 child_fn = gimple_omp_task_copy_fn (task_stmt);
6126 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6127 gcc_assert (child_cfun->cfg == NULL);
6128 child_cfun->dont_save_pending_sizes_p = 1;
6129 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6130
6131 /* Reset DECL_CONTEXT on function arguments. */
6132 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6133 DECL_CONTEXT (t) = child_fn;
6134
6135 /* Populate the function. */
6136 push_gimplify_context (&gctx);
6137 current_function_decl = child_fn;
6138
6139 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6140 TREE_SIDE_EFFECTS (bind) = 1;
6141 list = NULL;
6142 DECL_SAVED_TREE (child_fn) = bind;
6143 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6144
6145 /* Remap src and dst argument types if needed. */
6146 record_type = ctx->record_type;
6147 srecord_type = ctx->srecord_type;
6148 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6149 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6150 {
6151 record_needs_remap = true;
6152 break;
6153 }
6154 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6155 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6156 {
6157 srecord_needs_remap = true;
6158 break;
6159 }
6160
6161 if (record_needs_remap || srecord_needs_remap)
6162 {
6163 memset (&tcctx, '\0', sizeof (tcctx));
6164 tcctx.cb.src_fn = ctx->cb.src_fn;
6165 tcctx.cb.dst_fn = child_fn;
6166 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6167 tcctx.cb.dst_node = tcctx.cb.src_node;
6168 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6169 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6170 tcctx.cb.eh_region = -1;
6171 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6172 tcctx.cb.decl_map = pointer_map_create ();
6173 tcctx.ctx = ctx;
6174
6175 if (record_needs_remap)
6176 record_type = task_copyfn_remap_type (&tcctx, record_type);
6177 if (srecord_needs_remap)
6178 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6179 }
6180 else
6181 tcctx.cb.decl_map = NULL;
6182
6183 push_cfun (child_cfun);
6184
6185 arg = DECL_ARGUMENTS (child_fn);
6186 TREE_TYPE (arg) = build_pointer_type (record_type);
6187 sarg = TREE_CHAIN (arg);
6188 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6189
6190 /* First pass: initialize temporaries used in record_type and srecord_type
6191 sizes and field offsets. */
6192 if (tcctx.cb.decl_map)
6193 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6194 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6195 {
6196 tree *p;
6197
6198 decl = OMP_CLAUSE_DECL (c);
6199 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6200 if (p == NULL)
6201 continue;
6202 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6203 sf = (tree) n->value;
6204 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6205 src = build_fold_indirect_ref (sarg);
6206 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6207 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6208 append_to_statement_list (t, &list);
6209 }
6210
6211 /* Second pass: copy shared variable pointers and copy-construct
6212 non-VLA firstprivate variables. */
6213 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6214 switch (OMP_CLAUSE_CODE (c))
6215 {
6216 case OMP_CLAUSE_SHARED:
6217 decl = OMP_CLAUSE_DECL (c);
6218 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6219 if (n == NULL)
6220 break;
6221 f = (tree) n->value;
6222 if (tcctx.cb.decl_map)
6223 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6224 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6225 sf = (tree) n->value;
6226 if (tcctx.cb.decl_map)
6227 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6228 src = build_fold_indirect_ref (sarg);
6229 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6230 dst = build_fold_indirect_ref (arg);
6231 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6232 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6233 append_to_statement_list (t, &list);
6234 break;
6235 case OMP_CLAUSE_FIRSTPRIVATE:
6236 decl = OMP_CLAUSE_DECL (c);
6237 if (is_variable_sized (decl))
6238 break;
6239 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6240 if (n == NULL)
6241 break;
6242 f = (tree) n->value;
6243 if (tcctx.cb.decl_map)
6244 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6245 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6246 if (n != NULL)
6247 {
6248 sf = (tree) n->value;
6249 if (tcctx.cb.decl_map)
6250 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6251 src = build_fold_indirect_ref (sarg);
6252 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6253 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6254 src = build_fold_indirect_ref (src);
6255 }
6256 else
6257 src = decl;
6258 dst = build_fold_indirect_ref (arg);
6259 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6260 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6261 append_to_statement_list (t, &list);
6262 break;
6263 case OMP_CLAUSE_PRIVATE:
6264 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6265 break;
6266 decl = OMP_CLAUSE_DECL (c);
6267 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6268 f = (tree) n->value;
6269 if (tcctx.cb.decl_map)
6270 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6271 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6272 if (n != NULL)
6273 {
6274 sf = (tree) n->value;
6275 if (tcctx.cb.decl_map)
6276 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6277 src = build_fold_indirect_ref (sarg);
6278 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6279 if (use_pointer_for_field (decl, NULL))
6280 src = build_fold_indirect_ref (src);
6281 }
6282 else
6283 src = decl;
6284 dst = build_fold_indirect_ref (arg);
6285 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6286 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6287 append_to_statement_list (t, &list);
6288 break;
6289 default:
6290 break;
6291 }
6292
6293 /* Last pass: handle VLA firstprivates. */
6294 if (tcctx.cb.decl_map)
6295 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6296 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6297 {
6298 tree ind, ptr, df;
6299
6300 decl = OMP_CLAUSE_DECL (c);
6301 if (!is_variable_sized (decl))
6302 continue;
6303 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6304 if (n == NULL)
6305 continue;
6306 f = (tree) n->value;
6307 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6308 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6309 ind = DECL_VALUE_EXPR (decl);
6310 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6311 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6312 n = splay_tree_lookup (ctx->sfield_map,
6313 (splay_tree_key) TREE_OPERAND (ind, 0));
6314 sf = (tree) n->value;
6315 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6316 src = build_fold_indirect_ref (sarg);
6317 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6318 src = build_fold_indirect_ref (src);
6319 dst = build_fold_indirect_ref (arg);
6320 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6321 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6322 append_to_statement_list (t, &list);
6323 n = splay_tree_lookup (ctx->field_map,
6324 (splay_tree_key) TREE_OPERAND (ind, 0));
6325 df = (tree) n->value;
6326 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6327 ptr = build_fold_indirect_ref (arg);
6328 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6329 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6330 build_fold_addr_expr (dst));
6331 append_to_statement_list (t, &list);
6332 }
6333
6334 t = build1 (RETURN_EXPR, void_type_node, NULL);
6335 append_to_statement_list (t, &list);
6336
6337 if (tcctx.cb.decl_map)
6338 pointer_map_destroy (tcctx.cb.decl_map);
6339 pop_gimplify_context (NULL);
6340 BIND_EXPR_BODY (bind) = list;
6341 pop_cfun ();
6342 current_function_decl = ctx->cb.src_fn;
6343 }
6344
6345 /* Lower the OpenMP parallel or task directive in the current statement
6346 in GSI_P. CTX holds context information for the directive. */
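/* Schematically, the lowered result is (data names illustrative):

	.omp_data_o.i = i;			<-- ilist: send clauses
	GIMPLE_OMP_PARALLEL|TASK [child fn, data arg .omp_data_o]
	  {
	    .omp_data_i = &.omp_data_o;		<-- receiver setup
	    <par_ilist> BODY <par_olist>
	    GIMPLE_OMP_RETURN
	  }
	i = .omp_data_o.i;			<-- olist: receive back  */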
6347
6348 static void
6349 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6350 {
6351 tree clauses;
6352 tree child_fn, t;
6353 gimple stmt = gsi_stmt (*gsi_p);
6354 gimple par_bind, bind;
6355 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6356 struct gimplify_ctx gctx;
6357
6358 clauses = gimple_omp_taskreg_clauses (stmt);
6359 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6360 par_body = gimple_bind_body (par_bind);
6361 child_fn = ctx->cb.dst_fn;
6362 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6363 && !gimple_omp_parallel_combined_p (stmt))
6364 {
6365 struct walk_stmt_info wi;
6366 int ws_num = 0;
6367
6368 memset (&wi, 0, sizeof (wi));
6369 wi.info = &ws_num;
6370 wi.val_only = true;
6371 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6372 if (ws_num == 1)
6373 gimple_omp_parallel_set_combined_p (stmt, true);
6374 }
6375 if (ctx->srecord_type)
6376 create_task_copyfn (stmt, ctx);
6377
6378 push_gimplify_context (&gctx);
6379
6380 par_olist = NULL;
6381 par_ilist = NULL;
6382 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6383 lower_omp (par_body, ctx);
6384 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6385 lower_reduction_clauses (clauses, &par_olist, ctx);
6386
6387 /* Declare all the variables created by mapping and the variables
6388 declared in the scope of the parallel body. */
6389 record_vars_into (ctx->block_vars, child_fn);
6390 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6391
6392 if (ctx->record_type)
6393 {
6394 ctx->sender_decl
6395 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6396 : ctx->record_type, ".omp_data_o");
6397 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6398 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6399 }
6400
6401 olist = NULL;
6402 ilist = NULL;
6403 lower_send_clauses (clauses, &ilist, &olist, ctx);
6404 lower_send_shared_vars (&ilist, &olist, ctx);
6405
6406 /* Once all the expansions are done, sequence all the different
6407 fragments inside gimple_omp_body. */
6408
6409 new_body = NULL;
6410
6411 if (ctx->record_type)
6412 {
6413 t = build_fold_addr_expr (ctx->sender_decl);
6414 /* fixup_child_record_type might have changed receiver_decl's type. */
6415 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
6416 gimple_seq_add_stmt (&new_body,
6417 gimple_build_assign (ctx->receiver_decl, t));
6418 }
6419
6420 gimple_seq_add_seq (&new_body, par_ilist);
6421 gimple_seq_add_seq (&new_body, par_body);
6422 gimple_seq_add_seq (&new_body, par_olist);
6423 new_body = maybe_catch_exception (new_body);
6424 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6425 gimple_omp_set_body (stmt, new_body);
6426
6427 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6428 gimple_bind_add_stmt (bind, stmt);
6429 if (ilist || olist)
6430 {
6431 gimple_seq_add_stmt (&ilist, bind);
6432 gimple_seq_add_seq (&ilist, olist);
6433 bind = gimple_build_bind (NULL, ilist, NULL);
6434 }
6435
6436 gsi_replace (gsi_p, bind, true);
6437
6438 pop_gimplify_context (NULL);
6439 }
6440
6441 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6442 regimplified. If DATA is non-NULL, the caller is outside of any
6443 OpenMP context, but task_shared_vars is set. */
6444
6445 static tree
6446 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6447 void *data)
6448 {
6449 tree t = *tp;
6450
6451 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6452 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6453 return t;
6454
6455 if (task_shared_vars
6456 && DECL_P (t)
6457 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6458 return t;
6459
6460 /* If a global variable has been privatized, TREE_CONSTANT on
6461 ADDR_EXPR might be wrong. */
6462 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6463 recompute_tree_invariant_for_addr_expr (t);
6464
6465 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6466 return NULL_TREE;
6467 }
6468
6469 static void
6470 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6471 {
6472 gimple stmt = gsi_stmt (*gsi_p);
6473 struct walk_stmt_info wi;
6474
6475 if (gimple_has_location (stmt))
6476 input_location = gimple_location (stmt);
6477
6478 if (task_shared_vars)
6479 memset (&wi, '\0', sizeof (wi));
6480
6481 /* If we have issued syntax errors, avoid doing any heavy lifting.
6482 Just replace the OpenMP directives with a NOP to avoid
6483 confusing RTL expansion. */
6484 if (errorcount && is_gimple_omp (stmt))
6485 {
6486 gsi_replace (gsi_p, gimple_build_nop (), true);
6487 return;
6488 }
6489
6490 switch (gimple_code (stmt))
6491 {
6492 case GIMPLE_COND:
6493 if ((ctx || task_shared_vars)
6494 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6495 ctx ? NULL : &wi, NULL)
6496 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6497 ctx ? NULL : &wi, NULL)))
6498 gimple_regimplify_operands (stmt, gsi_p);
6499 break;
6500 case GIMPLE_CATCH:
6501 lower_omp (gimple_catch_handler (stmt), ctx);
6502 break;
6503 case GIMPLE_EH_FILTER:
6504 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6505 break;
6506 case GIMPLE_TRY:
6507 lower_omp (gimple_try_eval (stmt), ctx);
6508 lower_omp (gimple_try_cleanup (stmt), ctx);
6509 break;
6510 case GIMPLE_BIND:
6511 lower_omp (gimple_bind_body (stmt), ctx);
6512 break;
6513 case GIMPLE_OMP_PARALLEL:
6514 case GIMPLE_OMP_TASK:
6515 ctx = maybe_lookup_ctx (stmt);
6516 lower_omp_taskreg (gsi_p, ctx);
6517 break;
6518 case GIMPLE_OMP_FOR:
6519 ctx = maybe_lookup_ctx (stmt);
6520 gcc_assert (ctx);
6521 lower_omp_for (gsi_p, ctx);
6522 break;
6523 case GIMPLE_OMP_SECTIONS:
6524 ctx = maybe_lookup_ctx (stmt);
6525 gcc_assert (ctx);
6526 lower_omp_sections (gsi_p, ctx);
6527 break;
6528 case GIMPLE_OMP_SINGLE:
6529 ctx = maybe_lookup_ctx (stmt);
6530 gcc_assert (ctx);
6531 lower_omp_single (gsi_p, ctx);
6532 break;
6533 case GIMPLE_OMP_MASTER:
6534 ctx = maybe_lookup_ctx (stmt);
6535 gcc_assert (ctx);
6536 lower_omp_master (gsi_p, ctx);
6537 break;
6538 case GIMPLE_OMP_ORDERED:
6539 ctx = maybe_lookup_ctx (stmt);
6540 gcc_assert (ctx);
6541 lower_omp_ordered (gsi_p, ctx);
6542 break;
6543 case GIMPLE_OMP_CRITICAL:
6544 ctx = maybe_lookup_ctx (stmt);
6545 gcc_assert (ctx);
6546 lower_omp_critical (gsi_p, ctx);
6547 break;
6548 case GIMPLE_OMP_ATOMIC_LOAD:
6549 if ((ctx || task_shared_vars)
6550 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6551 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6552 gimple_regimplify_operands (stmt, gsi_p);
6553 break;
6554 default:
6555 if ((ctx || task_shared_vars)
6556 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6557 ctx ? NULL : &wi))
6558 gimple_regimplify_operands (stmt, gsi_p);
6559 break;
6560 }
6561 }
6562
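/* Lower all the OpenMP constructs in the statement sequence BODY. CTX is
   the innermost enclosing OMP context, or NULL outside of any construct.  */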
6563 static void
6564 lower_omp (gimple_seq body, omp_context *ctx)
6565 {
6566 location_t saved_location = input_location;
6567 gimple_stmt_iterator gsi;
6568 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6569 lower_omp_1 (&gsi, ctx);
6570 input_location = saved_location;
6571 }
6572 \f
6573 /* Main entry point. */
6574
6575 static unsigned int
6576 execute_lower_omp (void)
6577 {
6578 gimple_seq body;
6579
6580 /* This pass always runs, to provide PROP_gimple_lomp.
6581 But there is nothing to do unless -fopenmp is given. */
6582 if (flag_openmp == 0)
6583 return 0;
6584
6585 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6586 delete_omp_context);
6587
6588 body = gimple_body (current_function_decl);
6589 scan_omp (body, NULL);
6590 gcc_assert (taskreg_nesting_level == 0);
6591
6592 if (all_contexts->root)
6593 {
6594 struct gimplify_ctx gctx;
6595
6596 if (task_shared_vars)
6597 push_gimplify_context (&gctx);
6598 lower_omp (body, NULL);
6599 if (task_shared_vars)
6600 pop_gimplify_context (NULL);
6601 }
6602
6603 if (all_contexts)
6604 {
6605 splay_tree_delete (all_contexts);
6606 all_contexts = NULL;
6607 }
6608 BITMAP_FREE (task_shared_vars);
6609 return 0;
6610 }
6611
6612 struct gimple_opt_pass pass_lower_omp =
6613 {
6614 {
6615 GIMPLE_PASS,
6616 "omplower", /* name */
6617 NULL, /* gate */
6618 execute_lower_omp, /* execute */
6619 NULL, /* sub */
6620 NULL, /* next */
6621 0, /* static_pass_number */
6622 0, /* tv_id */
6623 PROP_gimple_any, /* properties_required */
6624 PROP_gimple_lomp, /* properties_provided */
6625 0, /* properties_destroyed */
6626 0, /* todo_flags_start */
6627 TODO_dump_func /* todo_flags_finish */
6628 }
6629 };
6630 \f
6631 /* The following is a utility to diagnose OpenMP structured block violations.
6632 It is not part of the "omplower" pass, as that's invoked too late. It
6633 should be invoked by the respective front ends after gimplification. */
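/* For example, given

	#pragma omp parallel
	{ goto bad; }		<-- branch context: the parallel stmt
     bad:;			<-- label context: NULL

   pass 1 records each label's innermost construct and pass 2 compares it
   against the context of every goto/switch/return, diagnosing the jump
   above as an invalid branch from an OpenMP structured block.  */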
6634
6635 static splay_tree all_labels;
6636
6637 /* Check for mismatched contexts and generate an error if needed. Return
6638 true if an error is detected. */
6639
6640 static bool
6641 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6642 gimple branch_ctx, gimple label_ctx)
6643 {
6644 if (label_ctx == branch_ctx)
6645 return false;
6646
6647
6648 /*
6649 Previously we kept track of the label's entire context in diagnose_sb_[12]
6650 so we could traverse it and issue a correct "exit" or "enter" error
6651 message upon a structured block violation.
6652
6653 We built the context by building a list with tree_cons'ing, but there is
6654 no easy counterpart in gimple tuples. It seems like far too much work
6655 for issuing exit/enter error messages. If someone really misses the
6656 distinct error message... patches welcome.
6657 */
6658
6659 #if 0
6660 /* Try to avoid confusing the user by producing an error message
6661 with correct "exit" or "enter" verbiage. We prefer "exit"
6662 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6663 if (branch_ctx == NULL)
6664 exit_p = false;
6665 else
6666 {
6667 while (label_ctx)
6668 {
6669 if (TREE_VALUE (label_ctx) == branch_ctx)
6670 {
6671 exit_p = false;
6672 break;
6673 }
6674 label_ctx = TREE_CHAIN (label_ctx);
6675 }
6676 }
6677
6678 if (exit_p)
6679 error ("invalid exit from OpenMP structured block");
6680 else
6681 error ("invalid entry to OpenMP structured block");
6682 #endif
6683
6684 /* If it's obvious we have an invalid entry, be specific about the error. */
6685 if (branch_ctx == NULL)
6686 error ("invalid entry to OpenMP structured block");
6687 else
6688 /* Otherwise, be vague and lazy, but efficient. */
6689 error ("invalid branch to/from an OpenMP structured block");
6690
6691 gsi_replace (gsi_p, gimple_build_nop (), false);
6692 return true;
6693 }
6694
6695 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6696 where each label is found. */
6697
6698 static tree
6699 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6700 struct walk_stmt_info *wi)
6701 {
6702 gimple context = (gimple) wi->info;
6703 gimple inner_context;
6704 gimple stmt = gsi_stmt (*gsi_p);
6705
6706 *handled_ops_p = true;
6707
6708 switch (gimple_code (stmt))
6709 {
6710 WALK_SUBSTMTS;
6711
6712 case GIMPLE_OMP_PARALLEL:
6713 case GIMPLE_OMP_TASK:
6714 case GIMPLE_OMP_SECTIONS:
6715 case GIMPLE_OMP_SINGLE:
6716 case GIMPLE_OMP_SECTION:
6717 case GIMPLE_OMP_MASTER:
6718 case GIMPLE_OMP_ORDERED:
6719 case GIMPLE_OMP_CRITICAL:
6720 /* The minimal context here is just the current OMP construct. */
6721 inner_context = stmt;
6722 wi->info = inner_context;
6723 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6724 wi->info = context;
6725 break;
6726
6727 case GIMPLE_OMP_FOR:
6728 inner_context = stmt;
6729 wi->info = inner_context;
6730 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6731 walk them. */
6732 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6733 diagnose_sb_1, NULL, wi);
6734 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6735 wi->info = context;
6736 break;
6737
6738 case GIMPLE_LABEL:
6739 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6740 (splay_tree_value) context);
6741 break;
6742
6743 default:
6744 break;
6745 }
6746
6747 return NULL_TREE;
6748 }
6749
6750 /* Pass 2: Check each branch and see if its context differs from that of
6751 the destination label's context. */
6752
6753 static tree
6754 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6755 struct walk_stmt_info *wi)
6756 {
6757 gimple context = (gimple) wi->info;
6758 splay_tree_node n;
6759 gimple stmt = gsi_stmt (*gsi_p);
6760
6761 *handled_ops_p = true;
6762
6763 switch (gimple_code (stmt))
6764 {
6765 WALK_SUBSTMTS;
6766
6767 case GIMPLE_OMP_PARALLEL:
6768 case GIMPLE_OMP_TASK:
6769 case GIMPLE_OMP_SECTIONS:
6770 case GIMPLE_OMP_SINGLE:
6771 case GIMPLE_OMP_SECTION:
6772 case GIMPLE_OMP_MASTER:
6773 case GIMPLE_OMP_ORDERED:
6774 case GIMPLE_OMP_CRITICAL:
6775 wi->info = stmt;
6776 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6777 wi->info = context;
6778 break;
6779
6780 case GIMPLE_OMP_FOR:
6781 wi->info = stmt;
6782 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6783 walk them. */
6784 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6785 diagnose_sb_2, NULL, wi);
6786 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6787 wi->info = context;
6788 break;
6789
6790 case GIMPLE_GOTO:
6791 {
6792 tree lab = gimple_goto_dest (stmt);
6793 if (TREE_CODE (lab) != LABEL_DECL)
6794 break;
6795
6796 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6797 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6798 }
6799 break;
6800
6801 case GIMPLE_SWITCH:
6802 {
6803 unsigned int i;
6804 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6805 {
6806 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6807 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6808 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6809 break;
6810 }
6811 }
6812 break;
6813
6814 case GIMPLE_RETURN:
6815 diagnose_sb_0 (gsi_p, context, NULL);
6816 break;
6817
6818 default:
6819 break;
6820 }
6821
6822 return NULL_TREE;
6823 }
6824
6825 void
6826 diagnose_omp_structured_block_errors (tree fndecl)
6827 {
6828 tree save_current = current_function_decl;
6829 struct walk_stmt_info wi;
6830 struct function *old_cfun = cfun;
6831 gimple_seq body = gimple_body (fndecl);
6832
6833 current_function_decl = fndecl;
6834 set_cfun (DECL_STRUCT_FUNCTION (fndecl));
6835
6836 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6837
6838 memset (&wi, 0, sizeof (wi));
6839 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6840
6841 memset (&wi, 0, sizeof (wi));
6842 wi.want_locations = true;
6843 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6844
6845 splay_tree_delete (all_labels);
6846 all_labels = NULL;
6847
6848 set_cfun (old_cfun);
6849 current_function_decl = save_current;
6850 }
6851
6852 #include "gt-omp-low.h"